From 7dbe9b580021310d3c9d7fcf8cd2745791a680ce Mon Sep 17 00:00:00 2001
From: Guennadi Liakhovetski
Date: Tue, 6 Jan 2026 15:30:54 +0100
Subject: [PATCH] dp: switch the lock to use sys_sem

sys_sem semaphores don't invoke a syscall in the uncontended case.
Switch over to them to reduce the number of syscalls. Since sys_sem
objects don't have to be granted to user threads, this also lets us
drop scheduler_dp_grant().

Signed-off-by: Guennadi Liakhovetski
---
 src/schedule/zephyr_dp_schedule.c             | 29 +++++++++----------
 src/schedule/zephyr_dp_schedule.h             |  1 -
 src/schedule/zephyr_dp_schedule_application.c |  1 -
 src/schedule/zephyr_dp_schedule_thread.c      |  1 -
 4 files changed, 13 insertions(+), 19 deletions(-)

diff --git a/src/schedule/zephyr_dp_schedule.c b/src/schedule/zephyr_dp_schedule.c
index c0306f0f599e..467a9c2efec2 100644
--- a/src/schedule/zephyr_dp_schedule.c
+++ b/src/schedule/zephyr_dp_schedule.c
@@ -33,15 +33,18 @@ SOF_DEFINE_REG_UUID(dp_sched);
 
 DECLARE_TR_CTX(dp_tr, SOF_UUID(dp_sched_uuid), LOG_LEVEL_INFO);
 
-#define DP_LOCK_INIT(i, _) Z_SEM_INITIALIZER(dp_lock[i], 1, 1)
+#ifdef CONFIG_USERSPACE
+#define SYS_SEM_INITIALIZER(obj, n_init, n_max) {.futex = {n_init}, .limit = n_max}
+#else
+#define SYS_SEM_INITIALIZER(obj, n_init, n_max) {Z_SEM_INITIALIZER(obj.kernel_sem, 1, 1)}
+#endif
+
+#define DP_LOCK_INIT(i, _) SYS_SEM_INITIALIZER(dp_lock[i], 1, 1)
 #define DP_LOCK_INIT_LIST LISTIFY(CONFIG_MP_MAX_NUM_CPUS, DP_LOCK_INIT, (,))
 
-/* User threads don't need access to this array. Access is performed from
- * the kernel space via a syscall. Array must be placed in special section
- * to be qualified as initialized by the gen_kobject_list.py script.
- */
-static
-STRUCT_SECTION_ITERABLE_ARRAY(k_sem, dp_lock, CONFIG_MP_MAX_NUM_CPUS) = { DP_LOCK_INIT_LIST };
+static struct sys_sem dp_lock[CONFIG_MP_MAX_NUM_CPUS] = {
+	DP_LOCK_INIT_LIST
+};
 
 /* Each per-core instance of DP scheduler has separate structures; hence, locks are per-core.
  *
@@ -49,20 +52,14 @@ STRUCT_SECTION_ITERABLE_ARRAY(k_sem, dp_lock, CONFIG_MP_MAX_NUM_CPUS) = { DP_LOC
  */
 unsigned int scheduler_dp_lock(uint16_t core)
 {
-	k_sem_take(&dp_lock[core], K_FOREVER);
+	sys_sem_take(&dp_lock[core], K_FOREVER);
+
 	return core;
 }
 
 void scheduler_dp_unlock(unsigned int key)
 {
-	k_sem_give(&dp_lock[key]);
-}
-
-void scheduler_dp_grant(k_tid_t thread_id, uint16_t core)
-{
-#if CONFIG_USERSPACE
-	k_thread_access_grant(thread_id, &dp_lock[core]);
-#endif
+	sys_sem_give(&dp_lock[key]);
 }
 
 /* dummy LL task - to start LL on secondary cores */
diff --git a/src/schedule/zephyr_dp_schedule.h b/src/schedule/zephyr_dp_schedule.h
index 2ef6bde805c3..e550ecd3e6bc 100644
--- a/src/schedule/zephyr_dp_schedule.h
+++ b/src/schedule/zephyr_dp_schedule.h
@@ -55,7 +55,6 @@ void scheduler_dp_recalculate(struct scheduler_dp_data *dp_sch, bool is_ll_post_
 void dp_thread_fn(void *p1, void *p2, void *p3);
 unsigned int scheduler_dp_lock(uint16_t core);
 void scheduler_dp_unlock(unsigned int key);
-void scheduler_dp_grant(k_tid_t thread_id, uint16_t core);
 int scheduler_dp_task_init(struct task **task, const struct sof_uuid_entry *uid,
 			   const struct task_ops *ops, struct processing_module *mod,
 			   uint16_t core, size_t stack_size, uint32_t options);
diff --git a/src/schedule/zephyr_dp_schedule_application.c b/src/schedule/zephyr_dp_schedule_application.c
index 65f4c1ee9de1..22f192db3a3d 100644
--- a/src/schedule/zephyr_dp_schedule_application.c
+++ b/src/schedule/zephyr_dp_schedule_application.c
@@ -505,7 +505,6 @@ int scheduler_dp_task_init(struct task **task, const struct sof_uuid_entry *uid,
 
 #if CONFIG_USERSPACE
 	k_thread_access_grant(pdata->thread_id, pdata->sem, &dp_sync[core]);
-	scheduler_dp_grant(pdata->thread_id, core);
 
 	unsigned int pidx;
 	size_t size;
diff --git a/src/schedule/zephyr_dp_schedule_thread.c b/src/schedule/zephyr_dp_schedule_thread.c
index c9874155893c..d371e13bc281 100644
--- a/src/schedule/zephyr_dp_schedule_thread.c
+++ b/src/schedule/zephyr_dp_schedule_thread.c
@@ -270,7 +270,6 @@ int scheduler_dp_task_init(struct task **task,
 				 CONFIG_DP_THREAD_PRIORITY, (*task)->flags, K_FOREVER);
 
 	k_thread_access_grant(pdata->thread_id, pdata->event);
-	scheduler_dp_grant(pdata->thread_id, cpu_get_id());
 
 	/* pin the thread to specific core */
 	ret = k_thread_cpu_pin(pdata->thread_id, core);
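
A note on the fast path this change relies on: with CONFIG_USERSPACE,
Zephyr's sys_sem is backed by a futex in user-accessible memory, so
sys_sem_take()/sys_sem_give() complete without entering the kernel as
long as the semaphore is uncontended, whereas k_sem_take()/k_sem_give()
are syscalls on every call from a user thread. Without CONFIG_USERSPACE,
sys_sem simply wraps a k_sem. A minimal sketch of the locking pattern
the patch adopts is below; "my_lock" and "my_critical_section" are
illustrative names, not part of the patch:

#include <zephyr/kernel.h>
#include <zephyr/sys/sem.h>

static struct sys_sem my_lock;

void my_lock_setup(void)
{
	/* binary semaphore: initial count 1, maximum count 1 */
	sys_sem_init(&my_lock, 1, 1);
}

void my_critical_section(void)
{
	/* with CONFIG_USERSPACE: no syscall here unless the lock is contended */
	sys_sem_take(&my_lock, K_FOREVER);

	/* ... touch the shared per-core scheduler state ... */

	sys_sem_give(&my_lock);
}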
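
For reference, the two SYS_SEM_INITIALIZER variants defined in the patch
mirror the two layouts struct sys_sem has in Zephyr; the schematic below
uses the field names implied by the patch's initializers (see
include/zephyr/sys/sem.h in the Zephyr tree for the authoritative
definition):

struct sys_sem {
#ifdef CONFIG_USERSPACE
	struct k_futex futex;	/* user-mode fast path; kernel entered only on contention */
	int limit;		/* maximum count */
#else
	struct k_sem kernel_sem;	/* fallback: a plain kernel semaphore */
#endif
};

Where a static initializer is not wanted, the same locks could instead be
set up at runtime with sys_sem_init(&dp_lock[i], 1, 1) before first use.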