From c0be31d82431370176eefa9452c6097a4d7ac4ed Mon Sep 17 00:00:00 2001
From: claudio
Date: Tue, 18 Jul 2023 06:58:59 +0000
Subject: [PATCH] With the update of the sleep API, the Linux emulation of the
 wait API (schedule() and set_current_state()) can be implemented in a much
 less hacky way. This should remove some possible race conditions in the wait
 API.

Tested by many (kettenis, jsg, phessler, thfr)
OK kettenis@
---
 sys/dev/pci/drm/drm_linux.c           | 94 ++++++++++++++------------
 sys/dev/pci/drm/include/linux/wait.h  | 86 ++++++++++--------------
 sys/dev/pci/drm/radeon/radeon_fence.c |  1 -
 3 files changed, 86 insertions(+), 95 deletions(-)

diff --git a/sys/dev/pci/drm/drm_linux.c b/sys/dev/pci/drm/drm_linux.c
index 3b34d4285e3..bf4f465b717 100644
--- a/sys/dev/pci/drm/drm_linux.c
+++ b/sys/dev/pci/drm/drm_linux.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: drm_linux.c,v 1.100 2023/07/14 07:07:08 claudio Exp $ */
+/* $OpenBSD: drm_linux.c,v 1.101 2023/07/18 06:58:59 claudio Exp $ */
 /*
  * Copyright (c) 2013 Jonathan Gray
  * Copyright (c) 2015, 2016 Mark Kettenis
@@ -98,30 +98,30 @@ tasklet_run(void *arg)
 struct mutex atomic64_mtx = MUTEX_INITIALIZER(IPL_HIGH);
 #endif
 
-struct mutex sch_mtx = MUTEX_INITIALIZER(IPL_SCHED);
-volatile struct proc *sch_proc;
-volatile void *sch_ident;
-int sch_priority;
-
 void
 set_current_state(int state)
 {
-	if (sch_ident != curproc)
-		mtx_enter(&sch_mtx);
-	MUTEX_ASSERT_LOCKED(&sch_mtx);
-	sch_ident = sch_proc = curproc;
-	sch_priority = state;
+	int prio = state;
+
+	KASSERT(state != TASK_RUNNING);
+	/* check if already on the sleep list */
+	if (curproc->p_wchan != NULL)
+		return;
+	sleep_setup(curproc, prio, "schto");
 }
 
 void
 __set_current_state(int state)
 {
+	struct proc *p = curproc;
+	int s;
+
 	KASSERT(state == TASK_RUNNING);
-	if (sch_ident == curproc) {
-		MUTEX_ASSERT_LOCKED(&sch_mtx);
-		sch_ident = NULL;
-		mtx_leave(&sch_mtx);
-	}
+	SCHED_LOCK(s);
+	unsleep(p);
+	p->p_stat = SONPROC;
+	atomic_clearbits_int(&p->p_flag, P_WSLEEP);
+	SCHED_UNLOCK(s);
 }
 
 void
@@ -134,32 +134,18 @@ long
 schedule_timeout(long timeout)
 {
 	unsigned long deadline;
-	int wait, spl, prio, timo = 0;
+	int timo = 0;
 
-	MUTEX_ASSERT_LOCKED(&sch_mtx);
 	KASSERT(!cold);
 
 	if (timeout != MAX_SCHEDULE_TIMEOUT)
 		timo = timeout;
 
-	prio = sch_priority;
-	sleep_setup(sch_ident, prio, "schto");
-
-	wait = (sch_proc == curproc && timeout > 0);
-
-	spl = MUTEX_OLDIPL(&sch_mtx);
-	MUTEX_OLDIPL(&sch_mtx) = splsched();
-	mtx_leave(&sch_mtx);
-
 	if (timeout != MAX_SCHEDULE_TIMEOUT)
 		deadline = jiffies + timeout;
-	sleep_finish(timo, wait);
+	sleep_finish(timo, timeout > 0);
 	if (timeout != MAX_SCHEDULE_TIMEOUT)
 		timeout = deadline - jiffies;
-	mtx_enter(&sch_mtx);
-	MUTEX_OLDIPL(&sch_mtx) = spl;
-	sch_ident = curproc;
-
 	return timeout > 0 ? timeout : 0;
 }
 
@@ -176,12 +162,43 @@ wake_up_process(struct proc *p)
 	int s, rv;
 
 	SCHED_LOCK(s);
-	atomic_cas_ptr(&sch_proc, p, NULL);
 	rv = wakeup_proc(p, NULL, 0);
 	SCHED_UNLOCK(s);
 	return rv;
 }
 
+int
+autoremove_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
+    int sync, void *key)
+{
+	if (wqe->private)
+		wake_up_process(wqe->private);
+	list_del_init(&wqe->entry);
+	return 0;
+}
+
+void
+prepare_to_wait(wait_queue_head_t *wqh, wait_queue_entry_t *wqe, int state)
+{
+	mtx_enter(&wqh->lock);
+	if (list_empty(&wqe->entry))
+		__add_wait_queue(wqh, wqe);
+	mtx_leave(&wqh->lock);
+
+	set_current_state(state);
+}
+
+void
+finish_wait(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
+{
+	__set_current_state(TASK_RUNNING);
+
+	mtx_enter(&wqh->lock);
+	if (!list_empty(&wqe->entry))
+		list_del_init(&wqe->entry);
+	mtx_leave(&wqh->lock);
+}
+
 void
 flush_workqueue(struct workqueue_struct *wq)
 {
@@ -2622,17 +2639,6 @@ pcie_aspm_enabled(struct pci_dev *pdev)
 	return false;
 }
 
-int
-autoremove_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
-    int sync, void *key)
-{
-	wakeup(wqe);
-	if (wqe->private)
-		wake_up_process(wqe->private);
-	list_del_init(&wqe->entry);
-	return 0;
-}
-
 static wait_queue_head_t bit_waitq;
 wait_queue_head_t var_waitq;
 struct mutex wait_bit_mtx = MUTEX_INITIALIZER(IPL_TTY);
diff --git a/sys/dev/pci/drm/include/linux/wait.h b/sys/dev/pci/drm/include/linux/wait.h
index 758c0b1e5d5..e8b9f0c5c27 100644
--- a/sys/dev/pci/drm/include/linux/wait.h
+++ b/sys/dev/pci/drm/include/linux/wait.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: wait.h,v 1.9 2023/01/01 01:34:58 jsg Exp $ */
+/* $OpenBSD: wait.h,v 1.10 2023/07/18 06:58:59 claudio Exp $ */
 /*
  * Copyright (c) 2013, 2014, 2015 Mark Kettenis
  * Copyright (c) 2017 Martin Pieuchot
@@ -22,6 +22,7 @@
 #include
 #include
 #include
+#include
 
 #include
 #include
@@ -36,17 +37,15 @@ struct wait_queue_entry {
 
 typedef struct wait_queue_entry wait_queue_entry_t;
 
-extern struct mutex sch_mtx;
-extern volatile struct proc *sch_proc;
-extern volatile void *sch_ident;
-extern int sch_priority;
-
 struct wait_queue_head {
 	struct mutex lock;
 	struct list_head head;
 };
 typedef struct wait_queue_head wait_queue_head_t;
 
+void prepare_to_wait(wait_queue_head_t *, wait_queue_entry_t *, int);
+void finish_wait(wait_queue_head_t *, wait_queue_entry_t *);
+
 static inline void
 init_waitqueue_head(wait_queue_head_t *wqh)
 {
@@ -103,30 +102,36 @@ remove_wait_queue(wait_queue_head_t *head, wait_queue_entry_t *old)
 
 #define __wait_event_intr_timeout(wqh, condition, timo, prio)		\
 ({									\
-	long ret = timo;						\
+	long __ret = timo;						\
+	struct wait_queue_entry __wq_entry;				\
+									\
+	init_wait_entry(&__wq_entry, 0);				\
 	do {								\
-		int __error;						\
+		int __error, __wait;					\
 		unsigned long deadline;					\
 									\
 		KASSERT(!cold);						\
 									\
-		mtx_enter(&sch_mtx);					\
-		deadline = jiffies + ret;				\
-		__error = msleep(&wqh, &sch_mtx, prio, "drmweti", ret);	\
-		ret = deadline - jiffies;				\
+		prepare_to_wait(&wqh, &__wq_entry, prio);		\
+		deadline = jiffies + __ret;				\
+									\
+		__wait = !(condition);					\
+									\
+		__error = sleep_finish(__ret, __wait);			\
+		if ((timo) > 0)						\
+			__ret = deadline - jiffies;			\
+									\
 		if (__error == ERESTART || __error == EINTR) {		\
-			ret = -ERESTARTSYS;				\
-			mtx_leave(&sch_mtx);				\
+			__ret = -ERESTARTSYS;				\
 			break;						\
 		}							\
-		if ((timo) > 0 && (ret <= 0 || __error == EWOULDBLOCK)) { \
-			mtx_leave(&sch_mtx);				\
-			ret = ((condition)) ? 1 : 0;			\
+		if ((timo) > 0 && (__ret <= 0 || __error == EWOULDBLOCK)) { \
+			__ret = ((condition)) ? 1 : 0;			\
 			break;						\
 		}							\
-		mtx_leave(&sch_mtx);					\
-	} while (ret > 0 && !(condition));				\
-	ret;								\
+	} while (__ret > 0 && !(condition));				\
+	finish_wait(&wqh, &__wq_entry);					\
+	__ret;								\
 })
 
 /*
@@ -194,15 +199,23 @@ do {								\
 
 #define __wait_event_lock_irq(wqh, condition, mtx)			\
 ({									\
+	struct wait_queue_entry __wq_entry;				\
+									\
+	init_wait_entry(&__wq_entry, 0);				\
 	do {								\
+		int __wait;						\
+									\
 		KASSERT(!cold);						\
 									\
+		prepare_to_wait(&wqh, &__wq_entry, 0);			\
+									\
+		__wait = !(condition);					\
+									\
 		mtx_leave(&(mtx));					\
-		mtx_enter(&sch_mtx);					\
-		msleep(&wqh, &sch_mtx, 0, "drmweli", 0);		\
-		mtx_leave(&sch_mtx);					\
+		sleep_finish(0, __wait);				\
 		mtx_enter(&(mtx));					\
 	} while (!(condition));						\
+	finish_wait(&wqh, &__wq_entry);					\
 })
 
 /*
@@ -227,7 +240,6 @@ wake_up(wait_queue_head_t *wqh)
 		if (wqe->func != NULL)
 			wqe->func(wqe, 0, wqe->flags, NULL);
 	}
-	wakeup(wqh);
 	mtx_leave(&wqh->lock);
 }
 
@@ -244,7 +256,6 @@ wake_up_all_locked(wait_queue_head_t *wqh)
 		if (wqe->func != NULL)
 			wqe->func(wqe, 0, wqe->flags, NULL);
 	}
-	wakeup(wqh);
 }
 
 #define wake_up_interruptible(wqh)	wake_up(wqh)
@@ -257,29 +268,4 @@ wake_up_all_locked(wait_queue_head_t *wqh)
 		.entry = LIST_HEAD_INIT((name).entry),			\
 	}
 
-static inline void
-prepare_to_wait(wait_queue_head_t *wqh, wait_queue_entry_t *wqe, int state)
-{
-	if (wqe->flags == 0) {
-		mtx_enter(&sch_mtx);
-		wqe->flags = 1;
-	}
-	MUTEX_ASSERT_LOCKED(&sch_mtx);
-	if (list_empty(&wqe->entry))
-		__add_wait_queue(wqh, wqe);
-	sch_proc = curproc;
-	sch_ident = wqe;
-	sch_priority = state;
-}
-
-static inline void
-finish_wait(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
-{
-	MUTEX_ASSERT_LOCKED(&sch_mtx);
-	sch_ident = NULL;
-	if (!list_empty(&wqe->entry))
-		list_del_init(&wqe->entry);
-	mtx_leave(&sch_mtx);
-}
-
 #endif
diff --git a/sys/dev/pci/drm/radeon/radeon_fence.c b/sys/dev/pci/drm/radeon/radeon_fence.c
index 70ccf92c86c..6d460e5ce74 100644
--- a/sys/dev/pci/drm/radeon/radeon_fence.c
+++ b/sys/dev/pci/drm/radeon/radeon_fence.c
@@ -1085,7 +1085,6 @@ static signed long radeon_fence_default_wait(struct dma_fence *f, bool intr,
 			break;
 		}
 
-		KASSERT(sch_ident != NULL);
 		t = schedule_timeout(t);
 
 		if (t > 0 && intr && signal_pending(current))
-- 
2.20.1
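
For readers following the change, the sketch below is illustrative only and is not part of the patch. It writes out, as a plain function, roughly what one expansion of the rewritten __wait_event_intr_timeout() macro does after this commit. The function name example_wait_event_timeout, the volatile int condition pointer and the prio parameter are made up for the example; prepare_to_wait(), finish_wait(), sleep_finish(), init_wait_entry() and the error constants are the ones used in the macro above. A finite timeout (not MAX_SCHEDULE_TIMEOUT) is assumed, so the deadline bookkeeping is unconditional here.

/* Hypothetical illustration, not code from the tree. */
static long
example_wait_event_timeout(wait_queue_head_t *wqh, volatile int *condition,
    long timo, int prio)
{
	struct wait_queue_entry wqe;
	unsigned long deadline;
	long ret = timo;
	int error, wait;

	init_wait_entry(&wqe, 0);
	do {
		/*
		 * Queue the entry on the wait queue head and put curproc
		 * on the kernel sleep queue (prepare_to_wait ->
		 * set_current_state -> sleep_setup).
		 */
		prepare_to_wait(wqh, &wqe, prio);
		deadline = jiffies + ret;

		/*
		 * Re-test the condition only after we are queued; a
		 * wake_up() arriving now is not lost, it just makes
		 * sleep_finish() return without blocking.
		 */
		wait = !(*condition);

		error = sleep_finish(ret, wait);
		ret = deadline - jiffies;

		if (error == ERESTART || error == EINTR) {
			/* interrupted by a signal */
			ret = -ERESTARTSYS;
			break;
		}
		if (ret <= 0 || error == EWOULDBLOCK) {
			/* timed out; report whether the condition holds */
			ret = *condition ? 1 : 0;
			break;
		}
	} while (ret > 0 && !(*condition));
	finish_wait(wqh, &wqe);		/* dequeue and mark TASK_RUNNING */

	return ret;
}

The ordering is the point of the change: the old code published sch_proc/sch_ident under the global sch_mtx and then slept with msleep(), whereas here the wait entry is visible on the wait queue and the process is on the sleep queue before the condition is re-checked, so a wakeup that arrives between the check and the actual sleep is handled by sleep_setup()/sleep_finish() instead of racing with separately maintained global state.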