From 436960cf02775967604f33beab159c0cf4ade5a0 Mon Sep 17 00:00:00 2001
From: mpi
Date: Mon, 8 Feb 2021 08:18:45 +0000
Subject: [PATCH] Simplify sleep_setup API to two operations in preparation
 for splitting the SCHED_LOCK().

Putting a thread on a sleep queue is reduced to the following:

	sleep_setup();
	/* check condition or release lock */
	sleep_finish();

Previous version ok cheloha@, jmatthew@, ok claudio@
---
 sys/dev/dt/dt_dev.c         |   8 +-
 sys/dev/pci/drm/drm_linux.c |  12 +--
 sys/dev/pci/if_myx.c        |   4 +-
 sys/kern/kern_rwlock.c      |  10 +-
 sys/kern/kern_sched.c       |   4 +-
 sys/kern/kern_sig.c         |   4 +-
 sys/kern/kern_synch.c       | 185 +++++++++++++-----------------
 sys/kern/kern_timeout.c     |   4 +-
 sys/kern/subr_log.c         |   8 +-
 sys/sys/proc.h              |   4 +-
 sys/sys/systm.h             |  11 +--
 11 files changed, 94 insertions(+), 160 deletions(-)

diff --git a/sys/dev/dt/dt_dev.c b/sys/dev/dt/dt_dev.c
index 51dc5e1cd81..bf96200b1a6 100644
--- a/sys/dev/dt/dt_dev.c
+++ b/sys/dev/dt/dt_dev.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: dt_dev.c,v 1.10 2020/09/28 13:16:58 kettenis Exp $ */
+/* $OpenBSD: dt_dev.c,v 1.11 2021/02/08 08:18:45 mpi Exp $ */

 /*
  * Copyright (c) 2019 Martin Pieuchot
@@ -225,10 +225,8 @@ dtread(dev_t dev, struct uio *uio, int flags)
 		return (EMSGSIZE);

 	while (!sc->ds_evtcnt) {
-		sleep_setup(&sls, sc, PWAIT | PCATCH, "dtread");
-		sleep_setup_signal(&sls);
-		sleep_finish(&sls, !sc->ds_evtcnt);
-		error = sleep_finish_signal(&sls);
+		sleep_setup(&sls, sc, PWAIT | PCATCH, "dtread", 0);
+		error = sleep_finish(&sls, !sc->ds_evtcnt);
 		if (error == EINTR || error == ERESTART)
 			break;
 	}
diff --git a/sys/dev/pci/drm/drm_linux.c b/sys/dev/pci/drm/drm_linux.c
index 55462ad34a7..d400d72ebc9 100644
--- a/sys/dev/pci/drm/drm_linux.c
+++ b/sys/dev/pci/drm/drm_linux.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: drm_linux.c,v 1.76 2021/01/13 01:04:49 jsg Exp $ */
+/* $OpenBSD: drm_linux.c,v 1.77 2021/02/08 08:18:45 mpi Exp $ */
 /*
  * Copyright (c) 2013 Jonathan Gray
  * Copyright (c) 2015, 2016 Mark Kettenis
@@ -110,14 +110,14 @@ schedule_timeout(long timeout)
 {
 	struct sleep_state sls;
 	unsigned long deadline;
-	int wait, spl;
+	int wait, spl, timo = 0;

 	MUTEX_ASSERT_LOCKED(&sch_mtx);
 	KASSERT(!cold);

-	sleep_setup(&sls, sch_ident, sch_priority, "schto");
 	if (timeout != MAX_SCHEDULE_TIMEOUT)
-		sleep_setup_timeout(&sls, timeout);
+		timo = timeout;
+	sleep_setup(&sls, sch_ident, sch_priority, "schto", timo);

 	wait = (sch_proc == curproc && timeout > 0);

@@ -125,11 +125,9 @@ schedule_timeout(long timeout)
 	MUTEX_OLDIPL(&sch_mtx) = splsched();
 	mtx_leave(&sch_mtx);

-	sleep_setup_signal(&sls);
-
 	if (timeout != MAX_SCHEDULE_TIMEOUT)
 		deadline = jiffies + timeout;
-	sleep_finish_all(&sls, wait);
+	sleep_finish(&sls, wait);
 	if (timeout != MAX_SCHEDULE_TIMEOUT)
 		timeout = deadline - jiffies;

diff --git a/sys/dev/pci/if_myx.c b/sys/dev/pci/if_myx.c
index 21b89c4789f..99a5d2a3b98 100644
--- a/sys/dev/pci/if_myx.c
+++ b/sys/dev/pci/if_myx.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: if_myx.c,v 1.114 2021/01/17 02:52:21 dlg Exp $ */
+/* $OpenBSD: if_myx.c,v 1.115 2021/02/08 08:18:45 mpi Exp $ */

 /*
  * Copyright (c) 2007 Reyk Floeter
@@ -1397,7 +1397,7 @@ myx_down(struct myx_softc *sc)
 	(void)myx_cmd(sc, MYXCMD_SET_IFDOWN, &mc, NULL);

 	while (sc->sc_state != MYX_S_OFF) {
-		sleep_setup(&sls, sts, PWAIT, "myxdown");
+		sleep_setup(&sls, sts, PWAIT, "myxdown", 0);
 		membar_consumer();
 		sleep_finish(&sls, sc->sc_state != MYX_S_OFF);
 	}
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index c73b1b17fa1..d79b59748e8 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_rwlock.c,v 1.46 2021/01/11 18:49:38 mpi Exp $ */
+/* $OpenBSD: kern_rwlock.c,v 1.47 2021/02/08 08:18:45 mpi Exp $ */

 /*
  * Copyright (c) 2002, 2003 Artur Grabowski
@@ -279,15 +279,13 @@ retry:
 		prio = op->wait_prio;
 		if (flags & RW_INTR)
 			prio |= PCATCH;
-		sleep_setup(&sls, rwl, prio, rwl->rwl_name);
-		if (flags & RW_INTR)
-			sleep_setup_signal(&sls);
+		sleep_setup(&sls, rwl, prio, rwl->rwl_name, 0);

 		do_sleep = !rw_cas(&rwl->rwl_owner, o, set);

-		sleep_finish(&sls, do_sleep);
+		error = sleep_finish(&sls, do_sleep);
 		if ((flags & RW_INTR) &&
-		    (error = sleep_finish_signal(&sls)) != 0)
+		    (error != 0))
 			return (error);
 		if (flags & RW_SLEEPFAIL)
 			return (EAGAIN);
diff --git a/sys/kern/kern_sched.c b/sys/kern/kern_sched.c
index 24b4faa1fe2..758aac7af64 100644
--- a/sys/kern/kern_sched.c
+++ b/sys/kern/kern_sched.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_sched.c,v 1.68 2021/01/09 20:57:46 gnezdo Exp $ */
+/* $OpenBSD: kern_sched.c,v 1.69 2021/02/08 08:18:45 mpi Exp $ */
 /*
  * Copyright (c) 2007, 2008 Artur Grabowski
  *
@@ -674,7 +674,7 @@ sched_stop_secondary_cpus(void)
 		if (CPU_IS_PRIMARY(ci))
 			continue;
 		while ((spc->spc_schedflags & SPCF_HALTED) == 0) {
-			sleep_setup(&sls, spc, PZERO, "schedstate");
+			sleep_setup(&sls, spc, PZERO, "schedstate", 0);
 			sleep_finish(&sls,
 			    (spc->spc_schedflags & SPCF_HALTED) == 0);
 		}
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index aef3d64181d..ed3df27da58 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_sig.c,v 1.270 2020/12/25 12:59:52 visa Exp $ */
+/* $OpenBSD: kern_sig.c,v 1.271 2021/02/08 08:18:45 mpi Exp $ */
 /* $NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $ */

 /*
@@ -2106,7 +2106,7 @@ single_thread_wait(struct process *pr, int recheck)
 	/* wait until they're all suspended */
 	wait = pr->ps_singlecount > 0;
 	while (wait) {
-		sleep_setup(&sls, &pr->ps_singlecount, PWAIT, "suspend");
+		sleep_setup(&sls, &pr->ps_singlecount, PWAIT, "suspend", 0);
 		wait = pr->ps_singlecount > 0;
 		sleep_finish(&sls, wait);
 		if (!recheck)
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 9236cd8944c..c7f30eb92ba 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_synch.c,v 1.174 2021/01/11 13:55:53 claudio Exp $ */
+/* $OpenBSD: kern_synch.c,v 1.175 2021/02/08 08:18:45 mpi Exp $ */
 /* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */

 /*
@@ -66,7 +66,7 @@
 #include
 #endif

-int	sleep_signal_check(struct proc *);
+int	sleep_signal_check(void);
 int	thrsleep(struct proc *, struct sys___thrsleep_args *);
 int	thrsleep_unlock(void *);

@@ -155,11 +155,8 @@ tsleep(const volatile void *ident, int priority, const char *wmesg, int timo)
 		return (0);
 	}

-	sleep_setup(&sls, ident, priority, wmesg);
-	sleep_setup_timeout(&sls, timo);
-	sleep_setup_signal(&sls);
-
-	return sleep_finish_all(&sls, 1);
+	sleep_setup(&sls, ident, priority, wmesg, timo);
+	return sleep_finish(&sls, 1);
 }

 int
@@ -250,8 +247,7 @@ msleep(const volatile void *ident, struct mutex *mtx, int priority,
 		return (0);
 	}

-	sleep_setup(&sls, ident, priority, wmesg);
-	sleep_setup_timeout(&sls, timo);
+	sleep_setup(&sls, ident, priority, wmesg, timo);

 	/* XXX - We need to make sure that the mutex doesn't
 	 * unblock splsched. This can be made a bit more
@@ -261,9 +257,7 @@ msleep(const volatile void *ident, struct mutex *mtx, int priority,
 	MUTEX_OLDIPL(mtx) = splsched();
 	mtx_leave(mtx);
 	/* signal may stop the process, release mutex before that */
-	sleep_setup_signal(&sls);
-
-	error = sleep_finish_all(&sls, 1);
+	error = sleep_finish(&sls, 1);

 	if ((priority & PNORELOCK) == 0) {
 		mtx_enter(mtx);
@@ -313,14 +307,11 @@ rwsleep(const volatile void *ident, struct rwlock *rwl, int priority,
 	rw_assert_anylock(rwl);
 	status = rw_status(rwl);

-	sleep_setup(&sls, ident, priority, wmesg);
-	sleep_setup_timeout(&sls, timo);
+	sleep_setup(&sls, ident, priority, wmesg, timo);

 	rw_exit(rwl);
 	/* signal may stop the process, release rwlock before that */
-	sleep_setup_signal(&sls);
-
-	error = sleep_finish_all(&sls, 1);
+	error = sleep_finish(&sls, 1);

 	if ((priority & PNORELOCK) == 0)
 		rw_enter(rwl, status);
@@ -353,7 +344,7 @@ rwsleep_nsec(const volatile void *ident, struct rwlock *rwl, int priority,

 void
 sleep_setup(struct sleep_state *sls, const volatile void *ident, int prio,
-    const char *wmesg)
+    const char *wmesg, int timo)
 {
 	struct proc *p = curproc;

@@ -367,14 +358,12 @@ sleep_setup(struct sleep_state *sls, const volatile void *ident, int prio,
 #endif

 	sls->sls_catch = prio & PCATCH;
-	sls->sls_do_sleep = 1;
 	sls->sls_locked = 0;
-	sls->sls_sigerr = 0;
 	sls->sls_timeout = 0;

 	/*
 	 * The kernel has to be locked for signal processing.
-	 * This is done here and not in sleep_setup_signal() because
+	 * This is done here and not in sleep_finish() because
 	 * KERNEL_LOCK() has to be taken before SCHED_LOCK().
 	 */
 	if (sls->sls_catch != 0) {
@@ -391,35 +380,53 @@ sleep_setup(struct sleep_state *sls, const volatile void *ident, int prio,
 	p->p_slptime = 0;
 	p->p_slppri = prio & PRIMASK;
 	TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_runq);
-}
-
-int
-sleep_finish_all(struct sleep_state *sls, int do_sleep)
-{
-	int error, error1;
-
-	sleep_finish(sls, do_sleep);
-	error1 = sleep_finish_timeout(sls);
-	error = sleep_finish_signal(sls);
-	/* Signal errors are higher priority than timeouts. */
-	if (error == 0 && error1 != 0)
-		error = error1;
-
-	return error;
+	KASSERT((p->p_flag & P_TIMEOUT) == 0);
+	if (timo) {
+		sls->sls_timeout = 1;
+		timeout_add(&p->p_sleep_to, timo);
+	}
 }

-void
+int
 sleep_finish(struct sleep_state *sls, int do_sleep)
 {
 	struct proc *p = curproc;
+	int error = 0, error1 = 0;

-	if (sls->sls_do_sleep && do_sleep) {
+	if (sls->sls_catch != 0) {
+		/* sleep_setup() has locked the kernel. */
+		KERNEL_ASSERT_LOCKED();
+
+		/*
+		 * We put ourselves on the sleep queue and start our
+		 * timeout before calling sleep_signal_check(), as we could
+		 * stop there, and a wakeup or a SIGCONT (or both) could
+		 * occur while we were stopped. A SIGCONT would cause
+		 * us to be marked as SSLEEP without resuming us, thus
+		 * we must be ready for sleep when sleep_signal_check() is
+		 * called.
+		 * If the wakeup happens while we're stopped, p->p_wchan
+		 * will be NULL upon return from sleep_signal_check(). In
+		 * that case we need to unwind immediately.
+		 */
+		atomic_setbits_int(&p->p_flag, P_SINTR);
+		if ((error = sleep_signal_check()) != 0) {
+			p->p_stat = SONPROC;
+			sls->sls_catch = 0;
+			do_sleep = 0;
+		} else if (p->p_wchan == NULL) {
+			sls->sls_catch = 0;
+			do_sleep = 0;
+		}
+	}
+
+	if (do_sleep) {
 		p->p_stat = SSLEEP;
 		p->p_ru.ru_nvcsw++;
 		SCHED_ASSERT_LOCKED();
 		mi_switch();
-	} else if (!do_sleep) {
+	} else {
 		unsleep(p);
 	}

@@ -436,30 +443,11 @@ sleep_finish(struct sleep_state *sls, int do_sleep)
 	 * we need to clear it before the ktrace.
 	 */
 	atomic_clearbits_int(&p->p_flag, P_SINTR);
-}
-
-void
-sleep_setup_timeout(struct sleep_state *sls, int timo)
-{
-	struct proc *p = curproc;
-
-	KASSERT((p->p_flag & P_TIMEOUT) == 0);
-
-	if (timo) {
-		sls->sls_timeout = 1;
-		timeout_add(&p->p_sleep_to, timo);
-	}
-}
-
-int
-sleep_finish_timeout(struct sleep_state *sls)
-{
-	struct proc *p = curproc;

 	if (sls->sls_timeout) {
 		if (p->p_flag & P_TIMEOUT) {
 			atomic_clearbits_int(&p->p_flag, P_TIMEOUT);
-			return (EWOULDBLOCK);
+			error1 = EWOULDBLOCK;
 		} else {
 			/* This must not sleep. */
 			timeout_del_barrier(&p->p_sleep_to);
@@ -467,12 +455,27 @@ sleep_finish_timeout(struct sleep_state *sls)
 		}
 	}

-	return (0);
+	/* Check if thread was woken up because of a unwind or signal */
+	if (sls->sls_catch != 0)
+		error = sleep_signal_check();
+
+	if (sls->sls_locked)
+		KERNEL_UNLOCK();
+
+	/* Signal errors are higher priority than timeouts. */
+	if (error == 0 && error1 != 0)
+		error = error1;
+
+	return error;
 }

+/*
+ * Check and handle signals and suspensions around a sleep cycle.
+ */
 int
-sleep_signal_check(struct proc *p)
+sleep_signal_check(void)
 {
+	struct proc *p = curproc;
 	int err, sig;

 	if ((err = single_thread_check(p, 1)) != 0)
@@ -486,61 +489,6 @@ sleep_signal_check(struct proc *p)
 	return 0;
 }

-void
-sleep_setup_signal(struct sleep_state *sls)
-{
-	struct proc *p = curproc;
-
-	if (sls->sls_catch == 0)
-		return;
-
-	/* sleep_setup() has locked the kernel. */
-	KERNEL_ASSERT_LOCKED();
-
-	/*
-	 * We put ourselves on the sleep queue and start our timeout before
-	 * calling single_thread_check or CURSIG, as we could stop there, and
-	 * a wakeup or a SIGCONT (or both) could occur while we were stopped.
-	 * A SIGCONT would cause us to be marked as SSLEEP without resuming us,
-	 * thus we must be ready for sleep when CURSIG is called. If the
-	 * wakeup happens while we're stopped, p->p_wchan will be 0 upon
-	 * return from single_thread_check or CURSIG. In that case we should
-	 * not go to sleep. If single_thread_check returns an error we need
-	 * to unwind immediately. That's achieved by saving the return value
-	 * in sls->sl_unwind and checking it later in sleep_finish_signal.
-	 */
-	atomic_setbits_int(&p->p_flag, P_SINTR);
-	if ((sls->sls_sigerr = sleep_signal_check(p)) != 0) {
-		unsleep(p);
-		p->p_stat = SONPROC;
-		sls->sls_do_sleep = 0;
-	} else if (p->p_wchan == 0) {
-		sls->sls_catch = 0;
-		sls->sls_do_sleep = 0;
-	}
-}
-
-int
-sleep_finish_signal(struct sleep_state *sls)
-{
-	struct proc *p = curproc;
-	int error = 0;
-
-	if (sls->sls_catch != 0) {
-		KERNEL_ASSERT_LOCKED();
-
-		if (sls->sls_sigerr != 0)
-			error = sls->sls_sigerr;
-		else
-			error = sleep_signal_check(p);
-	}
-
-	if (sls->sls_locked)
-		KERNEL_UNLOCK();
-
-	return (error);
-}
-
 int
 wakeup_proc(struct proc *p, const volatile void *chan)
 {
@@ -560,6 +508,7 @@ wakeup_proc(struct proc *p, const volatile void *chan)
 	return awakened;
 }

+
 /*
  * Implement timeout for tsleep.
  * If process hasn't been awakened (wchan non-zero),
@@ -895,7 +844,7 @@ refcnt_finalize(struct refcnt *r, const char *wmesg)
 	refcnt = atomic_dec_int_nv(&r->refs);

 	while (refcnt) {
-		sleep_setup(&sls, r, PWAIT, wmesg);
+		sleep_setup(&sls, r, PWAIT, wmesg, 0);
 		refcnt = r->refs;
 		sleep_finish(&sls, refcnt);
 	}
@@ -923,7 +872,7 @@ cond_wait(struct cond *c, const char *wmesg)
 	wait = c->c_wait;

 	while (wait) {
-		sleep_setup(&sls, c, PWAIT, wmesg);
+		sleep_setup(&sls, c, PWAIT, wmesg, 0);
 		wait = c->c_wait;
 		sleep_finish(&sls, wait);
 	}
diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index 75007aa9611..acf752a8381 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_timeout.c,v 1.82 2020/10/20 22:37:12 cheloha Exp $ */
+/* $OpenBSD: kern_timeout.c,v 1.83 2021/02/08 08:18:45 mpi Exp $ */
 /*
  * Copyright (c) 2001 Thomas Nordin
  * Copyright (c) 2000-2001 Artur Grabowski
@@ -787,7 +787,7 @@ softclock_thread(void *arg)

 	s = splsoftclock();
 	for (;;) {
-		sleep_setup(&sls, &timeout_proc, PSWP, "bored");
+		sleep_setup(&sls, &timeout_proc, PSWP, "bored", 0);
 		sleep_finish(&sls, CIRCQ_EMPTY(&timeout_proc));

 		mtx_enter(&timeout_mutex);
diff --git a/sys/kern/subr_log.c b/sys/kern/subr_log.c
index 07d7d764966..1ff754ab19d 100644
--- a/sys/kern/subr_log.c
+++ b/sys/kern/subr_log.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: subr_log.c,v 1.71 2021/01/08 11:23:57 visa Exp $ */
+/* $OpenBSD: subr_log.c,v 1.72 2021/02/08 08:18:45 mpi Exp $ */
 /* $NetBSD: subr_log.c,v 1.11 1996/03/30 22:24:44 christos Exp $ */

 /*
@@ -246,10 +246,8 @@ logread(dev_t dev, struct uio *uio, int flag)
 		 * Set up and enter sleep manually instead of using msleep()
 		 * to keep log_mtx as a leaf lock.
 		 */
-		sleep_setup(&sls, mbp, LOG_RDPRI | PCATCH, "klog");
-		sleep_setup_signal(&sls);
-		sleep_finish(&sls, logsoftc.sc_state & LOG_RDWAIT);
-		error = sleep_finish_signal(&sls);
+		sleep_setup(&sls, mbp, LOG_RDPRI | PCATCH, "klog", 0);
+		error = sleep_finish(&sls, logsoftc.sc_state & LOG_RDWAIT);
 		mtx_enter(&log_mtx);
 		if (error)
 			goto out;
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 411029e2602..c49015b2c8e 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: proc.h,v 1.306 2021/01/18 18:47:05 mvs Exp $ */
+/* $OpenBSD: proc.h,v 1.307 2021/02/08 08:18:45 mpi Exp $ */
 /* $NetBSD: proc.h,v 1.44 1996/04/22 01:23:21 christos Exp $ */

 /*-
@@ -625,9 +625,7 @@ int	proc_cansugid(struct proc *);
 struct sleep_state {
 	int sls_s;
 	int sls_catch;
-	int sls_do_sleep;
 	int sls_locked;
-	int sls_sigerr;
 	int sls_timeout;
 };

diff --git a/sys/sys/systm.h b/sys/sys/systm.h
index 6a1f63e2755..a26d7f98f21 100644
--- a/sys/sys/systm.h
+++ b/sys/sys/systm.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: systm.h,v 1.151 2021/02/01 15:55:07 visa Exp $ */
+/* $OpenBSD: systm.h,v 1.152 2021/02/08 08:18:45 mpi Exp $ */
 /* $NetBSD: systm.h,v 1.50 1996/06/09 04:55:09 briggs Exp $ */

 /*-
@@ -252,13 +252,8 @@ void	stop_periodic_resettodr(void);

 struct sleep_state;
 void	sleep_setup(struct sleep_state *, const volatile void *, int,
-	    const char *);
-void	sleep_setup_timeout(struct sleep_state *, int);
-void	sleep_setup_signal(struct sleep_state *);
-void	sleep_finish(struct sleep_state *, int);
-int	sleep_finish_timeout(struct sleep_state *);
-int	sleep_finish_signal(struct sleep_state *);
-int	sleep_finish_all(struct sleep_state *, int);
+	    const char *, int);
+int	sleep_finish(struct sleep_state *, int);
 void	sleep_queue_init(void);

 struct cond;
--
2.20.1
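
For reference, a wait loop under the reworked interface follows the pattern
below.  This is a minimal sketch modeled on the dtread() and logread() hunks
above, not code from the tree: "struct example_softc", its sc_count member
and the "exwait" wmesg are hypothetical placeholders; only sleep_setup(),
sleep_finish() and struct sleep_state come from the patched headers.

/*
 * Usage sketch of the two-step sleep API (hypothetical consumer).
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>

struct example_softc {
	volatile u_int	sc_count;	/* condition the thread waits on */
};

int
example_wait(struct example_softc *sc)
{
	struct sleep_state sls;
	int error;

	while (sc->sc_count == 0) {
		/* Put the thread on the sleep queue; a timo of 0 means no timeout. */
		sleep_setup(&sls, sc, PWAIT | PCATCH, "exwait", 0);
		/*
		 * Re-check the condition and either sleep or unwind.
		 * sleep_finish() now also performs the timeout and signal
		 * handling that sleep_finish_timeout()/sleep_finish_signal()
		 * used to do, so a single return value is checked here.
		 */
		error = sleep_finish(&sls, sc->sc_count == 0);
		if (error == EINTR || error == ERESTART)
			return (error);
	}
	return (0);
}

With a non-zero timo (in ticks), sleep_finish() reports EWOULDBLOCK when the
timeout fires; as the new sleep_finish() above shows, a signal error such as
EINTR or ERESTART takes precedence over the timeout error.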