From: claudio
Date: Fri, 8 Sep 2023 09:06:31 +0000 (+0000)
Subject: Change how ps_threads and p_thr_link are locked away from using SCHED_LOCK.
X-Git-Url: http://artulab.com/gitweb/?a=commitdiff_plain;h=13095e6dd0735888515c168c112f29db29ff12a2;p=openbsd

Change how ps_threads and p_thr_link are locked away from using SCHED_LOCK.

The per process thread list can be traversed (read) by holding either
the KERNEL_LOCK or the per process ps_mtx (instead of SCHED_LOCK).
Abusing the SCHED_LOCK for this makes it impossible to split up the
scheduler lock into something more fine grained.

Tested by phessler@, ok mpi@
---

diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index 7dbb63daca8..460833afc4f 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_exit.c,v 1.213 2023/09/04 13:18:41 claudio Exp $ */
+/* $OpenBSD: kern_exit.c,v 1.214 2023/09/08 09:06:31 claudio Exp $ */
 /* $NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $ */
 
 /*
@@ -157,12 +157,10 @@ exit1(struct proc *p, int xexit, int xsig, int flags)
 	}
 
 	/* unlink ourselves from the active threads */
-	SCHED_LOCK(s);
-	TAILQ_REMOVE(&pr->ps_threads, p, p_thr_link);
-	SCHED_UNLOCK(s);
-
 	mtx_enter(&pr->ps_mtx);
+	TAILQ_REMOVE(&pr->ps_threads, p, p_thr_link);
 	pr->ps_threadcnt--;
+
 	wake = (pr->ps_single && pr->ps_singlecnt == pr->ps_threadcnt);
 	mtx_leave(&pr->ps_mtx);
 	if (wake)
@@ -170,9 +168,11 @@ exit1(struct proc *p, int xexit, int xsig, int flags)
 
 	if ((p->p_flag & P_THREAD) == 0) {
 		/* main thread gotta wait because it has the pid, et al */
-		/* XXX locking depends on kernel lock here. */
+		mtx_enter(&pr->ps_mtx);
 		while (pr->ps_threadcnt > 0)
-			tsleep_nsec(&pr->ps_threads, PWAIT, "thrdeath", INFSLP);
+			msleep_nsec(&pr->ps_threads, &pr->ps_mtx, PWAIT,
+			    "thrdeath", INFSLP);
+		mtx_leave(&pr->ps_mtx);
 		if (pr->ps_flags & PS_PROFIL)
 			stopprofclock(pr);
 	}
@@ -345,8 +345,10 @@ exit1(struct proc *p, int xexit, int xsig, int flags)
 	/* just a thread? detach it from its process */
 	if (p->p_flag & P_THREAD) {
 		/* scheduler_wait_hook(pr->ps_mainproc, p); XXX */
+		mtx_enter(&pr->ps_mtx);
 		if (pr->ps_threadcnt == 0)
 			wakeup(&pr->ps_threads);
+		mtx_leave(&pr->ps_mtx);
 	}
 
 	/* Release the thread's read reference of resource limit structure. */
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 6d515011b11..f3eca8918e6 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_fork.c,v 1.250 2023/09/04 13:18:41 claudio Exp $ */
+/* $OpenBSD: kern_fork.c,v 1.251 2023/09/08 09:06:31 claudio Exp $ */
 /* $NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $ */
 
 /*
@@ -519,7 +519,7 @@ thread_fork(struct proc *curp, void *stack, void *tcb, pid_t *tidptr,
 	struct proc *p;
 	pid_t tid;
 	vaddr_t uaddr;
-	int s, error;
+	int error;
 
 	if (stack == NULL)
 		return EINVAL;
@@ -561,11 +561,8 @@ thread_fork(struct proc *curp, void *stack, void *tcb, pid_t *tidptr,
 	LIST_INSERT_HEAD(&allproc, p, p_list);
 	LIST_INSERT_HEAD(TIDHASH(p->p_tid), p, p_hash);
 
-	SCHED_LOCK(s);
-	TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);
-	SCHED_UNLOCK(s);
-
 	mtx_enter(&pr->ps_mtx);
+	TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);
 	pr->ps_threadcnt++;
 
 	/*
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
index 9e756af4026..2237ed4c749 100644
--- a/sys/kern/kern_resource.c
+++ b/sys/kern/kern_resource.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_resource.c,v 1.78 2023/08/29 16:19:34 claudio Exp $ */
+/* $OpenBSD: kern_resource.c,v 1.79 2023/09/08 09:06:31 claudio Exp $ */
 /* $NetBSD: kern_resource.c,v 1.38 1996/10/23 07:19:38 matthias Exp $ */
 
 /*-
@@ -212,11 +212,13 @@ donice(struct proc *curp, struct process *chgpr, int n)
 	if (n < chgpr->ps_nice && suser(curp))
 		return (EACCES);
 	chgpr->ps_nice = n;
-	SCHED_LOCK(s);
+	mtx_enter(&chgpr->ps_mtx);
 	TAILQ_FOREACH(p, &chgpr->ps_threads, p_thr_link) {
+		SCHED_LOCK(s);
 		setpriority(p, p->p_estcpu, n);
+		SCHED_UNLOCK(s);
 	}
-	SCHED_UNLOCK(s);
+	mtx_leave(&chgpr->ps_mtx);
 	return (0);
 }
 
@@ -476,8 +478,9 @@ dogetrusage(struct proc *p, int who, struct rusage *rup)
 	struct process *pr = p->p_p;
 	struct proc *q;
 
-	switch (who) {
+	KERNEL_ASSERT_LOCKED();
 
+	switch (who) {
 	case RUSAGE_SELF:
 		/* start with the sum of dead threads, if any */
 		if (pr->ps_ru != NULL)
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index 75f48bb680b..9259f5a1fb4 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_sig.c,v 1.314 2023/09/04 13:18:41 claudio Exp $ */
+/* $OpenBSD: kern_sig.c,v 1.315 2023/09/08 09:06:31 claudio Exp $ */
 /* $NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $ */
 
 /*
@@ -2105,12 +2105,11 @@ single_thread_set(struct proc *p, enum single_thread_mode mode, int wait)
 	}
 	pr->ps_singlecnt = 1; /* count ourselfs in already */
 	pr->ps_single = p;
-	mtx_leave(&pr->ps_mtx);
 
-	SCHED_LOCK(s);
 	TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
 		if (q == p)
 			continue;
+		SCHED_LOCK(s);
 		if (q->p_flag & P_WEXIT) {
 			if (mode == SINGLE_EXIT && q->p_stat == SSTOP)
 				setrunnable(q);
@@ -2146,10 +2145,9 @@ single_thread_set(struct proc *p, enum single_thread_mode mode, int wait)
 			signotify(q);
 			break;
 		}
+		SCHED_UNLOCK(s);
 	}
-	SCHED_UNLOCK(s);
 
-	mtx_enter(&pr->ps_mtx);
 	pr->ps_singlecnt += count;
 	mtx_leave(&pr->ps_mtx);
 
@@ -2194,11 +2192,10 @@ single_thread_clear(struct proc *p, int flag)
 	KASSERT(pr->ps_single == p);
 	KASSERT(curproc == p);
 
-	/* can do this without holding pr->ps_mtx since no concurrency */
+	mtx_enter(&pr->ps_mtx);
 	pr->ps_single = NULL;
 	atomic_clearbits_int(&pr->ps_flags, PS_SINGLEUNWIND | PS_SINGLEEXIT);
 
-	SCHED_LOCK(s);
 	TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
 		if (q == p || (q->p_flag & P_SUSPSINGLE) == 0)
 			continue;
@@ -2209,6 +2206,7 @@ single_thread_clear(struct proc *p, int flag)
 		 * then clearing that either makes it runnable or puts
 		 * it back into some sleep queue
 		 */
+		SCHED_LOCK(s);
 		if (q->p_stat == SSTOP && (q->p_flag & flag) == 0) {
 			if (q->p_wchan == NULL)
 				setrunnable(q);
@@ -2217,8 +2215,9 @@ single_thread_clear(struct proc *p, int flag)
 				q->p_stat = SSLEEP;
 			}
 		}
+		SCHED_UNLOCK(s);
 	}
-	SCHED_UNLOCK(s);
+	mtx_leave(&pr->ps_mtx);
 }
 
 void
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 624bf52b2b5..06db10f0488 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_synch.c,v 1.198 2023/08/16 07:55:52 claudio Exp $ */
+/* $OpenBSD: kern_synch.c,v 1.199 2023/09/08 09:06:31 claudio Exp $ */
 /* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
 
 /*
@@ -566,15 +566,18 @@ sys_sched_yield(struct proc *p, void *v, register_t *retval)
 	uint8_t newprio;
 	int s;
 
-	SCHED_LOCK(s);
 	/*
 	 * If one of the threads of a multi-threaded process called
 	 * sched_yield(2), drop its priority to ensure its siblings
 	 * can make some progress.
 	 */
+	mtx_enter(&p->p_p->ps_mtx);
 	newprio = p->p_usrpri;
 	TAILQ_FOREACH(q, &p->p_p->ps_threads, p_thr_link)
 		newprio = max(newprio, q->p_runpri);
+	mtx_leave(&p->p_p->ps_mtx);
+
+	SCHED_LOCK(s);
 	setrunqueue(p->p_cpu, p, newprio);
 	p->p_ru.ru_nvcsw++;
 	mi_switch();
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 4328db19fa4..9484a8bdb16 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: proc.h,v 1.349 2023/09/04 13:18:41 claudio Exp $ */
+/* $OpenBSD: proc.h,v 1.350 2023/09/08 09:06:31 claudio Exp $ */
 /* $NetBSD: proc.h,v 1.44 1996/04/22 01:23:21 christos Exp $ */
 
 /*-
@@ -138,7 +138,7 @@ struct process {
 	struct ucred *ps_ucred;		/* Process owner's identity. */
 
 	LIST_ENTRY(process) ps_list;	/* List of all processes. */
-	TAILQ_HEAD(,proc) ps_threads;	/* [K|S] Threads in this process. */
+	TAILQ_HEAD(,proc) ps_threads;	/* [K|m] Threads in this process. */
 
 	LIST_ENTRY(process) ps_pglist;	/* List of processes in pgrp. */
 	struct process *ps_pptr;	/* Pointer to parent process. */
@@ -310,13 +310,14 @@ struct p_inentry {
 *	U	uidinfolk
 *	l	read only reference, see lim_read_enter()
 *	o	owned (read/modified only) by this thread
+*	m	this proc's' `p->p_p->ps_mtx'
 */
 struct proc {
	TAILQ_ENTRY(proc) p_runq;	/* [S] current run/sleep queue */
	LIST_ENTRY(proc) p_list;	/* List of all threads. */
 
	struct process *p_p;		/* [I] The process of this thread. */
-	TAILQ_ENTRY(proc) p_thr_link;	/* Threads in a process linkage. */
+	TAILQ_ENTRY(proc) p_thr_link;	/* [K|m] Threads in a process linkage. */
	TAILQ_ENTRY(proc) p_fut_link;	/* Threads in a futex linkage. */
	struct futex *p_futex;		/* Current sleeping futex. */
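
For illustration only, not part of the commit: a minimal sketch of the read-side
pattern the commit message describes, mirroring the donice() and sys_sched_yield()
hunks above.  The helper name example_threadcnt() is hypothetical; ps_threads,
p_thr_link, ps_mtx, mtx_enter()/mtx_leave() and TAILQ_FOREACH() are the names used
in the diff.

/*
 * Hypothetical reader of the per-process thread list after this change:
 * ps_threads/p_thr_link may be traversed while holding either the
 * KERNEL_LOCK or pr->ps_mtx instead of SCHED_LOCK.
 */
int
example_threadcnt(struct process *pr)
{
	struct proc *q;
	int count = 0;

	mtx_enter(&pr->ps_mtx);
	TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link)
		count++;
	mtx_leave(&pr->ps_mtx);

	return (count);
}

The write sides in the diff (TAILQ_INSERT_TAIL() in thread_fork() and
TAILQ_REMOVE() in exit1()) now take ps_mtx around the list manipulation, which
is what allows a ps_mtx-only traversal like the sketch above.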