-/* $OpenBSD: kern_exit.c,v 1.213 2023/09/04 13:18:41 claudio Exp $ */
+/* $OpenBSD: kern_exit.c,v 1.214 2023/09/08 09:06:31 claudio Exp $ */
/* $NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $ */
/*
}
/* unlink ourselves from the active threads */
- SCHED_LOCK(s);
- TAILQ_REMOVE(&pr->ps_threads, p, p_thr_link);
- SCHED_UNLOCK(s);
-
mtx_enter(&pr->ps_mtx);
+ TAILQ_REMOVE(&pr->ps_threads, p, p_thr_link);
pr->ps_threadcnt--;
+
wake = (pr->ps_single && pr->ps_singlecnt == pr->ps_threadcnt);
mtx_leave(&pr->ps_mtx);
if (wake)
if ((p->p_flag & P_THREAD) == 0) {
/* main thread gotta wait because it has the pid, et al */
- /* XXX locking depends on kernel lock here. */
+ mtx_enter(&pr->ps_mtx);
while (pr->ps_threadcnt > 0)
- tsleep_nsec(&pr->ps_threads, PWAIT, "thrdeath", INFSLP);
+ msleep_nsec(&pr->ps_threads, &pr->ps_mtx, PWAIT,
+ "thrdeath", INFSLP);
+ mtx_leave(&pr->ps_mtx);
if (pr->ps_flags & PS_PROFIL)
stopprofclock(pr);
}
/* just a thread? detach it from its process */
if (p->p_flag & P_THREAD) {
/* scheduler_wait_hook(pr->ps_mainproc, p); XXX */
+ mtx_enter(&pr->ps_mtx);
if (pr->ps_threadcnt == 0)
wakeup(&pr->ps_threads);
+ mtx_leave(&pr->ps_mtx);
}
/* Release the thread's read reference of resource limit structure. */
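
Taken together, the kern_exit.c hunks collapse the old SCHED_LOCK()/kernel-lock mix into ps_mtx critical sections: the unlink from ps_threads, the ps_threadcnt accounting, and the main thread's wait all happen under the same mutex. Below is a condensed sketch of the resulting handshake, reusing the identifiers from the diff (pr, p); it is illustrative only and omits the rest of exit1():

	/* exiting sibling thread: unlink and drop the count under ps_mtx */
	mtx_enter(&pr->ps_mtx);
	TAILQ_REMOVE(&pr->ps_threads, p, p_thr_link);
	pr->ps_threadcnt--;
	if (pr->ps_threadcnt == 0)
		wakeup(&pr->ps_threads);	/* last one out wakes the main thread */
	mtx_leave(&pr->ps_mtx);

	/*
	 * main thread: msleep_nsec(9) drops ps_mtx while asleep and retakes
	 * it before returning, so the check and the sleep cannot race with
	 * the decrement above.
	 */
	mtx_enter(&pr->ps_mtx);
	while (pr->ps_threadcnt > 0)
		msleep_nsec(&pr->ps_threads, &pr->ps_mtx, PWAIT,
		    "thrdeath", INFSLP);
	mtx_leave(&pr->ps_mtx);
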
-/* $OpenBSD: kern_fork.c,v 1.250 2023/09/04 13:18:41 claudio Exp $ */
+/* $OpenBSD: kern_fork.c,v 1.251 2023/09/08 09:06:31 claudio Exp $ */
/* $NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $ */
/*
struct proc *p;
pid_t tid;
vaddr_t uaddr;
- int s, error;
+ int error;
if (stack == NULL)
return EINVAL;
LIST_INSERT_HEAD(&allproc, p, p_list);
LIST_INSERT_HEAD(TIDHASH(p->p_tid), p, p_hash);
- SCHED_LOCK(s);
- TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);
- SCHED_UNLOCK(s);
-
mtx_enter(&pr->ps_mtx);
+ TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);
pr->ps_threadcnt++;
/*
-/* $OpenBSD: kern_resource.c,v 1.78 2023/08/29 16:19:34 claudio Exp $ */
+/* $OpenBSD: kern_resource.c,v 1.79 2023/09/08 09:06:31 claudio Exp $ */
/* $NetBSD: kern_resource.c,v 1.38 1996/10/23 07:19:38 matthias Exp $ */
/*-
if (n < chgpr->ps_nice && suser(curp))
return (EACCES);
chgpr->ps_nice = n;
- SCHED_LOCK(s);
+ mtx_enter(&chgpr->ps_mtx);
TAILQ_FOREACH(p, &chgpr->ps_threads, p_thr_link) {
+ SCHED_LOCK(s);
setpriority(p, p->p_estcpu, n);
+ SCHED_UNLOCK(s);
}
- SCHED_UNLOCK(s);
+ mtx_leave(&chgpr->ps_mtx);
return (0);
}
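
The donice() hunk shows the lock ordering the rest of the diff relies on: ps_mtx is taken first and keeps the ps_threads list stable, while SCHED_LOCK() is held only around the per-thread scheduler update. A hedged stand-alone sketch of that ordering follows; the helper name is hypothetical and not part of the diff:

/*
 * Hypothetical helper illustrating the ordering used throughout this
 * diff: ps_mtx protects membership of ps_threads, SCHED_LOCK() covers
 * only the scheduler state touched for each individual thread, so it
 * is no longer held across the whole walk.
 */
static void
process_set_nice(struct process *pr, int nice)
{
	struct proc *q;
	int s;

	mtx_enter(&pr->ps_mtx);
	TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
		SCHED_LOCK(s);
		setpriority(q, q->p_estcpu, nice);
		SCHED_UNLOCK(s);
	}
	mtx_leave(&pr->ps_mtx);
}
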
struct process *pr = p->p_p;
struct proc *q;
- switch (who) {
+ KERNEL_ASSERT_LOCKED();
+ switch (who) {
case RUSAGE_SELF:
/* start with the sum of dead threads, if any */
if (pr->ps_ru != NULL)
-/* $OpenBSD: kern_sig.c,v 1.314 2023/09/04 13:18:41 claudio Exp $ */
+/* $OpenBSD: kern_sig.c,v 1.315 2023/09/08 09:06:31 claudio Exp $ */
/* $NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $ */
/*
}
 	pr->ps_singlecnt = 1;	/* count ourselves in already */
pr->ps_single = p;
- mtx_leave(&pr->ps_mtx);
- SCHED_LOCK(s);
TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
if (q == p)
continue;
+ SCHED_LOCK(s);
if (q->p_flag & P_WEXIT) {
if (mode == SINGLE_EXIT && q->p_stat == SSTOP)
setrunnable(q);
signotify(q);
break;
}
+ SCHED_UNLOCK(s);
}
- SCHED_UNLOCK(s);
- mtx_enter(&pr->ps_mtx);
pr->ps_singlecnt += count;
mtx_leave(&pr->ps_mtx);
KASSERT(pr->ps_single == p);
KASSERT(curproc == p);
- /* can do this without holding pr->ps_mtx since no concurrency */
+ mtx_enter(&pr->ps_mtx);
pr->ps_single = NULL;
atomic_clearbits_int(&pr->ps_flags, PS_SINGLEUNWIND | PS_SINGLEEXIT);
- SCHED_LOCK(s);
TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
if (q == p || (q->p_flag & P_SUSPSINGLE) == 0)
continue;
* then clearing that either makes it runnable or puts
* it back into some sleep queue
*/
+ SCHED_LOCK(s);
if (q->p_stat == SSTOP && (q->p_flag & flag) == 0) {
if (q->p_wchan == NULL)
setrunnable(q);
q->p_stat = SSLEEP;
}
}
+ SCHED_UNLOCK(s);
}
- SCHED_UNLOCK(s);
+ mtx_leave(&pr->ps_mtx);
}
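
With the kern_sig.c changes, ps_single and ps_singlecnt are only read and written with ps_mtx held, which is what lets the exit path above evaluate its wake condition without the scheduler lock. A condensed sketch of the two sides of that accounting (the actual sleep/wakeup of the single-threading requester is elided here, as it is in the excerpt):

	/* requester, single_thread_set(): all accounting under ps_mtx */
	mtx_enter(&pr->ps_mtx);
	pr->ps_singlecnt = 1;		/* count ourselves in already */
	pr->ps_single = p;
	/* ... walk ps_threads, stopping or waking each sibling ... */
	pr->ps_singlecnt += count;
	mtx_leave(&pr->ps_mtx);

	/*
	 * exiting sibling, exit1(): the same mutex makes this check atomic
	 * with respect to the accounting above.
	 */
	mtx_enter(&pr->ps_mtx);
	pr->ps_threadcnt--;
	wake = (pr->ps_single && pr->ps_singlecnt == pr->ps_threadcnt);
	mtx_leave(&pr->ps_mtx);
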
void
-/* $OpenBSD: kern_synch.c,v 1.198 2023/08/16 07:55:52 claudio Exp $ */
+/* $OpenBSD: kern_synch.c,v 1.199 2023/09/08 09:06:31 claudio Exp $ */
/* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
/*
uint8_t newprio;
int s;
- SCHED_LOCK(s);
/*
* If one of the threads of a multi-threaded process called
* sched_yield(2), drop its priority to ensure its siblings
* can make some progress.
*/
+ mtx_enter(&p->p_p->ps_mtx);
newprio = p->p_usrpri;
TAILQ_FOREACH(q, &p->p_p->ps_threads, p_thr_link)
newprio = max(newprio, q->p_runpri);
+ mtx_leave(&p->p_p->ps_mtx);
+
+ SCHED_LOCK(s);
setrunqueue(p->p_cpu, p, newprio);
p->p_ru.ru_nvcsw++;
mi_switch();
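
sched_yield() follows the same split: the sibling scan that computes the demoted priority now runs under ps_mtx, and SCHED_LOCK() is reacquired only for the run-queue insertion and the context switch. Condensed, with the identifiers from the diff:

	/*
	 * pick the least favourable priority currently held by any sibling
	 * (numerically larger is less favourable), so the yielding thread
	 * really steps out of their way
	 */
	mtx_enter(&p->p_p->ps_mtx);
	newprio = p->p_usrpri;
	TAILQ_FOREACH(q, &p->p_p->ps_threads, p_thr_link)
		newprio = max(newprio, q->p_runpri);
	mtx_leave(&p->p_p->ps_mtx);

	/* scheduler lock only for the actual yield */
	SCHED_LOCK(s);
	setrunqueue(p->p_cpu, p, newprio);
	p->p_ru.ru_nvcsw++;
	mi_switch();
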
-/* $OpenBSD: proc.h,v 1.349 2023/09/04 13:18:41 claudio Exp $ */
+/* $OpenBSD: proc.h,v 1.350 2023/09/08 09:06:31 claudio Exp $ */
/* $NetBSD: proc.h,v 1.44 1996/04/22 01:23:21 christos Exp $ */
/*-
struct ucred *ps_ucred; /* Process owner's identity. */
LIST_ENTRY(process) ps_list; /* List of all processes. */
- TAILQ_HEAD(,proc) ps_threads; /* [K|S] Threads in this process. */
+ TAILQ_HEAD(,proc) ps_threads; /* [K|m] Threads in this process. */
LIST_ENTRY(process) ps_pglist; /* List of processes in pgrp. */
struct process *ps_pptr; /* Pointer to parent process. */
* U uidinfolk
* l read only reference, see lim_read_enter()
* o owned (read/modified only) by this thread
+ * m this proc's `p->p_p->ps_mtx'
*/
struct proc {
TAILQ_ENTRY(proc) p_runq; /* [S] current run/sleep queue */
LIST_ENTRY(proc) p_list; /* List of all threads. */
struct process *p_p; /* [I] The process of this thread. */
- TAILQ_ENTRY(proc) p_thr_link; /* Threads in a process linkage. */
+ TAILQ_ENTRY(proc) p_thr_link; /* [K|m] Threads in a process linkage. */
TAILQ_ENTRY(proc) p_fut_link; /* Threads in a futex linkage. */
struct futex *p_futex; /* Current sleeping futex. */