-/* $OpenBSD: kern_exit.c,v 1.231 2024/08/11 15:10:53 mvs Exp $ */
+/* $OpenBSD: kern_exit.c,v 1.232 2024/08/16 16:19:03 mpi Exp $ */
/* $NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $ */
/*
{
crfree(p->p_ucred);
pool_put(&proc_pool, p);
- nthreads--;
+ atomic_dec_int(&nthreads);
}
/*
if (p->p_flag & P_THREAD) {
/* Just a thread */
- KERNEL_LOCK();
proc_free(p);
- KERNEL_UNLOCK();
} else {
struct process *pr = p->p_p;
-/* $OpenBSD: kern_fork.c,v 1.262 2024/08/11 15:10:53 mvs Exp $ */
+/* $OpenBSD: kern_fork.c,v 1.263 2024/08/16 16:19:03 mpi Exp $ */
/* $NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $ */
/*
#include <machine/tcb.h>
int nprocesses = 1; /* process 0 */
-int nthreads = 1; /* proc 0 */
+int nthreads = 1; /* [a] proc 0 */
struct forkstat forkstat;
void fork_return(void *);
int
fork_check_maxthread(uid_t uid)
{
+ int val;
+
/*
* Although process entries are dynamically created, we still keep
* a global limit on the maximum number we will create. We reserve
 * the last 5 processes to root. The variable nthreads is the current
 * number of procs, maxthread is the limit.
*/
- if ((nthreads >= maxthread - 5 && uid != 0) || nthreads >= maxthread) {
+ val = atomic_inc_int_nv(&nthreads);
+ if ((val > maxthread - 5 && uid != 0) || val > maxthread) {
static struct timeval lasttfm;
if (ratecheck(&lasttfm, &fork_tfmrate))
tablefull("thread");
+ atomic_dec_int(&nthreads);
return EAGAIN;
}
- nthreads++;
return 0;
}
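The new fork_check_maxthread() uses an increment-then-check pattern: bump nthreads atomically, compare the value returned by the increment against the limit (with the last 5 slots reserved for root), and undo the increment if the check fails. Below is a minimal userland sketch of that pattern; it uses C11 <stdatomic.h> so it builds outside the kernel, whereas the real code uses atomic_inc_int_nv()/atomic_dec_int() from atomic(9). The function name, the EAGAIN value, and the tiny maxthread limit here are illustrative only.

#include <stdatomic.h>
#include <stdio.h>

#define EAGAIN 35			/* illustrative errno value */

static atomic_uint nthreads = 1;	/* proc 0 */
static unsigned int maxthread = 8;	/* tiny limit for the demo */

static int
check_maxthread(int is_root)
{
	/* atomic_fetch_add() returns the old value, so add 1 for the new one */
	unsigned int val = atomic_fetch_add(&nthreads, 1) + 1;

	/* reserve the last 5 slots for root and never exceed the hard limit */
	if ((val > maxthread - 5 && !is_root) || val > maxthread) {
		atomic_fetch_sub(&nthreads, 1);	/* roll the increment back */
		return EAGAIN;
	}
	return 0;
}

int
main(void)
{
	for (int i = 0; i < 10; i++)
		printf("fork %d -> %d\n", i, check_maxthread(0));
	return 0;
}

Because the limit test operates on the value returned by the increment itself, two racing callers cannot both observe a stale count below the limit, which is what allows the check to run without holding a lock; the only obligation is that every failure path (and, eventually, thread exit) undoes the increment, as the error paths in the diff do with atomic_dec_int().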
if (ratecheck(&lasttfm, &fork_tfmrate))
tablefull("process");
- nthreads--;
+ atomic_dec_int(&nthreads);
return EAGAIN;
}
nprocesses++;
if (uid != 0 && count > lim_cur(RLIMIT_NPROC)) {
(void)chgproccnt(uid, -1);
nprocesses--;
- nthreads--;
+ atomic_dec_int(&nthreads);
return EAGAIN;
}
if (uaddr == 0) {
(void)chgproccnt(uid, -1);
nprocesses--;
- nthreads--;
+ atomic_dec_int(&nthreads);
return (ENOMEM);
}
uaddr = uvm_uarea_alloc();
if (uaddr == 0) {
- nthreads--;
+ atomic_dec_int(&nthreads);
return ENOMEM;
}