From: claudio Date: Mon, 3 Jun 2024 12:48:25 +0000 (+0000) Subject: Remove the now unused s argument to SCHED_LOCK and SCHED_UNLOCK. X-Git-Url: http://artulab.com/gitweb/?a=commitdiff_plain;h=a09e95841b3ee2acbb8dccab10902d61ea0f0031;p=openbsd Remove the now unused s argument to SCHED_LOCK and SCHED_UNLOCK. The SPL level is not tracked by the mutex and we no longer need to track this in the callers. OK miod@ mlarkin@ tb@ jca@ --- diff --git a/sys/dev/pci/drm/drm_linux.c b/sys/dev/pci/drm/drm_linux.c index 95947aebfd5..a2eded062e4 100644 --- a/sys/dev/pci/drm/drm_linux.c +++ b/sys/dev/pci/drm/drm_linux.c @@ -1,4 +1,4 @@ -/* $OpenBSD: drm_linux.c,v 1.112 2024/03/30 13:33:20 mpi Exp $ */ +/* $OpenBSD: drm_linux.c,v 1.113 2024/06/03 12:48:25 claudio Exp $ */ /* * Copyright (c) 2013 Jonathan Gray * Copyright (c) 2015, 2016 Mark Kettenis @@ -114,14 +114,13 @@ void __set_current_state(int state) { struct proc *p = curproc; - int s; KASSERT(state == TASK_RUNNING); - SCHED_LOCK(s); + SCHED_LOCK(); unsleep(p); p->p_stat = SONPROC; atomic_clearbits_int(&p->p_flag, P_WSLEEP); - SCHED_UNLOCK(s); + SCHED_UNLOCK(); } void @@ -159,11 +158,11 @@ schedule_timeout_uninterruptible(long timeout) int wake_up_process(struct proc *p) { - int s, rv; + int rv; - SCHED_LOCK(s); + SCHED_LOCK(); rv = wakeup_proc(p, 0); - SCHED_UNLOCK(s); + SCHED_UNLOCK(); return rv; } diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c index 029b5930915..af34d278d3e 100644 --- a/sys/kern/kern_exit.c +++ b/sys/kern/kern_exit.c @@ -1,4 +1,4 @@ -/* $OpenBSD: kern_exit.c,v 1.221 2024/05/20 10:32:20 claudio Exp $ */ +/* $OpenBSD: kern_exit.c,v 1.222 2024/06/03 12:48:25 claudio Exp $ */ /* $NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $ */ /* @@ -119,7 +119,6 @@ exit1(struct proc *p, int xexit, int xsig, int flags) struct process *pr, *qr, *nqr; struct rusage *rup; struct timespec ts; - int s; atomic_setbits_int(&p->p_flag, P_WEXIT); @@ -329,9 +328,9 @@ exit1(struct proc *p, int xexit, int xsig, 
int flags) timespecclear(&ts); else timespecsub(&ts, &curcpu()->ci_schedstate.spc_runtime, &ts); - SCHED_LOCK(s); + SCHED_LOCK(); tuagg_locked(pr, p, &ts); - SCHED_UNLOCK(s); + SCHED_UNLOCK(); /* * clear %cpu usage during swap diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c index 3ed90a60388..35632bbc936 100644 --- a/sys/kern/kern_fork.c +++ b/sys/kern/kern_fork.c @@ -1,4 +1,4 @@ -/* $OpenBSD: kern_fork.c,v 1.259 2024/05/29 18:55:45 claudio Exp $ */ +/* $OpenBSD: kern_fork.c,v 1.260 2024/06/03 12:48:25 claudio Exp $ */ /* $NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $ */ /* @@ -329,14 +329,13 @@ static inline void fork_thread_start(struct proc *p, struct proc *parent, int flags) { struct cpu_info *ci; - int s; - SCHED_LOCK(s); + SCHED_LOCK(); ci = sched_choosecpu_fork(parent, flags); TRACEPOINT(sched, fork, p->p_tid + THREAD_PID_OFFSET, p->p_p->ps_pid, CPU_INFO_UNIT(ci)); setrunqueue(ci, p, p->p_usrpri); - SCHED_UNLOCK(s); + SCHED_UNLOCK(); } int diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c index 6bc28d5aabc..c3e7a7a4f5c 100644 --- a/sys/kern/kern_resource.c +++ b/sys/kern/kern_resource.c @@ -1,4 +1,4 @@ -/* $OpenBSD: kern_resource.c,v 1.83 2024/05/22 09:20:22 claudio Exp $ */ +/* $OpenBSD: kern_resource.c,v 1.84 2024/06/03 12:48:25 claudio Exp $ */ /* $NetBSD: kern_resource.c,v 1.38 1996/10/23 07:19:38 matthias Exp $ */ /*- @@ -198,7 +198,6 @@ donice(struct proc *curp, struct process *chgpr, int n) { struct ucred *ucred = curp->p_ucred; struct proc *p; - int s; if (ucred->cr_uid != 0 && ucred->cr_ruid != 0 && ucred->cr_uid != chgpr->ps_ucred->cr_uid && @@ -213,11 +212,11 @@ donice(struct proc *curp, struct process *chgpr, int n) return (EACCES); chgpr->ps_nice = n; mtx_enter(&chgpr->ps_mtx); - SCHED_LOCK(s); + SCHED_LOCK(); TAILQ_FOREACH(p, &chgpr->ps_threads, p_thr_link) { setpriority(p, p->p_estcpu, n); } - SCHED_UNLOCK(s); + SCHED_UNLOCK(); mtx_leave(&chgpr->ps_mtx); return (0); } @@ -396,11 +395,9 @@ 
tuagg_locked(struct process *pr, struct proc *p, const struct timespec *ts) void tuagg(struct process *pr, struct proc *p) { - int s; - - SCHED_LOCK(s); + SCHED_LOCK(); tuagg_locked(pr, p, NULL); - SCHED_UNLOCK(s); + SCHED_UNLOCK(); } /* @@ -537,13 +534,12 @@ rucheck(void *arg) struct rlimit rlim; struct process *pr = arg; time_t runtime; - int s; KERNEL_ASSERT_LOCKED(); - SCHED_LOCK(s); + SCHED_LOCK(); runtime = pr->ps_tu.tu_runtime.tv_sec; - SCHED_UNLOCK(s); + SCHED_UNLOCK(); mtx_enter(&pr->ps_mtx); rlim = pr->ps_limit->pl_rlimit[RLIMIT_CPU]; diff --git a/sys/kern/kern_sched.c b/sys/kern/kern_sched.c index dd54f98d4b8..39d1250dab2 100644 --- a/sys/kern/kern_sched.c +++ b/sys/kern/kern_sched.c @@ -1,4 +1,4 @@ -/* $OpenBSD: kern_sched.c,v 1.95 2024/02/28 13:43:44 mpi Exp $ */ +/* $OpenBSD: kern_sched.c,v 1.96 2024/06/03 12:48:25 claudio Exp $ */ /* * Copyright (c) 2007, 2008 Artur Grabowski * @@ -137,7 +137,6 @@ sched_idle(void *v) struct schedstate_percpu *spc; struct proc *p = curproc; struct cpu_info *ci = v; - int s; KERNEL_UNLOCK(); @@ -147,14 +146,14 @@ sched_idle(void *v) * First time we enter here, we're not supposed to idle, * just go away for a while. 
*/ - SCHED_LOCK(s); + SCHED_LOCK(); cpuset_add(&sched_idle_cpus, ci); p->p_stat = SSLEEP; p->p_cpu = ci; atomic_setbits_int(&p->p_flag, P_CPUPEG); mi_switch(); cpuset_del(&sched_idle_cpus, ci); - SCHED_UNLOCK(s); + SCHED_UNLOCK(); KASSERT(ci == curcpu()); KASSERT(curproc == spc->spc_idleproc); @@ -163,10 +162,10 @@ sched_idle(void *v) while (!cpu_is_idle(curcpu())) { struct proc *dead; - SCHED_LOCK(s); + SCHED_LOCK(); p->p_stat = SSLEEP; mi_switch(); - SCHED_UNLOCK(s); + SCHED_UNLOCK(); while ((dead = LIST_FIRST(&spc->spc_deadproc))) { LIST_REMOVE(dead, p_hash); @@ -185,10 +184,10 @@ sched_idle(void *v) if (spc->spc_schedflags & SPCF_SHOULDHALT && (spc->spc_schedflags & SPCF_HALTED) == 0) { cpuset_del(&sched_idle_cpus, ci); - SCHED_LOCK(s); + SCHED_LOCK(); atomic_setbits_int(&spc->spc_schedflags, spc->spc_whichqs ? 0 : SPCF_HALTED); - SCHED_UNLOCK(s); + SCHED_UNLOCK(); wakeup(spc); } #endif @@ -226,7 +225,6 @@ sched_toidle(void) { struct schedstate_percpu *spc = &curcpu()->ci_schedstate; struct proc *idle; - int s; #ifdef MULTIPROCESSOR /* This process no longer needs to hold the kernel lock. 
*/ @@ -245,8 +243,7 @@ sched_toidle(void) atomic_clearbits_int(&spc->spc_schedflags, SPCF_SWITCHCLEAR); - SCHED_LOCK(s); - + SCHED_LOCK(); idle = spc->spc_idleproc; idle->p_stat = SRUN; @@ -627,14 +624,13 @@ void sched_peg_curproc(struct cpu_info *ci) { struct proc *p = curproc; - int s; - SCHED_LOCK(s); + SCHED_LOCK(); atomic_setbits_int(&p->p_flag, P_CPUPEG); setrunqueue(ci, p, p->p_usrpri); p->p_ru.ru_nvcsw++; mi_switch(); - SCHED_UNLOCK(s); + SCHED_UNLOCK(); } #ifdef MULTIPROCESSOR diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c index a2a8f1b7ee2..6aad5bcb69a 100644 --- a/sys/kern/kern_sig.c +++ b/sys/kern/kern_sig.c @@ -1,4 +1,4 @@ -/* $OpenBSD: kern_sig.c,v 1.329 2024/05/22 09:22:55 claudio Exp $ */ +/* $OpenBSD: kern_sig.c,v 1.330 2024/06/03 12:48:25 claudio Exp $ */ /* $NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $ */ /* @@ -843,14 +843,12 @@ trapsignal(struct proc *p, int signum, u_long trapno, int code, */ if (((pr->ps_flags & (PS_TRACED | PS_PPWAIT)) == PS_TRACED) && signum != SIGKILL && (p->p_sigmask & mask) != 0) { - int s; - single_thread_set(p, SINGLE_SUSPEND | SINGLE_NOWAIT); pr->ps_xsig = signum; - SCHED_LOCK(s); + SCHED_LOCK(); proc_stop(p, 1); - SCHED_UNLOCK(s); + SCHED_UNLOCK(); signum = pr->ps_xsig; single_thread_clear(p, 0); @@ -920,7 +918,7 @@ psignal(struct proc *p, int signum) void ptsignal(struct proc *p, int signum, enum signal_type type) { - int s, prop; + int prop; sig_t action, altaction = SIG_DFL; sigset_t mask, sigmask; int *siglist; @@ -1063,7 +1061,7 @@ ptsignal(struct proc *p, int signum, enum signal_type type) if (q != p) ptsignal(q, signum, SPROPAGATED); - SCHED_LOCK(s); + SCHED_LOCK(); switch (p->p_stat) { @@ -1252,7 +1250,7 @@ out: atomic_clearbits_int(&p->p_flag, P_CONTINUED); } - SCHED_UNLOCK(s); + SCHED_UNLOCK(); if (wakeparent) wakeup(pr->ps_pptr); } @@ -1299,7 +1297,6 @@ cursig(struct proc *p, struct sigctx *sctx) struct process *pr = p->p_p; int signum, mask, prop; sigset_t ps_siglist; - int s; 
KASSERT(p == curproc); @@ -1340,9 +1337,9 @@ cursig(struct proc *p, struct sigctx *sctx) single_thread_set(p, SINGLE_SUSPEND | SINGLE_NOWAIT); pr->ps_xsig = signum; - SCHED_LOCK(s); + SCHED_LOCK(); proc_stop(p, 1); - SCHED_UNLOCK(s); + SCHED_UNLOCK(); /* * re-take the signal before releasing @@ -1415,9 +1412,9 @@ cursig(struct proc *p, struct sigctx *sctx) prop & SA_TTYSTOP)) break; /* == ignore */ pr->ps_xsig = signum; - SCHED_LOCK(s); + SCHED_LOCK(); proc_stop(p, 1); - SCHED_UNLOCK(s); + SCHED_UNLOCK(); break; } else if (prop & SA_IGNORE) { /* @@ -2064,7 +2061,6 @@ int single_thread_check_locked(struct proc *p, int deep) { struct process *pr = p->p_p; - int s; MUTEX_ASSERT_LOCKED(&pr->ps_mtx); @@ -2093,10 +2089,10 @@ single_thread_check_locked(struct proc *p, int deep) /* not exiting and don't need to unwind, so suspend */ mtx_leave(&pr->ps_mtx); - SCHED_LOCK(s); + SCHED_LOCK(); p->p_stat = SSTOP; mi_switch(); - SCHED_UNLOCK(s); + SCHED_UNLOCK(); mtx_enter(&pr->ps_mtx); } while (pr->ps_single != NULL); @@ -2129,7 +2125,7 @@ single_thread_set(struct proc *p, int flags) { struct process *pr = p->p_p; struct proc *q; - int error, s, mode = flags & SINGLE_MASK; + int error, mode = flags & SINGLE_MASK; KASSERT(curproc == p); @@ -2161,7 +2157,7 @@ single_thread_set(struct proc *p, int flags) TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) { if (q == p) continue; - SCHED_LOCK(s); + SCHED_LOCK(); atomic_setbits_int(&q->p_flag, P_SUSPSINGLE); switch (q->p_stat) { case SIDL: @@ -2194,7 +2190,7 @@ single_thread_set(struct proc *p, int flags) case SRUN: break; } - SCHED_UNLOCK(s); + SCHED_UNLOCK(); } /* count ourselfs out */ @@ -2235,7 +2231,6 @@ single_thread_clear(struct proc *p, int flag) { struct process *pr = p->p_p; struct proc *q; - int s; KASSERT(pr->ps_single == p); KASSERT(curproc == p); @@ -2254,7 +2249,7 @@ single_thread_clear(struct proc *p, int flag) * then clearing that either makes it runnable or puts * it back into some sleep queue */ - SCHED_LOCK(s); + 
SCHED_LOCK(); if (q->p_stat == SSTOP && (q->p_flag & flag) == 0) { if (q->p_wchan == NULL) setrunnable(q); @@ -2263,7 +2258,7 @@ single_thread_clear(struct proc *p, int flag) q->p_stat = SSLEEP; } } - SCHED_UNLOCK(s); + SCHED_UNLOCK(); } mtx_leave(&pr->ps_mtx); } diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c index 21ee0c6fb21..16de8ad5d69 100644 --- a/sys/kern/kern_synch.c +++ b/sys/kern/kern_synch.c @@ -1,4 +1,4 @@ -/* $OpenBSD: kern_synch.c,v 1.204 2024/05/22 09:24:11 claudio Exp $ */ +/* $OpenBSD: kern_synch.c,v 1.205 2024/06/03 12:48:25 claudio Exp $ */ /* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */ /* @@ -332,7 +332,6 @@ void sleep_setup(const volatile void *ident, int prio, const char *wmesg) { struct proc *p = curproc; - int s; #ifdef DIAGNOSTIC if (p->p_flag & P_CANTSLEEP) @@ -346,7 +345,7 @@ sleep_setup(const volatile void *ident, int prio, const char *wmesg) if (p->p_flag & P_WEXIT) CLR(prio, PCATCH); - SCHED_LOCK(s); + SCHED_LOCK(); TRACEPOINT(sched, sleep, NULL); @@ -360,14 +359,14 @@ sleep_setup(const volatile void *ident, int prio, const char *wmesg) atomic_setbits_int(&p->p_flag, P_SINTR); p->p_stat = SSLEEP; - SCHED_UNLOCK(s); + SCHED_UNLOCK(); } int sleep_finish(int timo, int do_sleep) { struct proc *p = curproc; - int s, catch, error = 0, error1 = 0; + int catch, error = 0, error1 = 0; catch = p->p_flag & P_SINTR; @@ -392,7 +391,7 @@ sleep_finish(int timo, int do_sleep) } } - SCHED_LOCK(s); + SCHED_LOCK(); /* * If the wakeup happens while going to sleep, p->p_wchan * will be NULL. 
In that case unwind immediately but still @@ -419,7 +418,7 @@ sleep_finish(int timo, int do_sleep) #endif p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri; - SCHED_UNLOCK(s); + SCHED_UNLOCK(); /* * Even though this belongs to the signal handling part of sleep, @@ -503,11 +502,10 @@ void endtsleep(void *arg) { struct proc *p = arg; - int s; - SCHED_LOCK(s); + SCHED_LOCK(); wakeup_proc(p, P_TIMEOUT); - SCHED_UNLOCK(s); + SCHED_UNLOCK(); } /* @@ -536,11 +534,10 @@ wakeup_n(const volatile void *ident, int n) struct slpque *qp, wakeq; struct proc *p; struct proc *pnext; - int s; TAILQ_INIT(&wakeq); - SCHED_LOCK(s); + SCHED_LOCK(); qp = &slpque[LOOKUP(ident)]; for (p = TAILQ_FIRST(qp); p != NULL && n != 0; p = pnext) { pnext = TAILQ_NEXT(p, p_runq); @@ -564,7 +561,7 @@ wakeup_n(const volatile void *ident, int n) if (p->p_stat == SSLEEP) setrunnable(p); } - SCHED_UNLOCK(s); + SCHED_UNLOCK(); } /* @@ -581,7 +578,6 @@ sys_sched_yield(struct proc *p, void *v, register_t *retval) { struct proc *q; uint8_t newprio; - int s; /* * If one of the threads of a multi-threaded process called @@ -594,11 +590,11 @@ sys_sched_yield(struct proc *p, void *v, register_t *retval) newprio = max(newprio, q->p_runpri); mtx_leave(&p->p_p->ps_mtx); - SCHED_LOCK(s); + SCHED_LOCK(); setrunqueue(p->p_cpu, p, newprio); p->p_ru.ru_nvcsw++; mi_switch(); - SCHED_UNLOCK(s); + SCHED_UNLOCK(); return (0); } diff --git a/sys/kern/sched_bsd.c b/sys/kern/sched_bsd.c index 1cfd32746f0..54ce3173e4e 100644 --- a/sys/kern/sched_bsd.c +++ b/sys/kern/sched_bsd.c @@ -1,4 +1,4 @@ -/* $OpenBSD: sched_bsd.c,v 1.92 2024/05/29 18:55:45 claudio Exp $ */ +/* $OpenBSD: sched_bsd.c,v 1.93 2024/06/03 12:48:25 claudio Exp $ */ /* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */ /*- @@ -230,7 +230,6 @@ schedcpu(void *unused) static struct timeout to = TIMEOUT_INITIALIZER(schedcpu, NULL); fixpt_t loadfac = loadfactor(averunnable.ldavg[0]); struct proc *p; - int s; unsigned int newcpu; LIST_FOREACH(p, 
&allproc, p_list) { @@ -253,7 +252,7 @@ schedcpu(void *unused) */ if (p->p_slptime > 1) continue; - SCHED_LOCK(s); + SCHED_LOCK(); /* * p_pctcpu is only for diagnostic tools such as ps. */ @@ -275,7 +274,7 @@ schedcpu(void *unused) remrunqueue(p); setrunqueue(p->p_cpu, p, p->p_usrpri); } - SCHED_UNLOCK(s); + SCHED_UNLOCK(); } wakeup(&lbolt); timeout_add_sec(&to, 1); @@ -313,13 +312,12 @@ void yield(void) { struct proc *p = curproc; - int s; - SCHED_LOCK(s); + SCHED_LOCK(); setrunqueue(p->p_cpu, p, p->p_usrpri); p->p_ru.ru_nvcsw++; mi_switch(); - SCHED_UNLOCK(s); + SCHED_UNLOCK(); } /* @@ -332,13 +330,12 @@ void preempt(void) { struct proc *p = curproc; - int s; - SCHED_LOCK(s); + SCHED_LOCK(); setrunqueue(p->p_cpu, p, p->p_usrpri); p->p_ru.ru_nivcsw++; mi_switch(); - SCHED_UNLOCK(s); + SCHED_UNLOCK(); } void @@ -349,7 +346,7 @@ mi_switch(void) struct proc *nextproc; struct process *pr = p->p_p; struct timespec ts; - int oldipl, s; + int oldipl; #ifdef MULTIPROCESSOR int hold_count; #endif @@ -427,7 +424,7 @@ mi_switch(void) /* Restore proc's IPL. 
*/ MUTEX_OLDIPL(&sched_lock) = oldipl; - SCHED_UNLOCK(s); + SCHED_UNLOCK(); SCHED_ASSERT_UNLOCKED(); @@ -463,7 +460,7 @@ mi_switch(void) if (hold_count) __mp_acquire_count(&kernel_lock, hold_count); #endif - SCHED_LOCK(s); + SCHED_LOCK(); } /* @@ -551,15 +548,14 @@ schedclock(struct proc *p) struct cpu_info *ci = curcpu(); struct schedstate_percpu *spc = &ci->ci_schedstate; uint32_t newcpu; - int s; if (p == spc->spc_idleproc || spc->spc_spinning) return; - SCHED_LOCK(s); + SCHED_LOCK(); newcpu = ESTCPULIM(p->p_estcpu + 1); setpriority(p, newcpu, p->p_p->ps_nice); - SCHED_UNLOCK(s); + SCHED_UNLOCK(); } void (*cpu_setperf)(int); diff --git a/sys/kern/sys_process.c b/sys/kern/sys_process.c index a310b123da0..3e2b4586f59 100644 --- a/sys/kern/sys_process.c +++ b/sys/kern/sys_process.c @@ -1,4 +1,4 @@ -/* $OpenBSD: sys_process.c,v 1.97 2024/04/02 08:27:22 deraadt Exp $ */ +/* $OpenBSD: sys_process.c,v 1.98 2024/06/03 12:48:25 claudio Exp $ */ /* $NetBSD: sys_process.c,v 1.55 1996/05/15 06:17:47 tls Exp $ */ /*- @@ -283,7 +283,6 @@ ptrace_ctrl(struct proc *p, int req, pid_t pid, caddr_t addr, int data) struct proc *t; /* target thread */ struct process *tr; /* target process */ int error = 0; - int s; switch (req) { case PT_TRACE_ME: @@ -492,10 +491,10 @@ ptrace_ctrl(struct proc *p, int req, pid_t pid, caddr_t addr, int data) /* Finally, deliver the requested signal (or none). 
*/ if (t->p_stat == SSTOP) { tr->ps_xsig = data; - SCHED_LOCK(s); + SCHED_LOCK(); unsleep(t); setrunnable(t); - SCHED_UNLOCK(s); + SCHED_UNLOCK(); } else { if (data != 0) psignal(t, data); diff --git a/sys/sys/sched.h b/sys/sys/sched.h index 593bd3e60ae..dcc9b636679 100644 --- a/sys/sys/sched.h +++ b/sys/sys/sched.h @@ -1,4 +1,4 @@ -/* $OpenBSD: sched.h,v 1.71 2024/05/29 18:55:45 claudio Exp $ */ +/* $OpenBSD: sched.h,v 1.72 2024/06/03 12:48:25 claudio Exp $ */ /* $NetBSD: sched.h,v 1.2 1999/02/28 18:14:58 ross Exp $ */ /*- @@ -201,28 +201,12 @@ void remrunqueue(struct proc *); extern struct mutex sched_lock; -#define SCHED_ASSERT_LOCKED() \ -do { \ - MUTEX_ASSERT_LOCKED(&sched_lock); \ -} while (0) -#define SCHED_ASSERT_UNLOCKED() \ -do { \ - MUTEX_ASSERT_UNLOCKED(&sched_lock); \ -} while (0) +#define SCHED_ASSERT_LOCKED() MUTEX_ASSERT_LOCKED(&sched_lock) +#define SCHED_ASSERT_UNLOCKED() MUTEX_ASSERT_UNLOCKED(&sched_lock) #define SCHED_LOCK_INIT() mtx_init(&sched_lock, IPL_SCHED) - -#define SCHED_LOCK(s) \ -do { \ - (s) = 0; /* XXX cleanup useless argument */ \ - mtx_enter(&sched_lock); \ -} while (/* CONSTCOND */ 0) - -#define SCHED_UNLOCK(s) \ -do { \ - (void)s; /* XXX cleanup useless argument */ \ - mtx_leave(&sched_lock); \ -} while (/* CONSTCOND */ 0) +#define SCHED_LOCK() mtx_enter(&sched_lock) +#define SCHED_UNLOCK() mtx_leave(&sched_lock) #endif /* _KERNEL */ #endif /* _SYS_SCHED_H_ */