-/* $OpenBSD: kern_fork.c,v 1.258 2024/05/20 10:32:20 claudio Exp $ */
+/* $OpenBSD: kern_fork.c,v 1.259 2024/05/29 18:55:45 claudio Exp $ */
/* $NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $ */
/*
struct proc *p = curproc;
SCHED_ASSERT_LOCKED();
-
clear_resched(curcpu());
-
-#if defined(MULTIPROCESSOR)
- __mp_unlock(&sched_lock);
-#endif
+ mtx_leave(&sched_lock);
spl0();
SCHED_ASSERT_UNLOCKED();
-/* $OpenBSD: kern_lock.c,v 1.73 2024/03/26 18:18:30 bluhm Exp $ */
+/* $OpenBSD: kern_lock.c,v 1.74 2024/05/29 18:55:45 claudio Exp $ */
/*
* Copyright (c) 2017 Visa Hankala
if (mpl == &kernel_lock)
mpl->mpl_lock_obj.lo_flags = LO_WITNESS | LO_INITIALIZED |
LO_SLEEPABLE | (LO_CLASS_KERNEL_LOCK << LO_CLASSSHIFT);
- else if (mpl == &sched_lock)
- mpl->mpl_lock_obj.lo_flags = LO_WITNESS | LO_INITIALIZED |
- LO_RECURSABLE | (LO_CLASS_SCHED_LOCK << LO_CLASSSHIFT);
WITNESS_INIT(&mpl->mpl_lock_obj, type);
#endif
}
-/* $OpenBSD: sched_bsd.c,v 1.91 2024/03/30 13:33:20 mpi Exp $ */
+/* $OpenBSD: sched_bsd.c,v 1.92 2024/05/29 18:55:45 claudio Exp $ */
/* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
/*-
uint64_t roundrobin_period; /* [I] roundrobin period (ns) */
int lbolt; /* once a second sleep address */
-#ifdef MULTIPROCESSOR
-struct __mp_lock sched_lock;
-#endif
+struct mutex sched_lock;
void update_loadavg(void *);
void schedcpu(void *);
struct proc *nextproc;
struct process *pr = p->p_p;
struct timespec ts;
+ int oldipl, s;
#ifdef MULTIPROCESSOR
int hold_count;
- int sched_count;
#endif
- assertwaitok();
KASSERT(p->p_stat != SONPROC);
SCHED_ASSERT_LOCKED();
/*
* Release the kernel_lock, as we are about to yield the CPU.
*/
- sched_count = __mp_release_all_but_one(&sched_lock);
if (_kernel_lock_held())
hold_count = __mp_release_all(&kernel_lock);
else
nextproc = sched_chooseproc();
+	/* Preserve the old IPL so it can be restored after the context switch. */
+ oldipl = MUTEX_OLDIPL(&sched_lock);
+
if (p != nextproc) {
uvmexp.swtch++;
TRACEPOINT(sched, off__cpu, nextproc->p_tid + THREAD_PID_OFFSET,
SCHED_ASSERT_LOCKED();
- /*
- * To preserve lock ordering, we need to release the sched lock
- * and grab it after we grab the big lock.
- * In the future, when the sched lock isn't recursive, we'll
- * just release it here.
- */
-#ifdef MULTIPROCESSOR
- __mp_unlock(&sched_lock);
-#endif
+ /* Restore proc's IPL. */
+ MUTEX_OLDIPL(&sched_lock) = oldipl;
+ SCHED_UNLOCK(s);
SCHED_ASSERT_UNLOCKED();
+ assertwaitok();
smr_idle();
/*
*/
if (hold_count)
__mp_acquire_count(&kernel_lock, hold_count);
- __mp_acquire_count(&sched_lock, sched_count + 1);
#endif
+ SCHED_LOCK(s);
}
/*
-/* $OpenBSD: sched.h,v 1.70 2024/01/24 19:23:38 cheloha Exp $ */
+/* $OpenBSD: sched.h,v 1.71 2024/05/29 18:55:45 claudio Exp $ */
/* $NetBSD: sched.h,v 1.2 1999/02/28 18:14:58 ross Exp $ */
/*-
func(); \
} while (0)
-#if defined(MULTIPROCESSOR)
-#include <sys/lock.h>
-
-/*
- * XXX Instead of using struct lock for the kernel lock and thus requiring us
- * XXX to implement simplelocks, causing all sorts of fine-grained locks all
- * XXX over our tree to be activated, the sched_lock is a different kind of
- * XXX lock to avoid introducing locking protocol bugs.
- */
-extern struct __mp_lock sched_lock;
+extern struct mutex sched_lock;
#define SCHED_ASSERT_LOCKED() \
do { \
- splassert(IPL_SCHED); \
- KASSERT(__mp_lock_held(&sched_lock, curcpu())); \
+ MUTEX_ASSERT_LOCKED(&sched_lock); \
} while (0)
#define SCHED_ASSERT_UNLOCKED() \
do { \
- KASSERT(__mp_lock_held(&sched_lock, curcpu()) == 0); \
+ MUTEX_ASSERT_UNLOCKED(&sched_lock); \
} while (0)
-#define SCHED_LOCK_INIT() __mp_lock_init(&sched_lock)
+#define SCHED_LOCK_INIT() mtx_init(&sched_lock, IPL_SCHED)
#define SCHED_LOCK(s) \
do { \
- s = splsched(); \
- __mp_lock(&sched_lock); \
+ (s) = 0; /* XXX cleanup useless argument */ \
+ mtx_enter(&sched_lock); \
} while (/* CONSTCOND */ 0)
#define SCHED_UNLOCK(s) \
do { \
- __mp_unlock(&sched_lock); \
- splx(s); \
+ (void)s; /* XXX cleanup useless argument */ \
+ mtx_leave(&sched_lock); \
} while (/* CONSTCOND */ 0)
-#else /* ! MULTIPROCESSOR */
-
-#define SCHED_ASSERT_LOCKED() splassert(IPL_SCHED);
-#define SCHED_ASSERT_UNLOCKED() /* nothing */
-
-#define SCHED_LOCK_INIT() /* nothing */
-
-#define SCHED_LOCK(s) s = splsched()
-#define SCHED_UNLOCK(s) splx(s)
-
-#endif /* MULTIPROCESSOR */
-
#endif /* _KERNEL */
#endif /* _SYS_SCHED_H_ */