Convert SCHED_LOCK from a recursive kernel lock to a mutex.
author     claudio <claudio@openbsd.org>  Wed, 29 May 2024 18:55:45 +0000 (18:55 +0000)
committer  claudio <claudio@openbsd.org>  Wed, 29 May 2024 18:55:45 +0000 (18:55 +0000)
Over the last weeks the last SCHED_LOCK recursion was removed, so this
conversion is now possible; it will also allow splitting up the SCHED_LOCK
in an upcoming step.
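
A minimal illustration (not part of this diff; foo() is a hypothetical
callee): the old __mp_lock kept a per-CPU recursion count, so a callee could
take SCHED_LOCK() again while the CPU already held it.  A struct mutex must
not be re-entered by its owner (it would spin against itself, or trip the
DIAGNOSTIC owner check), which is why every such nested acquisition had to
be removed before this conversion:

	SCHED_LOCK(s);
	foo();			/* must no longer call SCHED_LOCK() itself */
	SCHED_UNLOCK(s);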

Instead of providing separate MP and SP versions of SCHED_LOCK, the mutex
implementation is now used unconditionally.  This makes the local 's'
argument of SCHED_LOCK()/SCHED_UNLOCK() unused (the spl level is now tracked
by the mutex itself), but it is kept for now to keep this diff minimal.
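
For illustration only (the caller below is hypothetical): callers keep
compiling unchanged, only the macro bodies differ, and the spl handling now
lives inside the mutex:

	int s;

	SCHED_LOCK(s);		/* (s) = 0; mtx_enter() raises to IPL_SCHED */
	setrunnable(p);		/* some operation needing the sched lock */
	SCHED_UNLOCK(s);	/* ignores s; mtx_leave() restores the saved IPL */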

Tested by many.
OK jca@ mpi@

sys/kern/kern_fork.c
sys/kern/kern_lock.c
sys/kern/sched_bsd.c
sys/sys/sched.h

diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 1cfb84d..3ed90a6 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: kern_fork.c,v 1.258 2024/05/20 10:32:20 claudio Exp $ */
+/*     $OpenBSD: kern_fork.c,v 1.259 2024/05/29 18:55:45 claudio Exp $ */
 /*     $NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $  */
 
 /*
@@ -688,12 +688,8 @@ proc_trampoline_mi(void)
        struct proc *p = curproc;
 
        SCHED_ASSERT_LOCKED();
-
        clear_resched(curcpu());
-
-#if defined(MULTIPROCESSOR)
-       __mp_unlock(&sched_lock);
-#endif
+       mtx_leave(&sched_lock);
        spl0();
 
        SCHED_ASSERT_UNLOCKED();
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 19f441b..fb38210 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: kern_lock.c,v 1.73 2024/03/26 18:18:30 bluhm Exp $    */
+/*     $OpenBSD: kern_lock.c,v 1.74 2024/05/29 18:55:45 claudio Exp $  */
 
 /*
  * Copyright (c) 2017 Visa Hankala
@@ -97,9 +97,6 @@ ___mp_lock_init(struct __mp_lock *mpl, const struct lock_type *type)
        if (mpl == &kernel_lock)
                mpl->mpl_lock_obj.lo_flags = LO_WITNESS | LO_INITIALIZED |
                    LO_SLEEPABLE | (LO_CLASS_KERNEL_LOCK << LO_CLASSSHIFT);
-       else if (mpl == &sched_lock)
-               mpl->mpl_lock_obj.lo_flags = LO_WITNESS | LO_INITIALIZED |
-                   LO_RECURSABLE | (LO_CLASS_SCHED_LOCK << LO_CLASSSHIFT);
        WITNESS_INIT(&mpl->mpl_lock_obj, type);
 #endif
 }
diff --git a/sys/kern/sched_bsd.c b/sys/kern/sched_bsd.c
index 25b221c..1cfd327 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: sched_bsd.c,v 1.91 2024/03/30 13:33:20 mpi Exp $      */
+/*     $OpenBSD: sched_bsd.c,v 1.92 2024/05/29 18:55:45 claudio Exp $  */
 /*     $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
 
 /*-
@@ -57,9 +57,7 @@
 uint64_t roundrobin_period;    /* [I] roundrobin period (ns) */
 int    lbolt;                  /* once a second sleep address */
 
-#ifdef MULTIPROCESSOR
-struct __mp_lock sched_lock;
-#endif
+struct mutex sched_lock;
 
 void                   update_loadavg(void *);
 void                   schedcpu(void *);
@@ -351,12 +349,11 @@ mi_switch(void)
        struct proc *nextproc;
        struct process *pr = p->p_p;
        struct timespec ts;
+       int oldipl, s;
 #ifdef MULTIPROCESSOR
        int hold_count;
-       int sched_count;
 #endif
 
-       assertwaitok();
        KASSERT(p->p_stat != SONPROC);
 
        SCHED_ASSERT_LOCKED();
@@ -365,7 +362,6 @@ mi_switch(void)
        /*
         * Release the kernel_lock, as we are about to yield the CPU.
         */
-       sched_count = __mp_release_all_but_one(&sched_lock);
        if (_kernel_lock_held())
                hold_count = __mp_release_all(&kernel_lock);
        else
@@ -411,6 +407,9 @@ mi_switch(void)
 
        nextproc = sched_chooseproc();
 
+       /* preserve old IPL level so we can switch back to that */
+       oldipl = MUTEX_OLDIPL(&sched_lock);
+
        if (p != nextproc) {
                uvmexp.swtch++;
                TRACEPOINT(sched, off__cpu, nextproc->p_tid + THREAD_PID_OFFSET,
@@ -426,18 +425,13 @@ mi_switch(void)
 
        SCHED_ASSERT_LOCKED();
 
-       /*
-        * To preserve lock ordering, we need to release the sched lock
-        * and grab it after we grab the big lock.
-        * In the future, when the sched lock isn't recursive, we'll
-        * just release it here.
-        */
-#ifdef MULTIPROCESSOR
-       __mp_unlock(&sched_lock);
-#endif
+       /* Restore proc's IPL. */
+       MUTEX_OLDIPL(&sched_lock) = oldipl;
+       SCHED_UNLOCK(s);
 
        SCHED_ASSERT_UNLOCKED();
 
+       assertwaitok();
        smr_idle();
 
        /*
@@ -468,8 +462,8 @@ mi_switch(void)
         */
        if (hold_count)
                __mp_acquire_count(&kernel_lock, hold_count);
-       __mp_acquire_count(&sched_lock, sched_count + 1);
 #endif
+       SCHED_LOCK(s);
 }
 
 /*
diff --git a/sys/sys/sched.h b/sys/sys/sched.h
index ac6dad2..593bd3e 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: sched.h,v 1.70 2024/01/24 19:23:38 cheloha Exp $      */
+/*     $OpenBSD: sched.h,v 1.71 2024/05/29 18:55:45 claudio Exp $      */
 /* $NetBSD: sched.h,v 1.2 1999/02/28 18:14:58 ross Exp $ */
 
 /*-
@@ -199,52 +199,30 @@ void remrunqueue(struct proc *);
                func();                                                 \
 } while (0)
 
-#if defined(MULTIPROCESSOR)
-#include <sys/lock.h>
-
-/*
- * XXX Instead of using struct lock for the kernel lock and thus requiring us
- * XXX to implement simplelocks, causing all sorts of fine-grained locks all
- * XXX over our tree to be activated, the sched_lock is a different kind of
- * XXX lock to avoid introducing locking protocol bugs.
- */
-extern struct __mp_lock sched_lock;
+extern struct mutex sched_lock;
 
 #define        SCHED_ASSERT_LOCKED()                                           \
 do {                                                                   \
-       splassert(IPL_SCHED);                                           \
-       KASSERT(__mp_lock_held(&sched_lock, curcpu()));                 \
+       MUTEX_ASSERT_LOCKED(&sched_lock);                               \
 } while (0)
 #define        SCHED_ASSERT_UNLOCKED()                                         \
 do {                                                                   \
-       KASSERT(__mp_lock_held(&sched_lock, curcpu()) == 0);            \
+       MUTEX_ASSERT_UNLOCKED(&sched_lock);                             \
 } while (0)
 
-#define        SCHED_LOCK_INIT()       __mp_lock_init(&sched_lock)
+#define        SCHED_LOCK_INIT()       mtx_init(&sched_lock, IPL_SCHED)
 
 #define        SCHED_LOCK(s)                                                   \
 do {                                                                   \
-       s = splsched();                                                 \
-       __mp_lock(&sched_lock);                                         \
+       (s) = 0; /* XXX cleanup useless argument */                     \
+       mtx_enter(&sched_lock);                                         \
 } while (/* CONSTCOND */ 0)
 
 #define        SCHED_UNLOCK(s)                                                 \
 do {                                                                   \
-       __mp_unlock(&sched_lock);                                       \
-       splx(s);                                                        \
+       (void)s; /* XXX cleanup useless argument */                     \
+       mtx_leave(&sched_lock);                                         \
 } while (/* CONSTCOND */ 0)
 
-#else /* ! MULTIPROCESSOR */
-
-#define        SCHED_ASSERT_LOCKED()           splassert(IPL_SCHED);
-#define        SCHED_ASSERT_UNLOCKED()         /* nothing */
-
-#define        SCHED_LOCK_INIT()               /* nothing */
-
-#define        SCHED_LOCK(s)                   s = splsched()
-#define        SCHED_UNLOCK(s)                 splx(s)
-
-#endif /* MULTIPROCESSOR */
-
 #endif /* _KERNEL */
 #endif /* _SYS_SCHED_H_ */