clockintr_advance: tweak logic to eliminate early-return
author: cheloha <cheloha@openbsd.org>
Sat, 9 Sep 2023 17:07:59 +0000 (17:07 +0000)
committer: cheloha <cheloha@openbsd.org>
Sat, 9 Sep 2023 17:07:59 +0000 (17:07 +0000)
With the switch to clockintr_schedule_locked(), clockintr_advance() is
now much shorter and the early-return from the non-mutex path doesn't
make the function any easier to read.  Move the mutex path into the else
branch and always return 'count' at the end of the function.

sys/kern/kern_clockintr.c

index e6d6565..d941814 100644 (file)
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_clockintr.c,v 1.44 2023/09/09 16:59:01 cheloha Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.45 2023/09/09 17:07:59 cheloha Exp $ */
 /*
  * Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
  * Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
@@ -326,14 +326,13 @@ clockintr_advance(struct clockintr *cl, uint64_t period)
        if (cl == &cq->cq_shadow) {
                count = nsec_advance(&cl->cl_expiration, period, cq->cq_uptime);
                SET(cl->cl_flags, CLST_SHADOW_PENDING);
-               return count;
+       } else {
+               mtx_enter(&cq->cq_mtx);
+               expiration = cl->cl_expiration;
+               count = nsec_advance(&expiration, period, nsecuptime());
+               clockintr_schedule_locked(cl, expiration);
+               mtx_leave(&cq->cq_mtx);
        }
-
-       mtx_enter(&cq->cq_mtx);
-       expiration = cl->cl_expiration;
-       count = nsec_advance(&expiration, period, nsecuptime());
-       clockintr_schedule_locked(cl, expiration);
-       mtx_leave(&cq->cq_mtx);
        return count;
 }