clockintr: rename "clockintr_schedule_locked" to "clockqueue_pend_insert"
author: cheloha <cheloha@openbsd.org>
Fri, 8 Sep 2023 22:23:30 +0000 (22:23 +0000)
committer: cheloha <cheloha@openbsd.org>
Fri, 8 Sep 2023 22:23:30 +0000 (22:23 +0000)
The function clockintr_schedule_locked() is poorly named.  "Schedule" is
a broad word that connotes too much and "clockintr" implies that we are
mainly operating on the clockintr itself, which is untrue.

clockqueue_pend_insert() is a better name: it is clearer that we are
inserting an element into cq_pend.

sys/kern/kern_clockintr.c

index ab58c21..d22ad2b 100644 (file)
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_clockintr.c,v 1.39 2023/09/08 22:14:57 cheloha Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.40 2023/09/08 22:23:30 cheloha Exp $ */
 /*
  * Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
  * Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
@@ -45,12 +45,13 @@ uint32_t statclock_mask;            /* [I] set of allowed offsets */
 uint64_t clockintr_advance_random(struct clockintr *, uint64_t, uint32_t);
 void clockintr_hardclock(struct clockintr *, void *);
 void clockintr_schedule(struct clockintr *, uint64_t);
-void clockintr_schedule_locked(struct clockintr *, uint64_t);
 void clockintr_statclock(struct clockintr *, void *);
 void clockqueue_intrclock_install(struct clockintr_queue *,
     const struct intrclock *);
 uint64_t clockqueue_next(const struct clockintr_queue *);
 void clockqueue_pend_delete(struct clockintr_queue *, struct clockintr *);
+void clockqueue_pend_insert(struct clockintr_queue *, struct clockintr *,
+    uint64_t);
 void clockqueue_reset_intrclock(struct clockintr_queue *);
 uint64_t nsec_advance(uint64_t *, uint64_t, uint64_t);
 
@@ -270,7 +271,7 @@ clockintr_dispatch(void *frame)
                }
                if (ISSET(cq->cq_shadow.cl_flags, CLST_SHADOW_PENDING)) {
                        CLR(cq->cq_shadow.cl_flags, CLST_SHADOW_PENDING);
-                       clockintr_schedule_locked(cl,
+                       clockqueue_pend_insert(cq, cl,
                            cq->cq_shadow.cl_expiration);
                }
                run++;
@@ -331,7 +332,7 @@ clockintr_advance(struct clockintr *cl, uint64_t period)
        count = nsec_advance(&expiration, period, nsecuptime());
        if (ISSET(cl->cl_flags, CLST_PENDING))
                clockqueue_pend_delete(cq, cl);
-       clockintr_schedule_locked(cl, expiration);
+       clockqueue_pend_insert(cq, cl, expiration);
        if (ISSET(cq->cq_flags, CQ_INTRCLOCK)) {
                if (cl == TAILQ_FIRST(&cq->cq_pend)) {
                        if (cq == &curcpu()->ci_queue)
@@ -423,7 +424,7 @@ clockintr_schedule(struct clockintr *cl, uint64_t expiration)
        mtx_enter(&cq->cq_mtx);
        if (ISSET(cl->cl_flags, CLST_PENDING))
                clockqueue_pend_delete(cq, cl);
-       clockintr_schedule_locked(cl, expiration);
+       clockqueue_pend_insert(cq, cl, expiration);
        if (ISSET(cq->cq_flags, CQ_INTRCLOCK)) {
                if (cl == TAILQ_FIRST(&cq->cq_pend)) {
                        if (cq == &curcpu()->ci_queue)
@@ -435,27 +436,6 @@ clockintr_schedule(struct clockintr *cl, uint64_t expiration)
        mtx_leave(&cq->cq_mtx);
 }
 
-void
-clockintr_schedule_locked(struct clockintr *cl, uint64_t expiration)
-{
-       struct clockintr *elm;
-       struct clockintr_queue *cq = cl->cl_queue;
-
-       MUTEX_ASSERT_LOCKED(&cq->cq_mtx);
-       KASSERT(!ISSET(cl->cl_flags, CLST_PENDING));
-
-       cl->cl_expiration = expiration;
-       TAILQ_FOREACH(elm, &cq->cq_pend, cl_plink) {
-               if (cl->cl_expiration < elm->cl_expiration)
-                       break;
-       }
-       if (elm == NULL)
-               TAILQ_INSERT_TAIL(&cq->cq_pend, cl, cl_plink);
-       else
-               TAILQ_INSERT_BEFORE(elm, cl, cl_plink);
-       SET(cl->cl_flags, CLST_PENDING);
-}
-
 void
 clockintr_stagger(struct clockintr *cl, uint64_t period, uint32_t n,
     uint32_t count)
@@ -539,6 +519,28 @@ clockqueue_pend_delete(struct clockintr_queue *cq, struct clockintr *cl)
        CLR(cl->cl_flags, CLST_PENDING);
 }
 
+
+void
+clockqueue_pend_insert(struct clockintr_queue *cq, struct clockintr *cl,
+    uint64_t expiration)
+{
+       struct clockintr *elm;
+
+       MUTEX_ASSERT_LOCKED(&cq->cq_mtx);
+       KASSERT(!ISSET(cl->cl_flags, CLST_PENDING));
+
+       cl->cl_expiration = expiration;
+       TAILQ_FOREACH(elm, &cq->cq_pend, cl_plink) {
+               if (cl->cl_expiration < elm->cl_expiration)
+                       break;
+       }
+       if (elm == NULL)
+               TAILQ_INSERT_TAIL(&cq->cq_pend, cl, cl_plink);
+       else
+               TAILQ_INSERT_BEFORE(elm, cl, cl_plink);
+       SET(cl->cl_flags, CLST_PENDING);
+}
+
 void
 clockqueue_reset_intrclock(struct clockintr_queue *cq)
 {