-/* $OpenBSD: kern_clockintr.c,v 1.39 2023/09/08 22:14:57 cheloha Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.40 2023/09/08 22:23:30 cheloha Exp $ */
/*
* Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
uint64_t clockintr_advance_random(struct clockintr *, uint64_t, uint32_t);
void clockintr_hardclock(struct clockintr *, void *);
void clockintr_schedule(struct clockintr *, uint64_t);
-void clockintr_schedule_locked(struct clockintr *, uint64_t);
void clockintr_statclock(struct clockintr *, void *);
void clockqueue_intrclock_install(struct clockintr_queue *,
const struct intrclock *);
uint64_t clockqueue_next(const struct clockintr_queue *);
void clockqueue_pend_delete(struct clockintr_queue *, struct clockintr *);
+void clockqueue_pend_insert(struct clockintr_queue *, struct clockintr *,
+ uint64_t);
void clockqueue_reset_intrclock(struct clockintr_queue *);
uint64_t nsec_advance(uint64_t *, uint64_t, uint64_t);
}
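		/*
		 * The handler ran against the shadow copy.  If it left
		 * the shadow pending, carry the shadow's expiration back
		 * to the real clockintr and requeue it.
		 */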
if (ISSET(cq->cq_shadow.cl_flags, CLST_SHADOW_PENDING)) {
CLR(cq->cq_shadow.cl_flags, CLST_SHADOW_PENDING);
- clockintr_schedule_locked(cl,
+ clockqueue_pend_insert(cq, cl,
cq->cq_shadow.cl_expiration);
}
run++;
count = nsec_advance(&expiration, period, nsecuptime());
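	/*
	 * Reschedule by delete-then-insert: take cl off the pending
	 * queue if it is already on it, then insert it at its new
	 * expiration so that cq_pend stays sorted.
	 */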
if (ISSET(cl->cl_flags, CLST_PENDING))
clockqueue_pend_delete(cq, cl);
- clockintr_schedule_locked(cl, expiration);
+ clockqueue_pend_insert(cq, cl, expiration);
if (ISSET(cq->cq_flags, CQ_INTRCLOCK)) {
if (cl == TAILQ_FIRST(&cq->cq_pend)) {
if (cq == &curcpu()->ci_queue)
mtx_enter(&cq->cq_mtx);
if (ISSET(cl->cl_flags, CLST_PENDING))
clockqueue_pend_delete(cq, cl);
- clockintr_schedule_locked(cl, expiration);
+ clockqueue_pend_insert(cq, cl, expiration);
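	/*
	 * If cl is now at the head of the pending queue on this CPU,
	 * rearm the interrupt clock for the new, earlier deadline.
	 */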
if (ISSET(cq->cq_flags, CQ_INTRCLOCK)) {
if (cl == TAILQ_FIRST(&cq->cq_pend)) {
if (cq == &curcpu()->ci_queue)
				clockqueue_reset_intrclock(cq);
		}
	}
	mtx_leave(&cq->cq_mtx);
}
-void
-clockintr_schedule_locked(struct clockintr *cl, uint64_t expiration)
-{
- struct clockintr *elm;
- struct clockintr_queue *cq = cl->cl_queue;
-
- MUTEX_ASSERT_LOCKED(&cq->cq_mtx);
- KASSERT(!ISSET(cl->cl_flags, CLST_PENDING));
-
- cl->cl_expiration = expiration;
- TAILQ_FOREACH(elm, &cq->cq_pend, cl_plink) {
- if (cl->cl_expiration < elm->cl_expiration)
- break;
- }
- if (elm == NULL)
- TAILQ_INSERT_TAIL(&cq->cq_pend, cl, cl_plink);
- else
- TAILQ_INSERT_BEFORE(elm, cl, cl_plink);
- SET(cl->cl_flags, CLST_PENDING);
-}
-
void
clockintr_stagger(struct clockintr *cl, uint64_t period, uint32_t n,
uint32_t count)
CLR(cl->cl_flags, CLST_PENDING);
}
+
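+/*
+ * Set cl's expiration and insert it into cq's pending queue, which
+ * is kept sorted by ascending expiration.  Entries with equal
+ * expirations keep FIFO order.
+ */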
+void
+clockqueue_pend_insert(struct clockintr_queue *cq, struct clockintr *cl,
+ uint64_t expiration)
+{
+ struct clockintr *elm;
+
+ MUTEX_ASSERT_LOCKED(&cq->cq_mtx);
+ KASSERT(!ISSET(cl->cl_flags, CLST_PENDING));
+
+ cl->cl_expiration = expiration;
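+	/* Insert cl in front of the first entry that expires later. */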
+ TAILQ_FOREACH(elm, &cq->cq_pend, cl_plink) {
+ if (cl->cl_expiration < elm->cl_expiration)
+ break;
+ }
+ if (elm == NULL)
+ TAILQ_INSERT_TAIL(&cq->cq_pend, cl, cl_plink);
+ else
+ TAILQ_INSERT_BEFORE(elm, cl, cl_plink);
+ SET(cl->cl_flags, CLST_PENDING);
+}
+
void
clockqueue_reset_intrclock(struct clockintr_queue *cq)
{