-/* $OpenBSD: kern_clockintr.c,v 1.5 2023/03/14 00:11:58 cheloha Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.6 2023/04/03 00:20:24 cheloha Exp $ */
/*
* Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
uint64_t clockintr_nsecuptime(const struct clockintr *);
void clockintr_schedclock(struct clockintr *, void *);
void clockintr_schedule(struct clockintr *, uint64_t);
+void clockintr_schedule_locked(struct clockintr *, uint64_t);
void clockintr_statclock(struct clockintr *, void *);
void clockintr_statvar_init(int, uint32_t *, uint32_t *, uint32_t *);
uint64_t clockqueue_next(const struct clockintr_queue *);
KASSERT(ISSET(clockintr_flags, CL_INIT));
if (!ISSET(cq->cq_flags, CL_CPU_INIT)) {
+	/*
+	 * First-time setup of this CPU's queue: create cq_mtx before
+	 * the lists are initialized so every later mtx_enter() sees a
+	 * valid mutex.  IPL_CLOCK presumably matches the IPL at which
+	 * clockintr_dispatch() runs (see its splassert) — confirm.
+	 */
+	mtx_init(&cq->cq_mtx, IPL_CLOCK);
TAILQ_INIT(&cq->cq_est);
TAILQ_INIT(&cq->cq_pend);
cq->cq_hardclock = clockintr_establish(cq, clockintr_hardclock);
splassert(IPL_CLOCK);
KASSERT(ISSET(cq->cq_flags, CL_CPU_INIT));
+	/* All cq_pend / cl_flags manipulation below is under cq_mtx. */
+	mtx_enter(&cq->cq_mtx);
+
/*
* If nothing is scheduled or we arrived too early, we have
* nothing to do.
TAILQ_REMOVE(&cq->cq_pend, cl, cl_plink);
CLR(cl->cl_flags, CLST_PENDING);
cq->cq_running = cl;
+	/*
+	 * Drop the mutex across the callback: cl_func may call
+	 * clockintr_schedule()/clockintr_advance(), which re-take
+	 * cq_mtx.  cq_running marks the in-flight entry while the
+	 * lock is released.
+	 */
+	mtx_leave(&cq->cq_mtx);
cl->cl_func(cl, frame);
+	mtx_enter(&cq->cq_mtx);
cq->cq_running = NULL;
run++;
}
membar_producer();
cq->cq_gen = MAX(1, ogen + 1);
+	mtx_leave(&cq->cq_mtx);
+
+	/* cq_dispatch is touched outside the mutex; presumably it is
+	 * per-CPU dispatch-recursion state, not shared — confirm. */
if (cq->cq_dispatch != 1)
panic("%s: unexpected value: %u", __func__, cq->cq_dispatch);
cq->cq_dispatch = 0;
+/*
+ * Push cl's expiration forward by whole multiples of `period` until it
+ * lies in the future relative to cq_uptime, then (re)schedule it.  The
+ * whole read-modify-schedule sequence is done under cq_mtx, so the
+ * _locked schedule variant must be used to avoid recursing on the
+ * mutex.  Returns nsec_advance()'s count — presumably the number of
+ * periods skipped — TODO confirm against nsec_advance().
+ */
clockintr_advance(struct clockintr *cl, uint64_t period)
{
uint64_t count, expiration;
+	struct clockintr_queue *cq = cl->cl_queue;
+	mtx_enter(&cq->cq_mtx);
expiration = cl->cl_expiration;
-	count = nsec_advance(&expiration, period, cl->cl_queue->cq_uptime);
-	clockintr_schedule(cl, expiration);
+	count = nsec_advance(&expiration, period, cq->cq_uptime);
+	clockintr_schedule_locked(cl, expiration);
+	mtx_leave(&cq->cq_mtx);
return count;
}
return NULL;
cl->cl_func = func;
cl->cl_queue = cq;
+
+	/* Publication point: once on cq_est the entry is visible to
+	 * other cq_mtx holders, so insert under the mutex. */
+	mtx_enter(&cq->cq_mtx);
TAILQ_INSERT_TAIL(&cq->cq_est, cl, cl_elink);
+	mtx_leave(&cq->cq_mtx);
return cl;
}
+/*
+ * Return a consistent snapshot of cl's expiration time.  Taking cq_mtx
+ * (rather than reading directly, as the old code did) presumably
+ * guards against a torn 64-bit read on 32-bit platforms and against
+ * racing a concurrent reschedule — confirm which motivated the change.
+ */
uint64_t
clockintr_expiration(const struct clockintr *cl)
{
-	return cl->cl_expiration;
+	uint64_t expiration;
+	struct clockintr_queue *cq = cl->cl_queue;
+
+	mtx_enter(&cq->cq_mtx);
+	expiration = cl->cl_expiration;
+	mtx_leave(&cq->cq_mtx);
+	return expiration;
}
+/*
+ * Public entry point: schedule cl to expire at `expiration`.
+ * Thin wrapper that takes cq_mtx around clockintr_schedule_locked();
+ * callers already holding cq_mtx must call the _locked variant.
+ */
void
clockintr_schedule(struct clockintr *cl, uint64_t expiration)
+{
+	struct clockintr_queue *cq = cl->cl_queue;
+
+	mtx_enter(&cq->cq_mtx);
+	clockintr_schedule_locked(cl, expiration);
+	mtx_leave(&cq->cq_mtx);
+}
+
+/*
+ * Schedule cl to expire at `expiration`.  Caller must hold cq_mtx
+ * (asserted below).  If cl is already pending it is removed first;
+ * presumably it is then re-inserted into cq_pend in expiration order
+ * by the elided remainder of this function — confirm.
+ */
+void
+clockintr_schedule_locked(struct clockintr *cl, uint64_t expiration)
{
struct clockintr *elm;
struct clockintr_queue *cq = cl->cl_queue;
+	MUTEX_ASSERT_LOCKED(&cq->cq_mtx);
+
if (ISSET(cl->cl_flags, CLST_PENDING)) {
TAILQ_REMOVE(&cq->cq_pend, cl, cl_plink);
CLR(cl->cl_flags, CLST_PENDING);
+/*
+ * Return the expiration time of the earliest pending clockintr.
+ * Caller must hold cq_mtx and must guarantee cq_pend is non-empty:
+ * TAILQ_FIRST() returns NULL on an empty list and would be
+ * dereferenced here.
+ */
uint64_t
clockqueue_next(const struct clockintr_queue *cq)
{
+	MUTEX_ASSERT_LOCKED(&cq->cq_mtx);
return TAILQ_FIRST(&cq->cq_pend)->cl_expiration;
}
-/* $OpenBSD: clockintr.h,v 1.3 2023/03/09 03:50:38 cheloha Exp $ */
+/* $OpenBSD: clockintr.h,v 1.4 2023/04/03 00:20:24 cheloha Exp $ */
/*
* Copyright (c) 2020-2022 Scott Cheloha <cheloha@openbsd.org>
*
#ifdef _KERNEL
+#include <sys/mutex.h>
#include <sys/queue.h>
/*
* Struct member protections:
*
* I Immutable after initialization.
- * o Owned by a single CPU.
+ * m Parent queue mutex (cl_queue->cq_mtx).
*/
struct clockintr_queue;
+/*
+ * One schedulable clock-interrupt callback.  Every [m] member is
+ * protected by the parent queue's mutex (cl_queue->cq_mtx); [I]
+ * members are set at clockintr_establish() time and never change.
+ */
struct clockintr {
-	uint64_t cl_expiration; /* [o] dispatch time */
-	TAILQ_ENTRY(clockintr) cl_elink; /* [o] cq_est glue */
-	TAILQ_ENTRY(clockintr) cl_plink; /* [o] cq_pend glue */
+	uint64_t cl_expiration; /* [m] dispatch time */
+	TAILQ_ENTRY(clockintr) cl_elink; /* [m] cq_est glue */
+	TAILQ_ENTRY(clockintr) cl_plink; /* [m] cq_pend glue */
void (*cl_func)(struct clockintr *, void *); /* [I] callback */
struct clockintr_queue *cl_queue; /* [I] parent queue */
-	u_int cl_flags; /* [o] CLST_* flags */
+	u_int cl_flags; /* [m] CLST_* flags */
};
#define CLST_PENDING 0x00000001 /* scheduled to run */
*
* Struct member protections:
*
+ * a Modified atomically.
* I Immutable after initialization.
+ * m Per-queue mutex (cq_mtx).
* o Owned by a single CPU.
*/
struct clockintr_queue {
+	struct mutex cq_mtx; /* [a] per-queue mutex */
+	/*
+	 * NOTE(review): "[a]" (modified atomically) is an odd tag for
+	 * the mutex itself — it is initialized once in
+	 * clockintr_cpu_init() and thereafter only locked/unlocked, so
+	 * "[I]" looks more accurate.  Confirm intended tag.
+	 */
uint64_t cq_uptime; /* [o] cached uptime */
-	TAILQ_HEAD(, clockintr) cq_est; /* [o] established clockintr list */
-	TAILQ_HEAD(, clockintr) cq_pend;/* [o] pending clockintr list */
-	struct clockintr *cq_running; /* [o] running clockintr */
+	TAILQ_HEAD(, clockintr) cq_est; /* [m] established clockintr list */
+	TAILQ_HEAD(, clockintr) cq_pend;/* [m] pending clockintr list */
+	struct clockintr *cq_running; /* [m] running clockintr */
struct clockintr *cq_hardclock; /* [o] hardclock handle */
struct clockintr *cq_schedclock;/* [o] schedclock handle, if any */
struct clockintr *cq_statclock; /* [o] statclock handle */