Index: sys/kern/kern_clockintr.c
===================================================================
-/* $OpenBSD: kern_clockintr.c,v 1.9 2023/04/05 00:23:06 cheloha Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.10 2023/04/16 21:19:26 cheloha Exp $ */
/*
* Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
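/*
 * clockintr_cpu_init(): when a CPU's queue is first initialized, point the
 * embedded shadow copy back at its queue.  cl_queue is how the API routines
 * below recognize that they were handed &cq->cq_shadow rather than a real
 * clockintr.
 */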
KASSERT(ISSET(clockintr_flags, CL_INIT));
if (!ISSET(cq->cq_flags, CL_CPU_INIT)) {
+ cq->cq_shadow.cl_queue = cq;
mtx_init(&cq->cq_mtx, IPL_CLOCK);
TAILQ_INIT(&cq->cq_est);
TAILQ_INIT(&cq->cq_pend);
break;
}
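/*
 * clockintr_dispatch(): the clockintr at the head of the pending queue has
 * expired.  Dequeue it, seed the shadow copy with its expiration, and run
 * the callback on the shadow with cq_mtx released, so the callback may
 * reschedule or cancel itself without taking the mutex.
 */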
clockintr_cancel_locked(cl);
+ cq->cq_shadow.cl_expiration = cl->cl_expiration;
cq->cq_running = cl;
mtx_leave(&cq->cq_mtx);
- cl->cl_func(cl, frame);
+ cl->cl_func(&cq->cq_shadow, frame);
mtx_enter(&cq->cq_mtx);
cq->cq_running = NULL;
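/*
 * Reconcile the shadow with the real clockintr.  If, while the callback ran,
 * the clockintr was rescheduled or cancelled through its real handle
 * (CLST_IGNORE_SHADOW), the shadow's request is dropped.  Otherwise a pending
 * shadow request (CLST_SHADOW_PENDING) is committed by scheduling the real
 * clockintr at the shadow's expiration.
 */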
+ if (ISSET(cl->cl_flags, CLST_IGNORE_SHADOW)) {
+ CLR(cl->cl_flags, CLST_IGNORE_SHADOW);
+ CLR(cq->cq_shadow.cl_flags, CLST_SHADOW_PENDING);
+ }
+ if (ISSET(cq->cq_shadow.cl_flags, CLST_SHADOW_PENDING)) {
+ CLR(cq->cq_shadow.cl_flags, CLST_SHADOW_PENDING);
+ clockintr_schedule_locked(cl,
+ cq->cq_shadow.cl_expiration);
+ }
run++;
}
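/*
 * Illustrative sketch, not part of this diff; "example_period",
 * "example_do_work", and "example_clockintr" are hypothetical names.  It
 * shows how a periodic callback interacts with the shadow: the cl it
 * receives is &cq->cq_shadow, so clockintr_advance() takes the lockless
 * shadow path (see clockintr_advance() below), sets CLST_SHADOW_PENDING,
 * and returns without touching cq_mtx; clockintr_dispatch() then commits
 * the new expiration to the real clockintr after the callback returns.
 */
extern uint64_t example_period;		/* period, in nanoseconds */
extern void example_do_work(void *);

void
example_clockintr(struct clockintr *cl, void *frame)
{
	uint64_t count, i;

	/* Reschedule ourselves one period out; no mutex is taken here. */
	count = clockintr_advance(cl, example_period);

	/* Do the periodic work once per elapsed period. */
	for (i = 0; i < count; i++)
		example_do_work(frame);
}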
uint64_t count, expiration;
struct clockintr_queue *cq = cl->cl_queue;
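/*
 * clockintr_advance(): if we were handed the shadow we are inside the
 * callback on the owning CPU, so advance the copy in place against the
 * cached dispatch uptime and mark it CLST_SHADOW_PENDING.  No mutex is
 * needed; the shadow is private to this CPU.
 */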
+ if (cl == &cq->cq_shadow) {
+ count = nsec_advance(&cl->cl_expiration, period, cq->cq_uptime);
+ SET(cl->cl_flags, CLST_SHADOW_PENDING);
+ return count;
+ }
+
mtx_enter(&cq->cq_mtx);
expiration = cl->cl_expiration;
count = nsec_advance(&expiration, period, cq->cq_uptime);
if (ISSET(cl->cl_flags, CLST_PENDING))
clockintr_cancel_locked(cl);
clockintr_schedule_locked(cl, expiration);
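/*
 * The real clockintr was just rescheduled.  If it is also the one currently
 * running, the callback's shadow state is now stale, so tell
 * clockintr_dispatch() to discard any pending shadow request.
 */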
+ if (cl == cq->cq_running)
+ SET(cl->cl_flags, CLST_IGNORE_SHADOW);
mtx_leave(&cq->cq_mtx);
return count;
}
{
struct clockintr_queue *cq = cl->cl_queue;
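/*
 * clockintr_cancel(): cancelling the shadow only clears CLST_SHADOW_PENDING,
 * so clockintr_dispatch() will not reschedule the real clockintr when the
 * callback returns.
 */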
+ if (cl == &cq->cq_shadow) {
+ CLR(cl->cl_flags, CLST_SHADOW_PENDING);
+ return;
+ }
+
mtx_enter(&cq->cq_mtx);
if (ISSET(cl->cl_flags, CLST_PENDING))
clockintr_cancel_locked(cl);
+ if (cl == cq->cq_running)
+ SET(cl->cl_flags, CLST_IGNORE_SHADOW);
mtx_leave(&cq->cq_mtx);
}
uint64_t expiration;
struct clockintr_queue *cq = cl->cl_queue;
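/*
 * clockintr_expiration(): the shadow's expiration may be read without the
 * mutex; only the CPU running the callback ever touches it.
 */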
+ if (cl == &cq->cq_shadow)
+ return cl->cl_expiration;
+
mtx_enter(&cq->cq_mtx);
expiration = cl->cl_expiration;
mtx_leave(&cq->cq_mtx);
{
struct clockintr_queue *cq = cl->cl_queue;
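/*
 * clockintr_schedule(): scheduling the shadow just records the absolute
 * expiration and marks it CLST_SHADOW_PENDING for clockintr_dispatch() to
 * commit.
 */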
+ if (cl == &cq->cq_shadow) {
+ cl->cl_expiration = expiration;
+ SET(cl->cl_flags, CLST_SHADOW_PENDING);
+ return;
+ }
+
mtx_enter(&cq->cq_mtx);
if (ISSET(cl->cl_flags, CLST_PENDING))
clockintr_cancel_locked(cl);
clockintr_schedule_locked(cl, expiration);
+ if (cl == cq->cq_running)
+ SET(cl->cl_flags, CLST_IGNORE_SHADOW);
mtx_leave(&cq->cq_mtx);
}
uint64_t
clockintr_nsecuptime(const struct clockintr *cl)
{
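/*
 * Only a running callback, which is always handed the shadow, may read the
 * uptime cached at dispatch time.
 */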
+ KASSERT(cl == &cl->cl_queue->cq_shadow);
return cl->cl_queue->cq_uptime;
}

Index: sys/sys/clockintr.h
===================================================================
-/* $OpenBSD: clockintr.h,v 1.4 2023/04/03 00:20:24 cheloha Exp $ */
+/* $OpenBSD: clockintr.h,v 1.5 2023/04/16 21:19:26 cheloha Exp $ */
/*
* Copyright (c) 2020-2022 Scott Cheloha <cheloha@openbsd.org>
*
u_int cl_flags; /* [m] CLST_* flags */
};
-#define CLST_PENDING            0x00000001      /* scheduled to run */
+#define CLST_PENDING            0x00000001      /* scheduled to run */
+#define CLST_SHADOW_PENDING     0x00000002      /* shadow is scheduled to run */
+#define CLST_IGNORE_SHADOW      0x00000004      /* ignore shadow copy */
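/*
 * CLST_SHADOW_PENDING lives in the shadow copy's cl_flags and is only touched
 * by the CPU dispatching that queue; CLST_IGNORE_SHADOW lives in the real
 * clockintr's cl_flags and is set and cleared under the queue mutex.
 */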
/*
* Per-CPU clock interrupt state.
* o Owned by a single CPU.
*/
struct clockintr_queue {
+ struct clockintr cq_shadow; /* [o] copy of running clockintr */
struct mutex cq_mtx; /* [a] per-queue mutex */
uint64_t cq_uptime; /* [o] cached uptime */
TAILQ_HEAD(, clockintr) cq_est; /* [m] established clockintr list */