From 0d53143dca7c4d83472683f12d7e8cfeb031a881 Mon Sep 17 00:00:00 2001
From: cheloha <cheloha@openbsd.org>
Date: Fri, 9 Feb 2024 16:52:58 +0000
Subject: [PATCH] clockintr: add clockintr_unbind()

The clockintr_unbind() function cancels any pending execution of the
given clock interrupt object's callback and severs the binding between
the object and its host CPU.  Upon return from clockintr_unbind(), the
clock interrupt object may be rebound with a call to clockintr_bind().

The optional CL_BARRIER flag tells clockintr_unbind() to block if the
clockintr's callback function is executing at the moment of the call.
This is useful when the clockintr's arg is a shared reference and the
caller needs to be certain the reference is inactive.

Now that clockintrs can be bound and unbound repeatedly, there is more
room for error.  To help catch programmer errors, clockintr_unbind()
sets cl_queue to NULL.  Calls to other API functions after a clockintr
is unbound will then fault on a NULL dereference.  clockintr_bind()
also KASSERTs that cl_queue is NULL to ensure the clockintr is not
already bound.  These checks are not perfect, but they do catch some
common errors.

With input from mpi@.

Thread: https://marc.info/?l=openbsd-tech&m=170629367121800&w=2

ok mpi@
---
 sys/kern/kern_clockintr.c | 38 +++++++++++++++++++++++++++++++++++---
 sys/sys/clockintr.h       |  9 +++++++--
 2 files changed, 42 insertions(+), 5 deletions(-)

diff --git a/sys/kern/kern_clockintr.c b/sys/kern/kern_clockintr.c
index c39e38d69e1..4141a2bbeba 100644
--- a/sys/kern/kern_clockintr.c
+++ b/sys/kern/kern_clockintr.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: kern_clockintr.c,v 1.65 2024/02/09 15:06:23 cheloha Exp $ */
+/*	$OpenBSD: kern_clockintr.c,v 1.66 2024/02/09 16:52:58 cheloha Exp $ */
 /*
  * Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
  * Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
@@ -227,6 +227,12 @@ clockintr_dispatch(void *frame)
 			CLR(request->cr_flags, CR_RESCHEDULE);
 			clockqueue_pend_insert(cq, cl, request->cr_expiration);
 		}
+		if (ISSET(cq->cq_flags, CQ_NEED_WAKEUP)) {
+			CLR(cq->cq_flags, CQ_NEED_WAKEUP);
+			mtx_leave(&cq->cq_mtx);
+			wakeup(&cq->cq_running);
+			mtx_enter(&cq->cq_mtx);
+		}
 		run++;
 	}
 
@@ -352,15 +358,41 @@ clockintr_bind(struct clockintr *cl, struct cpu_info *ci,
 {
 	struct clockintr_queue *cq = &ci->ci_queue;
 
+	splassert(IPL_NONE);
+	KASSERT(cl->cl_queue == NULL);
+
+	mtx_enter(&cq->cq_mtx);
 	cl->cl_arg = arg;
 	cl->cl_func = func;
 	cl->cl_queue = cq;
-
-	mtx_enter(&cq->cq_mtx);
 	TAILQ_INSERT_TAIL(&cq->cq_all, cl, cl_alink);
 	mtx_leave(&cq->cq_mtx);
 }
 
+void
+clockintr_unbind(struct clockintr *cl, uint32_t flags)
+{
+	struct clockintr_queue *cq = cl->cl_queue;
+
+	KASSERT(!ISSET(flags, ~CL_FLAG_MASK));
+
+	mtx_enter(&cq->cq_mtx);
+
+	clockintr_cancel_locked(cl);
+
+	cl->cl_arg = NULL;
+	cl->cl_func = NULL;
+	cl->cl_queue = NULL;
+	TAILQ_REMOVE(&cq->cq_all, cl, cl_alink);
+
+	if (ISSET(flags, CL_BARRIER) && cl == cq->cq_running) {
+		SET(cq->cq_flags, CQ_NEED_WAKEUP);
+		msleep_nsec(&cq->cq_running, &cq->cq_mtx, PWAIT | PNORELOCK,
+		    "clkbar", INFSLP);
+	} else
+		mtx_leave(&cq->cq_mtx);
+}
+
 void
 clockintr_schedule(struct clockintr *cl, uint64_t expiration)
 {
diff --git a/sys/sys/clockintr.h b/sys/sys/clockintr.h
index b2955c9d15d..f79db03a45c 100644
--- a/sys/sys/clockintr.h
+++ b/sys/sys/clockintr.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: clockintr.h,v 1.25 2024/01/24 19:23:38 cheloha Exp $ */
+/*	$OpenBSD: clockintr.h,v 1.26 2024/02/09 16:52:58 cheloha Exp $ */
 /*
  * Copyright (c) 2020-2022 Scott Cheloha <cheloha@openbsd.org>
  *
@@ -113,7 +113,8 @@ struct clockintr_queue {
 #define CQ_INIT			0x00000001	/* clockintr_cpu_init() done */
 #define CQ_INTRCLOCK		0x00000002	/* intrclock installed */
 #define CQ_IGNORE_REQUEST	0x00000004	/* ignore callback requests */
-#define CQ_STATE_MASK		0x00000007
+#define CQ_NEED_WAKEUP		0x00000008	/* caller at barrier */
+#define CQ_STATE_MASK		0x0000000f
 
 void clockintr_cpu_init(const struct intrclock *);
 int clockintr_dispatch(void *);
@@ -123,12 +124,16 @@ void clockintr_trigger(void);
  * Kernel API
  */
 
+#define CL_BARRIER	0x00000001	/* block if callback is running */
+#define CL_FLAG_MASK	0x00000001
+
 uint64_t clockintr_advance(struct clockintr *, uint64_t);
 void clockintr_bind(struct clockintr *, struct cpu_info *,
     void (*)(struct clockrequest *, void *, void *), void *);
 void clockintr_cancel(struct clockintr *);
 void clockintr_schedule(struct clockintr *, uint64_t);
 void clockintr_stagger(struct clockintr *, uint64_t, uint32_t, uint32_t);
+void clockintr_unbind(struct clockintr *, uint32_t);
 uint64_t clockrequest_advance(struct clockrequest *, uint64_t);
 uint64_t clockrequest_advance_random(struct clockrequest *, uint64_t, uint32_t);
 void clockqueue_init(struct clockintr_queue *);
-- 
2.20.1
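
To make the new API concrete, here is a minimal, hypothetical consumer
sketch.  It is not part of the commit: foo_softc, foo_tick(),
foo_attach(), foo_detach(), and FOO_PERIOD are invented names; only the
clockintr_* calls and the CL_BARRIER flag come from the API above.

	/*
	 * Hypothetical consumer sketch (not from the patch itself).
	 */
	#include <sys/types.h>
	#include <sys/clockintr.h>

	#define FOO_PERIOD	50000000ULL	/* 50 ms, in nanoseconds */

	struct foo_softc {
		struct clockintr	sc_clockintr;	/* bound to one CPU */
		/* ... state shared with foo_tick() via arg ... */
	};

	/*
	 * Callback, run from the host CPU's clockintr dispatch.
	 * Requests the next expiration one period from now.
	 */
	void
	foo_tick(struct clockrequest *cr, void *frame, void *arg)
	{
		struct foo_softc *sc = arg;

		clockrequest_advance(cr, FOO_PERIOD);

		/* ... use sc; valid until clockintr_unbind() returns ... */
	}

	/* Bind to ci and arm the first expiration.  Must run at IPL_NONE. */
	void
	foo_attach(struct foo_softc *sc, struct cpu_info *ci)
	{
		clockintr_bind(&sc->sc_clockintr, ci, foo_tick, sc);
		clockintr_advance(&sc->sc_clockintr, FOO_PERIOD);
	}

	/*
	 * Tear down.  CL_BARRIER makes this sleep if foo_tick() is
	 * executing at this instant, so sc is certainly inactive on
	 * return and may be freed, or the clockintr rebound with
	 * another clockintr_bind() call.
	 */
	void
	foo_detach(struct foo_softc *sc)
	{
		clockintr_unbind(&sc->sc_clockintr, CL_BARRIER);
	}

Because clockintr_unbind() can sleep at the barrier (via msleep_nsec()),
foo_detach() has to run in process context; without CL_BARRIER the
caller would have no guarantee that a concurrent foo_tick() on the
remote CPU had finished with sc.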