From 9d1f3b8426885134448e284b1934bdb244b5e59f Mon Sep 17 00:00:00 2001
From: cheloha
Date: Wed, 5 Apr 2023 00:23:06 +0000
Subject: [PATCH] clockintr: add clockintr_cancel()

As the name suggests, clockintr_cancel() cancels any pending
expiration of the given clockintr.

I think we will need this in the near future.
---
 sys/kern/kern_clockintr.c | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/sys/kern/kern_clockintr.c b/sys/kern/kern_clockintr.c
index 29eb31925d2..eb1351c37bb 100644
--- a/sys/kern/kern_clockintr.c
+++ b/sys/kern/kern_clockintr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_clockintr.c,v 1.8 2023/04/04 21:49:10 cheloha Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.9 2023/04/05 00:23:06 cheloha Exp $ */
 /*
  * Copyright (c) 2003 Dale Rahn
  * Copyright (c) 2020 Mark Kettenis
@@ -54,6 +54,7 @@ uint32_t prof_min;	/* [I] minimum profhz period (ns) */
 uint32_t prof_mask;	/* [I] set of allowed offsets */
 
 uint64_t clockintr_advance(struct clockintr *, uint64_t);
+void clockintr_cancel(struct clockintr *);
 void clockintr_cancel_locked(struct clockintr *);
 struct clockintr *clockintr_establish(struct clockintr_queue *,
     void (*)(struct clockintr *, void *));
@@ -302,6 +303,17 @@ clockintr_advance(struct clockintr *cl, uint64_t period)
 	return count;
 }
 
+void
+clockintr_cancel(struct clockintr *cl)
+{
+	struct clockintr_queue *cq = cl->cl_queue;
+
+	mtx_enter(&cq->cq_mtx);
+	if (ISSET(cl->cl_flags, CLST_PENDING))
+		clockintr_cancel_locked(cl);
+	mtx_leave(&cq->cq_mtx);
+}
+
 void
clockintr_cancel_locked(struct clockintr *cl)
 {
-- 
2.20.1
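
For context, a minimal usage sketch of the new interface follows. It is
not part of the patch: example_intr(), example_start(), example_stop(),
and example_cl are invented names for illustration, and the
clockintr_establish()/clockintr_advance()/clockintr_cancel() signatures
are taken from the declarations visible in the diff above.

	/* Hypothetical handle, established once at startup. */
	struct clockintr *example_cl;

	/* Hypothetical callback run on each expiration. */
	void
	example_intr(struct clockintr *cl, void *frame)
	{
		/* ... periodic work ... */
	}

	/* Establish the clockintr, then arm it with a period in ns. */
	void
	example_start(struct clockintr_queue *cq, uint64_t period_ns)
	{
		if (example_cl == NULL)
			example_cl = clockintr_establish(cq, example_intr);
		clockintr_advance(example_cl, period_ns);
	}

	/* Disarm any pending expiration. */
	void
	example_stop(void)
	{
		/*
		 * clockintr_cancel() takes cq_mtx itself and only calls
		 * clockintr_cancel_locked() if CLST_PENDING is set, so the
		 * caller needs no locking and cancelling an unarmed or
		 * already-expired clockintr is a harmless no-op.
		 */
		clockintr_cancel(example_cl);
	}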