From b441a2d80426b78925de2578c3b88e2d61cf4966 Mon Sep 17 00:00:00 2001
From: cheloha
Date: Tue, 5 Sep 2023 22:25:41 +0000
Subject: [PATCH] clockintr: add clockintr_advance_random()

Add clockintr_advance_random().  Its sole purpose is to simplify the
implementation of statclock's pseudorandom period.  Ideally, nothing
else will use it and we will get rid of it and the pseudorandom
statclock period at some point in the near future.

Suggested by mpi@.

Thread: https://marc.info/?l=openbsd-tech&m=169392340028978&w=2

ok mpi@ mlarkin@
---
 sys/kern/kern_clockintr.c | 37 ++++++++++++++++++++++++-------------
 1 file changed, 24 insertions(+), 13 deletions(-)

diff --git a/sys/kern/kern_clockintr.c b/sys/kern/kern_clockintr.c
index 1f234f15b97..c32643615c3 100644
--- a/sys/kern/kern_clockintr.c
+++ b/sys/kern/kern_clockintr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_clockintr.c,v 1.33 2023/08/26 22:21:00 cheloha Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.34 2023/09/05 22:25:41 cheloha Exp $ */
 /*
  * Copyright (c) 2003 Dale Rahn
  * Copyright (c) 2020 Mark Kettenis
@@ -42,6 +42,7 @@ uint32_t statclock_avg;		/* [I] average statclock period (ns) */
 uint32_t statclock_min;		/* [I] minimum statclock period (ns) */
 uint32_t statclock_mask;	/* [I] set of allowed offsets */
 
+uint64_t clockintr_advance_random(struct clockintr *, uint64_t, uint32_t);
 void clockintr_cancel_locked(struct clockintr *);
 uint64_t clockintr_expiration(const struct clockintr *);
 void clockintr_hardclock(struct clockintr *, void *);
@@ -345,6 +346,25 @@ clockintr_advance(struct clockintr *cl, uint64_t period)
 	return count;
 }
 
+uint64_t
+clockintr_advance_random(struct clockintr *cl, uint64_t min, uint32_t mask)
+{
+	uint64_t count = 0;
+	struct clockintr_queue *cq = cl->cl_queue;
+	uint32_t off;
+
+	KASSERT(cl == &cq->cq_shadow);
+
+	while (cl->cl_expiration <= cq->cq_uptime) {
+		while ((off = (random() & mask)) == 0)
+			continue;
+		cl->cl_expiration += min + off;
+		count++;
+	}
+	SET(cl->cl_flags, CLST_SHADOW_PENDING);
+	return count;
+}
+
 void
 clockintr_cancel(struct clockintr *cl)
 {
@@ -498,20 +518,11 @@ clockintr_hardclock(struct clockintr *cl, void *frame)
 void
 clockintr_statclock(struct clockintr *cl, void *frame)
 {
-	uint64_t count, expiration, i, uptime;
-	uint32_t off;
+	uint64_t count, i;
 
 	if (ISSET(clockintr_flags, CL_RNDSTAT)) {
-		count = 0;
-		expiration = clockintr_expiration(cl);
-		uptime = clockintr_nsecuptime(cl);
-		while (expiration <= uptime) {
-			while ((off = (random() & statclock_mask)) == 0)
-				continue;
-			expiration += statclock_min + off;
-			count++;
-		}
-		clockintr_schedule(cl, expiration);
+		count = clockintr_advance_random(cl, statclock_min,
+		    statclock_mask);
 	} else {
 		count = clockintr_advance(cl, statclock_avg);
 	}
-- 
2.20.1
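
Not part of the patch: below is a minimal userspace sketch of the pseudorandom-period
calculation that clockintr_advance_random() centralizes, i.e. a minimum period plus a
nonzero random offset drawn through a bit mask.  The helper name pseudorandom_period,
the example min/mask values, and the use of libc random() are illustrative assumptions;
the kernel uses its own random() and the statclock_min/statclock_mask globals.

/*
 * Sketch only: derive one pseudorandom period as "min + off", where
 * off is a masked random offset that is re-rolled until it is nonzero,
 * so the period is always strictly greater than the minimum.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint64_t
pseudorandom_period(uint64_t min, uint32_t mask)
{
	uint32_t off;

	while ((off = (uint32_t)random() & mask) == 0)
		continue;
	return min + off;
}

int
main(void)
{
	/* Arbitrary example values standing in for the kernel globals. */
	uint64_t min = 5000000;			/* 5 ms minimum period (ns) */
	uint32_t mask = (1U << 22) - 1;		/* offsets in [1, 4194303] ns */
	int i;

	for (i = 0; i < 4; i++) {
		printf("period = %llu ns\n",
		    (unsigned long long)pseudorandom_period(min, mask));
	}
	return 0;
}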