From f36eae229c7167bc31605e12863e63abe756c8c8 Mon Sep 17 00:00:00 2001
From: cheloha <cheloha@openbsd.org>
Date: Thu, 14 Sep 2023 20:58:51 +0000
Subject: [PATCH] clockintr, statclock: eliminate clockintr_statclock() wrapper

- Move remaining statclock variables from kern_clockintr.c to
  kern_clock.c.  Move statclock variable initialization from
  clockintr_init() into initclocks().

- Change the statclock() prototype to make it a legal clockintr
  callback function and establish the handle with statclock() instead
  of clockintr_statclock().

- Merge the contents of clockintr_statclock() into statclock().
  statclock() now reschedules itself and handles multiple expirations
  transparently.

- Make statclock_avg visible from sys/systm.h so that
  clockintr_cpu_init() can use it to advance the statclock across
  suspend/hibernate.

Thread: https://marc.info/?l=openbsd-tech&m=169428749720476&w=2
---
 sys/kern/kern_clock.c     | 61 ++++++++++++++++++++++++++++++---------
 sys/kern/kern_clockintr.c | 44 ++--------------------------------------
 sys/sys/systm.h           |  7 +++--
 3 files changed, 55 insertions(+), 57 deletions(-)

diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index e23ab3e9833..b3fbbb49da6 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_clock.c,v 1.117 2023/09/14 19:39:47 cheloha Exp $ */
+/* $OpenBSD: kern_clock.c,v 1.118 2023/09/14 20:58:51 cheloha Exp $ */
 /* $NetBSD: kern_clock.c,v 1.34 1996/06/09 04:51:03 briggs Exp $ */
 
 /*-
@@ -39,6 +39,7 @@
 
 #include <sys/param.h>
 #include <sys/systm.h>
+#include <sys/clockintr.h>
 #include <sys/timeout.h>
 #include <sys/kernel.h>
 #include <sys/limits.h>
@@ -86,6 +87,9 @@ int ticks = INT_MAX - (15 * 60 * HZ);
 /* Don't force early wrap around, triggers bug in inteldrm */
 volatile unsigned long jiffies;
 
+uint32_t statclock_avg;		/* [I] average statclock period (ns) */
+uint32_t statclock_min;		/* [I] minimum statclock period (ns) */
+uint32_t statclock_mask;	/* [I] set of allowed offsets */
 int statclock_is_randomized;	/* [I] fixed or pseudorandom period? */
 
 /*
@@ -94,11 +98,31 @@ int statclock_is_randomized;	/* [I] fixed or pseudorandom period? */
 void
 initclocks(void)
 {
+	uint32_t half_avg, var;
+
 	/*
 	 * Let the machine-specific code do its bit.
 	 */
 	cpu_initclocks();
 
+	KASSERT(stathz >= 1 && stathz <= 1000000000);
+
+	/*
+	 * Compute the average statclock() period.  Then find var, the
+	 * largest power of two such that var <= statclock_avg / 2.
+	 */
+	statclock_avg = 1000000000 / stathz;
+	half_avg = statclock_avg / 2;
+	for (var = 1U << 31; var > half_avg; var /= 2)
+		continue;
+
+	/*
+	 * Set a lower bound for the range using statclock_avg and var.
+	 * The mask for that range is just (var - 1).
+	 */
+	statclock_min = statclock_avg - (var / 2);
+	statclock_mask = var - 1;
+
 	KASSERT(profhz >= stathz && profhz <= 1000000000);
 	KASSERT(profhz % stathz == 0);
 	profclock_period = 1000000000 / profhz;
@@ -247,24 +271,33 @@ stopprofclock(struct process *pr)
  * do process and kernel statistics.
  */
 void
-statclock(struct clockframe *frame)
+statclock(struct clockintr *cl, void *cf, void *arg)
 {
+	uint64_t count, i;
+	struct clockframe *frame = cf;
 	struct cpu_info *ci = curcpu();
 	struct schedstate_percpu *spc = &ci->ci_schedstate;
 	struct proc *p = curproc;
 	struct process *pr;
 
+	if (statclock_is_randomized) {
+		count = clockintr_advance_random(cl, statclock_min,
+		    statclock_mask);
+	} else {
+		count = clockintr_advance(cl, statclock_avg);
+	}
+
 	if (CLKF_USERMODE(frame)) {
 		pr = p->p_p;
 		/*
 		 * Came from user mode; CPU was in user state.
 		 * If this process is being profiled record the tick.
 		 */
-		p->p_uticks++;
+		p->p_uticks += count;
 		if (pr->ps_nice > NZERO)
-			spc->spc_cp_time[CP_NICE]++;
+			spc->spc_cp_time[CP_NICE] += count;
 		else
-			spc->spc_cp_time[CP_USER]++;
+			spc->spc_cp_time[CP_USER] += count;
 	} else {
 		/*
 		 * Came from kernel mode, so we were:
@@ -281,25 +314,27 @@ statclock(struct clockframe *frame)
 		 */
 		if (CLKF_INTR(frame)) {
 			if (p != NULL)
-				p->p_iticks++;
+				p->p_iticks += count;
 			spc->spc_cp_time[spc->spc_spinning ?
-			    CP_SPIN : CP_INTR]++;
+			    CP_SPIN : CP_INTR] += count;
 		} else if (p != NULL && p != spc->spc_idleproc) {
-			p->p_sticks++;
+			p->p_sticks += count;
 			spc->spc_cp_time[spc->spc_spinning ?
-			    CP_SPIN : CP_SYS]++;
+			    CP_SPIN : CP_SYS] += count;
 		} else
 			spc->spc_cp_time[spc->spc_spinning ?
-			    CP_SPIN : CP_IDLE]++;
+			    CP_SPIN : CP_IDLE] += count;
 	}
 
 	if (p != NULL) {
-		p->p_cpticks++;
+		p->p_cpticks += count;
 		/*
 		 * schedclock() runs every fourth statclock().
 		 */
-		if ((++spc->spc_schedticks & 3) == 0)
-			schedclock(p);
+		for (i = 0; i < count; i++) {
+			if ((++spc->spc_schedticks & 3) == 0)
+				schedclock(p);
+		}
 	}
 }
 
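A note on the rescheduling pattern in statclock() above: as used in this
patch, clockintr_advance() and clockintr_advance_random() rearm the clock
interrupt and return the number of periods that have expired since the last
run.  That count is normally 1, but it can be larger when expirations are
coalesced, e.g. after a stretch of blocked interrupts.  Plain counters can
absorb the whole count at once ("+= count"), but schedclock() still has to
fire once per four ticks, which is why the spc_schedticks update stays in a
loop.  The userland toy below is illustrative only (consume_ticks() and its
counters are invented for the example) and shows why a bulk
"schedticks += count" would be wrong:

    #include <stdio.h>
    #include <stdint.h>

    /* Toy model: "schedclock" should run once per four statclock ticks. */
    static unsigned schedticks;
    static unsigned schedclock_calls;

    static void
    consume_ticks(uint64_t count)
    {
            uint64_t i;

            /* Per-tick work must walk every coalesced expiration. */
            for (i = 0; i < count; i++) {
                    if ((++schedticks & 3) == 0)
                            schedclock_calls++;
            }
    }

    int
    main(void)
    {
            consume_ticks(3);
            consume_ticks(8);   /* a coalesced burst of eight ticks */
            consume_ticks(1);
            /*
             * 12 ticks => 3 calls.  With "schedticks += count" the
             * burst would cross the 4-tick boundary twice but fire
             * at most once.
             */
            printf("ticks=%u calls=%u (expect 3)\n", schedticks,
                schedclock_calls);
            return 0;
    }

The kernel code follows the same reasoning: the cp_time and p_*ticks
counters only need totals, so they take "+= count", while spc_schedticks has
to observe each tick boundary individually.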
diff --git a/sys/kern/kern_clockintr.c b/sys/kern/kern_clockintr.c
index 7c41b9d5fe4..62bb81a6ba0 100644
--- a/sys/kern/kern_clockintr.c
+++ b/sys/kern/kern_clockintr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_clockintr.c,v 1.49 2023/09/14 19:51:17 cheloha Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.50 2023/09/14 20:58:51 cheloha Exp $ */
 /*
  * Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
  * Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
@@ -38,14 +38,10 @@
  */
 uint32_t clockintr_flags;	/* [I] global state + behavior flags */
 uint32_t hardclock_period;	/* [I] hardclock period (ns) */
-uint32_t statclock_avg;		/* [I] average statclock period (ns) */
-uint32_t statclock_min;		/* [I] minimum statclock period (ns) */
-uint32_t statclock_mask;	/* [I] set of allowed offsets */
 
 void clockintr_hardclock(struct clockintr *, void *, void *);
 void clockintr_schedule(struct clockintr *, uint64_t);
 void clockintr_schedule_locked(struct clockintr *, uint64_t);
-void clockintr_statclock(struct clockintr *, void *, void *);
 void clockqueue_intrclock_install(struct clockintr_queue *,
     const struct intrclock *);
 uint64_t clockqueue_next(const struct clockintr_queue *);
@@ -61,8 +57,6 @@ uint64_t nsec_advance(uint64_t *, uint64_t, uint64_t);
 void
 clockintr_init(uint32_t flags)
 {
-	uint32_t half_avg, var;
-
 	KASSERT(CPU_IS_PRIMARY(curcpu()));
 	KASSERT(clockintr_flags == 0);
 	KASSERT(!ISSET(flags, ~CL_FLAG_MASK));
@@ -71,24 +65,6 @@ clockintr_init(uint32_t flags)
 	hardclock_period = 1000000000 / hz;
 	roundrobin_period = hardclock_period * 10;
 
-	KASSERT(stathz >= 1 && stathz <= 1000000000);
-
-	/*
-	 * Compute the average statclock() period.  Then find var, the
-	 * largest power of two such that var <= statclock_avg / 2.
-	 */
-	statclock_avg = 1000000000 / stathz;
-	half_avg = statclock_avg / 2;
-	for (var = 1U << 31; var > half_avg; var /= 2)
-		continue;
-
-	/*
-	 * Set a lower bound for the range using statclock_avg and var.
-	 * The mask for that range is just (var - 1).
-	 */
-	statclock_min = statclock_avg - (var / 2);
-	statclock_mask = var - 1;
-
 	SET(clockintr_flags, flags | CL_INIT);
 }
 
@@ -119,8 +95,7 @@ clockintr_cpu_init(const struct intrclock *ic)
 		panic("%s: failed to establish hardclock", __func__);
 	}
 	if (cq->cq_statclock == NULL) {
-		cq->cq_statclock = clockintr_establish(ci, clockintr_statclock,
-		    NULL);
+		cq->cq_statclock = clockintr_establish(ci, statclock, NULL);
 		if (cq->cq_statclock == NULL)
 			panic("%s: failed to establish statclock", __func__);
 	}
@@ -469,21 +444,6 @@ clockintr_hardclock(struct clockintr *cl, void *frame, void *arg)
 	hardclock(frame);
 }
 
-void
-clockintr_statclock(struct clockintr *cl, void *frame, void *arg)
-{
-	uint64_t count, i;
-
-	if (statclock_is_randomized) {
-		count = clockintr_advance_random(cl, statclock_min,
-		    statclock_mask);
-	} else {
-		count = clockintr_advance(cl, statclock_avg);
-	}
-	for (i = 0; i < count; i++)
-		statclock(frame);
-}
-
 void
 clockqueue_init(struct clockintr_queue *cq)
 {
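For reference, the statclock_min/statclock_mask pair that moves into
initclocks() above encodes the randomization range consumed by
clockintr_advance_random(): var is the largest power of two no greater than
half the average period, the minimum period is avg - var/2, and
mask = var - 1 bounds the pseudorandom offset added on top of the minimum,
so every randomized period lands in [avg - var/2, avg + var/2).  A
standalone sketch of the same computation (userland, for illustration; the
stathz value is arbitrary):

    #include <stdio.h>
    #include <stdint.h>

    int
    main(void)
    {
            uint32_t stathz = 100;  /* arbitrary example rate */
            uint32_t avg, half_avg, var, min, mask;

            /* The same computation initclocks() now performs. */
            avg = 1000000000 / stathz;
            half_avg = avg / 2;
            for (var = 1U << 31; var > half_avg; var /= 2)
                    continue;
            min = avg - (var / 2);
            mask = var - 1;

            /* A randomized period is min plus an offset in [0, mask]. */
            printf("avg=%u ns, range [%u, %u] ns, mask=0x%08x\n",
                avg, min, min + mask, mask);
            return 0;
    }

Because var <= avg / 2, the minimum period never drops below roughly three
quarters of the average: randomization jitters the statclock but cannot
collapse its period toward zero.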
diff --git a/sys/sys/systm.h b/sys/sys/systm.h
index 6d8a65bdb77..868cd688431 100644
--- a/sys/sys/systm.h
+++ b/sys/sys/systm.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: systm.h,v 1.166 2023/09/14 19:39:47 cheloha Exp $ */
+/* $OpenBSD: systm.h,v 1.167 2023/09/14 20:58:51 cheloha Exp $ */
 /* $NetBSD: systm.h,v 1.50 1996/06/09 04:55:09 briggs Exp $ */
 
 /*-
@@ -234,11 +234,14 @@ int	tstohz(const struct timespec *);
 void	realitexpire(void *);
 
 extern uint32_t hardclock_period;
+extern uint32_t statclock_avg;
 extern int statclock_is_randomized;
 
 struct clockframe;
 void	hardclock(struct clockframe *);
-void	statclock(struct clockframe *);
+
+struct clockintr;
+void	statclock(struct clockintr *, void *, void *);
 
 void	initclocks(void);
 void	inittodr(time_t);
-- 
2.20.1
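A postscript on the last bullet of the commit message: exporting
statclock_avg through sys/systm.h lets clockintr_cpu_init() pull a stale
statclock forward after suspend or hibernate instead of replaying every
period missed while the machine was asleep.  A hypothetical sketch of that
follow-up, not part of this patch, with the surrounding function body
elided:

    /*
     * Hypothetical follow-up inside clockintr_cpu_init(), sketched
     * against the API shown in this patch: after a suspend, the
     * statclock's expiration point lies far in the past, so advance
     * it past the present in one step.  clockintr_advance() returns
     * the number of skipped periods, which can be ignored here.
     */
    clockintr_advance(cq->cq_statclock, statclock_avg);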