From 106c68c47e20736b207a9473162ce188a63d16e7 Mon Sep 17 00:00:00 2001
From: cheloha <cheloha@openbsd.org>
Date: Tue, 17 Oct 2023 00:04:02 +0000
Subject: [PATCH] clockintr: move callback-specific API behaviors to
 "clockrequest" namespace

The API's behavior when invoked from a callback function is impossible
to document.  Move the special behavior into a distinct namespace,
"clockrequest".

- Add a 'struct clockrequest'.  Basically a stripped-down 'struct
  clockintr' for exclusive use during clockintr_dispatch().
- In clockintr_queue, replace the "cq_shadow" clockintr with a
  "cq_request" clockrequest.  They serve the same purpose.
- CLST_SHADOW_PENDING -> CR_RESCHEDULE; different namespace, same
  meaning.
- CLST_IGNORE_SHADOW -> CLST_IGNORE_REQUEST; same meaning.
- Move shadow branch in clockintr_advance() to clockrequest_advance().
- clockintr_request_random() becomes clockrequest_advance_random().
- Delete dead shadow branches in clockintr_cancel(),
  clockintr_schedule().
- Callback functions now get a clockrequest pointer instead of a
  special clockintr pointer: update all prototypes, callers.

No functional change intended.
---
 sys/kern/kern_clock.c     |   8 +--
 sys/kern/kern_clockintr.c | 101 +++++++++++++++++++-------------------
 sys/kern/kern_time.c      |   6 +--
 sys/kern/sched_bsd.c      |   6 +--
 sys/kern/subr_prof.c      |  12 ++---
 sys/sys/clockintr.h       |  31 +++++++++---
 sys/sys/resourcevar.h     |   7 ++-
 sys/sys/sched.h           |   5 +-
 sys/sys/systm.h           |   6 +--
 sys/sys/time.h            |   6 +--
 10 files changed, 102 insertions(+), 86 deletions(-)

diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index d11ac4fe901..e1669bafed4 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: kern_clock.c,v 1.120 2023/10/11 15:42:44 cheloha Exp $	*/
+/*	$OpenBSD: kern_clock.c,v 1.121 2023/10/17 00:04:02 cheloha Exp $	*/
 /*	$NetBSD: kern_clock.c,v 1.34 1996/06/09 04:51:03 briggs Exp $	*/
 
 /*-
@@ -277,7 +277,7 @@ stopprofclock(struct process *pr)
  * do process and kernel statistics.
  */
 void
-statclock(struct clockintr *cl, void *cf, void *arg)
+statclock(struct clockrequest *cr, void *cf, void *arg)
 {
 	uint64_t count, i;
 	struct clockframe *frame = cf;
@@ -287,10 +287,10 @@ statclock(struct clockintr *cl, void *cf, void *arg)
 	struct process *pr;
 
 	if (statclock_is_randomized) {
-		count = clockintr_advance_random(cl, statclock_min,
+		count = clockrequest_advance_random(cr, statclock_min,
 		    statclock_mask);
 	} else {
-		count = clockintr_advance(cl, statclock_avg);
+		count = clockrequest_advance(cr, statclock_avg);
 	}
 
 	if (CLKF_USERMODE(frame)) {
diff --git a/sys/kern/kern_clockintr.c b/sys/kern/kern_clockintr.c
index b8e45d4d24e..c9f5fd2cd49 100644
--- a/sys/kern/kern_clockintr.c
+++ b/sys/kern/kern_clockintr.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: kern_clockintr.c,v 1.61 2023/10/11 15:07:04 cheloha Exp $	*/
+/*	$OpenBSD: kern_clockintr.c,v 1.62 2023/10/17 00:04:02 cheloha Exp $	*/
 /*
  * Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
  * Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
@@ -31,7 +31,7 @@
 #include <sys/sysctl.h>
 #include <sys/time.h>
 
-void clockintr_hardclock(struct clockintr *, void *, void *);
+void clockintr_hardclock(struct clockrequest *, void *, void *);
 void clockintr_schedule_locked(struct clockintr *, uint64_t);
 void clockqueue_intrclock_install(struct clockintr_queue *,
     const struct intrclock *);
@@ -164,8 +164,11 @@ clockintr_dispatch(void *frame)
 {
 	uint64_t lateness, run = 0, start;
 	struct cpu_info *ci = curcpu();
-	struct clockintr *cl, *shadow;
+	struct clockintr *cl;
 	struct clockintr_queue *cq = &ci->ci_queue;
+	struct clockrequest *request = &cq->cq_request;
+	void *arg;
+	void (*func)(struct clockrequest *, void *, void *);
 	uint32_t ogen;
 
 	if (cq->cq_dispatch != 0)
@@ -204,28 +207,26 @@ clockintr_dispatch(void *frame)
 		}
 
 		/*
-		 * This clockintr has expired.  Initialize a shadow copy
-		 * and execute it.
+		 * This clockintr has expired.  Execute it.
 		 */
 		clockqueue_pend_delete(cq, cl);
-		shadow = &cq->cq_shadow;
-		shadow->cl_expiration = cl->cl_expiration;
-		shadow->cl_arg = cl->cl_arg;
-		shadow->cl_func = cl->cl_func;
+		request->cr_expiration = cl->cl_expiration;
+		arg = cl->cl_arg;
+		func = cl->cl_func;
 		cq->cq_running = cl;
 		mtx_leave(&cq->cq_mtx);
 
-		shadow->cl_func(shadow, frame, shadow->cl_arg);
+		func(request, frame, arg);
 
 		mtx_enter(&cq->cq_mtx);
 		cq->cq_running = NULL;
-		if (ISSET(cl->cl_flags, CLST_IGNORE_SHADOW)) {
-			CLR(cl->cl_flags, CLST_IGNORE_SHADOW);
-			CLR(shadow->cl_flags, CLST_SHADOW_PENDING);
+		if (ISSET(cl->cl_flags, CLST_IGNORE_REQUEST)) {
+			CLR(cl->cl_flags, CLST_IGNORE_REQUEST);
+			CLR(request->cr_flags, CR_RESCHEDULE);
 		}
-		if (ISSET(shadow->cl_flags, CLST_SHADOW_PENDING)) {
-			CLR(shadow->cl_flags, CLST_SHADOW_PENDING);
-			clockqueue_pend_insert(cq, cl, shadow->cl_expiration);
+		if (ISSET(request->cr_flags, CR_RESCHEDULE)) {
+			CLR(request->cr_flags, CR_RESCHEDULE);
+			clockqueue_pend_insert(cq, cl, request->cr_expiration);
 		}
 		run++;
 	}
@@ -274,35 +275,43 @@ clockintr_advance(struct clockintr *cl, uint64_t period)
 	uint64_t count, expiration;
 	struct clockintr_queue *cq = cl->cl_queue;
 
-	if (cl == &cq->cq_shadow) {
-		count = nsec_advance(&cl->cl_expiration, period, cq->cq_uptime);
-		SET(cl->cl_flags, CLST_SHADOW_PENDING);
-	} else {
-		mtx_enter(&cq->cq_mtx);
-		expiration = cl->cl_expiration;
-		count = nsec_advance(&expiration, period, nsecuptime());
-		clockintr_schedule_locked(cl, expiration);
-		mtx_leave(&cq->cq_mtx);
-	}
+	mtx_enter(&cq->cq_mtx);
+	expiration = cl->cl_expiration;
+	count = nsec_advance(&expiration, period, nsecuptime());
+	clockintr_schedule_locked(cl, expiration);
+	mtx_leave(&cq->cq_mtx);
+
 	return count;
 }
 
 uint64_t
-clockintr_advance_random(struct clockintr *cl, uint64_t min, uint32_t mask)
+clockrequest_advance(struct clockrequest *cr, uint64_t period)
+{
+	struct clockintr_queue *cq = cr->cr_queue;
+
+	KASSERT(cr == &cq->cq_request);
+
+	SET(cr->cr_flags, CR_RESCHEDULE);
+	return nsec_advance(&cr->cr_expiration, period, cq->cq_uptime);
+}
+
+uint64_t
+clockrequest_advance_random(struct clockrequest *cr, uint64_t min,
+    uint32_t mask)
 {
 	uint64_t count = 0;
-	struct clockintr_queue *cq = cl->cl_queue;
+	struct clockintr_queue *cq = cr->cr_queue;
 	uint32_t off;
 
-	KASSERT(cl == &cq->cq_shadow);
+	KASSERT(cr == &cq->cq_request);
 
-	while (cl->cl_expiration <= cq->cq_uptime) {
+	while (cr->cr_expiration <= cq->cq_uptime) {
 		while ((off = (random() & mask)) == 0)
 			continue;
-		cl->cl_expiration += min + off;
+		cr->cr_expiration += min + off;
 		count++;
 	}
-	SET(cl->cl_flags, CLST_SHADOW_PENDING);
+
+	SET(cr->cr_flags, CR_RESCHEDULE);
 	return count;
 }
@@ -312,11 +321,6 @@ clockintr_cancel(struct clockintr *cl)
 	struct clockintr_queue *cq = cl->cl_queue;
 	int was_next;
 
-	if (cl == &cq->cq_shadow) {
-		CLR(cl->cl_flags, CLST_SHADOW_PENDING);
-		return;
-	}
-
 	mtx_enter(&cq->cq_mtx);
 	if (ISSET(cl->cl_flags, CLST_PENDING)) {
 		was_next = cl == TAILQ_FIRST(&cq->cq_pend);
@@ -329,13 +333,13 @@ clockintr_cancel(struct clockintr *cl)
 		}
 	}
 	if (cl == cq->cq_running)
-		SET(cl->cl_flags, CLST_IGNORE_SHADOW);
+		SET(cl->cl_flags, CLST_IGNORE_REQUEST);
 	mtx_leave(&cq->cq_mtx);
 }
 
 struct clockintr *
 clockintr_establish(struct cpu_info *ci,
-    void (*func)(struct clockintr *, void *, void *), void *arg)
+    void (*func)(struct clockrequest *, void *, void *), void *arg)
 {
 	struct clockintr *cl;
 	struct clockintr_queue *cq = &ci->ci_queue;
@@ -358,14 +362,9 @@ clockintr_schedule(struct clockintr *cl, uint64_t expiration)
 {
 	struct clockintr_queue *cq = cl->cl_queue;
 
-	if (cl == &cq->cq_shadow) {
-		cl->cl_expiration = expiration;
-		SET(cl->cl_flags, CLST_SHADOW_PENDING);
-	} else {
-		mtx_enter(&cq->cq_mtx);
-		clockintr_schedule_locked(cl, expiration);
-		mtx_leave(&cq->cq_mtx);
-	}
+	mtx_enter(&cq->cq_mtx);
+	clockintr_schedule_locked(cl, expiration);
+	mtx_leave(&cq->cq_mtx);
 }
 
 void
@@ -385,7 +384,7 @@ clockintr_schedule_locked(struct clockintr *cl, uint64_t expiration)
 		}
 	}
 	if (cl == cq->cq_running)
-		SET(cl->cl_flags, CLST_IGNORE_SHADOW);
+		SET(cl->cl_flags, CLST_IGNORE_REQUEST);
 }
 
 void
@@ -404,11 +403,11 @@ clockintr_stagger(struct clockintr *cl, uint64_t period, uint32_t numer,
 }
 
 void
-clockintr_hardclock(struct clockintr *cl, void *frame, void *arg)
+clockintr_hardclock(struct clockrequest *cr, void *frame, void *arg)
 {
 	uint64_t count, i;
 
-	count = clockintr_advance(cl, hardclock_period);
+	count = clockrequest_advance(cr, hardclock_period);
 	for (i = 0; i < count; i++)
 		hardclock(frame);
 }
@@ -419,7 +418,7 @@ clockqueue_init(struct clockintr_queue *cq)
 	if (ISSET(cq->cq_flags, CQ_INIT))
 		return;
 
-	cq->cq_shadow.cl_queue = cq;
+	cq->cq_request.cr_queue = cq;
 	mtx_init(&cq->cq_mtx, IPL_CLOCK);
 	TAILQ_INIT(&cq->cq_all);
 	TAILQ_INIT(&cq->cq_pend);
diff --git a/sys/kern/kern_time.c b/sys/kern/kern_time.c
index 1687ee61d14..052c6316fd4 100644
--- a/sys/kern/kern_time.c
+++ b/sys/kern/kern_time.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: kern_time.c,v 1.166 2023/09/10 03:08:05 cheloha Exp $	*/
+/*	$OpenBSD: kern_time.c,v 1.167 2023/10/17 00:04:02 cheloha Exp $	*/
 /*	$NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $	*/
 
 /*
@@ -755,7 +755,7 @@ itimerdecr(struct itimerspec *itp, const struct timespec *decrement)
 }
 
 void
-itimer_update(struct clockintr *cl, void *cf, void *arg)
+itimer_update(struct clockrequest *cr, void *cf, void *arg)
 {
 	struct timespec elapsed;
 	uint64_t nsecs;
@@ -770,7 +770,7 @@ itimer_update(struct clockintr *cl, void *cf, void *arg)
 	if (!ISSET(pr->ps_flags, PS_ITIMER))
 		return;
 
-	nsecs = clockintr_advance(cl, hardclock_period) * hardclock_period;
+	nsecs = clockrequest_advance(cr, hardclock_period) * hardclock_period;
 	NSEC_TO_TIMESPEC(nsecs, &elapsed);
 
 	mtx_enter(&itimer_mtx);
diff --git a/sys/kern/sched_bsd.c b/sys/kern/sched_bsd.c
index 8f1db1ec3b0..82fb73f6d62 100644
--- a/sys/kern/sched_bsd.c
+++ b/sys/kern/sched_bsd.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: sched_bsd.c,v 1.88 2023/10/11 15:42:44 cheloha Exp $	*/
+/*	$OpenBSD: sched_bsd.c,v 1.89 2023/10/17 00:04:02 cheloha Exp $	*/
 /*	$NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $	*/
 
 /*-
@@ -83,13 +83,13 @@ struct loadavg averunnable;
  * Force switch among equal priority processes every 100ms.
  */
 void
-roundrobin(struct clockintr *cl, void *cf, void *arg)
+roundrobin(struct clockrequest *cr, void *cf, void *arg)
 {
 	uint64_t count;
 	struct cpu_info *ci = curcpu();
 	struct schedstate_percpu *spc = &ci->ci_schedstate;
 
-	count = clockintr_advance(cl, roundrobin_period);
+	count = clockrequest_advance(cr, roundrobin_period);
 
 	if (ci->ci_curproc != NULL) {
 		if (spc->spc_schedflags & SPCF_SEENRR || count >= 2) {
diff --git a/sys/kern/subr_prof.c b/sys/kern/subr_prof.c
index 52292b30481..19eb3cc6fdd 100644
--- a/sys/kern/subr_prof.c
+++ b/sys/kern/subr_prof.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: subr_prof.c,v 1.39 2023/10/11 15:42:44 cheloha Exp $	*/
+/*	$OpenBSD: subr_prof.c,v 1.40 2023/10/17 00:04:02 cheloha Exp $	*/
 /*	$NetBSD: subr_prof.c,v 1.12 1996/04/22 01:38:50 christos Exp $	*/
 
 /*-
@@ -64,7 +64,7 @@ u_int gmon_cpu_count;		/* [K] number of CPUs with profiling enabled */
 
 extern char etext[];
 
-void gmonclock(struct clockintr *, void *, void *);
+void gmonclock(struct clockrequest *, void *, void *);
 
 void
 prof_init(void)
@@ -236,14 +236,14 @@ sysctl_doprof(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
 }
 
 void
-gmonclock(struct clockintr *cl, void *cf, void *arg)
+gmonclock(struct clockrequest *cr, void *cf, void *arg)
 {
 	uint64_t count;
 	struct clockframe *frame = cf;
 	struct gmonparam *g = curcpu()->ci_gmon;
 	u_long i;
 
-	count = clockintr_advance(cl, profclock_period);
+	count = clockrequest_advance(cr, profclock_period);
 	if (count > ULONG_MAX)
 		count = ULONG_MAX;
 
@@ -307,13 +307,13 @@ sys_profil(struct proc *p, void *v, register_t *retval)
 }
 
 void
-profclock(struct clockintr *cl, void *cf, void *arg)
+profclock(struct clockrequest *cr, void *cf, void *arg)
 {
 	uint64_t count;
 	struct clockframe *frame = cf;
 	struct proc *p = curproc;
 
-	count = clockintr_advance(cl, profclock_period);
+	count = clockrequest_advance(cr, profclock_period);
 	if (count > ULONG_MAX)
 		count = ULONG_MAX;
 
diff --git a/sys/sys/clockintr.h b/sys/sys/clockintr.h
index e9987f54175..967e0f9f95c 100644
--- a/sys/sys/clockintr.h
+++ b/sys/sys/clockintr.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: clockintr.h,v 1.22 2023/10/11 15:07:04 cheloha Exp $	*/
+/*	$OpenBSD: clockintr.h,v 1.23 2023/10/17 00:04:02 cheloha Exp $	*/
 /*
  * Copyright (c) 2020-2022 Scott Cheloha <cheloha@openbsd.org>
  *
@@ -35,6 +35,7 @@ struct clockintr_stat {
 
 #include <sys/mutex.h>
 #include <sys/queue.h>
 
+struct clockrequest;
 struct cpu_info;
 
 /*
@@ -61,14 +62,29 @@ struct clockintr {
 	TAILQ_ENTRY(clockintr) cl_alink;	/* [m] cq_all glue */
 	TAILQ_ENTRY(clockintr) cl_plink;	/* [m] cq_pend glue */
 	void *cl_arg;				/* [I] argument */
-	void (*cl_func)(struct clockintr *, void *, void *); /* [I] callback */
+	void (*cl_func)(struct clockrequest *, void*, void*); /* [I] callback */
 	struct clockintr_queue *cl_queue;	/* [I] parent queue */
 	uint32_t cl_flags;			/* [m] CLST_* flags */
 };
 
 #define CLST_PENDING		0x00000001	/* scheduled to run */
-#define CLST_SHADOW_PENDING	0x00000002	/* shadow is scheduled to run */
-#define CLST_IGNORE_SHADOW	0x00000004	/* ignore shadow copy */
+#define CLST_IGNORE_REQUEST	0x00000002	/* ignore callback requests */
+
+/*
+ * Interface for callback rescheduling requests.
+ *
+ * Struct member protections:
+ *
+ *	I	Immutable after initialization.
+ *	o	Owned by a single CPU.
+ */
+struct clockrequest {
+	uint64_t cr_expiration;			/* [o] copy of dispatch time */
+	struct clockintr_queue *cr_queue;	/* [I] enclosing queue */
+	uint32_t cr_flags;			/* [o] CR_* flags */
+};
+
+#define CR_RESCHEDULE	0x00000001	/* reschedule upon return */
 
 /*
  * Per-CPU clock interrupt state.
@@ -81,7 +97,7 @@ struct clockintr {
  *	o	Owned by a single CPU.
  */
 struct clockintr_queue {
-	struct clockintr cq_shadow;	/* [o] copy of running clockintr */
+	struct clockrequest cq_request;	/* [o] callback request object */
 	struct mutex cq_mtx;		/* [a] per-queue mutex */
 	uint64_t cq_uptime;		/* [o] cached uptime */
 	TAILQ_HEAD(, clockintr) cq_all;	/* [m] established clockintr list */
@@ -108,12 +124,13 @@ void clockintr_trigger(void);
  */
 
 uint64_t clockintr_advance(struct clockintr *, uint64_t);
-uint64_t clockintr_advance_random(struct clockintr *, uint64_t, uint32_t);
 void clockintr_cancel(struct clockintr *);
 struct clockintr *clockintr_establish(struct cpu_info *,
-    void (*)(struct clockintr *, void *, void *), void *);
+    void (*)(struct clockrequest *, void *, void *), void *);
 void clockintr_schedule(struct clockintr *, uint64_t);
 void clockintr_stagger(struct clockintr *, uint64_t, uint32_t, uint32_t);
+uint64_t clockrequest_advance(struct clockrequest *, uint64_t);
+uint64_t clockrequest_advance_random(struct clockrequest *, uint64_t, uint32_t);
 void clockqueue_init(struct clockintr_queue *);
 int sysctl_clockintr(int *, u_int, void *, size_t *, void *, size_t);
 
diff --git a/sys/sys/resourcevar.h b/sys/sys/resourcevar.h
index b8dff124920..07201edcff7 100644
--- a/sys/sys/resourcevar.h
+++ b/sys/sys/resourcevar.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: resourcevar.h,v 1.30 2023/10/11 15:42:44 cheloha Exp $	*/
+/*	$OpenBSD: resourcevar.h,v 1.31 2023/10/17 00:04:02 cheloha Exp $	*/
 /*	$NetBSD: resourcevar.h,v 1.12 1995/11/22 23:01:53 cgd Exp $	*/
 
 /*
@@ -60,13 +60,12 @@ do {									\
 
 #include <lib/libkern/libkern.h>	/* for KASSERT() */
 
-struct clockintr;
-
 extern uint64_t profclock_period;
 
 void	addupc_intr(struct proc *, u_long, u_long);
 void	addupc_task(struct proc *, u_long, u_int);
-void	profclock(struct clockintr *, void *, void *);
+struct clockrequest;
+void	profclock(struct clockrequest *, void *, void *);
 void	tuagg_locked(struct process *, struct proc *, const struct timespec *);
 void	tuagg(struct process *, struct proc *);
 struct tusage;
diff --git a/sys/sys/sched.h b/sys/sys/sched.h
index 6f4cbb66aef..10d7d40ce49 100644
--- a/sys/sys/sched.h
+++ b/sys/sys/sched.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: sched.h,v 1.65 2023/10/11 15:42:44 cheloha Exp $	*/
+/*	$OpenBSD: sched.h,v 1.66 2023/10/17 00:04:02 cheloha Exp $	*/
 /*	$NetBSD: sched.h,v 1.2 1999/02/28 18:14:58 ross Exp $	*/
 
 /*-
@@ -150,7 +150,8 @@ extern uint64_t roundrobin_period;
 
 struct proc;
 void schedclock(struct proc *);
-void roundrobin(struct clockintr *, void *, void *);
+struct clockrequest;
+void roundrobin(struct clockrequest *, void *, void *);
 void scheduler_start(void);
 void userret(struct proc *p);
 
diff --git a/sys/sys/systm.h b/sys/sys/systm.h
index 42a1c9ffac6..dfea5999881 100644
--- a/sys/sys/systm.h
+++ b/sys/sys/systm.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: systm.h,v 1.168 2023/10/11 15:42:44 cheloha Exp $	*/
+/*	$OpenBSD: systm.h,v 1.169 2023/10/17 00:04:02 cheloha Exp $	*/
 /*	$NetBSD: systm.h,v 1.50 1996/06/09 04:55:09 briggs Exp $	*/
 
 /*-
@@ -240,8 +240,8 @@ extern int statclock_is_randomized;
 
 struct clockframe;
 void	hardclock(struct clockframe *);
-struct clockintr;
-void	statclock(struct clockintr *, void *, void *);
+struct clockrequest;
+void	statclock(struct clockrequest *, void *, void *);
 
 void	initclocks(void);
 void	inittodr(time_t);
diff --git a/sys/sys/time.h b/sys/sys/time.h
index cef9583c880..418380277e4 100644
--- a/sys/sys/time.h
+++ b/sys/sys/time.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: time.h,v 1.65 2023/09/10 03:08:05 cheloha Exp $	*/
+/*	$OpenBSD: time.h,v 1.66 2023/10/17 00:04:02 cheloha Exp $	*/
 /*	$NetBSD: time.h,v 1.18 1996/04/23 10:29:33 mycroft Exp $	*/
 
 /*
@@ -330,8 +330,8 @@ uint64_t	getnsecuptime(void);
 
 struct proc;
 int	clock_gettime(struct proc *, clockid_t, struct timespec *);
-struct clockintr;
-void	itimer_update(struct clockintr *, void *, void *);
+struct clockrequest;
+void	itimer_update(struct clockrequest *, void *, void *);
 void	cancel_all_itimers(void);
 
 int	settime(const struct timespec *);
-- 
2.20.1
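
P.S. For readers new to the split, here is a minimal sketch of how a
periodic callback looks under the new API.  This is illustrative only,
not part of the commit: the "example_*" names and the 10ms period are
hypothetical.  The callback receives a struct clockrequest pointer and
calls clockrequest_advance(), which both returns the number of elapsed
periods and raises CR_RESCHEDULE so the dispatcher requeues the
clockintr after the callback returns; establishment and scheduling stay
in the clockintr namespace.

#include <sys/param.h>
#include <sys/clockintr.h>
#include <sys/time.h>

#define EXAMPLE_PERIOD	10000000ULL	/* hypothetical 10ms period, in ns */

void	example_tick(struct clockrequest *, void *, void *);

void
example_tick(struct clockrequest *cr, void *frame, void *arg)
{
	uint64_t count, i;

	/*
	 * Advance the expiration past the current uptime and request
	 * rescheduling.  "count" is how many periods elapsed since
	 * the last run, so late dispatches can catch up.
	 */
	count = clockrequest_advance(cr, EXAMPLE_PERIOD);
	for (i = 0; i < count; i++) {
		/* do one period's worth of work here */
	}
}

/*
 * Establishing and starting the callback happens outside callback
 * context, so it uses the clockintr_* interfaces.
 */
struct clockintr *
example_start(struct cpu_info *ci)
{
	struct clockintr *cl;

	cl = clockintr_establish(ci, example_tick, NULL);
	if (cl != NULL)
		clockintr_schedule(cl, getnsecuptime() + EXAMPLE_PERIOD);
	return cl;
}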