From 1d970828bcdfaa6f1de83da4a110fd1536b535fe Mon Sep 17 00:00:00 2001
From: cheloha
Date: Wed, 24 Jan 2024 19:23:38 +0000
Subject: [PATCH] clockintr: switch from callee- to caller-allocated
 clockintr structs

Currently, clockintr_establish() calls malloc(9) to allocate a
clockintr struct on behalf of the caller. mpi@ says this behavior is
incompatible with dt(4). In particular, calling malloc(9) during the
initialization of a PCB outside of dt_pcb_alloc() is (a) awkward and
(b) may conflict with future changes/optimizations to PCB allocation.

To side-step the problem, this patch changes the clockintr subsystem
to use caller-allocated clockintr structs instead of callee-allocated
structs.

clockintr_establish() is named after softintr_establish(), which uses
malloc(9) internally to create softintr objects. The clockintr
subsystem is no longer using malloc(9), so the "establish" naming is
no longer apt. To avoid confusion, this patch also renames
"clockintr_establish" to "clockintr_bind".

Requested by mpi@. Tweaked by mpi@.

Thread: https://marc.info/?l=openbsd-tech&m=170597126103504&w=2

ok claudio@ mlarkin@ mpi@
---
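Caller-side sketch of the API change, adapted from the sched_init_cpu()
hunk in kern_sched.c below (abbreviated, for illustration only; not part
of the applied diff):

	/* Before: the subsystem malloc(9)s the handle and may fail. */
	spc->spc_itimer = clockintr_establish(ci, itimer_update, NULL);
	if (spc->spc_itimer == NULL)
		panic("%s: clockintr_establish itimer_update", __func__);

	/* After: the caller embeds a struct clockintr (see sys/sched.h) */
	/* and binds it: no allocation, no failure path.                 */
	clockintr_bind(&spc->spc_itimer, ci, itimer_update, NULL);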

 sys/arch/alpha/include/cpu.h   |  4 +--
 sys/arch/amd64/include/cpu.h   |  4 +--
 sys/arch/arm/include/cpu.h     |  4 +--
 sys/arch/arm64/include/cpu.h   |  4 +--
 sys/arch/hppa/include/cpu.h    |  4 +--
 sys/arch/i386/include/cpu.h    |  4 +--
 sys/arch/m88k/include/cpu.h    |  4 +--
 sys/arch/mips64/include/cpu.h  |  4 +--
 sys/arch/powerpc/include/cpu.h |  4 +--
 sys/arch/riscv64/include/cpu.h |  4 +--
 sys/arch/sh/include/cpu.h      |  4 +--
 sys/arch/sparc64/include/cpu.h |  4 +--
 sys/kern/kern_clockintr.c      | 49 +++++++++++++++-------------
 sys/kern/kern_fork.c           |  6 ++---
 sys/kern/kern_sched.c          | 22 +++++----------
 sys/kern/sched_bsd.c           | 10 +++----
 sys/kern/subr_prof.c           | 17 +++++-------
 sys/sys/clockintr.h            |  8 +++---
 sys/sys/sched.h                | 12 ++++-----
 19 files changed, 77 insertions(+), 95 deletions(-)

diff --git a/sys/arch/alpha/include/cpu.h b/sys/arch/alpha/include/cpu.h
index b2a438f69e7..e03cd31325b 100644
--- a/sys/arch/alpha/include/cpu.h
+++ b/sys/arch/alpha/include/cpu.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: cpu.h,v 1.70 2023/07/25 18:16:19 cheloha Exp $	*/
+/*	$OpenBSD: cpu.h,v 1.71 2024/01/24 19:23:38 cheloha Exp $	*/
 /*	$NetBSD: cpu.h,v 1.45 2000/08/21 02:03:12 thorpej Exp $	*/
 
 /*-
@@ -212,7 +212,7 @@ struct cpu_info {
 #endif
 #ifdef GPROF
 	struct gmonparam *ci_gmon;
-	struct clockintr *ci_gmonclock;
+	struct clockintr ci_gmonclock;
 #endif
 	struct clockintr_queue ci_queue;
 	char ci_panicbuf[512];
diff --git a/sys/arch/amd64/include/cpu.h b/sys/arch/amd64/include/cpu.h
index 9069c3df8a8..6c7ae1de4df 100644
--- a/sys/arch/amd64/include/cpu.h
+++ b/sys/arch/amd64/include/cpu.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: cpu.h,v 1.159 2023/08/23 01:55:46 cheloha Exp $	*/
+/*	$OpenBSD: cpu.h,v 1.160 2024/01/24 19:23:39 cheloha Exp $	*/
 /*	$NetBSD: cpu.h,v 1.1 2003/04/26 18:39:39 fvdl Exp $	*/
 
 /*-
@@ -208,7 +208,7 @@ struct cpu_info {
 	u_int64_t ci_hz_aperf;
 #if defined(GPROF) || defined(DDBPROF)
 	struct gmonparam *ci_gmon;
-	struct clockintr *ci_gmonclock;
+	struct clockintr ci_gmonclock;
 #endif
 	u_int32_t ci_vmm_flags;
 #define CI_VMM_VMX	(1 << 0)
diff --git a/sys/arch/arm/include/cpu.h b/sys/arch/arm/include/cpu.h
index 1ced238694e..aa02a42fc2d 100644
--- a/sys/arch/arm/include/cpu.h
+++ b/sys/arch/arm/include/cpu.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: cpu.h,v 1.64 2023/08/23 01:55:46 cheloha Exp $	*/
+/*	$OpenBSD: cpu.h,v 1.65 2024/01/24 19:23:39 cheloha Exp $	*/
 /*	$NetBSD: cpu.h,v 1.34 2003/06/23 11:01:08 martin Exp $	*/
 
 /*
@@ -198,7 +198,7 @@ struct cpu_info {
 
 #ifdef GPROF
 	struct gmonparam *ci_gmon;
-	struct clockintr *ci_gmonclock;
+	struct clockintr ci_gmonclock;
 #endif
 	struct clockintr_queue ci_queue;
 	char ci_panicbuf[512];
diff --git a/sys/arch/arm64/include/cpu.h b/sys/arch/arm64/include/cpu.h
index 37ae5c40279..937f5fa0e41 100644
--- a/sys/arch/arm64/include/cpu.h
+++ b/sys/arch/arm64/include/cpu.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: cpu.h,v 1.41 2024/01/15 11:58:45 kettenis Exp $	*/
+/*	$OpenBSD: cpu.h,v 1.42 2024/01/24 19:23:39 cheloha Exp $	*/
 /*
  * Copyright (c) 2016 Dale Rahn
  *
@@ -174,7 +174,7 @@ struct cpu_info {
 
 #ifdef GPROF
 	struct gmonparam *ci_gmon;
-	struct clockintr *ci_gmonclock;
+	struct clockintr ci_gmonclock;
 #endif
 	struct clockintr_queue ci_queue;
 	char ci_panicbuf[512];
diff --git a/sys/arch/hppa/include/cpu.h b/sys/arch/hppa/include/cpu.h
index cbb6e403c40..092bc6e7447 100644
--- a/sys/arch/hppa/include/cpu.h
+++ b/sys/arch/hppa/include/cpu.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: cpu.h,v 1.102 2023/11/05 16:33:50 miod Exp $	*/
+/*	$OpenBSD: cpu.h,v 1.103 2024/01/24 19:23:39 cheloha Exp $	*/
 
 /*
  * Copyright (c) 2000-2004 Michael Shalayeff
@@ -113,7 +113,7 @@ struct cpu_info {
 #endif
 #ifdef GPROF
 	struct gmonparam *ci_gmon;
-	struct clockintr *ci_gmonclock;
+	struct clockintr ci_gmonclock;
 #endif
 	struct clockintr_queue ci_queue;
 	char ci_panicbuf[512];
diff --git a/sys/arch/i386/include/cpu.h b/sys/arch/i386/include/cpu.h
index 297344bace9..82da25e54e8 100644
--- a/sys/arch/i386/include/cpu.h
+++ b/sys/arch/i386/include/cpu.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: cpu.h,v 1.183 2023/08/23 01:55:46 cheloha Exp $	*/
+/*	$OpenBSD: cpu.h,v 1.184 2024/01/24 19:23:39 cheloha Exp $	*/
 /*	$NetBSD: cpu.h,v 1.35 1996/05/05 19:29:26 christos Exp $	*/
 
 /*-
@@ -168,7 +168,7 @@ struct cpu_info {
 	struct ksensor ci_sensor;
 #if defined(GPROF) || defined(DDBPROF)
 	struct gmonparam *ci_gmon;
-	struct clockintr *ci_gmonclock;
+	struct clockintr ci_gmonclock;
 #endif
 	struct clockintr_queue ci_queue;
 	char ci_panicbuf[512];
diff --git a/sys/arch/m88k/include/cpu.h b/sys/arch/m88k/include/cpu.h
index 578e71414d1..29644e94ad8 100644
--- a/sys/arch/m88k/include/cpu.h
+++ b/sys/arch/m88k/include/cpu.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: cpu.h,v 1.73 2023/07/25 18:16:20 cheloha Exp $	*/
+/*	$OpenBSD: cpu.h,v 1.74 2024/01/24 19:23:39 cheloha Exp $	*/
 /*
  * Copyright (c) 1996 Nivas Madhur
  * Copyright (c) 1992, 1993
@@ -177,7 +177,7 @@ struct cpu_info {
 #endif
 #ifdef GPROF
 	struct gmonparam *ci_gmon;
-	struct clockintr *ci_gmonclock;
+	struct clockintr ci_gmonclock;
 #endif
 	struct clockintr_queue ci_queue;
 	char ci_panicbuf[512];
diff --git a/sys/arch/mips64/include/cpu.h b/sys/arch/mips64/include/cpu.h
index d647dbb1964..c7ea46300c1 100644
--- a/sys/arch/mips64/include/cpu.h
+++ b/sys/arch/mips64/include/cpu.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: cpu.h,v 1.144 2023/08/23 01:55:47 cheloha Exp $	*/
+/*	$OpenBSD: cpu.h,v 1.145 2024/01/24 19:23:39 cheloha Exp $	*/
 
 /*-
  * Copyright (c) 1992, 1993
@@ -200,7 +200,7 @@ struct cpu_info {
 #endif
 #ifdef GPROF
 	struct gmonparam *ci_gmon;
-	struct clockintr *ci_gmonclock;
+	struct clockintr ci_gmonclock;
 #endif
 	char ci_panicbuf[512];
 };
diff --git a/sys/arch/powerpc/include/cpu.h b/sys/arch/powerpc/include/cpu.h
index 4eee78a2af3..d6fd15c7027 100644
--- a/sys/arch/powerpc/include/cpu.h
+++ b/sys/arch/powerpc/include/cpu.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: cpu.h,v 1.75 2023/07/25 18:16:20 cheloha Exp $	*/
+/*	$OpenBSD: cpu.h,v 1.76 2024/01/24 19:23:39 cheloha Exp $	*/
 /*	$NetBSD: cpu.h,v 1.1 1996/09/30 16:34:21 ws Exp $	*/
 
 /*
@@ -89,7 +89,7 @@ struct cpu_info {
 #endif
 #ifdef GPROF
 	struct gmonparam *ci_gmon;
-	struct clockintr *ci_gmonclock;
+	struct clockintr ci_gmonclock;
 #endif
 	char ci_panicbuf[512];
 };
diff --git a/sys/arch/riscv64/include/cpu.h b/sys/arch/riscv64/include/cpu.h
index 7ebbeeb1900..282cece0448 100644
--- a/sys/arch/riscv64/include/cpu.h
+++ b/sys/arch/riscv64/include/cpu.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: cpu.h,v 1.19 2023/09/19 19:20:33 kettenis Exp $	*/
+/*	$OpenBSD: cpu.h,v 1.20 2024/01/24 19:23:39 cheloha Exp $	*/
 /*
  * Copyright (c) 2019 Mike Larkin
  *
@@ -124,7 +124,7 @@ struct cpu_info {
 
 #ifdef GPROF
 	struct gmonparam *ci_gmon;
-	struct clockintr *ci_gmonclock;
+	struct clockintr ci_gmonclock;
 #endif
 
 	char ci_panicbuf[512];
diff --git a/sys/arch/sh/include/cpu.h b/sys/arch/sh/include/cpu.h
index a080751e833..1d3ec9a33ea 100644
--- a/sys/arch/sh/include/cpu.h
+++ b/sys/arch/sh/include/cpu.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: cpu.h,v 1.35 2023/07/25 18:16:21 cheloha Exp $	*/
+/*	$OpenBSD: cpu.h,v 1.36 2024/01/24 19:23:39 cheloha Exp $	*/
 /*	$NetBSD: cpu.h,v 1.41 2006/01/21 04:24:12 uwe Exp $	*/
 
 /*-
@@ -68,7 +68,7 @@ struct cpu_info {
 #endif
 #ifdef GPROF
 	struct gmonparam *ci_gmon;
-	struct clockintr *ci_gmonclock;
+	struct clockintr ci_gmonclock;
 #endif
 
 	int ci_want_resched;
diff --git a/sys/arch/sparc64/include/cpu.h b/sys/arch/sparc64/include/cpu.h
index 995203c5633..212e27e1436 100644
--- a/sys/arch/sparc64/include/cpu.h
+++ b/sys/arch/sparc64/include/cpu.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: cpu.h,v 1.102 2023/07/25 18:16:21 cheloha Exp $	*/
+/*	$OpenBSD: cpu.h,v 1.103 2024/01/24 19:23:39 cheloha Exp $	*/
 /*	$NetBSD: cpu.h,v 1.28 2001/06/14 22:56:58 thorpej Exp $	*/
 
 /*
@@ -165,7 +165,7 @@ struct cpu_info {
 #endif
 #ifdef GPROF
 	struct gmonparam *ci_gmon;
-	struct clockintr *ci_gmonclock;
+	struct clockintr ci_gmonclock;
 #endif
 	char ci_panicbuf[512];
 };
diff --git a/sys/kern/kern_clockintr.c b/sys/kern/kern_clockintr.c
index 6e771a043ba..f52ddc37f68 100644
--- a/sys/kern/kern_clockintr.c
+++ b/sys/kern/kern_clockintr.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: kern_clockintr.c,v 1.63 2024/01/15 01:15:37 cheloha Exp $	*/
+/*	$OpenBSD: kern_clockintr.c,v 1.64 2024/01/24 19:23:38 cheloha Exp $	*/
 /*
  * Copyright (c) 2003 Dale Rahn
  * Copyright (c) 2020 Mark Kettenis
@@ -62,11 +62,9 @@ clockintr_cpu_init(const struct intrclock *ic)
 		clockqueue_intrclock_install(cq, ic);
 
 	/* TODO: Remove this from struct clockintr_queue. */
-	if (cq->cq_hardclock == NULL) {
-		cq->cq_hardclock = clockintr_establish(ci, clockintr_hardclock,
+	if (cq->cq_hardclock.cl_expiration == 0) {
+		clockintr_bind(&cq->cq_hardclock, ci, clockintr_hardclock,
 		    NULL);
-		if (cq->cq_hardclock == NULL)
-			panic("%s: failed to establish hardclock", __func__);
 	}
 
 	/*
@@ -96,16 +94,16 @@ clockintr_cpu_init(const struct intrclock *ic)
 	 * behalf.
 	 */
 	if (CPU_IS_PRIMARY(ci)) {
-		if (cq->cq_hardclock->cl_expiration == 0)
-			clockintr_schedule(cq->cq_hardclock, 0);
+		if (cq->cq_hardclock.cl_expiration == 0)
+			clockintr_schedule(&cq->cq_hardclock, 0);
 		else
-			clockintr_advance(cq->cq_hardclock, hardclock_period);
+			clockintr_advance(&cq->cq_hardclock, hardclock_period);
 	} else {
-		if (cq->cq_hardclock->cl_expiration == 0) {
-			clockintr_stagger(cq->cq_hardclock, hardclock_period,
+		if (cq->cq_hardclock.cl_expiration == 0) {
+			clockintr_stagger(&cq->cq_hardclock, hardclock_period,
 			    multiplier, MAXCPUS);
 		}
-		clockintr_advance(cq->cq_hardclock, hardclock_period);
+		clockintr_advance(&cq->cq_hardclock, hardclock_period);
 	}
 
 	/*
@@ -113,30 +111,30 @@ clockintr_cpu_init(const struct intrclock *ic)
 	 * stagger a randomized statclock.
 	 */
 	if (!statclock_is_randomized) {
-		if (spc->spc_statclock->cl_expiration == 0) {
-			clockintr_stagger(spc->spc_statclock, statclock_avg,
+		if (spc->spc_statclock.cl_expiration == 0) {
+			clockintr_stagger(&spc->spc_statclock, statclock_avg,
 			    multiplier, MAXCPUS);
 		}
 	}
-	clockintr_advance(spc->spc_statclock, statclock_avg);
+	clockintr_advance(&spc->spc_statclock, statclock_avg);
 
 	/*
 	 * XXX Need to find a better place to do this. We can't do it in
 	 * sched_init_cpu() because initclocks() runs after it.
 	 */
-	if (spc->spc_itimer->cl_expiration == 0) {
-		clockintr_stagger(spc->spc_itimer, hardclock_period,
+	if (spc->spc_itimer.cl_expiration == 0) {
+		clockintr_stagger(&spc->spc_itimer, hardclock_period,
 		    multiplier, MAXCPUS);
 	}
-	if (spc->spc_profclock->cl_expiration == 0) {
-		clockintr_stagger(spc->spc_profclock, profclock_period,
+	if (spc->spc_profclock.cl_expiration == 0) {
+		clockintr_stagger(&spc->spc_profclock, profclock_period,
 		    multiplier, MAXCPUS);
 	}
-	if (spc->spc_roundrobin->cl_expiration == 0) {
-		clockintr_stagger(spc->spc_roundrobin, hardclock_period,
+	if (spc->spc_roundrobin.cl_expiration == 0) {
+		clockintr_stagger(&spc->spc_roundrobin, hardclock_period,
 		    multiplier, MAXCPUS);
 	}
-	clockintr_advance(spc->spc_roundrobin, roundrobin_period);
+	clockintr_advance(&spc->spc_roundrobin, roundrobin_period);
 
 	if (reset_cq_intrclock)
 		SET(cq->cq_flags, CQ_INTRCLOCK);
@@ -337,16 +335,12 @@ clockintr_cancel(struct clockintr *cl)
 	mtx_leave(&cq->cq_mtx);
 }
 
-struct clockintr *
-clockintr_establish(struct cpu_info *ci,
+void
+clockintr_bind(struct clockintr *cl, struct cpu_info *ci,
     void (*func)(struct clockrequest *, void *, void *), void *arg)
 {
-	struct clockintr *cl;
 	struct clockintr_queue *cq = &ci->ci_queue;
 
-	cl = malloc(sizeof *cl, M_DEVBUF, M_NOWAIT | M_ZERO);
-	if (cl == NULL)
-		return NULL;
 	cl->cl_arg = arg;
 	cl->cl_func = func;
 	cl->cl_queue = cq;
@@ -354,7 +348,6 @@ clockintr_establish(struct cpu_info *ci,
 	mtx_enter(&cq->cq_mtx);
 	TAILQ_INSERT_TAIL(&cq->cq_all, cl, cl_alink);
 	mtx_leave(&cq->cq_mtx);
-	return cl;
 }
 
 void
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index b7b93d86556..90acfdc09b2 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: kern_fork.c,v 1.256 2024/01/19 01:43:26 bluhm Exp $	*/
+/*	$OpenBSD: kern_fork.c,v 1.257 2024/01/24 19:23:38 cheloha Exp $	*/
 /*	$NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $	*/
 
 /*
@@ -704,11 +704,11 @@ proc_trampoline_mi(void)
 	/* Start any optional clock interrupts needed by the thread. */
 	if (ISSET(p->p_p->ps_flags, PS_ITIMER)) {
 		atomic_setbits_int(&spc->spc_schedflags, SPCF_ITIMER);
-		clockintr_advance(spc->spc_itimer, hardclock_period);
+		clockintr_advance(&spc->spc_itimer, hardclock_period);
 	}
 	if (ISSET(p->p_p->ps_flags, PS_PROFIL)) {
 		atomic_setbits_int(&spc->spc_schedflags, SPCF_PROFCLOCK);
-		clockintr_advance(spc->spc_profclock, profclock_period);
+		clockintr_advance(&spc->spc_profclock, profclock_period);
 	}
 
 	nanouptime(&spc->spc_runtime);
diff --git a/sys/kern/kern_sched.c b/sys/kern/kern_sched.c
index b10a64c5e80..731c615284d 100644
--- a/sys/kern/kern_sched.c
+++ b/sys/kern/kern_sched.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: kern_sched.c,v 1.93 2023/10/24 13:20:11 claudio Exp $	*/
+/*	$OpenBSD: kern_sched.c,v 1.94 2024/01/24 19:23:38 cheloha Exp $	*/
 /*
  * Copyright (c) 2007, 2008 Artur Grabowski
 *
@@ -88,18 +88,10 @@ sched_init_cpu(struct cpu_info *ci)
 
 	spc->spc_idleproc = NULL;
 
-	spc->spc_itimer = clockintr_establish(ci, itimer_update, NULL);
-	if (spc->spc_itimer == NULL)
-		panic("%s: clockintr_establish itimer_update", __func__);
-	spc->spc_profclock = clockintr_establish(ci, profclock, NULL);
-	if (spc->spc_profclock == NULL)
-		panic("%s: clockintr_establish profclock", __func__);
-	spc->spc_roundrobin = clockintr_establish(ci, roundrobin, NULL);
-	if (spc->spc_roundrobin == NULL)
-		panic("%s: clockintr_establish roundrobin", __func__);
-	spc->spc_statclock = clockintr_establish(ci, statclock, NULL);
-	if (spc->spc_statclock == NULL)
-		panic("%s: clockintr_establish statclock", __func__);
+	clockintr_bind(&spc->spc_itimer, ci, itimer_update, NULL);
+	clockintr_bind(&spc->spc_profclock, ci, profclock, NULL);
+	clockintr_bind(&spc->spc_roundrobin, ci, roundrobin, NULL);
+	clockintr_bind(&spc->spc_statclock, ci, statclock, NULL);
 
 	kthread_create_deferred(sched_kthreads_create, ci);
 
@@ -244,11 +236,11 @@ sched_toidle(void)
 
 	if (ISSET(spc->spc_schedflags, SPCF_ITIMER)) {
 		atomic_clearbits_int(&spc->spc_schedflags, SPCF_ITIMER);
-		clockintr_cancel(spc->spc_itimer);
+		clockintr_cancel(&spc->spc_itimer);
 	}
 	if (ISSET(spc->spc_schedflags, SPCF_PROFCLOCK)) {
 		atomic_clearbits_int(&spc->spc_schedflags, SPCF_PROFCLOCK);
-		clockintr_cancel(spc->spc_profclock);
+		clockintr_cancel(&spc->spc_profclock);
 	}
 
 	atomic_clearbits_int(&spc->spc_schedflags, SPCF_SWITCHCLEAR);
diff --git a/sys/kern/sched_bsd.c b/sys/kern/sched_bsd.c
index 82fb73f6d62..89d58c6528a 100644
--- a/sys/kern/sched_bsd.c
+++ b/sys/kern/sched_bsd.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: sched_bsd.c,v 1.89 2023/10/17 00:04:02 cheloha Exp $	*/
+/*	$OpenBSD: sched_bsd.c,v 1.90 2024/01/24 19:23:38 cheloha Exp $	*/
 /*	$NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $	*/
 
 /*-
@@ -396,11 +396,11 @@ mi_switch(void)
 	/* Stop any optional clock interrupts. */
 	if (ISSET(spc->spc_schedflags, SPCF_ITIMER)) {
 		atomic_clearbits_int(&spc->spc_schedflags, SPCF_ITIMER);
-		clockintr_cancel(spc->spc_itimer);
+		clockintr_cancel(&spc->spc_itimer);
 	}
 	if (ISSET(spc->spc_schedflags, SPCF_PROFCLOCK)) {
 		atomic_clearbits_int(&spc->spc_schedflags, SPCF_PROFCLOCK);
-		clockintr_cancel(spc->spc_profclock);
+		clockintr_cancel(&spc->spc_profclock);
 	}
 
 	/*
@@ -451,11 +451,11 @@ mi_switch(void)
 	/* Start any optional clock interrupts needed by the thread. */
 	if (ISSET(p->p_p->ps_flags, PS_ITIMER)) {
 		atomic_setbits_int(&spc->spc_schedflags, SPCF_ITIMER);
-		clockintr_advance(spc->spc_itimer, hardclock_period);
+		clockintr_advance(&spc->spc_itimer, hardclock_period);
 	}
 	if (ISSET(p->p_p->ps_flags, PS_PROFIL)) {
 		atomic_setbits_int(&spc->spc_schedflags, SPCF_PROFCLOCK);
-		clockintr_advance(spc->spc_profclock, profclock_period);
+		clockintr_advance(&spc->spc_profclock, profclock_period);
 	}
 
 	nanouptime(&spc->spc_runtime);
diff --git a/sys/kern/subr_prof.c b/sys/kern/subr_prof.c
index 19eb3cc6fdd..906c15d9706 100644
--- a/sys/kern/subr_prof.c
+++ b/sys/kern/subr_prof.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: subr_prof.c,v 1.40 2023/10/17 00:04:02 cheloha Exp $	*/
+/*	$OpenBSD: subr_prof.c,v 1.41 2024/01/24 19:23:38 cheloha Exp $	*/
 /*	$NetBSD: subr_prof.c,v 1.12 1996/04/22 01:38:50 christos Exp $	*/
 
 /*-
@@ -101,19 +101,16 @@ prof_init(void)
 
 	/* Allocate and initialize one profiling buffer per CPU. */
 	CPU_INFO_FOREACH(cii, ci) {
-		ci->ci_gmonclock = clockintr_establish(ci, gmonclock, NULL);
-		if (ci->ci_gmonclock == NULL) {
-			printf("%s: clockintr_establish gmonclock\n", __func__);
-			return;
-		}
-		clockintr_stagger(ci->ci_gmonclock, profclock_period,
-		    CPU_INFO_UNIT(ci), MAXCPUS);
 
 		cp = km_alloc(round_page(size), &kv_any, &kp_zero, &kd_nowait);
 		if (cp == NULL) {
 			printf("No memory for profiling.\n");
 			return;
 		}
+		clockintr_bind(&ci->ci_gmonclock, ci, gmonclock, NULL);
+		clockintr_stagger(&ci->ci_gmonclock, profclock_period,
+		    CPU_INFO_UNIT(ci), MAXCPUS);
+
 		p = (struct gmonparam *)cp;
 		cp += sizeof(*p);
 		p->tos = (struct tostruct *)cp;
@@ -159,7 +156,7 @@ prof_state_toggle(struct cpu_info *ci, int oldstate)
 		if (error == 0) {
 			if (++gmon_cpu_count == 1)
 				startprofclock(&process0);
-			clockintr_advance(ci->ci_gmonclock, profclock_period);
+			clockintr_advance(&ci->ci_gmonclock, profclock_period);
 		}
 		break;
 	default:
@@ -167,7 +164,7 @@ prof_state_toggle(struct cpu_info *ci, int oldstate)
 		gp->state = GMON_PROF_OFF;
 		/* FALLTHROUGH */
 	case GMON_PROF_OFF:
-		clockintr_cancel(ci->ci_gmonclock);
+		clockintr_cancel(&ci->ci_gmonclock);
 		if (--gmon_cpu_count == 0)
 			stopprofclock(&process0);
 #if !defined(GPROF)
diff --git a/sys/sys/clockintr.h b/sys/sys/clockintr.h
index 350694edcf3..b2955c9d15d 100644
--- a/sys/sys/clockintr.h
+++ b/sys/sys/clockintr.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: clockintr.h,v 1.24 2024/01/15 01:15:37 cheloha Exp $	*/
+/*	$OpenBSD: clockintr.h,v 1.25 2024/01/24 19:23:38 cheloha Exp $	*/
 /*
  * Copyright (c) 2020-2022 Scott Cheloha
 *
@@ -102,7 +102,7 @@ struct clockintr_queue {
 	TAILQ_HEAD(, clockintr) cq_all;	/* [m] established clockintr list */
 	TAILQ_HEAD(, clockintr) cq_pend;/* [m] pending clockintr list */
 	struct clockintr *cq_running;	/* [m] running clockintr */
-	struct clockintr *cq_hardclock;	/* [o] hardclock handle */
+	struct clockintr cq_hardclock;	/* [o] hardclock handle */
 	struct intrclock cq_intrclock;	/* [I] local interrupt clock */
 	struct clockintr_stat cq_stat;	/* [o] dispatch statistics */
 	volatile uint32_t cq_gen;	/* [o] cq_stat update generation */
@@ -124,9 +124,9 @@ void clockintr_trigger(void);
  */
 
 uint64_t clockintr_advance(struct clockintr *, uint64_t);
-void clockintr_cancel(struct clockintr *);
-struct clockintr *clockintr_establish(struct cpu_info *,
+void clockintr_bind(struct clockintr *, struct cpu_info *,
     void (*)(struct clockrequest *, void *, void *), void *);
+void clockintr_cancel(struct clockintr *);
 void clockintr_schedule(struct clockintr *, uint64_t);
 void clockintr_stagger(struct clockintr *, uint64_t, uint32_t, uint32_t);
 uint64_t clockrequest_advance(struct clockrequest *, uint64_t);
diff --git a/sys/sys/sched.h b/sys/sys/sched.h
index a2158fcea5d..ac6dad2a8bb 100644
--- a/sys/sys/sched.h
+++ b/sys/sys/sched.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: sched.h,v 1.69 2024/01/14 17:23:56 cheloha Exp $	*/
+/*	$OpenBSD: sched.h,v 1.70 2024/01/24 19:23:38 cheloha Exp $	*/
 /*	$NetBSD: sched.h,v 1.2 1999/02/28 18:14:58 ross Exp $	*/
 
 /*-
@@ -95,11 +95,11 @@ struct cpustats {
 
 #ifdef _KERNEL
 
+#include <sys/clockintr.h>
 #include <sys/queue.h>
 
 #define SCHED_NQS	32			/* 32 run queues. */
 
-struct clockintr;
 struct smr_entry;
 
 /*
@@ -115,10 +115,10 @@ struct schedstate_percpu {
 	u_int64_t spc_cp_time[CPUSTATES]; /* CPU state statistics */
 	u_char spc_curpriority;		/* usrpri of curproc */
 
-	struct clockintr *spc_itimer;	/* [o] itimer_update handle */
-	struct clockintr *spc_profclock; /* [o] profclock handle */
-	struct clockintr *spc_roundrobin; /* [o] roundrobin handle */
-	struct clockintr *spc_statclock; /* [o] statclock handle */
+	struct clockintr spc_itimer;	/* [o] itimer_update handle */
+	struct clockintr spc_profclock;	/* [o] profclock handle */
+	struct clockintr spc_roundrobin;/* [o] roundrobin handle */
+	struct clockintr spc_statclock;	/* [o] statclock handle */
 
 	u_int spc_nrun;			/* procs on the run queues */
 
-- 
2.20.1