-/* $OpenBSD: kern_clockintr.c,v 1.50 2023/09/14 20:58:51 cheloha Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.51 2023/09/14 22:07:11 cheloha Exp $ */
/*
* Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
if (ic != NULL)
clockqueue_intrclock_install(cq, ic);
- /* TODO: Remove these from struct clockintr_queue. */
+ /* TODO: Remove this from struct clockintr_queue. */
if (cq->cq_hardclock == NULL) {
cq->cq_hardclock = clockintr_establish(ci, clockintr_hardclock,
NULL);
if (cq->cq_hardclock == NULL)
panic("%s: failed to establish hardclock", __func__);
}
- if (cq->cq_statclock == NULL) {
- cq->cq_statclock = clockintr_establish(ci, statclock, NULL);
- if (cq->cq_statclock == NULL)
- panic("%s: failed to establish statclock", __func__);
- }
/*
* We can always advance the statclock.  There is no reason to
* stagger a randomized statclock.
*/
if (!statclock_is_randomized) {
- if (cq->cq_statclock->cl_expiration == 0) {
- clockintr_stagger(cq->cq_statclock, statclock_avg,
+ if (spc->spc_statclock->cl_expiration == 0) {
+ clockintr_stagger(spc->spc_statclock, statclock_avg,
multiplier, MAXCPUS);
}
}
- clockintr_advance(cq->cq_statclock, statclock_avg);
+ clockintr_advance(spc->spc_statclock, statclock_avg);
/*
* XXX Need to find a better place to do this. We can't do it in
-/* $OpenBSD: kern_sched.c,v 1.90 2023/09/10 03:08:05 cheloha Exp $ */
+/* $OpenBSD: kern_sched.c,v 1.91 2023/09/14 22:07:11 cheloha Exp $ */
/*
* Copyright (c) 2007, 2008 Artur Grabowski <art@openbsd.org>
*
spc->spc_roundrobin = clockintr_establish(ci, roundrobin, NULL);
if (spc->spc_roundrobin == NULL)
panic("%s: clockintr_establish roundrobin", __func__);
+ spc->spc_statclock = clockintr_establish(ci, statclock, NULL);
+ if (spc->spc_statclock == NULL)
+ panic("%s: clockintr_establish statclock", __func__);
kthread_create_deferred(sched_kthreads_create, ci);
-/* $OpenBSD: clockintr.h,v 1.15 2023/09/14 19:51:18 cheloha Exp $ */
+/* $OpenBSD: clockintr.h,v 1.16 2023/09/14 22:07:11 cheloha Exp $ */
/*
* Copyright (c) 2020-2022 Scott Cheloha <cheloha@openbsd.org>
*
TAILQ_HEAD(, clockintr) cq_pend;/* [m] pending clockintr list */
struct clockintr *cq_running; /* [m] running clockintr */
struct clockintr *cq_hardclock; /* [o] hardclock handle */
- struct clockintr *cq_statclock; /* [o] statclock handle */
struct intrclock cq_intrclock; /* [I] local interrupt clock */
struct clockintr_stat cq_stat; /* [o] dispatch statistics */
volatile uint32_t cq_gen; /* [o] cq_stat update generation */
-/* $OpenBSD: sched.h,v 1.63 2023/09/10 03:08:05 cheloha Exp $ */
+/* $OpenBSD: sched.h,v 1.64 2023/09/14 22:07:11 cheloha Exp $ */
/* $NetBSD: sched.h,v 1.2 1999/02/28 18:14:58 ross Exp $ */
/*-
struct clockintr *spc_itimer; /* [o] itimer_update handle */
struct clockintr *spc_profclock; /* [o] profclock handle */
struct clockintr *spc_roundrobin; /* [o] roundrobin handle */
+ struct clockintr *spc_statclock; /* [o] statclock handle */
u_int spc_nrun; /* procs on the run queues */