From a3d0b92bc4266f2023b7d0053a62769db183702f Mon Sep 17 00:00:00 2001
From: cheloha
Date: Sun, 2 Jul 2023 00:55:18 +0000
Subject: [PATCH] clockintr_cpu_init: stagger clock interrupts by MAXCPUS

During clockintr_cpu_init(), we can't stagger by ncpus because not
every platform has fully incremented it yet.  Instead, stagger by
MAXCPUS.  The resulting intervals are smaller, but are probably still
sufficiently large to avoid aggravating lock contention, even on
platforms where MAXCPUS is large.

While here, don't bother staggering the statclock if it is randomized.

With input from claudio@.
---
 sys/kern/kern_clockintr.c | 23 +++++++++++++++--------
 1 file changed, 15 insertions(+), 8 deletions(-)

diff --git a/sys/kern/kern_clockintr.c b/sys/kern/kern_clockintr.c
index 21fad46338b..187898caf07 100644
--- a/sys/kern/kern_clockintr.c
+++ b/sys/kern/kern_clockintr.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: kern_clockintr.c,v 1.25 2023/06/22 16:23:50 cheloha Exp $ */
+/*	$OpenBSD: kern_clockintr.c,v 1.26 2023/07/02 00:55:18 cheloha Exp $ */
 /*
  * Copyright (c) 2003 Dale Rahn
  * Copyright (c) 2020 Mark Kettenis
@@ -107,7 +107,7 @@ clockintr_init(u_int flags)
 void
 clockintr_cpu_init(const struct intrclock *ic)
 {
-	uint64_t multiplier = 0, offset;
+	uint64_t multiplier = 0;
 	struct cpu_info *ci = curcpu();
 	struct clockintr_queue *cq = &ci->ci_queue;
 	int reset_cq_intrclock = 0;
@@ -170,21 +170,28 @@ clockintr_cpu_init(const struct intrclock *ic)
 		clockintr_advance(cq->cq_hardclock, hardclock_period);
 	} else {
 		if (cq->cq_hardclock->cl_expiration == 0) {
-			offset = hardclock_period / ncpus * multiplier;
-			cq->cq_hardclock->cl_expiration = offset;
+			clockintr_stagger(cq->cq_hardclock, hardclock_period,
+			    multiplier, MAXCPUS);
 		}
 		clockintr_advance(cq->cq_hardclock, hardclock_period);
 	}
 
 	/*
 	 * We can always advance the statclock and schedclock.
+	 * There is no reason to stagger a randomized statclock.
 	 */
-	offset = statclock_avg / ncpus * multiplier;
-	clockintr_schedule(cq->cq_statclock, offset);
+	if (!ISSET(clockintr_flags, CL_RNDSTAT)) {
+		if (cq->cq_statclock->cl_expiration == 0) {
+			clockintr_stagger(cq->cq_statclock, statclock_avg,
+			    multiplier, MAXCPUS);
+		}
+	}
 	clockintr_advance(cq->cq_statclock, statclock_avg);
 	if (schedhz != 0) {
-		offset = schedclock_period / ncpus * multiplier;
-		clockintr_schedule(cq->cq_schedclock, offset);
+		if (cq->cq_schedclock->cl_expiration == 0) {
+			clockintr_stagger(cq->cq_schedclock, schedclock_period,
+			    multiplier, MAXCPUS);
+		}
 		clockintr_advance(cq->cq_schedclock, schedclock_period);
 	}
 
-- 
2.20.1
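
For readers unfamiliar with the helper, below is a minimal userland sketch of
the stagger arithmetic.  It assumes clockintr_stagger() places CPU n's first
expiration at period / count * n, mirroring the offset computation this patch
removes; MAXCPUS = 64, a 100 Hz hardclock, and ncpus = 8 are made-up example
values, not taken from any particular platform, and the sketch is an
illustration rather than the committed kernel implementation.

/*
 * Illustrative sketch (not kernel code): compare the per-CPU stagger
 * offsets produced by dividing the hardclock period by ncpus versus
 * MAXCPUS.  Assumes the stagger formula is period / count * n, the same
 * shape as the offset computation the patch removes.
 */
#include <inttypes.h>
#include <stdio.h>

#define MAXCPUS			64		/* hypothetical platform limit */
#define HARDCLOCK_PERIOD	10000000ULL	/* 100 Hz, in nanoseconds */

/* Offset of CPU n's first expiration when staggering across "count" slots. */
static uint64_t
stagger_offset(uint64_t period, uint64_t n, uint64_t count)
{
	return period / count * n;
}

int
main(void)
{
	uint64_t ncpus = 8;	/* example online CPU count, if it were final */
	uint64_t n;

	for (n = 0; n < ncpus; n++) {
		printf("cpu%" PRIu64 ": ncpus stagger %8" PRIu64 " ns, "
		    "MAXCPUS stagger %8" PRIu64 " ns\n", n,
		    stagger_offset(HARDCLOCK_PERIOD, n, ncpus),
		    stagger_offset(HARDCLOCK_PERIOD, n, MAXCPUS));
	}
	return 0;
}

With the larger MAXCPUS divisor each slot is narrower (156,250 ns here versus
1,250,000 ns with ncpus = 8), which is the trade-off the commit message
accepts in exchange for a divisor that is already final when
clockintr_cpu_init() runs.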