From 2ee1c8eb6b992d8851615c557cb4ca6814d07259 Mon Sep 17 00:00:00 2001
From: cheloha
Date: Fri, 21 Apr 2023 15:33:00 +0000
Subject: [PATCH] clockintr_cpu_init: avoid CQ_INIT flag when scheduling
 cq_hardclock

The meaning of the CQ_INIT flag is about to change.  Soon, we won't be
able to use it to decide whether a given clockintr_cpu_init() call is
the first on a given CPU.

Instead, use the value of cl_expiration.  If it's zero, we know this is
the first clockintr_cpu_init() call on this CPU.
---
 sys/kern/kern_clockintr.c | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/sys/kern/kern_clockintr.c b/sys/kern/kern_clockintr.c
index af6198cb373..bc4e4686877 100644
--- a/sys/kern/kern_clockintr.c
+++ b/sys/kern/kern_clockintr.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: kern_clockintr.c,v 1.15 2023/04/21 03:03:50 cheloha Exp $ */
+/*	$OpenBSD: kern_clockintr.c,v 1.16 2023/04/21 15:33:00 cheloha Exp $ */
 /*
  * Copyright (c) 2003 Dale Rahn
  * Copyright (c) 2020 Mark Kettenis
@@ -169,10 +169,18 @@ clockintr_cpu_init(const struct intrclock *ic)
	 * the global tick value is advanced during inittodr(9) on our
	 * behalf.
	 */
-	offset = hardclock_period / ncpus * multiplier;
-	clockintr_schedule(cq->cq_hardclock, offset);
-	if (!CPU_IS_PRIMARY(ci) || ISSET(cq->cq_flags, CQ_INIT))
+	if (CPU_IS_PRIMARY(ci)) {
+		if (cq->cq_hardclock->cl_expiration == 0)
+			clockintr_schedule(cq->cq_hardclock, 0);
+		else
+			clockintr_advance(cq->cq_hardclock, hardclock_period);
+	} else {
+		if (cq->cq_hardclock->cl_expiration == 0) {
+			offset = hardclock_period / ncpus * multiplier;
+			cq->cq_hardclock->cl_expiration = offset;
+		}
 		clockintr_advance(cq->cq_hardclock, hardclock_period);
+	}
 
	/*
	 * We can always advance the statclock and schedclock.
-- 
2.20.1