-/* $OpenBSD: kern_clockintr.c,v 1.25 2023/06/22 16:23:50 cheloha Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.26 2023/07/02 00:55:18 cheloha Exp $ */
/*
* Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
void
clockintr_cpu_init(const struct intrclock *ic)
{
-	uint64_t multiplier = 0, offset;
+	uint64_t multiplier = 0;
	struct cpu_info *ci = curcpu();
	struct clockintr_queue *cq = &ci->ci_queue;
	int reset_cq_intrclock = 0;
			clockintr_advance(cq->cq_hardclock, hardclock_period);
	} else {
		if (cq->cq_hardclock->cl_expiration == 0) {
-			offset = hardclock_period / ncpus * multiplier;
-			cq->cq_hardclock->cl_expiration = offset;
+			clockintr_stagger(cq->cq_hardclock, hardclock_period,
+			    multiplier, MAXCPUS);
		}
		clockintr_advance(cq->cq_hardclock, hardclock_period);
	}
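
This hunk and the two that follow make the same substitution: the open-coded "period / ncpus * multiplier" offset becomes a call to clockintr_stagger(), with the fixed MAXCPUS as the divisor instead of the run-time ncpus. As a rough sketch of the arithmetic presumably behind the helper (the real function in the kernel also handles locking and sanity checks; the simplified struct below is an assumption for illustration only):

#include <assert.h>
#include <stdint.h>

/* Simplified stand-in for the kernel's struct clockintr. */
struct clockintr {
	uint64_t cl_expiration;		/* next expiration, in nanoseconds */
};

/*
 * Offset CPU n's first expiration by n/count of one period -- the
 * same value the removed lines computed by hand, except that the
 * divisor is a fixed count rather than the number of attached CPUs.
 */
static void
clockintr_stagger(struct clockintr *cl, uint64_t period, unsigned int n,
    unsigned int count)
{
	assert(n < count);
	cl->cl_expiration = period / count * n;
}

Called as clockintr_stagger(cl, period, multiplier, MAXCPUS), this leaves CPU 0 at offset zero and spreads the remaining CPUs over the first multiplier/MAXCPUS fractions of the period.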
	/*
	 * We can always advance the statclock and schedclock.
+	 * There is no reason to stagger a randomized statclock.
	 */
-	offset = statclock_avg / ncpus * multiplier;
-	clockintr_schedule(cq->cq_statclock, offset);
+	if (!ISSET(clockintr_flags, CL_RNDSTAT)) {
+		if (cq->cq_statclock->cl_expiration == 0) {
+			clockintr_stagger(cq->cq_statclock, statclock_avg,
+			    multiplier, MAXCPUS);
+		}
+	}
	clockintr_advance(cq->cq_statclock, statclock_avg);
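
The new CL_RNDSTAT guard follows from the added comment: if each statclock interval is already drawn at random around statclock_avg, the per-CPU expirations decorrelate on their own, so a fixed stagger offset adds nothing. A throwaway illustration of that idea (the uniform +/-10% window is an assumption, not the kernel's actual distribution):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NSEC_PER_SEC	1000000000ULL

/* Toy randomized interval: uniform in a +/-10% window around avg. */
static uint64_t
rnd_interval(uint64_t avg)
{
	uint64_t lo = avg - avg / 10;

	return lo + arc4random_uniform((uint32_t)(2 * (avg / 10) + 1));
}

int
main(void)
{
	uint64_t avg = NSEC_PER_SEC / 100;	/* stathz = 100 */
	uint64_t cpu0 = 0, cpu1 = 0;		/* same start, no stagger */
	int i;

	/* The two "CPUs" drift apart within a few ticks. */
	for (i = 0; i < 5; i++) {
		cpu0 += rnd_interval(avg);
		cpu1 += rnd_interval(avg);
		printf("tick %d: cpu0 %llu ns  cpu1 %llu ns\n", i,
		    (unsigned long long)cpu0, (unsigned long long)cpu1);
	}
	return 0;
}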
	if (schedhz != 0) {
-		offset = schedclock_period / ncpus * multiplier;
-		clockintr_schedule(cq->cq_schedclock, offset);
+		if (cq->cq_schedclock->cl_expiration == 0) {
+			clockintr_stagger(cq->cq_schedclock, schedclock_period,
+			    multiplier, MAXCPUS);
+		}
		clockintr_advance(cq->cq_schedclock, schedclock_period);
	}
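
Concretely, the divisor change from ncpus to MAXCPUS moves where each CPU's first expiration lands. A quick comparison of the two formulas (hz = 100 and MAXCPUS = 64 are assumed example values, not taken from the diff):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL

int
main(void)
{
	uint64_t hardclock_period = NSEC_PER_SEC / 100;	/* hz = 100: 10 ms */
	unsigned int maxcpus = 64, ncpus = 4, cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		printf("cpu%u: ncpus-based offset %8llu ns, "
		    "MAXCPUS-based offset %8llu ns\n", cpu,
		    (unsigned long long)(hardclock_period / ncpus * cpu),
		    (unsigned long long)(hardclock_period / maxcpus * cpu));
	}
	return 0;
}

Dividing by the compile-time MAXCPUS packs the offsets into a smaller slice of the period, but presumably the point is that a CPU's slot no longer depends on how many CPUs happen to be attached when clockintr_cpu_init() runs.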