- Move remaining statclock variables from kern_clockintr.c to
kern_clock.c. Move statclock variable initialization from
clockintr_init() into initclocks().
- Change statclock() prototype to make it a legal clockintr
callback function and establish the handle with statclock()
instead of clockintr_statclock().
- Merge the contents of clockintr_statclock() into statclock().
statclock() now reschedules itself and handles multiple
expirations transparently.
- Make statclock_avg visible from sys/systm.h so that clockintr_cpu_init()
can use it to advance the statclock across suspend/hibernate.
Thread: https://marc.info/?l=openbsd-tech&m=169428749720476&w=2
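
To make the range computation in initclocks() concrete, here is a
standalone sketch of the same arithmetic, assuming a hypothetical
stathz of 100.  It is an illustration only, not kernel code:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t stathz = 100;			/* assumed example rate */
	uint32_t avg = 1000000000 / stathz;	/* 10000000 ns */
	uint32_t half_avg = avg / 2;		/* 5000000 ns */
	uint32_t var;

	/* Largest power of two <= avg / 2: here 2^22 = 4194304. */
	for (var = 1U << 31; var > half_avg; var /= 2)
		continue;

	/* Pseudorandom periods are drawn from [min, min + mask]. */
	printf("min  = %u ns\n", avg - var / 2);	/* 7902848 */
	printf("mask = %u\n", var - 1);			/* 4194303 */
	return 0;
}

The resulting range, [7902848, 12097151], is centered on statclock_avg,
so the pseudorandom statclock periods still average out to
1000000000 / stathz nanoseconds.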
-/* $OpenBSD: kern_clock.c,v 1.117 2023/09/14 19:39:47 cheloha Exp $ */
+/* $OpenBSD: kern_clock.c,v 1.118 2023/09/14 20:58:51 cheloha Exp $ */
/* $NetBSD: kern_clock.c,v 1.34 1996/06/09 04:51:03 briggs Exp $ */
/*-
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/clockintr.h>
#include <sys/timeout.h>
#include <sys/kernel.h>
#include <sys/limits.h>
/* Don't force early wrap around, triggers bug in inteldrm */
volatile unsigned long jiffies;
+uint32_t statclock_avg; /* [I] average statclock period (ns) */
+uint32_t statclock_min; /* [I] minimum statclock period (ns) */
+uint32_t statclock_mask; /* [I] set of allowed offsets */
int statclock_is_randomized; /* [I] fixed or pseudorandom period? */
/*
void
initclocks(void)
{
+ uint32_t half_avg, var;
+
/*
* Let the machine-specific code do its bit.
*/
cpu_initclocks();
+ KASSERT(stathz >= 1 && stathz <= 1000000000);
+
+ /*
+ * Compute the average statclock() period. Then find var, the
+ * largest power of two such that var <= statclock_avg / 2.
+ */
+ statclock_avg = 1000000000 / stathz;
+ half_avg = statclock_avg / 2;
+ for (var = 1U << 31; var > half_avg; var /= 2)
+ continue;
+
+ /*
+ * Set a lower bound for the range using statclock_avg and var.
+ * The mask for that range is just (var - 1).
+ */
+ statclock_min = statclock_avg - (var / 2);
+ statclock_mask = var - 1;
+
KASSERT(profhz >= stathz && profhz <= 1000000000);
KASSERT(profhz % stathz == 0);
profclock_period = 1000000000 / profhz;
* do process and kernel statistics.
*/
void
-statclock(struct clockframe *frame)
+statclock(struct clockintr *cl, void *cf, void *arg)
{
+ uint64_t count, i;
+ struct clockframe *frame = cf;
struct cpu_info *ci = curcpu();
struct schedstate_percpu *spc = &ci->ci_schedstate;
struct proc *p = curproc;
struct process *pr;
+ if (statclock_is_randomized) {
+ count = clockintr_advance_random(cl, statclock_min,
+ statclock_mask);
+ } else {
+ count = clockintr_advance(cl, statclock_avg);
+ }
+
if (CLKF_USERMODE(frame)) {
pr = p->p_p;
/*
* Came from user mode; CPU was in user state.
* If this process is being profiled record the tick.
*/
- p->p_uticks++;
+ p->p_uticks += count;
if (pr->ps_nice > NZERO)
- spc->spc_cp_time[CP_NICE]++;
+ spc->spc_cp_time[CP_NICE] += count;
else
- spc->spc_cp_time[CP_USER]++;
+ spc->spc_cp_time[CP_USER] += count;
} else {
/*
* Came from kernel mode, so we were:
*/
if (CLKF_INTR(frame)) {
if (p != NULL)
- p->p_iticks++;
+ p->p_iticks += count;
spc->spc_cp_time[spc->spc_spinning ?
- CP_SPIN : CP_INTR]++;
+ CP_SPIN : CP_INTR] += count;
} else if (p != NULL && p != spc->spc_idleproc) {
- p->p_sticks++;
+ p->p_sticks += count;
spc->spc_cp_time[spc->spc_spinning ?
- CP_SPIN : CP_SYS]++;
+ CP_SPIN : CP_SYS] += count;
} else
spc->spc_cp_time[spc->spc_spinning ?
- CP_SPIN : CP_IDLE]++;
+ CP_SPIN : CP_IDLE] += count;
}
if (p != NULL) {
- p->p_cpticks++;
+ p->p_cpticks += count;
/*
* schedclock() runs every fourth statclock().
*/
- if ((++spc->spc_schedticks & 3) == 0)
- schedclock(p);
+ for (i = 0; i < count; i++) {
+ if ((++spc->spc_schedticks & 3) == 0)
+ schedclock(p);
+ }
}
}
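
Why does the merged statclock() loop count times instead of adding
count to spc_schedticks directly?  schedclock() must run on every
fourth tick, and a batch of expirations can straddle one or more of
those boundaries.  An illustration, assuming a hypothetical starting
tick value:

/*
 * With spc_schedticks == 2 and count == 3, ticks 3, 4, and 5 expire
 * in one batch.  Stepping one tick at a time runs schedclock() once,
 * at tick 4; a single "spc_schedticks += 3" would jump straight from
 * 2 to 5 and skip the tick-4 boundary entirely.
 */
for (i = 0; i < count; i++) {
	if ((++spc->spc_schedticks & 3) == 0)
		schedclock(p);
}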
-/* $OpenBSD: kern_clockintr.c,v 1.49 2023/09/14 19:51:17 cheloha Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.50 2023/09/14 20:58:51 cheloha Exp $ */
/*
* Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
*/
uint32_t clockintr_flags; /* [I] global state + behavior flags */
uint32_t hardclock_period; /* [I] hardclock period (ns) */
-uint32_t statclock_avg; /* [I] average statclock period (ns) */
-uint32_t statclock_min; /* [I] minimum statclock period (ns) */
-uint32_t statclock_mask; /* [I] set of allowed offsets */
void clockintr_hardclock(struct clockintr *, void *, void *);
void clockintr_schedule(struct clockintr *, uint64_t);
void clockintr_schedule_locked(struct clockintr *, uint64_t);
-void clockintr_statclock(struct clockintr *, void *, void *);
void clockqueue_intrclock_install(struct clockintr_queue *,
const struct intrclock *);
uint64_t clockqueue_next(const struct clockintr_queue *);
void
clockintr_init(uint32_t flags)
{
- uint32_t half_avg, var;
-
KASSERT(CPU_IS_PRIMARY(curcpu()));
KASSERT(clockintr_flags == 0);
KASSERT(!ISSET(flags, ~CL_FLAG_MASK));
hardclock_period = 1000000000 / hz;
roundrobin_period = hardclock_period * 10;
- KASSERT(stathz >= 1 && stathz <= 1000000000);
-
- /*
- * Compute the average statclock() period. Then find var, the
- * largest power of two such that var <= statclock_avg / 2.
- */
- statclock_avg = 1000000000 / stathz;
- half_avg = statclock_avg / 2;
- for (var = 1U << 31; var > half_avg; var /= 2)
- continue;
-
- /*
- * Set a lower bound for the range using statclock_avg and var.
- * The mask for that range is just (var - 1).
- */
- statclock_min = statclock_avg - (var / 2);
- statclock_mask = var - 1;
-
SET(clockintr_flags, flags | CL_INIT);
}
panic("%s: failed to establish hardclock", __func__);
}
if (cq->cq_statclock == NULL) {
- cq->cq_statclock = clockintr_establish(ci, clockintr_statclock,
- NULL);
+ cq->cq_statclock = clockintr_establish(ci, statclock, NULL);
if (cq->cq_statclock == NULL)
panic("%s: failed to establish statclock", __func__);
}
hardclock(frame);
}
-void
-clockintr_statclock(struct clockintr *cl, void *frame, void *arg)
-{
- uint64_t count, i;
-
- if (statclock_is_randomized) {
- count = clockintr_advance_random(cl, statclock_min,
- statclock_mask);
- } else {
- count = clockintr_advance(cl, statclock_avg);
- }
- for (i = 0; i < count; i++)
- statclock(frame);
-}
-
void
clockqueue_init(struct clockintr_queue *cq)
{
-/* $OpenBSD: systm.h,v 1.166 2023/09/14 19:39:47 cheloha Exp $ */
+/* $OpenBSD: systm.h,v 1.167 2023/09/14 20:58:51 cheloha Exp $ */
/* $NetBSD: systm.h,v 1.50 1996/06/09 04:55:09 briggs Exp $ */
/*-
void realitexpire(void *);
extern uint32_t hardclock_period;
+extern uint32_t statclock_avg;
extern int statclock_is_randomized;
struct clockframe;
void hardclock(struct clockframe *);
-void statclock(struct clockframe *);
+
+struct clockintr;
+void statclock(struct clockintr *, void *, void *);
void initclocks(void);
void inittodr(time_t);
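
For reference, the shape statclock() now shares with the other
clockintr callbacks.  A minimal sketch of the pattern, assuming a
hypothetical my_clock() callback, period my_period, and worker
do_periodic_work(); only clockintr calls that appear in the diff
above are used:

void
my_clock(struct clockintr *cl, void *cf, void *arg)
{
	uint64_t count, i;

	/* Reschedule; count is how many periods expired since last run. */
	count = clockintr_advance(cl, my_period);

	/* Account for every expiration in this single call. */
	for (i = 0; i < count; i++)
		do_periodic_work();
}

The handle is established once per CPU with
clockintr_establish(ci, my_clock, NULL), mirroring how
clockintr_cpu_init() establishes the statclock above.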