-/* $OpenBSD: clock.c,v 1.27 2023/02/04 19:19:36 cheloha Exp $ */
+/* $OpenBSD: clock.c,v 1.28 2023/07/25 18:16:19 cheloha Exp $ */
/* $NetBSD: clock.c,v 1.29 2000/06/05 21:47:10 thorpej Exp $ */
/*
void
setstatclockrate(int newhz)
{
- clockintr_setstatclockrate(newhz);
}
u_int
-/* $OpenBSD: cpu.h,v 1.69 2023/01/31 15:18:53 deraadt Exp $ */
+/* $OpenBSD: cpu.h,v 1.70 2023/07/25 18:16:19 cheloha Exp $ */
/* $NetBSD: cpu.h,v 1.45 2000/08/21 02:03:12 thorpej Exp $ */
/*-
#endif
#ifdef GPROF
struct gmonparam *ci_gmon;
+ struct clockintr *ci_gmonclock;
#endif
struct clockintr_queue ci_queue;
char ci_panicbuf[512];
-/* $OpenBSD: cpu.h,v 1.156 2023/07/25 06:48:37 guenther Exp $ */
+/* $OpenBSD: cpu.h,v 1.157 2023/07/25 18:16:19 cheloha Exp $ */
/* $NetBSD: cpu.h,v 1.1 2003/04/26 18:39:39 fvdl Exp $ */
/*-
u_int64_t ci_hz_aperf;
#if defined(GPROF) || defined(DDBPROF)
struct gmonparam *ci_gmon;
+ struct clockintr *ci_gmonclock;
#endif
u_int32_t ci_vmm_flags;
#define CI_VMM_VMX (1 << 0)
-/* $OpenBSD: clock.c,v 1.39 2023/02/04 19:19:36 cheloha Exp $ */
+/* $OpenBSD: clock.c,v 1.40 2023/07/25 18:16:19 cheloha Exp $ */
/* $NetBSD: clock.c,v 1.1 2003/04/26 18:39:50 fvdl Exp $ */
/*-
mc146818_write(NULL, MC_REGA,
MC_BASE_32_KHz | MC_RATE_1024_Hz);
}
- clockintr_setstatclockrate(arg);
}
void
-/* $OpenBSD: agtimer.c,v 1.17 2023/02/04 19:19:36 cheloha Exp $ */
+/* $OpenBSD: agtimer.c,v 1.18 2023/07/25 18:16:19 cheloha Exp $ */
/*
* Copyright (c) 2011 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2013 Patrick Wildt <patrick@blueri.se>
void
agtimer_setstatclockrate(int newhz)
{
- clockintr_setstatclockrate(newhz);
}
void
-/* $OpenBSD: amptimer.c,v 1.16 2023/02/04 19:19:36 cheloha Exp $ */
+/* $OpenBSD: amptimer.c,v 1.17 2023/07/25 18:16:19 cheloha Exp $ */
/*
* Copyright (c) 2011 Dale Rahn <drahn@openbsd.org>
*
void
amptimer_setstatclockrate(int newhz)
{
- clockintr_setstatclockrate(newhz);
}
void
-/* $OpenBSD: cpu.h,v 1.62 2023/01/17 02:27:14 cheloha Exp $ */
+/* $OpenBSD: cpu.h,v 1.63 2023/07/25 18:16:19 cheloha Exp $ */
/* $NetBSD: cpu.h,v 1.34 2003/06/23 11:01:08 martin Exp $ */
/*
#ifdef GPROF
struct gmonparam *ci_gmon;
+ struct clockintr *ci_gmonclock;
#endif
struct clockintr_queue ci_queue;
char ci_panicbuf[512];
-/* $OpenBSD: agtimer.c,v 1.22 2023/02/04 19:19:36 cheloha Exp $ */
+/* $OpenBSD: agtimer.c,v 1.23 2023/07/25 18:16:19 cheloha Exp $ */
/*
* Copyright (c) 2011 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2013 Patrick Wildt <patrick@blueri.se>
void
agtimer_setstatclockrate(int newhz)
{
- clockintr_setstatclockrate(newhz);
}
void
-/* $OpenBSD: cpu.h,v 1.37 2023/07/13 08:33:36 kettenis Exp $ */
+/* $OpenBSD: cpu.h,v 1.38 2023/07/25 18:16:20 cheloha Exp $ */
/*
* Copyright (c) 2016 Dale Rahn <drahn@dalerahn.com>
*
#ifdef GPROF
struct gmonparam *ci_gmon;
+ struct clockintr *ci_gmonclock;
#endif
struct clockintr_queue ci_queue;
char ci_panicbuf[512];
-/* $OpenBSD: dmtimer.c,v 1.18 2023/02/04 19:19:36 cheloha Exp $ */
+/* $OpenBSD: dmtimer.c,v 1.19 2023/07/25 18:16:19 cheloha Exp $ */
/*
* Copyright (c) 2007,2009 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2013 Raphael Graf <r@undefined.ch>
void
dmtimer_setstatclockrate(int newhz)
{
- clockintr_setstatclockrate(newhz);
}
-/* $OpenBSD: gptimer.c,v 1.19 2023/02/04 19:19:36 cheloha Exp $ */
+/* $OpenBSD: gptimer.c,v 1.20 2023/07/25 18:16:19 cheloha Exp $ */
/*
* Copyright (c) 2007,2009 Dale Rahn <drahn@openbsd.org>
*
void
gptimer_setstatclockrate(int newhz)
{
- clockintr_setstatclockrate(newhz);
}
-/* $OpenBSD: sxitimer.c,v 1.20 2023/02/04 19:19:36 cheloha Exp $ */
+/* $OpenBSD: sxitimer.c,v 1.21 2023/07/25 18:16:19 cheloha Exp $ */
/*
* Copyright (c) 2007,2009 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2013 Raphael Graf <r@undefined.ch>
void
sxitimer_setstatclockrate(int newhz)
{
- clockintr_setstatclockrate(newhz);
}
u_int
-/* $OpenBSD: clock.c,v 1.35 2023/02/04 19:19:36 cheloha Exp $ */
+/* $OpenBSD: clock.c,v 1.36 2023/07/25 18:16:20 cheloha Exp $ */
/*
* Copyright (c) 1998-2003 Michael Shalayeff
void
setstatclockrate(int newhz)
{
- clockintr_setstatclockrate(newhz);
}
u_int
-/* $OpenBSD: cpu.h,v 1.99 2023/01/31 15:18:54 deraadt Exp $ */
+/* $OpenBSD: cpu.h,v 1.100 2023/07/25 18:16:20 cheloha Exp $ */
/*
* Copyright (c) 2000-2004 Michael Shalayeff
#endif
#ifdef GPROF
struct gmonparam *ci_gmon;
+ struct clockintr *ci_gmonclock;
#endif
struct clockintr_queue ci_queue;
char ci_panicbuf[512];
-/* $OpenBSD: cpu.h,v 1.181 2022/12/06 01:56:44 cheloha Exp $ */
+/* $OpenBSD: cpu.h,v 1.182 2023/07/25 18:16:20 cheloha Exp $ */
/* $NetBSD: cpu.h,v 1.35 1996/05/05 19:29:26 christos Exp $ */
/*-
struct ksensor ci_sensor;
#if defined(GPROF) || defined(DDBPROF)
struct gmonparam *ci_gmon;
+ struct clockintr *ci_gmonclock;
#endif
struct clockintr_queue ci_queue;
char ci_panicbuf[512];
-/* $OpenBSD: clock.c,v 1.64 2023/02/04 19:19:36 cheloha Exp $ */
+/* $OpenBSD: clock.c,v 1.65 2023/07/25 18:16:20 cheloha Exp $ */
/* $NetBSD: clock.c,v 1.39 1996/05/12 23:11:54 mycroft Exp $ */
/*-
mc146818_write(NULL, MC_REGA,
MC_BASE_32_KHz | MC_RATE_1024_Hz);
}
- clockintr_setstatclockrate(arg);
}
void
-/* $OpenBSD: clock.c,v 1.16 2022/12/06 00:56:52 cheloha Exp $ */
+/* $OpenBSD: clock.c,v 1.17 2023/07/25 18:16:20 cheloha Exp $ */
/* $NetBSD: clock.c,v 1.2 2000/01/11 10:29:35 nisimura Exp $ */
/*
void
setstatclockrate(int newhz)
{
- clockintr_setstatclockrate(newhz);
}
/*
-/* $OpenBSD: cpu.h,v 1.72 2023/01/31 15:18:54 deraadt Exp $ */
+/* $OpenBSD: cpu.h,v 1.73 2023/07/25 18:16:20 cheloha Exp $ */
/*
* Copyright (c) 1996 Nivas Madhur
* Copyright (c) 1992, 1993
#endif
#ifdef GPROF
struct gmonparam *ci_gmon;
+ struct clockintr *ci_gmonclock;
#endif
struct clockintr_queue ci_queue;
char ci_panicbuf[512];
-/* $OpenBSD: clock.c,v 1.54 2023/02/04 23:17:05 cheloha Exp $ */
+/* $OpenBSD: clock.c,v 1.55 2023/07/25 18:16:20 cheloha Exp $ */
/* $NetBSD: clock.c,v 1.1 1996/09/30 16:34:40 ws Exp $ */
/*
void
setstatclockrate(int newhz)
{
- clockintr_setstatclockrate(newhz);
}
u_int
-/* $OpenBSD: cpu.h,v 1.141 2023/01/11 03:19:52 visa Exp $ */
+/* $OpenBSD: cpu.h,v 1.142 2023/07/25 18:16:20 cheloha Exp $ */
/*-
* Copyright (c) 1992, 1993
#endif
#ifdef GPROF
struct gmonparam *ci_gmon;
+ struct clockintr *ci_gmonclock;
#endif
char ci_panicbuf[512];
};
-/* $OpenBSD: mips64_machdep.c,v 1.41 2023/02/04 19:19:36 cheloha Exp $ */
+/* $OpenBSD: mips64_machdep.c,v 1.42 2023/07/25 18:16:20 cheloha Exp $ */
/*
* Copyright (c) 2009, 2010, 2012 Miodrag Vallat.
void
setstatclockrate(int newhz)
{
- clockintr_setstatclockrate(newhz);
}
/*
-/* $OpenBSD: cpu.h,v 1.74 2022/11/29 00:58:05 cheloha Exp $ */
+/* $OpenBSD: cpu.h,v 1.75 2023/07/25 18:16:20 cheloha Exp $ */
/* $NetBSD: cpu.h,v 1.1 1996/09/30 16:34:21 ws Exp $ */
/*
#endif
#ifdef GPROF
struct gmonparam *ci_gmon;
+ struct clockintr *ci_gmonclock;
#endif
char ci_panicbuf[512];
};
-/* $OpenBSD: clock.c,v 1.10 2023/02/04 23:20:54 cheloha Exp $ */
+/* $OpenBSD: clock.c,v 1.11 2023/07/25 18:16:21 cheloha Exp $ */
/*
* Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
void
setstatclockrate(int newhz)
{
- clockintr_setstatclockrate(newhz);
}
void
-/* $OpenBSD: cpu.h,v 1.15 2022/11/19 16:02:37 cheloha Exp $ */
+/* $OpenBSD: cpu.h,v 1.16 2023/07/25 18:16:21 cheloha Exp $ */
/*
* Copyright (c) 2019 Mike Larkin <mlarkin@openbsd.org>
#ifdef GPROF
struct gmonparam *ci_gmon;
+ struct clockintr *ci_gmonclock;
#endif
char ci_panicbuf[512];
-/* $OpenBSD: clock.c,v 1.9 2023/02/04 19:19:37 cheloha Exp $ */
+/* $OpenBSD: clock.c,v 1.10 2023/07/25 18:16:21 cheloha Exp $ */
/*
* Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
void
setstatclockrate(int newhz)
{
- clockintr_setstatclockrate(newhz);
}
void
-/* $OpenBSD: cpu.h,v 1.34 2022/12/06 01:19:35 cheloha Exp $ */
+/* $OpenBSD: cpu.h,v 1.35 2023/07/25 18:16:21 cheloha Exp $ */
/* $NetBSD: cpu.h,v 1.41 2006/01/21 04:24:12 uwe Exp $ */
/*-
#endif
#ifdef GPROF
struct gmonparam *ci_gmon;
+ struct clockintr *ci_gmonclock;
#endif
int ci_want_resched;
-/* $OpenBSD: clock.c,v 1.14 2023/04/10 04:21:20 jsg Exp $ */
+/* $OpenBSD: clock.c,v 1.15 2023/07/25 18:16:21 cheloha Exp $ */
/* $NetBSD: clock.c,v 1.32 2006/09/05 11:09:36 uwe Exp $ */
/*-
void
setstatclockrate(int newhz)
{
- clockintr_setstatclockrate(newhz);
}
u_int
-/* $OpenBSD: cpu.h,v 1.101 2023/01/13 03:22:18 cheloha Exp $ */
+/* $OpenBSD: cpu.h,v 1.102 2023/07/25 18:16:21 cheloha Exp $ */
/* $NetBSD: cpu.h,v 1.28 2001/06/14 22:56:58 thorpej Exp $ */
/*
#endif
#ifdef GPROF
struct gmonparam *ci_gmon;
+ struct clockintr *ci_gmonclock;
#endif
char ci_panicbuf[512];
};
-/* $OpenBSD: clock.c,v 1.77 2023/04/28 18:27:55 cheloha Exp $ */
+/* $OpenBSD: clock.c,v 1.78 2023/07/25 18:16:21 cheloha Exp $ */
/* $NetBSD: clock.c,v 1.41 2001/07/24 19:29:25 eeh Exp $ */
/*
void
setstatclockrate(int newhz)
{
- clockintr_setstatclockrate(newhz);
}
/*
-/* $OpenBSD: kern_clock.c,v 1.108 2023/04/25 00:58:47 cheloha Exp $ */
+/* $OpenBSD: kern_clock.c,v 1.109 2023/07/25 18:16:19 cheloha Exp $ */
/* $NetBSD: kern_clock.c,v 1.34 1996/06/09 04:51:03 briggs Exp $ */
/*-
#include <sys/sched.h>
#include <sys/timetc.h>
-#if defined(GPROF) || defined(DDBPROF)
-#include <sys/gmon.h>
-#endif
-
#include "dt.h"
#if NDT > 0
#include <dev/dt/dtvar.h>
int profhz;
int profprocs;
int ticks = INT_MAX - (15 * 60 * HZ);
-static int psdiv, pscnt; /* prof => stat divider */
-int psratio; /* ratio: prof / stat */
volatile unsigned long jiffies = ULONG_MAX - (10 * 60 * HZ);
initclocks(void)
{
/*
- * Set divisors to 1 (normal case) and let the machine-specific
- * code do its bit.
+ * Let the machine-specific code do its bit.
*/
- psdiv = pscnt = 1;
cpu_initclocks();
- /*
- * Compute profhz/stathz.
- */
- psratio = profhz / stathz;
+ KASSERT(profhz >= stathz && profhz <= 1000000000);
+ KASSERT(profhz % stathz == 0);
+ profclock_period = 1000000000 / profhz;
inittimecounter();
}
atomic_setbits_int(&pr->ps_flags, PS_PROFIL);
if (++profprocs == 1) {
s = splstatclock();
- psdiv = pscnt = psratio;
setstatclockrate(profhz);
splx(s);
}
atomic_clearbits_int(&pr->ps_flags, PS_PROFIL);
if (--profprocs == 0) {
s = splstatclock();
- psdiv = pscnt = 1;
setstatclockrate(stathz);
splx(s);
}
void
statclock(struct clockframe *frame)
{
-#if defined(GPROF) || defined(DDBPROF)
- struct gmonparam *g;
- u_long i;
-#endif
struct cpu_info *ci = curcpu();
struct schedstate_percpu *spc = &ci->ci_schedstate;
struct proc *p = curproc;
struct process *pr;
- /*
- * Notice changes in divisor frequency, and adjust clock
- * frequency accordingly.
- */
- if (spc->spc_psdiv != psdiv) {
- spc->spc_psdiv = psdiv;
- spc->spc_pscnt = psdiv;
- if (psdiv == 1) {
- setstatclockrate(stathz);
- } else {
- setstatclockrate(profhz);
- }
- }
-
if (CLKF_USERMODE(frame)) {
pr = p->p_p;
- if (pr->ps_flags & PS_PROFIL)
- addupc_intr(p, CLKF_PC(frame), 1);
- if (--spc->spc_pscnt > 0)
- return;
/*
* Came from user mode; CPU was in user state.
* If this process is being profiled record the tick.
else
spc->spc_cp_time[CP_USER]++;
} else {
-#if defined(GPROF) || defined(DDBPROF)
- /*
- * Kernel statistics are just like addupc_intr, only easier.
- */
- g = ci->ci_gmon;
- if (g != NULL && g->state == GMON_PROF_ON) {
- i = CLKF_PC(frame) - g->lowpc;
- if (i < g->textsize) {
- i /= HISTFRACTION * sizeof(*g->kcount);
- g->kcount[i]++;
- }
- }
-#endif
- if (p != NULL && p->p_p->ps_flags & PS_PROFIL)
- addupc_intr(p, PROC_PC(p), 1);
- if (--spc->spc_pscnt > 0)
- return;
/*
* Came from kernel mode, so we were:
* - spinning on a lock
spc->spc_cp_time[spc->spc_spinning ?
CP_SPIN : CP_IDLE]++;
}
- spc->spc_pscnt = psdiv;
if (p != NULL) {
p->p_cpticks++;
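For context, initclocks() now asserts the profhz/stathz relationship and derives profclock_period directly from profhz instead of computing psratio. A minimal user-space sketch of that arithmetic; the 1024/128 rates are illustrative, not taken from any particular platform:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        int stathz = 128, profhz = 1024;        /* illustrative rates */
        uint32_t profclock_period;

        /* The same invariants initclocks() now asserts. */
        assert(profhz >= stathz && profhz <= 1000000000);
        assert(profhz % stathz == 0);

        /* Period of one profclock() tick, in nanoseconds. */
        profclock_period = 1000000000 / profhz;
        printf("profclock_period = %u ns\n", (unsigned)profclock_period);
        return 0;
}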
-/* $OpenBSD: kern_clockintr.c,v 1.27 2023/07/02 19:02:27 cheloha Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.28 2023/07/25 18:16:19 cheloha Exp $ */
/*
* Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
/*
* Protection for global variables in this file:
*
- * C Global clockintr configuration mutex (clockintr_mtx).
* I Immutable after initialization.
*/
-struct mutex clockintr_mtx = MUTEX_INITIALIZER(IPL_CLOCK);
-
u_int clockintr_flags; /* [I] global state + behavior flags */
uint32_t hardclock_period; /* [I] hardclock period (ns) */
uint32_t schedclock_period; /* [I] schedclock period (ns) */
-volatile u_int statclock_gen = 1; /* [C] statclock update generation */
-volatile uint32_t statclock_avg; /* [C] average statclock period (ns) */
-uint32_t statclock_min; /* [C] minimum statclock period (ns) */
-uint32_t statclock_mask; /* [C] set of allowed offsets */
-uint32_t stat_avg; /* [I] average stathz period (ns) */
-uint32_t stat_min; /* [I] set of allowed offsets */
-uint32_t stat_mask; /* [I] max offset from minimum (ns) */
-uint32_t prof_avg; /* [I] average profhz period (ns) */
-uint32_t prof_min; /* [I] minimum profhz period (ns) */
-uint32_t prof_mask; /* [I] set of allowed offsets */
-
-uint64_t clockintr_advance(struct clockintr *, uint64_t);
-void clockintr_cancel(struct clockintr *);
+uint32_t statclock_avg; /* [I] average statclock period (ns) */
+uint32_t statclock_min; /* [I] minimum statclock period (ns) */
+uint32_t statclock_mask; /* [I] set of allowed offsets */
+
void clockintr_cancel_locked(struct clockintr *);
-struct clockintr *clockintr_establish(struct clockintr_queue *,
- void (*)(struct clockintr *, void *));
uint64_t clockintr_expiration(const struct clockintr *);
void clockintr_hardclock(struct clockintr *, void *);
uint64_t clockintr_nsecuptime(const struct clockintr *);
void clockintr_schedclock(struct clockintr *, void *);
void clockintr_schedule(struct clockintr *, uint64_t);
void clockintr_schedule_locked(struct clockintr *, uint64_t);
-void clockintr_stagger(struct clockintr *, uint64_t, u_int, u_int);
void clockintr_statclock(struct clockintr *, void *);
-void clockintr_statvar_init(int, uint32_t *, uint32_t *, uint32_t *);
uint64_t clockqueue_next(const struct clockintr_queue *);
void clockqueue_reset_intrclock(struct clockintr_queue *);
uint64_t nsec_advance(uint64_t *, uint64_t, uint64_t);
void
clockintr_init(u_int flags)
{
+ uint32_t half_avg, var;
+
KASSERT(CPU_IS_PRIMARY(curcpu()));
KASSERT(clockintr_flags == 0);
KASSERT(!ISSET(flags, ~CL_FLAG_MASK));
hardclock_period = 1000000000 / hz;
KASSERT(stathz >= 1 && stathz <= 1000000000);
- KASSERT(profhz >= stathz && profhz <= 1000000000);
- KASSERT(profhz % stathz == 0);
- clockintr_statvar_init(stathz, &stat_avg, &stat_min, &stat_mask);
- clockintr_statvar_init(profhz, &prof_avg, &prof_min, &prof_mask);
- SET(clockintr_flags, CL_STATCLOCK);
- clockintr_setstatclockrate(stathz);
+
+ /*
+ * Compute the average statclock() period. Then find var, the
+ * largest power of two such that var <= statclock_avg / 2.
+ */
+ statclock_avg = 1000000000 / stathz;
+ half_avg = statclock_avg / 2;
+ for (var = 1U << 31; var > half_avg; var /= 2)
+ continue;
+
+ /*
+ * Set a lower bound for the range using statclock_avg and var.
+ * The mask for that range is just (var - 1).
+ */
+ statclock_min = statclock_avg - (var / 2);
+ statclock_mask = var - 1;
KASSERT(schedhz >= 0 && schedhz <= 1000000000);
if (schedhz != 0)
mtx_leave(&cq->cq_mtx);
}
-/*
- * Compute the period (avg) for the given frequency and a range around
- * that period. The range is [min + 1, min + mask]. The range is used
- * during dispatch to choose a new pseudorandom deadline for each statclock
- * event.
- */
-void
-clockintr_statvar_init(int freq, uint32_t *avg, uint32_t *min, uint32_t *mask)
-{
- uint32_t half_avg, var;
-
- KASSERT(!ISSET(clockintr_flags, CL_INIT | CL_STATCLOCK));
- KASSERT(freq > 0 && freq <= 1000000000);
-
- /* Compute avg, the average period. */
- *avg = 1000000000 / freq;
-
- /* Find var, the largest power of two such that var <= avg / 2. */
- half_avg = *avg / 2;
- for (var = 1U << 31; var > half_avg; var /= 2)
- continue;
-
- /* Using avg and var, set a lower bound for the range. */
- *min = *avg - (var / 2);
-
- /* The mask is just (var - 1). */
- *mask = var - 1;
-}
-
-/*
- * Update the statclock_* variables according to the given frequency.
- * Must only be called after clockintr_statvar_init() initializes both
- * stathz_* and profhz_*.
- */
-void
-clockintr_setstatclockrate(int freq)
-{
- u_int ogen;
-
- KASSERT(ISSET(clockintr_flags, CL_STATCLOCK));
-
- mtx_enter(&clockintr_mtx);
-
- ogen = statclock_gen;
- statclock_gen = 0;
- membar_producer();
- if (freq == stathz) {
- statclock_avg = stat_avg;
- statclock_min = stat_min;
- statclock_mask = stat_mask;
- } else if (freq == profhz) {
- statclock_avg = prof_avg;
- statclock_min = prof_min;
- statclock_mask = prof_mask;
- } else {
- panic("%s: frequency is not stathz (%d) or profhz (%d): %d",
- __func__, stathz, profhz, freq);
- }
- membar_producer();
- statclock_gen = MAX(1, ogen + 1);
-
- mtx_leave(&clockintr_mtx);
-}
-
uint64_t
clockintr_nsecuptime(const struct clockintr *cl)
{
clockintr_statclock(struct clockintr *cl, void *frame)
{
uint64_t count, expiration, i, uptime;
- uint32_t mask, min, off;
- u_int gen;
+ uint32_t off;
if (ISSET(clockintr_flags, CL_RNDSTAT)) {
- do {
- gen = statclock_gen;
- membar_consumer();
- min = statclock_min;
- mask = statclock_mask;
- membar_consumer();
- } while (gen == 0 || gen != statclock_gen);
count = 0;
expiration = clockintr_expiration(cl);
uptime = clockintr_nsecuptime(cl);
while (expiration <= uptime) {
- while ((off = (random() & mask)) == 0)
+ while ((off = (random() & statclock_mask)) == 0)
continue;
- expiration += min + off;
+ expiration += statclock_min + off;
count++;
}
clockintr_schedule(cl, expiration);
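The single statclock_avg/statclock_min/statclock_mask triple replaces the separate stat_* and prof_* sets (and the generation-counted swap), since statclock() no longer changes frequency. clockintr_statclock() draws offsets as (random() & statclock_mask) with zero rejected, so each period is statclock_min plus a value in [1, statclock_mask] and the mean stays at statclock_avg. A stand-alone sketch of the same arithmetic; stathz = 100 is chosen purely for illustration:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        int stathz = 100;                       /* illustrative rate */
        uint32_t avg, half_avg, var, min, mask;

        /* Average statclock() period in nanoseconds. */
        avg = 1000000000 / stathz;              /* 10000000 */

        /* Largest power of two such that var <= avg / 2. */
        half_avg = avg / 2;
        for (var = 1U << 31; var > half_avg; var /= 2)
                continue;                       /* var = 4194304 */

        /* Offsets are drawn from [min + 1, min + mask]. */
        min = avg - (var / 2);                  /* 7902848 */
        mask = var - 1;                         /* 4194303 */

        printf("min=%u mask=%u mean=%u\n", (unsigned)min, (unsigned)mask,
            (unsigned)(min + (mask + 1) / 2));  /* mean == avg */
        return 0;
}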
-/* $OpenBSD: kern_sched.c,v 1.79 2023/07/14 07:07:08 claudio Exp $ */
+/* $OpenBSD: kern_sched.c,v 1.80 2023/07/25 18:16:19 cheloha Exp $ */
/*
* Copyright (c) 2007, 2008 Artur Grabowski <art@openbsd.org>
*
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/systm.h>
+#include <sys/clockintr.h>
+#include <sys/resourcevar.h>
#include <sys/task.h>
#include <sys/smr.h>
#include <sys/tracepoint.h>
spc->spc_idleproc = NULL;
+ if (spc->spc_profclock == NULL) {
+ spc->spc_profclock = clockintr_establish(&ci->ci_queue,
+ profclock);
+ if (spc->spc_profclock == NULL)
+ panic("%s: clockintr_establish profclock", __func__);
+ clockintr_stagger(spc->spc_profclock, profclock_period,
+ CPU_INFO_UNIT(ci), MAXCPUS);
+ }
+
kthread_create_deferred(sched_kthreads_create, ci);
LIST_INIT(&spc->spc_deadproc);
timespecsub(&ts, &spc->spc_runtime, &ts);
timespecadd(&p->p_rtime, &ts, &p->p_rtime);
+ if (ISSET(spc->spc_schedflags, SPCF_PROFCLOCK)) {
+ atomic_clearbits_int(&spc->spc_schedflags, SPCF_PROFCLOCK);
+ clockintr_cancel(spc->spc_profclock);
+ }
+
LIST_INSERT_HEAD(&spc->spc_deadproc, p, p_hash);
#ifdef MULTIPROCESSOR
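sched_init_cpu() staggers each CPU's profclock handle so they do not all expire at the same instant. Assuming clockintr_stagger(cl, period, n, count) places the first expiration at period / count * n (an assumption about its internals, not shown in this diff), the initial offsets look like the sketch below; profhz = 1024 and an 8-CPU count are placeholders:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint64_t profclock_period = 1000000000 / 1024;  /* ~976562 ns */
        unsigned int n, ncpus = 8;              /* placeholder CPU count */

        /* Assumed stagger rule: first expiration at period / count * n. */
        for (n = 0; n < ncpus; n++)
                printf("cpu%u: first profclock at +%llu ns\n", n,
                    (unsigned long long)(profclock_period / ncpus * n));
        return 0;
}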
-/* $OpenBSD: sched_bsd.c,v 1.77 2023/07/11 07:02:43 claudio Exp $ */
+/* $OpenBSD: sched_bsd.c,v 1.78 2023/07/25 18:16:19 cheloha Exp $ */
/* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
/*-
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/clockintr.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
/* add the time counts for this thread to the process's total */
tuagg_unlocked(pr, p);
+ /* Stop the profclock if it's running. */
+ if (ISSET(spc->spc_schedflags, SPCF_PROFCLOCK)) {
+ atomic_clearbits_int(&spc->spc_schedflags, SPCF_PROFCLOCK);
+ clockintr_cancel(spc->spc_profclock);
+ }
+
/*
* Process is about to yield the CPU; clear the appropriate
* scheduling flags.
*/
KASSERT(p->p_cpu == curcpu());
+ /* Start the profclock if profil(2) is enabled. */
+ if (ISSET(p->p_p->ps_flags, PS_PROFIL)) {
+ atomic_setbits_int(&p->p_cpu->ci_schedstate.spc_schedflags,
+ SPCF_PROFCLOCK);
+ clockintr_advance(p->p_cpu->ci_schedstate.spc_profclock,
+ profclock_period);
+ }
+
nanouptime(&p->p_cpu->ci_schedstate.spc_runtime);
#ifdef MULTIPROCESSOR
-/* $OpenBSD: subr_prof.c,v 1.35 2023/06/02 17:44:29 cheloha Exp $ */
+/* $OpenBSD: subr_prof.c,v 1.36 2023/07/25 18:16:19 cheloha Exp $ */
/* $NetBSD: subr_prof.c,v 1.12 1996/04/22 01:38:50 christos Exp $ */
/*-
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/atomic.h>
+#include <sys/clockintr.h>
#include <sys/pledge.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/sysctl.h>
#include <sys/syscallargs.h>
+#include <sys/user.h>
+uint32_t profclock_period;
#if defined(GPROF) || defined(DDBPROF)
#include <sys/malloc.h>
extern char etext[];
+void gmonclock(struct clockintr *, void *);
+
void
prof_init(void)
{
/* Allocate and initialize one profiling buffer per CPU. */
CPU_INFO_FOREACH(cii, ci) {
+ ci->ci_gmonclock = clockintr_establish(&ci->ci_queue,
+ gmonclock);
+ if (ci->ci_gmonclock == NULL) {
+ printf("%s: clockintr_establish gmonclock\n", __func__);
+ return;
+ }
+ clockintr_stagger(ci->ci_gmonclock, profclock_period,
+ CPU_INFO_UNIT(ci), MAXCPUS);
cp = km_alloc(round_page(size), &kv_any, &kp_zero, &kd_nowait);
if (cp == NULL) {
printf("No memory for profiling.\n");
}
int
-prof_state_toggle(struct gmonparam *gp, int oldstate)
+prof_state_toggle(struct cpu_info *ci, int oldstate)
{
+ struct gmonparam *gp = ci->ci_gmon;
int error = 0;
KERNEL_ASSERT_LOCKED();
if (error == 0) {
if (++gmon_cpu_count == 1)
startprofclock(&process0);
+ clockintr_advance(ci->ci_gmonclock, profclock_period);
}
break;
default:
gp->state = GMON_PROF_OFF;
/* FALLTHROUGH */
case GMON_PROF_OFF:
+ clockintr_cancel(ci->ci_gmonclock);
if (--gmon_cpu_count == 0)
stopprofclock(&process0);
#if !defined(GPROF)
error = sysctl_int(oldp, oldlenp, newp, newlen, &gp->state);
if (error)
return (error);
- return (prof_state_toggle(gp, state));
+ return prof_state_toggle(ci, state);
case GPROF_COUNT:
return (sysctl_struct(oldp, oldlenp, newp, newlen,
gp->kcount, gp->kcountsize));
}
/* NOTREACHED */
}
+
+void
+gmonclock(struct clockintr *cl, void *cf)
+{
+ uint64_t count;
+ struct clockframe *frame = cf;
+ struct gmonparam *g = curcpu()->ci_gmon;
+ u_long i;
+
+ count = clockintr_advance(cl, profclock_period);
+ if (count > ULONG_MAX)
+ count = ULONG_MAX;
+
+ /*
+ * Kernel statistics are just like addupc_intr(), only easier.
+ */
+ if (!CLKF_USERMODE(frame) && g != NULL && g->state == GMON_PROF_ON) {
+ i = CLKF_PC(frame) - g->lowpc;
+ if (i < g->textsize) {
+ i /= HISTFRACTION * sizeof(*g->kcount);
+ g->kcount[i] += (u_long)count;
+ }
+ }
+}
+
#endif /* GPROF || DDBPROF */
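gmonclock() keeps the old statclock() bucket arithmetic: the sampled PC is offset from g->lowpc and divided by HISTFRACTION times the counter size to pick a kcount slot, which is now incremented by the number of elapsed periods rather than by one. A user-space sketch of that indexing; the HISTFRACTION of 2, the 2-byte counters, and the addresses are illustrative stand-ins for the values <sys/gmon.h> and the running kernel provide:

#include <stdio.h>

int
main(void)
{
        unsigned long lowpc = 0x80100000UL;     /* hypothetical text start */
        unsigned long pc = 0x8010abcdUL;        /* hypothetical sampled PC */
        unsigned long histfraction = 2;         /* stand-in for HISTFRACTION */
        unsigned long countersize = 2;          /* stand-in for sizeof(*kcount) */
        unsigned long i;

        i = pc - lowpc;                         /* offset into kernel text */
        i /= histfraction * countersize;        /* bytes of text per bucket */
        printf("kcount bucket %lu\n", i);       /* 0xabcd / 4 = 10995 */
        return 0;
}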
/*
return (EINVAL);
if (SCARG(uap, scale) == 0) {
stopprofclock(pr);
+ need_resched(curcpu());
return (0);
}
upp = &pr->ps_prof;
upp->pr_size = SCARG(uap, size);
startprofclock(pr);
splx(s);
+ need_resched(curcpu());
return (0);
}
+void
+profclock(struct clockintr *cl, void *cf)
+{
+ uint64_t count;
+ struct clockframe *frame = cf;
+ struct proc *p = curproc;
+
+ count = clockintr_advance(cl, profclock_period);
+ if (count > ULONG_MAX)
+ count = ULONG_MAX;
+
+ if (CLKF_USERMODE(frame)) {
+ if (ISSET(p->p_p->ps_flags, PS_PROFIL))
+ addupc_intr(p, CLKF_PC(frame), (u_long)count);
+ } else {
+ if (p != NULL && ISSET(p->p_p->ps_flags, PS_PROFIL))
+ addupc_intr(p, PROC_PC(p), (u_long)count);
+ }
+}
+
/*
* Scale is a fixed-point number with the binary point 16 bits
* into the value, and is <= 1.0. pc is at most 32 bits, so the
-/* $OpenBSD: clockintr.h,v 1.8 2023/06/15 22:18:06 cheloha Exp $ */
+/* $OpenBSD: clockintr.h,v 1.9 2023/07/25 18:16:19 cheloha Exp $ */
/*
* Copyright (c) 2020-2022 Scott Cheloha <cheloha@openbsd.org>
*
/* Global state flags. */
#define CL_INIT 0x00000001 /* global init done */
-#define CL_STATCLOCK 0x00000002 /* statclock variables set */
-#define CL_STATE_MASK 0x00000003
+#define CL_STATE_MASK 0x00000001
/* Global behavior flags. */
#define CL_RNDSTAT 0x80000000 /* randomized statclock */
void clockintr_cpu_init(const struct intrclock *);
int clockintr_dispatch(void *);
void clockintr_init(u_int);
-void clockintr_setstatclockrate(int);
void clockintr_trigger(void);
/*
* Kernel API
*/
+uint64_t clockintr_advance(struct clockintr *, uint64_t);
+void clockintr_cancel(struct clockintr *);
+struct clockintr *clockintr_establish(struct clockintr_queue *,
+ void (*)(struct clockintr *, void *));
+void clockintr_stagger(struct clockintr *, uint64_t, u_int, u_int);
void clockqueue_init(struct clockintr_queue *);
int sysctl_clockintr(int *, u_int, void *, size_t *, void *, size_t);
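The declarations moved into the Kernel API section outline the consumer pattern used elsewhere in this diff: establish a handle per CPU, stagger it so CPUs do not fire in lockstep, advance it from the callback (the return value is the number of elapsed periods), and cancel it when the work stops. A condensed template modeled on profclock/gmonclock above; the example_* names and period are hypothetical, and the surrounding includes are assumed to match the files in this diff:

/*
 * Sketch only; assumes <sys/clockintr.h>, <sys/sched.h>, and the
 * machine cpu.h context used by the files above.
 */
struct clockintr *example_cl;                   /* hypothetical handle */
uint64_t example_period;                        /* hypothetical period (ns) */

void
example_callback(struct clockintr *cl, void *cf)
{
        uint64_t count;

        /* Reschedule and learn how many periods have elapsed. */
        count = clockintr_advance(cl, example_period);
        /* ... account for (u_long)count periods of work ... */
}

void
example_start(struct cpu_info *ci)
{
        example_cl = clockintr_establish(&ci->ci_queue, example_callback);
        if (example_cl == NULL)
                panic("%s: clockintr_establish", __func__);
        clockintr_stagger(example_cl, example_period,
            CPU_INFO_UNIT(ci), MAXCPUS);
        clockintr_advance(example_cl, example_period);
}

void
example_stop(void)
{
        clockintr_cancel(example_cl);
}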
-/* $OpenBSD: resourcevar.h,v 1.26 2023/04/25 00:58:47 cheloha Exp $ */
+/* $OpenBSD: resourcevar.h,v 1.27 2023/07/25 18:16:19 cheloha Exp $ */
/* $NetBSD: resourcevar.h,v 1.12 1995/11/22 23:01:53 cgd Exp $ */
/*
#include <lib/libkern/libkern.h> /* for KASSERT() */
+struct clockintr;
+
+extern uint32_t profclock_period;
+
void addupc_intr(struct proc *, u_long, u_long);
void addupc_task(struct proc *, u_long, u_int);
+void profclock(struct clockintr *, void *);
void tuagg_unlocked(struct process *, struct proc *);
void tuagg(struct process *, struct proc *);
struct tusage;
-/* $OpenBSD: sched.h,v 1.57 2020/12/25 12:49:31 visa Exp $ */
+/* $OpenBSD: sched.h,v 1.58 2023/07/25 18:16:19 cheloha Exp $ */
/* $NetBSD: sched.h,v 1.2 1999/02/28 18:14:58 ross Exp $ */
/*-
#define SCHED_NQS 32 /* 32 run queues. */
+struct clockintr;
struct smr_entry;
/*
u_int64_t spc_cp_time[CPUSTATES]; /* CPU state statistics */
u_char spc_curpriority; /* usrpri of curproc */
int spc_rrticks; /* ticks until roundrobin() */
- int spc_pscnt; /* prof/stat counter */
- int spc_psdiv; /* prof/stat divisor */
+
+ struct clockintr *spc_profclock; /* [o] profclock handle */
u_int spc_nrun; /* procs on the run queues */
fixpt_t spc_ldavg; /* shortest load avg. for this cpu */
#define SPCF_SWITCHCLEAR (SPCF_SEENRR|SPCF_SHOULDYIELD)
#define SPCF_SHOULDHALT 0x0004 /* CPU should be vacated */
#define SPCF_HALTED 0x0008 /* CPU has been halted */
+#define SPCF_PROFCLOCK 0x0010 /* profclock() was started */
#define SCHED_PPQ (128 / SCHED_NQS) /* priorities per queue */
#define NICE_WEIGHT 2 /* priorities per nice level */