- Remove the roundrobin() call from hardclock(9).
- Revise roundrobin() to make it a valid clock interrupt callback.
It is still periodic and it still runs at one tenth of the hardclock
frequency.
- Account for multiple expirations in roundrobin(): if two or more
roundrobin periods have elapsed, set SPCF_SHOULDYIELD on the running
thread immediately to simulate normal behavior.
- Each schedstate_percpu has its own roundrobin() handle, spc_roundrobin.
spc_roundrobin is started/advanced during clockintr_cpu_init().
Intervals elapsed across suspend/resume are discarded.
- rrticks_init and schedstate_percpu.spc_rrticks are now useless:
delete them.
Tweaked by mpi@. With input from mpi@ and claudio@.
Thread: https://marc.info/?l=openbsd-tech&m=169127381314651&w=2
ok mpi@ claudio@
-/* $OpenBSD: kern_clock.c,v 1.111 2023/08/05 20:07:55 cheloha Exp $ */
+/* $OpenBSD: kern_clock.c,v 1.112 2023/08/11 22:02:50 cheloha Exp $ */
/* $NetBSD: kern_clock.c,v 1.34 1996/06/09 04:51:03 briggs Exp $ */
/*-
{
struct cpu_info *ci = curcpu();
- if (--ci->ci_schedstate.spc_rrticks <= 0)
- roundrobin(ci);
-
#if NDT > 0
DT_ENTER(profile, NULL);
if (CPU_IS_PRIMARY(ci))
-/* $OpenBSD: kern_clockintr.c,v 1.30 2023/08/05 20:07:55 cheloha Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.31 2023/08/11 22:02:50 cheloha Exp $ */
/*
* Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
KASSERT(hz > 0 && hz <= 1000000000);
hardclock_period = 1000000000 / hz;
+ roundrobin_period = hardclock_period * 10;
KASSERT(stathz >= 1 && stathz <= 1000000000);
clockintr_stagger(spc->spc_profclock, profclock_period,
multiplier, MAXCPUS);
}
+ if (spc->spc_roundrobin->cl_expiration == 0) {
+ clockintr_stagger(spc->spc_roundrobin, hardclock_period,
+ multiplier, MAXCPUS);
+ }
+ clockintr_advance(spc->spc_roundrobin, roundrobin_period);
if (reset_cq_intrclock)
SET(cq->cq_flags, CQ_INTRCLOCK);
-/* $OpenBSD: kern_sched.c,v 1.84 2023/08/05 20:07:55 cheloha Exp $ */
+/* $OpenBSD: kern_sched.c,v 1.85 2023/08/11 22:02:50 cheloha Exp $ */
/*
* Copyright (c) 2007, 2008 Artur Grabowski <art@openbsd.org>
*
if (spc->spc_profclock == NULL)
panic("%s: clockintr_establish profclock", __func__);
}
+ if (spc->spc_roundrobin == NULL) {
+ spc->spc_roundrobin = clockintr_establish(&ci->ci_queue,
+ roundrobin);
+ if (spc->spc_roundrobin == NULL)
+ panic("%s: clockintr_establish roundrobin", __func__);
+ }
kthread_create_deferred(sched_kthreads_create, ci);
-/* $OpenBSD: sched_bsd.c,v 1.79 2023/08/05 20:07:55 cheloha Exp $ */
+/* $OpenBSD: sched_bsd.c,v 1.80 2023/08/11 22:02:50 cheloha Exp $ */
/* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
/*-
#include <sys/ktrace.h>
#endif
-
+uint32_t roundrobin_period; /* [I] roundrobin period (ns) */
int lbolt; /* once a second sleep address */
-int rrticks_init; /* # of hardclock ticks per roundrobin() */
#ifdef MULTIPROCESSOR
struct __mp_lock sched_lock;
* Force switch among equal priority processes every 100ms.
*/
void
-roundrobin(struct cpu_info *ci)
+roundrobin(struct clockintr *cl, void *cf)
{
+ uint64_t count;
+ struct cpu_info *ci = curcpu();
struct schedstate_percpu *spc = &ci->ci_schedstate;
- spc->spc_rrticks = rrticks_init;
+ count = clockintr_advance(cl, roundrobin_period);
if (ci->ci_curproc != NULL) {
- if (spc->spc_schedflags & SPCF_SEENRR) {
+ if (spc->spc_schedflags & SPCF_SEENRR || count >= 2) {
/*
* The process has already been through a roundrobin
* without switching and may be hogging the CPU.
* Indicate that the process should yield.
*/
atomic_setbits_int(&spc->spc_schedflags,
- SPCF_SHOULDYIELD);
+ SPCF_SEENRR | SPCF_SHOULDYIELD);
} else {
atomic_setbits_int(&spc->spc_schedflags,
SPCF_SEENRR);
* its job.
*/
timeout_set(&schedcpu_to, schedcpu, &schedcpu_to);
-
- rrticks_init = hz / 10;
schedcpu(&schedcpu_to);
#ifndef SMALL_KERNEL
-/* $OpenBSD: sched.h,v 1.60 2023/08/05 20:07:56 cheloha Exp $ */
+/* $OpenBSD: sched.h,v 1.61 2023/08/11 22:02:50 cheloha Exp $ */
/* $NetBSD: sched.h,v 1.2 1999/02/28 18:14:58 ross Exp $ */
/*-
u_int spc_schedticks; /* ticks for schedclock() */
u_int64_t spc_cp_time[CPUSTATES]; /* CPU state statistics */
u_char spc_curpriority; /* usrpri of curproc */
- int spc_rrticks; /* ticks until roundrobin() */
struct clockintr *spc_itimer; /* [o] itimer_update handle */
struct clockintr *spc_profclock; /* [o] profclock handle */
+ struct clockintr *spc_roundrobin; /* [o] roundrobin handle */
u_int spc_nrun; /* procs on the run queues */
#define NICE_WEIGHT 2 /* priorities per nice level */
#define ESTCPULIM(e) min((e), NICE_WEIGHT * PRIO_MAX - SCHED_PPQ)
+extern uint32_t roundrobin_period;
extern int schedhz; /* ideally: 16 */
-extern int rrticks_init; /* ticks per roundrobin() */
struct proc;
void schedclock(struct proc *);
-struct cpu_info;
-void roundrobin(struct cpu_info *);
+void roundrobin(struct clockintr *, void *);
void scheduler_start(void);
void userret(struct proc *p);
+struct cpu_info;
void sched_init_cpu(struct cpu_info *);
void sched_idle(void *);
void sched_exit(struct proc *);