The current scheduler usage is highly questionable and probably not helpful.
OK kettenis@ cheloha@ deraadt@
-/* $OpenBSD: kern_sched.c,v 1.81 2023/07/27 17:52:53 cheloha Exp $ */
+/* $OpenBSD: kern_sched.c,v 1.82 2023/08/03 16:12:08 claudio Exp $ */
/*
* Copyright (c) 2007, 2008 Artur Grabowski <art@openbsd.org>
*
{
#ifdef MULTIPROCESSOR
struct cpu_info *choice = NULL;
- fixpt_t load, best_load = ~0;
int run, best_run = INT_MAX;
struct cpu_info *ci;
struct cpuset set;
while ((ci = cpuset_first(&set)) != NULL) {
cpuset_del(&set, ci);
- load = ci->ci_schedstate.spc_ldavg;
run = ci->ci_schedstate.spc_nrun;
- if (choice == NULL || run < best_run ||
- (run == best_run &&load < best_load)) {
+ if (choice == NULL || run < best_run) {
choice = ci;
- best_load = load;
best_run = run;
}
}
if (CPU_IS_PRIMARY(ci))
cost += sched_cost_runnable;
- /*
- * Higher load on the destination means we don't want to go there.
- */
- cost += ((sched_cost_load * spc->spc_ldavg) >> FSHIFT);
-
/*
* If the proc is on this cpu already, lower the cost by how much
* it has been running and an estimate of its footprint.
-/* $OpenBSD: sched.h,v 1.58 2023/07/25 18:16:19 cheloha Exp $ */
+/* $OpenBSD: sched.h,v 1.59 2023/08/03 16:12:08 claudio Exp $ */
/* $NetBSD: sched.h,v 1.2 1999/02/28 18:14:58 ross Exp $ */
/*-
struct clockintr *spc_profclock; /* [o] profclock handle */
u_int spc_nrun; /* procs on the run queues */
- fixpt_t spc_ldavg; /* shortest load avg. for this cpu */
volatile uint32_t spc_whichqs;
volatile u_int spc_spinning; /* this cpu is currently spinning */
-/* $OpenBSD: uvm_meter.c,v 1.47 2023/08/03 16:08:12 claudio Exp $ */
+/* $OpenBSD: uvm_meter.c,v 1.48 2023/08/03 16:12:08 claudio Exp $ */
/* $NetBSD: uvm_meter.c,v 1.21 2001/07/14 06:36:03 matt Exp $ */
/*
static void
uvm_loadav(struct loadavg *avg)
{
+ extern struct cpuset sched_idle_cpus;
CPU_INFO_ITERATOR cii;
struct cpu_info *ci;
- struct schedstate_percpu *spc;
- u_int i, nrun = 0, nrun_cpu;
- int s;
+ u_int i, nrun = 0;
-
- SCHED_LOCK(s);
CPU_INFO_FOREACH(cii, ci) {
- spc = &ci->ci_schedstate;
- nrun_cpu = spc->spc_nrun;
- if (ci->ci_curproc != spc->spc_idleproc)
- nrun_cpu++;
- if (nrun_cpu == 0)
- continue;
- spc->spc_ldavg = (cexp[0] * spc->spc_ldavg +
- nrun_cpu * FSCALE *
- (FSCALE - cexp[0])) >> FSHIFT;
- nrun += nrun_cpu;
+ if (!cpuset_isset(&sched_idle_cpus, ci))
+ nrun++;
+ nrun += ci->ci_schedstate.spc_nrun;
}
- SCHED_UNLOCK(s);
for (i = 0; i < 3; i++) {
avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +