From: claudio
Date: Thu, 3 Aug 2023 16:12:08 +0000 (+0000)
Subject: Remove the per-cpu loadavg calculation.
X-Git-Url: http://artulab.com/gitweb/?a=commitdiff_plain;h=834cc80d160ab56359e2fd98c74d60fe4208d67d;p=openbsd

Remove the per-cpu loadavg calculation.

The current scheduler usage is highly questionable and probably not
helpful.

OK kettenis@ cheloha@ deraadt@
---

diff --git a/sys/kern/kern_sched.c b/sys/kern/kern_sched.c
index c94438ecf09..b6d1ea68431 100644
--- a/sys/kern/kern_sched.c
+++ b/sys/kern/kern_sched.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: kern_sched.c,v 1.81 2023/07/27 17:52:53 cheloha Exp $	*/
+/*	$OpenBSD: kern_sched.c,v 1.82 2023/08/03 16:12:08 claudio Exp $	*/
 /*
  * Copyright (c) 2007, 2008 Artur Grabowski <art@openbsd.org>
  *
@@ -373,7 +373,6 @@ sched_choosecpu_fork(struct proc *parent, int flags)
 {
 #ifdef MULTIPROCESSOR
 	struct cpu_info *choice = NULL;
-	fixpt_t load, best_load = ~0;
 	int run, best_run = INT_MAX;
 	struct cpu_info *ci;
 	struct cpuset set;
@@ -407,13 +406,10 @@ sched_choosecpu_fork(struct proc *parent, int flags)
 	while ((ci = cpuset_first(&set)) != NULL) {
 		cpuset_del(&set, ci);
 
-		load = ci->ci_schedstate.spc_ldavg;
 		run = ci->ci_schedstate.spc_nrun;
 
-		if (choice == NULL || run < best_run ||
-		    (run == best_run && load < best_load)) {
+		if (choice == NULL || run < best_run) {
 			choice = ci;
-			best_load = load;
 			best_run = run;
 		}
 	}
@@ -606,11 +602,6 @@ sched_proc_to_cpu_cost(struct cpu_info *ci, struct proc *p)
 	if (CPU_IS_PRIMARY(ci))
 		cost += sched_cost_runnable;
 
-	/*
-	 * Higher load on the destination means we don't want to go there.
-	 */
-	cost += ((sched_cost_load * spc->spc_ldavg) >> FSHIFT);
-
 	/*
 	 * If the proc is on this cpu already, lower the cost by how much
 	 * it has been running and an estimate of its footprint.
diff --git a/sys/sys/sched.h b/sys/sys/sched.h
index 8673205dc7c..c53b0843120 100644
--- a/sys/sys/sched.h
+++ b/sys/sys/sched.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: sched.h,v 1.58 2023/07/25 18:16:19 cheloha Exp $	*/
+/*	$OpenBSD: sched.h,v 1.59 2023/08/03 16:12:08 claudio Exp $	*/
 /*	$NetBSD: sched.h,v 1.2 1999/02/28 18:14:58 ross Exp $	*/
 
 /*-
@@ -110,7 +110,6 @@ struct schedstate_percpu {
 	struct clockintr *spc_profclock; /* [o] profclock handle */
 
 	u_int spc_nrun;			/* procs on the run queues */
-	fixpt_t spc_ldavg;		/* shortest load avg. for this cpu */
 
 	volatile uint32_t spc_whichqs;
 	volatile u_int spc_spinning;	/* this cpu is currently spinning */
diff --git a/sys/uvm/uvm_meter.c b/sys/uvm/uvm_meter.c
index da3908233b8..e0c5d41ae8d 100644
--- a/sys/uvm/uvm_meter.c
+++ b/sys/uvm/uvm_meter.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: uvm_meter.c,v 1.47 2023/08/03 16:08:12 claudio Exp $	*/
+/*	$OpenBSD: uvm_meter.c,v 1.48 2023/08/03 16:12:08 claudio Exp $	*/
 /*	$NetBSD: uvm_meter.c,v 1.21 2001/07/14 06:36:03 matt Exp $	*/
 
 /*
@@ -98,27 +98,16 @@ uvm_meter(void)
 static void
 uvm_loadav(struct loadavg *avg)
 {
+	extern struct cpuset sched_idle_cpus;
 	CPU_INFO_ITERATOR cii;
 	struct cpu_info *ci;
-	struct schedstate_percpu *spc;
-	u_int i, nrun = 0, nrun_cpu;
-	int s;
+	u_int i, nrun = 0;
 
-
-	SCHED_LOCK(s);
 	CPU_INFO_FOREACH(cii, ci) {
-		spc = &ci->ci_schedstate;
-		nrun_cpu = spc->spc_nrun;
-		if (ci->ci_curproc != spc->spc_idleproc)
-			nrun_cpu++;
-		if (nrun_cpu == 0)
-			continue;
-		spc->spc_ldavg = (cexp[0] * spc->spc_ldavg +
-		    nrun_cpu * FSCALE *
-		    (FSCALE - cexp[0])) >> FSHIFT;
-		nrun += nrun_cpu;
+		if (!cpuset_isset(&sched_idle_cpus, ci))
+			nrun++;
+		nrun += ci->ci_schedstate.spc_nrun;
 	}
-	SCHED_UNLOCK(s);
 
 	for (i = 0; i < 3; i++) {
 		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
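
For context, the arithmetic this commit keeps (the tail of uvm_loadav(), visible
at the end of the diff) is the classic 4.4BSD fixed-point exponential decay,
now applied only to the machine-wide run count.  Below is a minimal standalone
userland sketch of that filter; it is not part of the commit.  FSHIFT, FSCALE
and the cexp[] constants mirror the well-known 4.4BSD-derived values for a
5 second sampling interval, and loadav_tick() is an invented helper name, so
read it as an illustration rather than a copy of the OpenBSD source.

/*
 * Standalone sketch of the fixed-point load average decay.
 * Assumptions: FSHIFT/FSCALE/cexp[] as in 4.4BSD-derived kernels;
 * loadav_tick() is a hypothetical name for the per-tick update.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t fixpt_t;

#define FSHIFT	11			/* bits to the right of the point */
#define FSCALE	(1 << FSHIFT)		/* 1.0 in fixed point */

/* exp(-1/12), exp(-1/60), exp(-1/180): 1-, 5- and 15-minute decay */
static const fixpt_t cexp[3] = {
	(fixpt_t)(0.9200444146293232 * FSCALE),
	(fixpt_t)(0.9834714538216174 * FSCALE),
	(fixpt_t)(0.9944598480048967 * FSCALE),
};

/*
 * One sampling tick: avg = avg * cexp + nrun * (1 - cexp), carried
 * out in fixed point, so the product is rescaled by shifting FSHIFT
 * bits back to the right.
 */
static void
loadav_tick(fixpt_t ldavg[3], unsigned int nrun)
{
	int i;

	for (i = 0; i < 3; i++)
		ldavg[i] = (cexp[i] * ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
}

int
main(void)
{
	fixpt_t ldavg[3] = { 0, 0, 0 };
	int tick;

	/* Five simulated minutes with two runnable processes. */
	for (tick = 0; tick < 60; tick++)
		loadav_tick(ldavg, 2);

	printf("%.2f %.2f %.2f\n",
	    (double)ldavg[0] / FSCALE,
	    (double)ldavg[1] / FSCALE,
	    (double)ldavg[2] / FSCALE);
	return 0;
}

With nrun pinned at 2, the 1-minute column converges to 2.00 almost
immediately while the 15-minute column is still climbing, which is exactly
the smoothing the (now global-only) filter exists to provide.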