-/* $OpenBSD: sched_bsd.c,v 1.81 2023/08/14 08:33:24 mpi Exp $ */
+/* $OpenBSD: sched_bsd.c,v 1.82 2023/08/18 09:18:52 claudio Exp $ */
/* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
/*-
struct __mp_lock sched_lock;
#endif
+void update_loadavg(void *);
void schedcpu(void *);
uint32_t decay_aftersleep(uint32_t, uint32_t);
+extern struct cpuset sched_idle_cpus;
+
+/*
+ * constants for averages over 1, 5, and 15 minutes when sampling at
+ * 5 second intervals.
+ */
+static const fixpt_t cexp[3] = {
+ 0.9200444146293232 * FSCALE, /* exp(-1/12) */
+ 0.9834714538216174 * FSCALE, /* exp(-1/60) */
+ 0.9944598480048967 * FSCALE, /* exp(-1/180) */
+};
+
+struct loadavg averunnable;
+
/*
* Force switch among equal priority processes every 100ms.
*/
need_resched(ci);
}
+
+
+/*
+ * update_loadavg: compute a tenex style load average of a quantity on
+ * 1, 5, and 15 minute intervals.
+ */
+/*
+ * Timeout handler: sample the number of runnable threads system-wide and
+ * fold it into the decaying 1/5/15-minute averages in averunnable, then
+ * re-arm itself.  Scheduled initially from scheduler_start().
+ */
+void
+update_loadavg(void *arg)
+{
+	struct timeout *to = (struct timeout *)arg;
+	CPU_INFO_ITERATOR cii;
+	struct cpu_info *ci;
+	u_int i, nrun = 0;
+
+	/*
+	 * Count runnable threads: each CPU's run-queue length, plus one
+	 * for every CPU not in sched_idle_cpus (presumably the set of
+	 * currently idle CPUs — a non-idle CPU is running one thread).
+	 */
+	CPU_INFOREACH_MARKER_DO_NOT_EMIT
+	CPU_INFO_FOREACH(cii, ci) {
+		if (!cpuset_isset(&sched_idle_cpus, ci))
+			nrun++;
+		nrun += ci->ci_schedstate.spc_nrun;
+	}
+
+	/* Exponential decay into the 1, 5 and 15 minute fixed-point averages. */
+	for (i = 0; i < 3; i++) {
+		averunnable.ldavg[i] = (cexp[i] * averunnable.ldavg[i] +
+		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
+	}
+
+	/* Re-sample every 5 seconds; the cexp[] constants assume this period. */
+	timeout_add_sec(to, 5);
+}
+
/*
* Constants for digital decay and forget:
* 90% of (p_estcpu) usage in 5 * loadav time
}
SCHED_UNLOCK(s);
}
- uvm_meter();
wakeup(&lbolt);
timeout_add_sec(to, 1);
}
scheduler_start(void)
{
static struct timeout schedcpu_to;
+ static struct timeout loadavg_to;
/*
* We avoid polluting the global namespace by keeping the scheduler
* its job.
*/
timeout_set(&schedcpu_to, schedcpu, &schedcpu_to);
+ timeout_set(&loadavg_to, update_loadavg, &loadavg_to);
+
schedcpu(&schedcpu_to);
+ update_loadavg(&loadavg_to);
#ifndef SMALL_KERNEL
if (perfpolicy == PERFPOL_AUTO)
-/* $OpenBSD: uvm_extern.h,v 1.170 2023/06/21 21:16:21 cheloha Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.171 2023/08/18 09:18:52 claudio Exp $ */
/* $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $ */
/*
struct vmspace *uvmspace_share(struct process *);
int uvm_share(vm_map_t, vaddr_t, vm_prot_t,
vm_map_t, vaddr_t, vsize_t);
-void uvm_meter(void);
int uvm_sysctl(int *, u_int, void *, size_t *,
void *, size_t, struct proc *);
struct vm_page *uvm_pagealloc(struct uvm_object *,
-/* $OpenBSD: uvm_meter.c,v 1.48 2023/08/03 16:12:08 claudio Exp $ */
+/* $OpenBSD: uvm_meter.c,v 1.49 2023/08/18 09:18:52 claudio Exp $ */
/* $NetBSD: uvm_meter.c,v 1.21 2001/07/14 06:36:03 matt Exp $ */
/*
#define MAXSLP 20
int maxslp = MAXSLP; /* patchable ... */
-struct loadavg averunnable;
-/*
- * constants for averages over 1, 5, and 15 minutes when sampling at
- * 5 second intervals.
- */
-
-static const fixpt_t cexp[3] = {
- 0.9200444146293232 * FSCALE, /* exp(-1/12) */
- 0.9834714538216174 * FSCALE, /* exp(-1/60) */
- 0.9944598480048967 * FSCALE, /* exp(-1/180) */
-};
+extern struct loadavg averunnable;
-
-static void uvm_loadav(struct loadavg *);
void uvm_total(struct vmtotal *);
void uvmexp_read(struct uvmexp *);
-/*
- * uvm_meter: calculate load average
- */
-void
-uvm_meter(void)
-{
- if ((gettime() % 5) == 0)
- uvm_loadav(&averunnable);
-}
-
-/*
- * uvm_loadav: compute a tenex style load average of a quantity on
- * 1, 5, and 15 minute intervals.
- */
-static void
-uvm_loadav(struct loadavg *avg)
-{
- extern struct cpuset sched_idle_cpus;
- CPU_INFO_ITERATOR cii;
- struct cpu_info *ci;
- u_int i, nrun = 0;
-
- CPU_INFO_FOREACH(cii, ci) {
- if (!cpuset_isset(&sched_idle_cpus, ci))
- nrun++;
- nrun += ci->ci_schedstate.spc_nrun;
- }
-
- for (i = 0; i < 3; i++) {
- avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
- nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
- }
-}
-
char malloc_conf[16];
/*