uvm_meter(9) should not base its periodic uvm_loadav() call on the UTC
clock. It also no longer needs to periodically wake up proc0 because
proc0 doesn't do any work. schedcpu() itself may change or go away,
but as kettenis@ notes we probably can't completely remove the concept
of a "load average" from OpenBSD, given its long Unix heritage.
So, (1) remove the uvm_meter() call from schedcpu(), (2) make
uvm_meter() an independent timeout started alongside schedcpu() during
scheduler_start(), and (3) delete the vestigial periodic proc0 wakeup.
With input from deraadt@, kettenis@, and claudio@. deraadt@ cautions
that this change may confuse administrators who hold the load average
in high regard.
Thread: https://marc.info/?l=openbsd-tech&m=168710929409153&w=2
general agreement with this direction from kettenis@
ok claudio@
-.\" $OpenBSD: uvm_init.9,v 1.5 2023/05/21 05:11:38 jmc Exp $
+.\" $OpenBSD: uvm_init.9,v 1.6 2023/06/20 16:30:30 cheloha Exp $
.\" $NetBSD: uvm.9,v 1.14 2000/06/29 06:08:44 mrg Exp $
.\"
.\" Copyright (c) 1998 Matthew R. Green
.\" XXX this manual sets nS to 1 or 0 in the description, to obtain
.\" synopsis-like function prototypes. any better way?
.\"
-.Dd $Mdocdate: May 21 2023 $
+.Dd $Mdocdate: June 20 2023 $
.Dt UVM_INIT 9
.Os
.Sh NAME
.Ft void
.Fn uvm_kernacc "caddr_t addr" "size_t len" "int rw"
.Ft void
-.Fn uvm_meter
+.Fn uvm_meter "void *"
.Ft int
.Fn uvm_sysctl "int *name" "u_int namelen" "void *oldp" "size_t *oldlenp" "void *newp " "size_t newlen" "struct proc *p"
.Ft int
.Pp
The
.Fn uvm_meter
-function calculates the load average and wakes up the swapper if necessary.
+function periodically recomputes the load average.
.Pp
The
.Fn uvm_sysctl
-/* $OpenBSD: sched_bsd.c,v 1.74 2023/02/04 19:33:03 cheloha Exp $ */
+/* $OpenBSD: sched_bsd.c,v 1.75 2023/06/20 16:30:30 cheloha Exp $ */
/* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
/*-
}
SCHED_UNLOCK(s);
}
- uvm_meter();
wakeup(&lbolt);
timeout_add_sec(to, 1);
}
rrticks_init = hz / 10;
schedcpu(&schedcpu_to);
+ uvm_meter(NULL);
#ifndef SMALL_KERNEL
if (perfpolicy == PERFPOL_AUTO)
-/* $OpenBSD: uvm_extern.h,v 1.168 2023/05/30 08:30:01 jsg Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.169 2023/06/20 16:30:30 cheloha Exp $ */
/* $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $ */
/*
struct vmspace *uvmspace_share(struct process *);
int uvm_share(vm_map_t, vaddr_t, vm_prot_t,
vm_map_t, vaddr_t, vsize_t);
-void uvm_meter(void);
+void uvm_meter(void *);
int uvm_sysctl(int *, u_int, void *, size_t *,
void *, size_t, struct proc *);
struct vm_page *uvm_pagealloc(struct uvm_object *,
-/* $OpenBSD: uvm_meter.c,v 1.42 2020/12/28 14:01:23 mpi Exp $ */
+/* $OpenBSD: uvm_meter.c,v 1.43 2023/06/20 16:30:30 cheloha Exp $ */
/* $NetBSD: uvm_meter.c,v 1.21 2001/07/14 06:36:03 matt Exp $ */
/*
int maxslp = MAXSLP; /* patchable ... */
struct loadavg averunnable;
+#define UVM_METER_INTVL 5
+struct timeout uvm_meter_to = TIMEOUT_INITIALIZER(uvm_meter, NULL);
+
/*
* constants for averages over 1, 5, and 15 minutes when sampling at
* 5 second intervals.
void uvmexp_read(struct uvmexp *);
/*
- * uvm_meter: calculate load average and wake up the swapper (if needed)
+ * uvm_meter: recompute load averages
*/
void
-uvm_meter(void)
+uvm_meter(void *unused)
{
- if ((gettime() % 5) == 0)
- uvm_loadav(&averunnable);
- if (proc0.p_slptime > (maxslp / 2))
- wakeup(&proc0);
+ timeout_add_sec(&uvm_meter_to, UVM_METER_INTVL);
+ uvm_loadav(&averunnable);
}
/*