-/* $OpenBSD: kern_acct.c,v 1.48 2024/04/13 23:44:11 jsg Exp $ */
+/* $OpenBSD: kern_acct.c,v 1.49 2024/07/08 13:17:11 claudio Exp $ */
/* $NetBSD: kern_acct.c,v 1.42 1996/02/04 02:15:12 christos Exp $ */
/*-
struct acct acct;
struct process *pr = p->p_p;
struct rusage *r;
+ struct tusage tu;
struct timespec booted, elapsed, realstart, st, tmp, uptime, ut;
int t;
struct vnode *vp;
memcpy(acct.ac_comm, pr->ps_comm, sizeof acct.ac_comm);
/* (2) The amount of user and system time that was used */
- calctsru(&pr->ps_tu, &ut, &st, NULL);
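+ /* ps_tu covers only exited threads; sum in the live ones as well */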
+ tuagg_get_process(&tu, pr);
+ calctsru(&tu, &ut, &st, NULL);
acct.ac_utime = encode_comp_t(ut.tv_sec, ut.tv_nsec);
acct.ac_stime = encode_comp_t(st.tv_sec, st.tv_nsec);
else
acct.ac_tty = -1;
- /* (8) The boolean flags that tell how process terminated or misbehaved. */
+ /* (8) The flags that tell how process terminated or misbehaved. */
acct.ac_flag = pr->ps_acflag;
/* Extensions */
-/* $OpenBSD: kern_clock.c,v 1.123 2024/02/12 22:07:33 cheloha Exp $ */
+/* $OpenBSD: kern_clock.c,v 1.124 2024/07/08 13:17:11 claudio Exp $ */
/* $NetBSD: kern_clock.c,v 1.34 1996/06/09 04:51:03 briggs Exp $ */
/*-
* Came from user mode; CPU was in user state.
* If this process is being profiled record the tick.
*/
- p->p_uticks += count;
+ tu_enter(&p->p_tu);
+ p->p_tu.tu_uticks += count;
+ tu_leave(&p->p_tu);
if (pr->ps_nice > NZERO)
spc->spc_cp_time[CP_NICE] += count;
else
* in ``non-process'' (i.e., interrupt) work.
*/
if (CLKF_INTR(frame)) {
- if (p != NULL)
- p->p_iticks += count;
+ if (p != NULL) {
+ tu_enter(&p->p_tu);
+ p->p_tu.tu_iticks += count;
+ tu_leave(&p->p_tu);
+ }
spc->spc_cp_time[spc->spc_spinning ?
CP_SPIN : CP_INTR] += count;
} else if (p != NULL && p != spc->spc_idleproc) {
- p->p_sticks += count;
+ tu_enter(&p->p_tu);
+ p->p_tu.tu_sticks += count;
+ tu_leave(&p->p_tu);
spc->spc_cp_time[spc->spc_spinning ?
CP_SPIN : CP_SYS] += count;
} else
-/* $OpenBSD: kern_exec.c,v 1.255 2024/04/02 08:39:16 deraadt Exp $ */
+/* $OpenBSD: kern_exec.c,v 1.256 2024/07/08 13:17:12 claudio Exp $ */
/* $NetBSD: kern_exec.c,v 1.75 1996/02/09 18:59:28 christos Exp $ */
/*-
/* reset CPU time usage for the thread, but not the process */
timespecclear(&p->p_tu.tu_runtime);
p->p_tu.tu_uticks = p->p_tu.tu_sticks = p->p_tu.tu_iticks = 0;
+ p->p_tu.tu_gen = 0;
memset(p->p_name, 0, sizeof p->p_name);
-/* $OpenBSD: kern_exit.c,v 1.223 2024/07/08 09:15:05 claudio Exp $ */
+/* $OpenBSD: kern_exit.c,v 1.224 2024/07/08 13:17:12 claudio Exp $ */
/* $NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $ */
/*
{
struct process *pr, *qr, *nqr;
struct rusage *rup;
- struct timespec ts;
+ struct timespec ts, pts;
atomic_setbits_int(&p->p_flag, P_WEXIT);
wakeup(&pr->ps_singlecnt);
}
+ /* proc is off the ps_threads list; update the process accounting now */
+ nanouptime(&ts);
+ if (timespeccmp(&ts, &curcpu()->ci_schedstate.spc_runtime, <))
+ timespecclear(&pts);
+ else
+ timespecsub(&ts, &curcpu()->ci_schedstate.spc_runtime, &pts);
+ tu_enter(&p->p_tu);
+ timespecadd(&p->p_tu.tu_runtime, &pts, &p->p_tu.tu_runtime);
+ tu_leave(&p->p_tu);
+ /* adjust spc_runtime so the runtime added above is not counted twice */
+ curcpu()->ci_schedstate.spc_runtime = ts;
+ tuagg_add_process(p->p_p, p);
+
if ((p->p_flag & P_THREAD) == 0) {
/* main thread gotta wait because it has the pid, et al */
while (pr->ps_threadcnt + pr->ps_exitcnt > 1)
/* add thread's accumulated rusage into the process's total */
ruadd(rup, &p->p_ru);
- nanouptime(&ts);
- if (timespeccmp(&ts, &curcpu()->ci_schedstate.spc_runtime, <))
- timespecclear(&ts);
- else
- timespecsub(&ts, &curcpu()->ci_schedstate.spc_runtime, &ts);
- SCHED_LOCK();
- tuagg_locked(pr, p, &ts);
- SCHED_UNLOCK();
/*
* clear %cpu usage during swap
if ((p->p_flag & P_THREAD) == 0) {
/*
* Final thread has died, so add on our children's rusage
- * and calculate the total times
+ * and calculate the total times.
*/
calcru(&pr->ps_tu, &rup->ru_utime, &rup->ru_stime, NULL);
ruadd(rup, &pr->ps_cru);
}
}
- /* just a thread? detach it from its process */
+ /* just a thread? check if last one standing. */
if (p->p_flag & P_THREAD) {
/* scheduler_wait_hook(pr->ps_mainproc, p); XXX */
mtx_enter(&pr->ps_mtx);
void
exit2(struct proc *p)
{
+ /* account for the remaining time spent in exit1() */
+ mtx_enter(&p->p_p->ps_mtx);
+ tuagg_add_process(p->p_p, p);
+ mtx_leave(&p->p_p->ps_mtx);
+
mtx_enter(&deadproc_mutex);
LIST_INSERT_HEAD(&deadproc, p, p_hash);
mtx_leave(&deadproc_mutex);
-/* $OpenBSD: kern_proc.c,v 1.98 2024/05/20 10:32:20 claudio Exp $ */
+/* $OpenBSD: kern_proc.c,v 1.99 2024/07/08 13:17:12 claudio Exp $ */
/* $NetBSD: kern_proc.c,v 1.14 1996/02/09 18:59:41 christos Exp $ */
/*
(*pr)(" process=%p user=%p, vmspace=%p\n",
p->p_p, p->p_addr, p->p_vmspace);
(*pr)(" estcpu=%u, cpticks=%d, pctcpu=%u.%u, "
- "user=%u, sys=%u, intr=%u\n",
+ "user=%llu, sys=%llu, intr=%llu\n",
p->p_estcpu, p->p_cpticks, p->p_pctcpu / 100, p->p_pctcpu % 100,
- p->p_uticks, p->p_sticks, p->p_iticks);
+ p->p_tu.tu_uticks, p->p_tu.tu_sticks, p->p_tu.tu_iticks);
}
#include <machine/db_machdep.h>
-/* $OpenBSD: kern_resource.c,v 1.84 2024/06/03 12:48:25 claudio Exp $ */
+/* $OpenBSD: kern_resource.c,v 1.85 2024/07/08 13:17:12 claudio Exp $ */
/* $NetBSD: kern_resource.c,v 1.38 1996/10/23 07:19:38 matthias Exp $ */
/*-
struct plimit *lim_write_begin(void);
void lim_write_commit(struct plimit *);
-void tuagg_sub(struct tusage *, struct proc *, const struct timespec *);
+void tuagg_sumup(struct tusage *, const struct tusage *);
/*
* Patchable maximum data and stack limits.
return (error);
}
+/* Add the counts from *from to *tu, ensuring a consistent read of *from. */
void
-tuagg_sub(struct tusage *tup, struct proc *p, const struct timespec *ts)
+tuagg_sumup(struct tusage *tu, const struct tusage *from)
{
- if (ts != NULL)
- timespecadd(&tup->tu_runtime, ts, &tup->tu_runtime);
- tup->tu_uticks += p->p_uticks;
- tup->tu_sticks += p->p_sticks;
- tup->tu_iticks += p->p_iticks;
+ struct tusage tmp;
+ uint64_t enter, leave;
+
+ enter = from->tu_gen;
+ for (;;) {
+ /* the generation number is odd during an update */
+ while (enter & 1) {
+ CPU_BUSY_CYCLE();
+ enter = from->tu_gen;
+ }
+
+ membar_consumer();
+ tmp = *from;
+ membar_consumer();
+ leave = from->tu_gen;
+
+ if (enter == leave)
+ break;
+ enter = leave;
+ }
+
+ tu->tu_uticks += tmp.tu_uticks;
+ tu->tu_sticks += tmp.tu_sticks;
+ tu->tu_iticks += tmp.tu_iticks;
+ timespecadd(&tu->tu_runtime, &tmp.tu_runtime, &tu->tu_runtime);
}
-/*
- * Aggregate a single thread's immediate time counts into the running
- * totals for the thread and process
- */
void
-tuagg_locked(struct process *pr, struct proc *p, const struct timespec *ts)
+tuagg_get_proc(struct tusage *tu, struct proc *p)
{
- tuagg_sub(&pr->ps_tu, p, ts);
- tuagg_sub(&p->p_tu, p, ts);
- p->p_uticks = 0;
- p->p_sticks = 0;
- p->p_iticks = 0;
+ memset(tu, 0, sizeof(*tu));
+ tuagg_sumup(tu, &p->p_tu);
}
void
-tuagg(struct process *pr, struct proc *p)
+tuagg_get_process(struct tusage *tu, struct process *pr)
{
- SCHED_LOCK();
- tuagg_locked(pr, p, NULL);
- SCHED_UNLOCK();
+ struct proc *q;
+
+ memset(tu, 0, sizeof(*tu));
+
+ mtx_enter(&pr->ps_mtx);
+ tuagg_sumup(tu, &pr->ps_tu);
+ /* add on all living threads */
+ TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link)
+ tuagg_sumup(tu, &q->p_tu);
+ mtx_leave(&pr->ps_mtx);
+}
+
+/*
+ * Update the process ps_tu usage with the values from proc p and
+ * reset the times of proc p afterwards.
+ * This requires that p is either curproc or SDEAD and that the
+ * IPL is at or above IPL_STATCLOCK. ps_mtx uses IPL_HIGH so
+ * this should always be the case.
+ */
+void
+tuagg_add_process(struct process *pr, struct proc *p)
+{
+ MUTEX_ASSERT_LOCKED(&pr->ps_mtx);
+ splassert(IPL_STATCLOCK);
+ KASSERT(curproc == p || p->p_stat == SDEAD);
+
+ tu_enter(&pr->ps_tu);
+ tuagg_sumup(&pr->ps_tu, &p->p_tu);
+ tu_leave(&pr->ps_tu);
+
+ /* Now reset CPU time usage for the thread. */
+ timespecclear(&p->p_tu.tu_runtime);
+ p->p_tu.tu_uticks = p->p_tu.tu_sticks = p->p_tu.tu_iticks = 0;
}
/*
{
struct process *pr = p->p_p;
struct proc *q;
+ struct tusage tu = { 0 };
KERNEL_ASSERT_LOCKED();
*rup = *pr->ps_ru;
else
memset(rup, 0, sizeof(*rup));
+ tuagg_sumup(&tu, &pr->ps_tu);
/* add on all living threads */
TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
ruadd(rup, &q->p_ru);
- tuagg(pr, q);
+ tuagg_sumup(&tu, &q->p_tu);
}
- calcru(&pr->ps_tu, &rup->ru_utime, &rup->ru_stime, NULL);
+ calcru(&tu, &rup->ru_utime, &rup->ru_stime, NULL);
break;
case RUSAGE_THREAD:
-/* $OpenBSD: kern_sched.c,v 1.96 2024/06/03 12:48:25 claudio Exp $ */
+/* $OpenBSD: kern_sched.c,v 1.97 2024/07/08 13:17:12 claudio Exp $ */
/*
* Copyright (c) 2007, 2008 Artur Grabowski <art@openbsd.org>
*
sched_exit(struct proc *p)
{
struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
+ struct timespec ts;
LIST_INSERT_HEAD(&spc->spc_deadproc, p, p_hash);
+ /* update the tu_runtime one last time */
+ nanouptime(&ts);
+ if (timespeccmp(&ts, &spc->spc_runtime, <))
+ timespecclear(&ts);
+ else
+ timespecsub(&ts, &spc->spc_runtime, &ts);
+
+ /* add the time counts for this thread */
+ tu_enter(&p->p_tu);
+ timespecadd(&p->p_tu.tu_runtime, &ts, &p->p_tu.tu_runtime);
+ tu_leave(&p->p_tu);
+
KERNEL_ASSERT_LOCKED();
sched_toidle();
}
-/* $OpenBSD: kern_sysctl.c,v 1.427 2024/04/12 16:07:09 bluhm Exp $ */
+/* $OpenBSD: kern_sysctl.c,v 1.428 2024/07/08 13:17:12 claudio Exp $ */
/* $NetBSD: kern_sysctl.c,v 1.17 1996/05/20 17:49:05 mrg Exp $ */
/*-
struct tty *tp;
struct vmspace *vm = pr->ps_vmspace;
struct timespec booted, st, ut, utc;
+ struct tusage tu;
int isthread;
isthread = p != NULL;
- if (!isthread)
+ if (!isthread) {
p = pr->ps_mainproc; /* XXX */
+ tuagg_get_process(&tu, pr);
+ } else
+ tuagg_get_proc(&tu, p);
FILL_KPROC(ki, strlcpy, p, pr, pr->ps_ucred, pr->ps_pgrp,
- p, pr, s, vm, pr->ps_limit, pr->ps_sigacts, isthread,
+ p, pr, s, vm, pr->ps_limit, pr->ps_sigacts, &tu, isthread,
show_pointers);
/* stuff that's too painful to generalize into the macros */
if ((pr->ps_flags & PS_ZOMBIE) == 0) {
if ((pr->ps_flags & PS_EMBRYO) == 0 && vm != NULL)
ki->p_vm_rssize = vm_resident_count(vm);
- calctsru(isthread ? &p->p_tu : &pr->ps_tu, &ut, &st, NULL);
+ calctsru(&tu, &ut, &st, NULL);
ki->p_uutime_sec = ut.tv_sec;
ki->p_uutime_usec = ut.tv_nsec/1000;
ki->p_ustime_sec = st.tv_sec;
-/* $OpenBSD: kern_time.c,v 1.167 2023/10/17 00:04:02 cheloha Exp $ */
+/* $OpenBSD: kern_time.c,v 1.168 2024/07/08 13:17:12 claudio Exp $ */
/* $NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $ */
/*
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
+#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/stdint.h>
#include <sys/pledge.h>
int
clock_gettime(struct proc *p, clockid_t clock_id, struct timespec *tp)
{
+ struct tusage tu;
struct proc *q;
int error = 0;
break;
case CLOCK_PROCESS_CPUTIME_ID:
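+ /* all threads' runtime plus curproc's time since last switch */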
nanouptime(tp);
+ tuagg_get_process(&tu, p->p_p);
timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
- timespecadd(tp, &p->p_p->ps_tu.tu_runtime, tp);
+ timespecadd(tp, &tu.tu_runtime, tp);
break;
case CLOCK_THREAD_CPUTIME_ID:
nanouptime(tp);
+ tuagg_get_proc(&tu, p);
timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
- timespecadd(tp, &p->p_tu.tu_runtime, tp);
+ timespecadd(tp, &tu.tu_runtime, tp);
break;
default:
/* check for clock from pthread_getcpuclockid() */
-/* $OpenBSD: sched_bsd.c,v 1.93 2024/06/03 12:48:25 claudio Exp $ */
+/* $OpenBSD: sched_bsd.c,v 1.94 2024/07/08 13:17:12 claudio Exp $ */
/* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
/*-
struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
struct proc *p = curproc;
struct proc *nextproc;
- struct process *pr = p->p_p;
struct timespec ts;
int oldipl;
#ifdef MULTIPROCESSOR
} else {
timespecsub(&ts, &spc->spc_runtime, &ts);
}
-
- /* add the time counts for this thread to the process's total */
- tuagg_locked(pr, p, &ts);
+ tu_enter(&p->p_tu);
+ timespecadd(&p->p_tu.tu_runtime, &ts, &p->p_tu.tu_runtime);
+ tu_leave(&p->p_tu);
/* Stop any optional clock interrupts. */
if (ISSET(spc->spc_schedflags, SPCF_ITIMER)) {
-/* $OpenBSD: tty.c,v 1.176 2022/08/14 01:58:28 jsg Exp $ */
+/* $OpenBSD: tty.c,v 1.177 2024/07/08 13:17:12 claudio Exp $ */
/* $NetBSD: tty.c,v 1.68.4.2 1996/06/06 16:04:52 thorpej Exp $ */
/*-
{
struct process *pr, *pickpr;
struct proc *p, *pick;
+ struct tusage tu;
struct timespec utime, stime;
int tmp;
pickpr->ps_vmspace != NULL)
rss = vm_resident_count(pickpr->ps_vmspace);
- calctsru(&pickpr->ps_tu, &utime, &stime, NULL);
+ tuagg_get_process(&tu, pickpr);
+ calctsru(&tu, &utime, &stime, NULL);
/* Round up and print user time. */
utime.tv_nsec += 5000000;
-/* $OpenBSD: proc.h,v 1.361 2024/05/20 10:32:20 claudio Exp $ */
+/* $OpenBSD: proc.h,v 1.362 2024/07/08 13:17:12 claudio Exp $ */
/* $NetBSD: proc.h,v 1.44 1996/04/22 01:23:21 christos Exp $ */
/*-
/*
* time usage: accumulated times in ticks
- * Once a second, each thread's immediate counts (p_[usi]ticks) are
- * accumulated into these.
+ * Each thread accumulates its own times here immediately. For a process,
+ * only the times of exited threads are accumulated; to get the full
+ * process time usage, tuagg_get_process() needs to be called.
+ * Thread accounting is done locklessly by curproc using the tu_gen
+ * generation counter; code should use tu_enter() and tu_leave() for this.
+ * The process ps_tu structure is locked by the ps_mtx.
*/
struct tusage {
- struct timespec tu_runtime; /* Realtime. */
+ uint64_t tu_gen; /* generation counter */
uint64_t tu_uticks; /* Statclock hits in user mode. */
uint64_t tu_sticks; /* Statclock hits in system mode. */
uint64_t tu_iticks; /* Statclock hits processing intr. */
+ struct timespec tu_runtime; /* Realtime. */
};
/*
struct ptrace_state *ps_ptstat;/* Ptrace state */
struct rusage *ps_ru; /* sum of stats for dead threads. */
- struct tusage ps_tu; /* accumulated times. */
+ struct tusage ps_tu; /* [m] accumul times of dead threads. */
struct rusage ps_cru; /* sum of stats for reaped children */
struct itimerspec ps_timer[3]; /* [m] ITIMER_REAL timer */
/* [T] ITIMER_{VIRTUAL,PROF} timers */
const char *p_wmesg; /* [S] Reason for sleep. */
fixpt_t p_pctcpu; /* [S] %cpu for this thread */
u_int p_slptime; /* [S] Time since last blocked. */
- u_int p_uticks; /* Statclock hits in user mode. */
- u_int p_sticks; /* Statclock hits in system mode. */
- u_int p_iticks; /* Statclock hits processing intr. */
struct cpu_info * volatile p_cpu; /* [S] CPU we're running on. */
struct rusage p_ru; /* Statistics */
- struct tusage p_tu; /* accumulated times. */
+ struct tusage p_tu; /* [o] accumulated times. */
struct plimit *p_limit; /* [l] read ref. of p_p->ps_limit */
struct kcov_dev *p_kd; /* kcov device handle */
int cpuset_cardinality(struct cpuset *);
struct cpu_info *cpuset_first(struct cpuset *);
+static inline void
+tu_enter(struct tusage *tu)
+{
+ ++tu->tu_gen; /* make the generation number odd */
+ membar_producer();
+}
+
+static inline void
+tu_leave(struct tusage *tu)
+{
+ membar_producer();
+ ++tu->tu_gen; /* make the generation number even again */
+}
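+
+/*
+ * Usage sketch: a thread updates its own p_tu between tu_enter() and
+ * tu_leave(), as statclock() does for the tick counters:
+ *
+ *	tu_enter(&p->p_tu);
+ *	p->p_tu.tu_uticks += count;
+ *	tu_leave(&p->p_tu);
+ *
+ * Readers such as tuagg_sumup() copy the structure and retry while
+ * tu_gen is odd or has changed in between, yielding a consistent
+ * snapshot without taking a lock.
+ */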
+
#endif /* _KERNEL */
#endif /* !_SYS_PROC_H_ */
-/* $OpenBSD: resourcevar.h,v 1.31 2023/10/17 00:04:02 cheloha Exp $ */
+/* $OpenBSD: resourcevar.h,v 1.32 2024/07/08 13:17:12 claudio Exp $ */
/* $NetBSD: resourcevar.h,v 1.12 1995/11/22 23:01:53 cgd Exp $ */
/*
void addupc_task(struct proc *, u_long, u_int);
struct clockrequest;
void profclock(struct clockrequest *, void *, void *);
-void tuagg_locked(struct process *, struct proc *, const struct timespec *);
-void tuagg(struct process *, struct proc *);
+void tuagg_add_process(struct process *, struct proc *);
struct tusage;
+void tuagg_get_proc(struct tusage *, struct proc *);
+void tuagg_get_process(struct tusage *, struct process *);
void calctsru(struct tusage *, struct timespec *, struct timespec *,
struct timespec *);
void calcru(struct tusage *, struct timeval *, struct timeval *,
void ruadd(struct rusage *, struct rusage *);
void rucheck(void *);
+
#endif
#endif /* !_SYS_RESOURCEVAR_H_ */
-/* $OpenBSD: sysctl.h,v 1.235 2023/10/01 15:58:12 krw Exp $ */
+/* $OpenBSD: sysctl.h,v 1.236 2024/07/08 13:17:12 claudio Exp $ */
/* $NetBSD: sysctl.h,v 1.16 1996/04/09 20:55:36 cgd Exp $ */
/*
#define _FILL_KPROC_MIN(a,b) (((a)<(b))?(a):(b))
#define FILL_KPROC(kp, copy_str, p, pr, uc, pg, paddr, \
- praddr, sess, vm, lim, sa, isthread, show_addresses) \
+ praddr, sess, vm, lim, sa, tu, isthread, show_addresses) \
do { \
memset((kp), 0, sizeof(*(kp))); \
\
\
(kp)->p_estcpu = (p)->p_estcpu; \
if (isthread) { \
- (kp)->p_rtime_sec = (p)->p_tu.tu_runtime.tv_sec; \
- (kp)->p_rtime_usec = (p)->p_tu.tu_runtime.tv_nsec/1000; \
(kp)->p_tid = (p)->p_tid + THREAD_PID_OFFSET; \
- (kp)->p_uticks = (p)->p_tu.tu_uticks; \
- (kp)->p_sticks = (p)->p_tu.tu_sticks; \
- (kp)->p_iticks = (p)->p_tu.tu_iticks; \
strlcpy((kp)->p_name, (p)->p_name, sizeof((kp)->p_name)); \
} else { \
- (kp)->p_rtime_sec = (pr)->ps_tu.tu_runtime.tv_sec; \
- (kp)->p_rtime_usec = (pr)->ps_tu.tu_runtime.tv_nsec/1000; \
(kp)->p_tid = -1; \
- (kp)->p_uticks = (pr)->ps_tu.tu_uticks; \
- (kp)->p_sticks = (pr)->ps_tu.tu_sticks; \
- (kp)->p_iticks = (pr)->ps_tu.tu_iticks; \
} \
+ (kp)->p_rtime_sec = (tu)->tu_runtime.tv_sec; \
+ (kp)->p_rtime_usec = (tu)->tu_runtime.tv_nsec/1000; \
+ (kp)->p_uticks = (tu)->tu_uticks; \
+ (kp)->p_sticks = (tu)->tu_sticks; \
+ (kp)->p_iticks = (tu)->tu_iticks; \
(kp)->p_cpticks = (p)->p_cpticks; \
\
if (show_addresses) \