-/* $OpenBSD: kern_clockintr.c,v 1.3 2023/02/26 23:00:42 cheloha Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.4 2023/03/09 03:50:38 cheloha Exp $ */
/*
* Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
+#include <sys/queue.h>
#include <sys/stdint.h>
#include <sys/sysctl.h>
#include <sys/time.h>
void clockintr_schedule(struct clockintr *, uint64_t);
void clockintr_statclock(struct clockintr *, void *);
void clockintr_statvar_init(int, uint32_t *, uint32_t *, uint32_t *);
+uint64_t clockqueue_next(const struct clockintr_queue *);
uint64_t nsec_advance(uint64_t *, uint64_t, uint64_t);
/*
KASSERT(ISSET(clockintr_flags, CL_INIT));
if (!ISSET(cq->cq_flags, CL_CPU_INIT)) {
- cq->cq_next = 0;
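+		/* Set up this CPU's established and pending event lists. */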
+ TAILQ_INIT(&cq->cq_est);
+ TAILQ_INIT(&cq->cq_pend);
cq->cq_hardclock = clockintr_establish(cq, clockintr_hardclock);
if (cq->cq_hardclock == NULL)
panic("%s: failed to establish hardclock", __func__);
{
uint64_t lateness, run = 0, start;
struct cpu_info *ci = curcpu();
+ struct clockintr *cl;
struct clockintr_queue *cq = &ci->ci_queue;
u_int ogen;
KASSERT(ISSET(cq->cq_flags, CL_CPU_INIT));
/*
- * If we arrived too early we have nothing to do.
+ * If nothing is scheduled or we arrived too early, we have
+ * nothing to do.
*/
start = nsecuptime();
cq->cq_uptime = start;
- if (cq->cq_uptime < cq->cq_next)
- goto done;
- lateness = start - cq->cq_next;
+ if (TAILQ_EMPTY(&cq->cq_pend))
+ goto stats;
+ if (cq->cq_uptime < clockqueue_next(cq))
+ goto rearm;
+ lateness = start - clockqueue_next(cq);
/*
* Dispatch expired events.
*/
-again:
- /* hardclock */
- if (cq->cq_hardclock->cl_expiration <= cq->cq_uptime) {
- cq->cq_hardclock->cl_func(cq->cq_hardclock, frame);
- run++;
- }
-
- /* statclock */
- if (cq->cq_statclock->cl_expiration <= cq->cq_uptime) {
- cq->cq_statclock->cl_func(cq->cq_statclock, frame);
- run++;
- }
-
- /* schedclock */
- if (ISSET(clockintr_flags, CL_SCHEDCLOCK)) {
- if (cq->cq_schedclock->cl_expiration <= cq->cq_uptime) {
- cq->cq_schedclock->cl_func(cq->cq_schedclock, frame);
- run++;
+ for (;;) {
+ cl = TAILQ_FIRST(&cq->cq_pend);
+ if (cl == NULL)
+ break;
+ if (cq->cq_uptime < cl->cl_expiration) {
+ /* Double-check the time before giving up. */
+ cq->cq_uptime = nsecuptime();
+ if (cq->cq_uptime < cl->cl_expiration)
+ break;
}
- }
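+		/* The event has expired: dequeue it and mark it running. */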
+ TAILQ_REMOVE(&cq->cq_pend, cl, cl_plink);
+ CLR(cl->cl_flags, CLST_PENDING);
+		cq->cq_running = cl;
 
- /* Run the dispatch again if the next event has already expired. */
- cq->cq_next = cq->cq_hardclock->cl_expiration;
- if (cq->cq_statclock->cl_expiration < cq->cq_next)
- cq->cq_next = cq->cq_statclock->cl_expiration;
- if (ISSET(clockintr_flags, CL_SCHEDCLOCK)) {
- if (cq->cq_schedclock->cl_expiration < cq->cq_next)
- cq->cq_next = cq->cq_schedclock->cl_expiration;
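+		/* Run the callback; it may reschedule the event. */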
+ cl->cl_func(cl, frame);
+
+ cq->cq_running = NULL;
+ run++;
}
- cq->cq_uptime = nsecuptime();
- if (cq->cq_next <= cq->cq_uptime)
- goto again;
/*
* Dispatch complete.
*/
-done:
+rearm:
/* Rearm the interrupt clock if we have one. */
- if (ISSET(cq->cq_flags, CL_CPU_INTRCLOCK))
- intrclock_rearm(&cq->cq_intrclock, cq->cq_next - cq->cq_uptime);
-
+ if (ISSET(cq->cq_flags, CL_CPU_INTRCLOCK)) {
+ if (!TAILQ_EMPTY(&cq->cq_pend)) {
+ intrclock_rearm(&cq->cq_intrclock,
+ clockqueue_next(cq) - cq->cq_uptime);
+ }
+ }
+stats:
/* Update our stats. */
ogen = cq->cq_gen;
cq->cq_gen = 0;
cq->cq_stat.cs_lateness += lateness;
cq->cq_stat.cs_prompt++;
cq->cq_stat.cs_run += run;
- } else {
+ } else if (!TAILQ_EMPTY(&cq->cq_pend)) {
cq->cq_stat.cs_early++;
- cq->cq_stat.cs_earliness += cq->cq_next - cq->cq_uptime;
- }
+ cq->cq_stat.cs_earliness += clockqueue_next(cq) - cq->cq_uptime;
+ } else
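+		/* Nothing ran and nothing is pending: a spurious dispatch. */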
+ cq->cq_stat.cs_spurious++;
membar_producer();
cq->cq_gen = MAX(1, ogen + 1);
uint64_t
clockintr_advance(struct clockintr *cl, uint64_t period)
{
- return nsec_advance(&cl->cl_expiration, period,
- cl->cl_queue->cq_uptime);
+ uint64_t count, expiration;
+
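+	/* Advance a local copy, then reschedule the event at the new time. */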
+ expiration = cl->cl_expiration;
+ count = nsec_advance(&expiration, period, cl->cl_queue->cq_uptime);
+ clockintr_schedule(cl, expiration);
+ return count;
}
struct clockintr *
return NULL;
cl->cl_func = func;
cl->cl_queue = cq;
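+	/* Record every established event on the cq_est list. */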
+ TAILQ_INSERT_TAIL(&cq->cq_est, cl, cl_elink);
return cl;
}
void
clockintr_schedule(struct clockintr *cl, uint64_t expiration)
{
+ struct clockintr *elm;
+ struct clockintr_queue *cq = cl->cl_queue;
+
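+	/* If the event is already pending, dequeue it first. */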
+ if (ISSET(cl->cl_flags, CLST_PENDING)) {
+ TAILQ_REMOVE(&cq->cq_pend, cl, cl_plink);
+ CLR(cl->cl_flags, CLST_PENDING);
+ }
+
cl->cl_expiration = expiration;
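+	/* Keep cq_pend sorted: insert before the first later expiration. */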
+ TAILQ_FOREACH(elm, &cq->cq_pend, cl_plink) {
+ if (cl->cl_expiration < elm->cl_expiration)
+ break;
+ }
+ if (elm == NULL)
+ TAILQ_INSERT_TAIL(&cq->cq_pend, cl, cl_plink);
+ else
+ TAILQ_INSERT_BEFORE(elm, cl, cl_plink);
+ SET(cl->cl_flags, CLST_PENDING);
}
/*
statclock(frame);
}
+uint64_t
+clockqueue_next(const struct clockintr_queue *cq)
+{
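+	/* The caller must ensure cq_pend is not empty. */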
+ return TAILQ_FIRST(&cq->cq_pend)->cl_expiration;
+}
+
/*
* Advance *next in increments of period until it exceeds now.
* Returns the number of increments *next was advanced.
sum.cs_lateness += tmp.cs_lateness;
sum.cs_prompt += tmp.cs_prompt;
sum.cs_run += tmp.cs_run;
+ sum.cs_spurious += tmp.cs_spurious;
}
return sysctl_rdstruct(oldp, oldlenp, newp, &sum, sizeof sum);
default:
void
db_show_clockintr_cpu(struct cpu_info *ci)
{
+ struct clockintr *elm;
struct clockintr_queue *cq = &ci->ci_queue;
u_int cpu = CPU_INFO_UNIT(ci);
- db_show_clockintr(cq->cq_hardclock, cpu);
- db_show_clockintr(cq->cq_statclock, cpu);
- if (cq->cq_schedclock != NULL)
- db_show_clockintr(cq->cq_schedclock, cpu);
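+	/* Show the running event first, then pending, then idle events. */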
+ if (cq->cq_running != NULL)
+ db_show_clockintr(cq->cq_running, cpu);
+ TAILQ_FOREACH(elm, &cq->cq_pend, cl_plink)
+ db_show_clockintr(elm, cpu);
+ TAILQ_FOREACH(elm, &cq->cq_est, cl_elink) {
+ if (!ISSET(elm->cl_flags, CLST_PENDING))
+ db_show_clockintr(elm, cpu);
+ }
}
void
-/* $OpenBSD: clockintr.h,v 1.2 2023/02/26 23:00:42 cheloha Exp $ */
+/* $OpenBSD: clockintr.h,v 1.3 2023/03/09 03:50:38 cheloha Exp $ */
/*
* Copyright (c) 2020-2022 Scott Cheloha <cheloha@openbsd.org>
*
uint64_t cs_lateness; /* total lateness (ns) */
uint64_t cs_prompt; /* number of prompt dispatch calls */
uint64_t cs_run; /* number of events dispatched */
+ uint64_t cs_spurious; /* number of spurious dispatch calls */
};
#ifdef _KERNEL
+#include <sys/queue.h>
+
/*
* Platform API
*/
struct clockintr_queue;
struct clockintr {
uint64_t cl_expiration; /* [o] dispatch time */
+ TAILQ_ENTRY(clockintr) cl_elink; /* [o] cq_est glue */
+ TAILQ_ENTRY(clockintr) cl_plink; /* [o] cq_pend glue */
void (*cl_func)(struct clockintr *, void *); /* [I] callback */
struct clockintr_queue *cl_queue; /* [I] parent queue */
+ u_int cl_flags; /* [o] CLST_* flags */
};
+#define CLST_PENDING 0x00000001 /* scheduled to run */
+
/*
* Per-CPU clock interrupt state.
*
*/
struct clockintr_queue {
uint64_t cq_uptime; /* [o] cached uptime */
- uint64_t cq_next; /* [o] next event expiration */
+ TAILQ_HEAD(, clockintr) cq_est; /* [o] established clockintr list */
+ TAILQ_HEAD(, clockintr) cq_pend;/* [o] pending clockintr list */
+ struct clockintr *cq_running; /* [o] running clockintr */
struct clockintr *cq_hardclock; /* [o] hardclock handle */
struct clockintr *cq_schedclock;/* [o] schedclock handle, if any */
struct clockintr *cq_statclock; /* [o] statclock handle */