-/* $OpenBSD: kern_clockintr.c,v 1.69 2024/02/25 18:17:11 cheloha Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.70 2024/02/25 19:15:50 cheloha Exp $ */
/*
* Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
void clockintr_cancel_locked(struct clockintr *);
void clockintr_hardclock(struct clockrequest *, void *, void *);
void clockintr_schedule_locked(struct clockintr *, uint64_t);
-void clockqueue_intrclock_install(struct clockintr_queue *,
+void clockqueue_intrclock_install(struct clockqueue *,
const struct intrclock *);
-void clockqueue_intrclock_reprogram(struct clockintr_queue *);
-uint64_t clockqueue_next(const struct clockintr_queue *);
-void clockqueue_pend_delete(struct clockintr_queue *, struct clockintr *);
-void clockqueue_pend_insert(struct clockintr_queue *, struct clockintr *,
+void clockqueue_intrclock_reprogram(struct clockqueue *);
+uint64_t clockqueue_next(const struct clockqueue *);
+void clockqueue_pend_delete(struct clockqueue *, struct clockintr *);
+void clockqueue_pend_insert(struct clockqueue *, struct clockintr *,
uint64_t);
void intrclock_rearm(struct intrclock *, uint64_t);
void intrclock_trigger(struct intrclock *);
{
uint64_t multiplier = 0;
struct cpu_info *ci = curcpu();
- struct clockintr_queue *cq = &ci->ci_queue;
+ struct clockqueue *cq = &ci->ci_queue;
struct schedstate_percpu *spc = &ci->ci_schedstate;
int reset_cq_intrclock = 0;
if (ic != NULL)
clockqueue_intrclock_install(cq, ic);
- /* TODO: Remove this from struct clockintr_queue. */
+ /* TODO: Remove this from struct clockqueue. */
if (CPU_IS_PRIMARY(ci) && cq->cq_hardclock.cl_expiration == 0) {
clockintr_bind(&cq->cq_hardclock, ci, clockintr_hardclock,
NULL);
void
clockintr_trigger(void)
{
- struct clockintr_queue *cq = &curcpu()->ci_queue;
+ struct clockqueue *cq = &curcpu()->ci_queue;
KASSERT(ISSET(cq->cq_flags, CQ_INIT));
uint64_t lateness, run = 0, start;
struct cpu_info *ci = curcpu();
struct clockintr *cl;
- struct clockintr_queue *cq = &ci->ci_queue;
+ struct clockqueue *cq = &ci->ci_queue;
struct clockrequest *request = &cq->cq_request;
void *arg;
void (*func)(struct clockrequest *, void *, void *);
clockintr_advance(struct clockintr *cl, uint64_t period)
{
uint64_t count, expiration;
- struct clockintr_queue *cq = cl->cl_queue;
+ struct clockqueue *cq = cl->cl_queue;
mtx_enter(&cq->cq_mtx);
expiration = cl->cl_expiration;
uint64_t
clockrequest_advance(struct clockrequest *cr, uint64_t period)
{
- struct clockintr_queue *cq = cr->cr_queue;
+ struct clockqueue *cq = cr->cr_queue;
KASSERT(cr == &cq->cq_request);
uint32_t mask)
{
uint64_t count = 0;
- struct clockintr_queue *cq = cr->cr_queue;
+ struct clockqueue *cq = cr->cr_queue;
uint32_t off;
KASSERT(cr == &cq->cq_request);
void
clockintr_cancel(struct clockintr *cl)
{
- struct clockintr_queue *cq = cl->cl_queue;
+ struct clockqueue *cq = cl->cl_queue;
mtx_enter(&cq->cq_mtx);
clockintr_cancel_locked(cl);
void
clockintr_cancel_locked(struct clockintr *cl)
{
- struct clockintr_queue *cq = cl->cl_queue;
+ struct clockqueue *cq = cl->cl_queue;
int was_next;
MUTEX_ASSERT_LOCKED(&cq->cq_mtx);
clockintr_bind(struct clockintr *cl, struct cpu_info *ci,
void (*func)(struct clockrequest *, void *, void *), void *arg)
{
- struct clockintr_queue *cq = &ci->ci_queue;
+ struct clockqueue *cq = &ci->ci_queue;
splassert(IPL_NONE);
KASSERT(cl->cl_queue == NULL);
void
clockintr_unbind(struct clockintr *cl, uint32_t flags)
{
- struct clockintr_queue *cq = cl->cl_queue;
+ struct clockqueue *cq = cl->cl_queue;
KASSERT(!ISSET(flags, ~CL_FLAG_MASK));
void
clockintr_schedule(struct clockintr *cl, uint64_t expiration)
{
- struct clockintr_queue *cq = cl->cl_queue;
+ struct clockqueue *cq = cl->cl_queue;
mtx_enter(&cq->cq_mtx);
clockintr_schedule_locked(cl, expiration);
void
clockintr_schedule_locked(struct clockintr *cl, uint64_t expiration)
{
- struct clockintr_queue *cq = cl->cl_queue;
+ struct clockqueue *cq = cl->cl_queue;
MUTEX_ASSERT_LOCKED(&cq->cq_mtx);
clockintr_stagger(struct clockintr *cl, uint64_t period, uint32_t numer,
uint32_t denom)
{
- struct clockintr_queue *cq = cl->cl_queue;
+ struct clockqueue *cq = cl->cl_queue;
KASSERT(numer < denom);
}
void
-clockqueue_init(struct clockintr_queue *cq)
+clockqueue_init(struct clockqueue *cq)
{
if (ISSET(cq->cq_flags, CQ_INIT))
return;
}
void
-clockqueue_intrclock_install(struct clockintr_queue *cq,
+clockqueue_intrclock_install(struct clockqueue *cq,
const struct intrclock *ic)
{
mtx_enter(&cq->cq_mtx);
}
uint64_t
-clockqueue_next(const struct clockintr_queue *cq)
+clockqueue_next(const struct clockqueue *cq)
{
MUTEX_ASSERT_LOCKED(&cq->cq_mtx);
return TAILQ_FIRST(&cq->cq_pend)->cl_expiration;
}
void
-clockqueue_pend_delete(struct clockintr_queue *cq, struct clockintr *cl)
+clockqueue_pend_delete(struct clockqueue *cq, struct clockintr *cl)
{
MUTEX_ASSERT_LOCKED(&cq->cq_mtx);
KASSERT(ISSET(cl->cl_flags, CLST_PENDING));
}
void
-clockqueue_pend_insert(struct clockintr_queue *cq, struct clockintr *cl,
+clockqueue_pend_insert(struct clockqueue *cq, struct clockintr *cl,
uint64_t expiration)
{
struct clockintr *elm;
}
void
-clockqueue_intrclock_reprogram(struct clockintr_queue *cq)
+clockqueue_intrclock_reprogram(struct clockqueue *cq)
{
uint64_t exp, now;
void *newp, size_t newlen)
{
struct clockintr_stat sum, tmp;
- struct clockintr_queue *cq;
+ struct clockqueue *cq;
struct cpu_info *ci;
CPU_INFO_ITERATOR cii;
uint32_t gen;
db_show_clockintr_cpu(struct cpu_info *ci)
{
struct clockintr *elm;
- struct clockintr_queue *cq = &ci->ci_queue;
+ struct clockqueue *cq = &ci->ci_queue;
u_int cpu = CPU_INFO_UNIT(ci);
if (cq->cq_running != NULL)
-/* $OpenBSD: clockintr.h,v 1.28 2024/02/25 18:29:26 cheloha Exp $ */
+/* $OpenBSD: clockintr.h,v 1.29 2024/02/25 19:15:50 cheloha Exp $ */
/*
* Copyright (c) 2020-2024 Scott Cheloha <cheloha@openbsd.org>
*
#include <sys/mutex.h>
#include <sys/queue.h>
+struct clockqueue;
struct clockrequest;
-struct clockintr_queue;
struct cpu_info;
/*
TAILQ_ENTRY(clockintr) cl_plink; /* [m] cq_pend glue */
void *cl_arg; /* [I] argument */
void (*cl_func)(struct clockrequest *, void*, void*); /* [I] callback */
- struct clockintr_queue *cl_queue; /* [I] parent queue */
+ struct clockqueue *cl_queue; /* [I] parent queue */
uint32_t cl_flags; /* [m] CLST_* flags */
};
*/
struct clockrequest {
uint64_t cr_expiration; /* [o] copy of dispatch time */
- struct clockintr_queue *cr_queue; /* [I] enclosing queue */
+ struct clockqueue *cr_queue; /* [I] enclosing queue */
uint32_t cr_flags; /* [o] CR_* flags */
};
* m Per-queue mutex (cq_mtx).
* o Owned by a single CPU.
*/
-struct clockintr_queue {
+struct clockqueue {
struct clockrequest cq_request; /* [o] callback request object */
struct mutex cq_mtx; /* [a] per-queue mutex */
uint64_t cq_uptime; /* [o] cached uptime */
void clockintr_unbind(struct clockintr *, uint32_t);
uint64_t clockrequest_advance(struct clockrequest *, uint64_t);
uint64_t clockrequest_advance_random(struct clockrequest *, uint64_t, uint32_t);
-void clockqueue_init(struct clockintr_queue *);
+void clockqueue_init(struct clockqueue *);
int sysctl_clockintr(int *, u_int, void *, size_t *, void *, size_t);
#endif /* _KERNEL */
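
Illustrative sketch (not part of the patch): one way a periodic callback could be wired up against the interfaces shown above, using only the prototypes visible in this diff (clockintr_bind, clockintr_advance, clockrequest_advance). The names my_callback, my_handle, my_period_ns, and my_attach are hypothetical; the assumption that expirations and periods are expressed in nanoseconds of uptime, and that the header installs as <sys/clockintr.h>, is mine, not the patch's.

	#include <sys/types.h>
	#include <sys/clockintr.h>

	struct clockintr my_handle;		/* hypothetical handle */
	uint64_t my_period_ns = 1000000;	/* hypothetical 1 ms period */

	void
	my_callback(struct clockrequest *cr, void *frame, void *arg)
	{
		uint64_t count;

		/*
		 * Reschedule one period out; the return value is assumed
		 * to be the number of periods that have elapsed.
		 */
		count = clockrequest_advance(cr, my_period_ns);

		/* ... periodic work using "count" would go here ... */
	}

	void
	my_attach(struct cpu_info *ci)
	{
		/*
		 * Bind the handle to ci's clockqueue and arm the first
		 * expiration roughly one period from now.  clockintr_bind()
		 * asserts IPL_NONE, so this must not run from interrupt
		 * context.
		 */
		clockintr_bind(&my_handle, ci, my_callback, NULL);
		clockintr_advance(&my_handle, my_period_ns);
	}

After the rename in this commit, such client code is unaffected: it only holds a struct clockintr, and the enclosing queue type (now struct clockqueue) stays behind the clockintr_*/clockrequest_* interfaces.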