-/* $OpenBSD: kern_clockintr.c,v 1.37 2023/09/06 02:09:58 cheloha Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.38 2023/09/06 02:33:18 cheloha Exp $ */
/*
* Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
*
*	I	Immutable after initialization.
*/
-u_int clockintr_flags; /* [I] global state + behavior flags */
+uint32_t clockintr_flags; /* [I] global state + behavior flags */
uint32_t hardclock_period; /* [I] hardclock period (ns) */
uint32_t statclock_avg; /* [I] average statclock period (ns) */
uint32_t statclock_min; /* [I] minimum statclock period (ns) */
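The substance of the diff is the u_int to uint32_t swap above and throughout the rest of the patch. It is type hygiene rather than a behavior change: unsigned int is 32 bits wide on every platform OpenBSD supports, so the two types have the same representation. A minimal compile-time check of that assumption (illustrative, not part of the commit) would be:

#include <stdint.h>

/*
 * Illustration only, not part of the diff: the cleanup is a no-op at
 * the ABI level only because unsigned int (u_int) is 32 bits wide on
 * every platform OpenBSD supports.
 */
_Static_assert(sizeof(unsigned int) == sizeof(uint32_t),
    "unsigned int is expected to be 32 bits wide");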
* Initialize global state. Set flags and compute intervals.
*/
void
-clockintr_init(u_int flags)
+clockintr_init(uint32_t flags)
{
uint32_t half_avg, var;
struct cpu_info *ci = curcpu();
struct clockintr *cl;
struct clockintr_queue *cq = &ci->ci_queue;
- u_int ogen;
+ uint32_t ogen;
if (cq->cq_dispatch != 0)
panic("%s: recursive dispatch", __func__);
}
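The half_avg and var locals declared above feed the statclock interval computation mentioned in the comment. A hedged sketch of the general technique follows; the exact bounds in the committed clockintr_init() may differ:

#include <stdint.h>

/*
 * Sketch only, not the committed code: derive a randomized statclock
 * range from the average period, in the spirit of the half_avg/var
 * locals declared above.  var is the largest power of two no greater
 * than half the average; dispatch would then draw each period
 * uniformly from [*min, *min + *mask].  Assumes avg >= 2.
 */
static void
example_statclock_range(uint32_t avg, uint32_t *min, uint32_t *mask)
{
	uint32_t half_avg, var;

	half_avg = avg / 2;
	for (var = 1U << 31; var > half_avg; var /= 2)
		continue;
	*min = avg - var / 2;
	*mask = var - 1;
}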
void
-clockintr_stagger(struct clockintr *cl, uint64_t period, u_int n, u_int count)
+clockintr_stagger(struct clockintr *cl, uint64_t period, uint32_t n,
+ uint32_t count)
{
struct clockintr_queue *cq = cl->cl_queue;
struct clockintr_queue *cq;
struct cpu_info *ci;
CPU_INFO_ITERATOR cii;
- u_int gen;
+ uint32_t gen;
if (namelen != 1)
return ENOTDIR;
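The uint32_t gen local above belongs to sysctl_clockintr(), which snapshots per-queue statistics without taking a lock against the dispatcher. A sketch of that generation-counter read, using the cq_gen and cq_stat members shown in the header below, with illustrative barriers and retry logic rather than a copy of the committed handler:

/*
 * Sketch of the reader side of the cq_gen protocol.  Needs
 * <sys/atomic.h> for membar_consumer() and <sys/clockintr.h> for the
 * types.
 */
void
example_stat_snapshot(struct clockintr_queue *cq, struct clockintr_stat *out)
{
	uint32_t gen;

	do {
		gen = cq->cq_gen;
		membar_consumer();	/* order the gen load before the copy */
		*out = cq->cq_stat;
		membar_consumer();	/* order the copy before the recheck */
	} while (gen == 0 || gen != cq->cq_gen);
}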
-/* $OpenBSD: clockintr.h,v 1.11 2023/09/06 02:09:58 cheloha Exp $ */
+/* $OpenBSD: clockintr.h,v 1.12 2023/09/06 02:33:18 cheloha Exp $ */
/*
* Copyright (c) 2020-2022 Scott Cheloha <cheloha@openbsd.org>
*
TAILQ_ENTRY(clockintr) cl_plink; /* [m] cq_pend glue */
void (*cl_func)(struct clockintr *, void *); /* [I] callback */
struct clockintr_queue *cl_queue; /* [I] parent queue */
- u_int cl_flags; /* [m] CLST_* flags */
+ uint32_t cl_flags; /* [m] CLST_* flags */
};
#define CLST_PENDING 0x00000001 /* scheduled to run */
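cl_flags carries the [m] annotation, so CLST_PENDING is only set or cleared under the owning queue's mutex. A sketch of that discipline, assuming (it is not visible in this excerpt) that the queue's mutex member is named cq_mtx:

/*
 * Sketch only.  cl_flags is annotated [m]: it changes under the owning
 * queue's mutex.  The member name cq_mtx is an assumption; SET/CLR/ISSET
 * are the <sys/param.h> bit helpers.
 */
void
example_mark_pending(struct clockintr *cl)
{
	struct clockintr_queue *cq = cl->cl_queue;

	mtx_enter(&cq->cq_mtx);
	if (!ISSET(cl->cl_flags, CLST_PENDING))
		SET(cl->cl_flags, CLST_PENDING);
	mtx_leave(&cq->cq_mtx);
}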
struct clockintr *cq_statclock; /* [o] statclock handle */
struct intrclock cq_intrclock; /* [I] local interrupt clock */
struct clockintr_stat cq_stat; /* [o] dispatch statistics */
- volatile u_int cq_gen; /* [o] cq_stat update generation */
- volatile u_int cq_dispatch; /* [o] dispatch is running */
- u_int cq_flags; /* [I] CQ_* flags; see below */
+ volatile uint32_t cq_gen; /* [o] cq_stat update generation */
+ volatile uint32_t cq_dispatch; /* [o] dispatch is running */
+ uint32_t cq_flags; /* [I] CQ_* flags; see below */
};
#define CQ_INIT 0x00000001 /* clockintr_cpu_init() done */
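cq_gen and cq_dispatch are marked [o], owned by the dispatching CPU: cq_dispatch backs the recursive-dispatch panic in the .c fragment above, and cq_gen is parked at zero while the dispatcher rewrites cq_stat. A sketch of that writer side, mirroring the reader sketch earlier and again illustrative rather than the committed dispatch code:

/*
 * Sketch of the writer side of the cq_gen protocol: park cq_gen at 0
 * while cq_stat is being rewritten, then publish a new nonzero
 * generation so a racing reader knows to retry.  Needs <sys/atomic.h>
 * for membar_producer() and <sys/param.h> for MAX().
 */
void
example_stat_publish(struct clockintr_queue *cq, const struct clockintr_stat *st)
{
	uint32_t ogen;

	ogen = cq->cq_gen;
	cq->cq_gen = 0;			/* readers seeing 0 must retry */
	membar_producer();		/* order the zero before the stat stores */
	cq->cq_stat = *st;
	membar_producer();		/* order the stat stores before the new gen */
	cq->cq_gen = MAX(1, ogen + 1);	/* never publish 0 as a valid gen */
}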
void clockintr_cpu_init(const struct intrclock *);
int clockintr_dispatch(void *);
-void clockintr_init(u_int);
+void clockintr_init(uint32_t);
void clockintr_trigger(void);
/*
void clockintr_cancel(struct clockintr *);
struct clockintr *clockintr_establish(struct cpu_info *,
void (*)(struct clockintr *, void *));
-void clockintr_stagger(struct clockintr *, uint64_t, u_int, u_int);
+void clockintr_stagger(struct clockintr *, uint64_t, uint32_t, uint32_t);
void clockqueue_init(struct clockintr_queue *);
int sysctl_clockintr(int *, u_int, void *, size_t *, void *, size_t);
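The prototypes above are the consumer-facing API touched by this commit. A hypothetical consumer, shown only to illustrate the new uint32_t stagger parameters; the handler, period constant, and attach routine are invented, and only the clockintr_establish() and clockintr_stagger() signatures come from the header:

/*
 * Hypothetical consumer.  EXAMPLE_PERIOD_NSEC, example_handler(), and
 * example_attach() are made up for the illustration.
 */
#define EXAMPLE_PERIOD_NSEC	10000000ULL	/* 10 ms, made up */

void	example_handler(struct clockintr *, void *);

void
example_attach(struct cpu_info *ci, uint32_t cpu_index, uint32_t ncpus)
{
	struct clockintr *cl;

	cl = clockintr_establish(ci, example_handler);
	if (cl == NULL)
		panic("%s: clockintr_establish failed", __func__);

	/*
	 * Offset this CPU's expiration point cpu_index/ncpus of the way
	 * into the period so the callbacks do not all fire at once.
	 */
	clockintr_stagger(cl, EXAMPLE_PERIOD_NSEC, cpu_index, ncpus);
}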