From 4c63d5a8fde783b38668776a2ac0270a41fb0eeb Mon Sep 17 00:00:00 2001 From: cheloha Date: Tue, 6 Dec 2022 00:40:09 +0000 Subject: [PATCH] hppa: switch to clockintr - Remove hppa-specific clock interrupt scheduling bits from cpu_info. - Rename cpu_hardclock() to itmr_intr(); it doesn't exclusively run hardclock(9) anymore. - Wire up itmr_intrclock. hppa now has a randomized statclock(), stathz = hz. Patch help, testing, and review from kettenis@ (B2000) and miod@ (C3650). MP testing from guenther@ (dual-core J6700). ok mlarkin@ kettenis@, "Ship it?" guenther@ --- sys/arch/hppa/dev/clock.c | 177 ++++++++++++++++++++------------- sys/arch/hppa/dev/cpu.c | 13 +-- sys/arch/hppa/include/_types.h | 4 +- sys/arch/hppa/include/cpu.h | 7 +- 4 files changed, 120 insertions(+), 81 deletions(-) diff --git a/sys/arch/hppa/dev/clock.c b/sys/arch/hppa/dev/clock.c index b3ed1802f7b..3af77b25f37 100644 --- a/sys/arch/hppa/dev/clock.c +++ b/sys/arch/hppa/dev/clock.c @@ -1,4 +1,4 @@ -/* $OpenBSD: clock.c,v 1.32 2021/02/23 04:44:30 cheloha Exp $ */ +/* $OpenBSD: clock.c,v 1.33 2022/12/06 00:40:09 cheloha Exp $ */ /* * Copyright (c) 1998-2003 Michael Shalayeff @@ -29,6 +29,8 @@ #include #include #include +#include +#include #include #include @@ -41,10 +43,15 @@ #include #include -u_long cpu_hzticks; +uint64_t itmr_nsec_cycle_ratio; +uint64_t itmr_nsec_max; -int cpu_hardclock(void *); u_int itmr_get_timecount(struct timecounter *); +int itmr_intr(void *); +void itmr_rearm(void *, uint64_t); +void itmr_trigger(void); +void itmr_trigger_masked(void); +void itmr_trigger_wrapper(void *); struct timecounter itmr_timecounter = { .tc_get_timecount = itmr_get_timecount, @@ -57,6 +64,11 @@ struct timecounter itmr_timecounter = { .tc_user = 0, }; +const struct intrclock itmr_intrclock = { + .ic_rearm = itmr_rearm, + .ic_trigger = itmr_trigger_wrapper +}; + extern todr_chip_handle_t todr_handle; struct todr_chip_handle pdc_todr; @@ -94,88 +106,43 @@ pdc_settime(struct todr_chip_handle *handle, struct timeval *tv) void cpu_initclocks(void) { - struct cpu_info *ci = curcpu(); - u_long __itmr; + uint64_t itmr_freq = PAGE0->mem_10msec * 100; pdc_todr.todr_gettime = pdc_gettime; pdc_todr.todr_settime = pdc_settime; todr_handle = &pdc_todr; - cpu_hzticks = (PAGE0->mem_10msec * 100) / hz; - - itmr_timecounter.tc_frequency = PAGE0->mem_10msec * 100; + itmr_timecounter.tc_frequency = itmr_freq; tc_init(&itmr_timecounter); - mfctl(CR_ITMR, __itmr); - ci->ci_itmr = __itmr; - __itmr += cpu_hzticks; - mtctl(__itmr, CR_ITMR); -} + stathz = hz; + profhz = stathz * 10; + clockintr_init(CL_RNDSTAT); -int -cpu_hardclock(void *v) -{ - struct cpu_info *ci = curcpu(); - u_long __itmr, delta, eta; - int wrap; - register_t eiem; + itmr_nsec_cycle_ratio = itmr_freq * (1ULL << 32) / 1000000000; + itmr_nsec_max = UINT64_MAX / itmr_nsec_cycle_ratio; - /* - * Invoke hardclock as many times as there has been cpu_hzticks - * ticks since the last interrupt. - */ - for (;;) { - mfctl(CR_ITMR, __itmr); - delta = __itmr - ci->ci_itmr; - if (delta >= cpu_hzticks) { - hardclock(v); - ci->ci_itmr += cpu_hzticks; - } else - break; - } + cpu_startclock(); +} - /* - * Program the next clock interrupt, making sure it will - * indeed happen in the future. This is done with interrupts - * disabled to avoid a possible race. 
- */ - eta = ci->ci_itmr + cpu_hzticks; - wrap = eta < ci->ci_itmr; /* watch out for a wraparound */ - __asm volatile("mfctl %%cr15, %0": "=r" (eiem)); - __asm volatile("mtctl %r0, %cr15"); - mtctl(eta, CR_ITMR); - mfctl(CR_ITMR, __itmr); - /* - * If we were close enough to the next tick interrupt - * value, by the time we have programmed itmr, it might - * have passed the value, which would cause a complete - * cycle until the next interrupt occurs. On slow - * models, this would be a disaster (a complete cycle - * taking over two minutes on a 715/33). - * - * We expect that it will only be necessary to postpone - * the interrupt once. Thus, there are two cases: - * - We are expecting a wraparound: eta < cpu_itmr. - * itmr is in tracks if either >= cpu_itmr or < eta. - * - We are not wrapping: eta > cpu_itmr. - * itmr is in tracks if >= cpu_itmr and < eta (we need - * to keep the >= cpu_itmr test because itmr might wrap - * before eta does). - */ - if ((wrap && !(eta > __itmr || __itmr >= ci->ci_itmr)) || - (!wrap && !(eta > __itmr && __itmr >= ci->ci_itmr))) { - eta += cpu_hzticks; - mtctl(eta, CR_ITMR); - } - __asm volatile("mtctl %0, %%cr15":: "r" (eiem)); +void +cpu_startclock(void) +{ + clockintr_cpu_init(&itmr_intrclock); + clockintr_trigger(); +} +int +itmr_intr(void *v) +{ + clockintr_dispatch(v); return (1); } void setstatclockrate(int newhz) { - /* nothing we can do */ + clockintr_setstatclockrate(newhz); } u_int @@ -186,3 +153,77 @@ itmr_get_timecount(struct timecounter *tc) mfctl(CR_ITMR, __itmr); return (__itmr); } + +/* + * Program the next clock interrupt, making sure it will + * indeed happen in the future. This is done with interrupts + * disabled to avoid a possible race. + */ +void +itmr_rearm(void *unused, uint64_t nsecs) +{ + uint32_t cycles, t0, t1, target; + register_t eiem, eirr; + + if (nsecs > itmr_nsec_max) + nsecs = itmr_nsec_max; + cycles = (nsecs * itmr_nsec_cycle_ratio) >> 32; + + eiem = hppa_intr_disable(); + mfctl(CR_ITMR, t0); + target = t0 + cycles; + mtctl(target, CR_ITMR); + mfctl(CR_ITMR, t1); + + /* + * If the interrupt isn't already pending we need to check if + * we missed. In general, we are checking whether ITMR had + * already passed the target value when we wrote the register. + * There are two cases. + * + * 1. If (t0 + cycles) did not overflow, we want t1 to be between + * t0 and target. If t0 <= t1 < target, we didn't miss. + * + * 2. If (t0 + cycles) overflowed, either t0 <= t1 or t1 < target + * are sufficient to show we didn't miss. + * + * Only try once. Fall back to itmr_trigger_masked() if we miss. + */ + mfctl(CR_EIRR, eirr); + if (!ISSET(eirr, 1U << 31)) { + if (t0 <= target) { + if (target <= t1 || t1 < t0) + itmr_trigger_masked(); + } else { + if (target <= t1 && t1 < t0) + itmr_trigger_masked(); + } + } + hppa_intr_enable(eiem); +} + +void +itmr_trigger(void) +{ + register_t eiem; + + eiem = hppa_intr_disable(); + itmr_trigger_masked(); + hppa_intr_enable(eiem); +} + +/* Trigger our own ITMR interrupt by setting EIR{0}. 
*/ +void +itmr_trigger_masked(void) +{ + struct iomod *cpu = (struct iomod *)curcpu()->ci_hpa; + + cpu->io_eir = 0; + __asm volatile ("sync" ::: "memory"); +} + +void +itmr_trigger_wrapper(void *unused) +{ + itmr_trigger(); +} diff --git a/sys/arch/hppa/dev/cpu.c b/sys/arch/hppa/dev/cpu.c index f6abc9dbd24..91dd7dd263c 100644 --- a/sys/arch/hppa/dev/cpu.c +++ b/sys/arch/hppa/dev/cpu.c @@ -1,4 +1,4 @@ -/* $OpenBSD: cpu.c,v 1.43 2022/03/13 08:04:38 mpi Exp $ */ +/* $OpenBSD: cpu.c,v 1.44 2022/12/06 00:40:09 cheloha Exp $ */ /* * Copyright (c) 1998-2003 Michael Shalayeff @@ -89,7 +89,7 @@ cpuattach(struct device *parent, struct device *self, void *aux) extern u_int cpu_ticksnum, cpu_ticksdenom; extern u_int fpu_enable; /* clock.c */ - extern int cpu_hardclock(void *); + extern int itmr_intr(void *); /* ipi.c */ extern int hppa_ipi_intr(void *); @@ -173,7 +173,7 @@ cpuattach(struct device *parent, struct device *self, void *aux) printf(", %u/%u D/I BTLBs", pdc_btlb.finfo.num_i, pdc_btlb.finfo.num_d); - cpu_intr_establish(IPL_CLOCK, 31, cpu_hardclock, NULL, "clock"); + cpu_intr_establish(IPL_CLOCK, 31, itmr_intr, NULL, "clock"); #ifdef MULTIPROCESSOR cpu_intr_establish(IPL_IPI, 30, hppa_ipi_intr, NULL, "ipi"); #endif @@ -242,8 +242,6 @@ void cpu_hatch(void) { struct cpu_info *ci = curcpu(); - extern u_long cpu_hzticks; - u_long itmr; int s; /* Initialise IPIs. */ @@ -251,11 +249,8 @@ cpu_hatch(void) /* Initialise clock. */ mtctl((1U << 31), CR_EIRR); - mfctl(CR_ITMR, itmr); - ci->ci_itmr = itmr; - itmr += cpu_hzticks; - mtctl(itmr, CR_ITMR); ci->ci_mask |= (1U << 31); + cpu_startclock(); /* Enable interrupts. */ mtctl(ci->ci_mask, CR_EIEM); diff --git a/sys/arch/hppa/include/_types.h b/sys/arch/hppa/include/_types.h index 2cdb8923995..80d46e03d95 100644 --- a/sys/arch/hppa/include/_types.h +++ b/sys/arch/hppa/include/_types.h @@ -1,4 +1,4 @@ -/* $OpenBSD: _types.h,v 1.26 2018/03/05 01:15:25 deraadt Exp $ */ +/* $OpenBSD: _types.h,v 1.27 2022/12/06 00:40:09 cheloha Exp $ */ /*- * Copyright (c) 1990, 1993 @@ -35,6 +35,8 @@ #ifndef _MACHINE__TYPES_H_ #define _MACHINE__TYPES_H_ +#define __HAVE_CLOCKINTR + #if defined(_KERNEL) typedef struct label_t { long val[19]; diff --git a/sys/arch/hppa/include/cpu.h b/sys/arch/hppa/include/cpu.h index 34a0d21c801..c3fd832a235 100644 --- a/sys/arch/hppa/include/cpu.h +++ b/sys/arch/hppa/include/cpu.h @@ -1,4 +1,4 @@ -/* $OpenBSD: cpu.h,v 1.96 2022/10/25 15:15:38 guenther Exp $ */ +/* $OpenBSD: cpu.h,v 1.97 2022/12/06 00:40:09 cheloha Exp $ */ /* * Copyright (c) 2000-2004 Michael Shalayeff @@ -67,6 +67,7 @@ #ifndef _LOCORE #ifdef _KERNEL +#include #include #include #include @@ -101,7 +102,6 @@ struct cpu_info { volatile u_long ci_ipending; volatile int ci_in_intr; int ci_want_resched; - u_long ci_itmr; volatile u_long ci_ipi; /* IPIs pending. */ struct mutex ci_ipi_mtx; @@ -114,7 +114,7 @@ struct cpu_info { #ifdef GPROF struct gmonparam *ci_gmon; #endif - + struct clockintr_queue ci_queue; char ci_panicbuf[512]; } __attribute__((__aligned__(64))); @@ -241,6 +241,7 @@ int copy_on_fault(void); void proc_trampoline(void); int cpu_dumpsize(void); int cpu_dump(void); +void cpu_startclock(void); static inline unsigned int cpu_rnd_messybits(void) -- 2.20.1
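
Note (not part of the patch): itmr_rearm() converts the requested nanosecond
interval into ITMR cycles with a 32.32 fixed-point ratio and then checks
whether the free-running counter raced past the freshly written comparison
value, in which case the interrupt is triggered in software instead.  The
standalone sketch below mirrors that arithmetic so it can be tried on any
host.  The 250 MHz ITMR frequency is an assumed example value; on real
hardware the ratio is derived from PAGE0->mem_10msec at boot.

#include <stdint.h>
#include <stdio.h>

#define ITMR_FREQ	250000000ULL		/* assumed 250 MHz ITMR */

static uint64_t nsec_cycle_ratio;	/* cycles per nsec, 32.32 fixed point */

/* Convert a nanosecond interval to ITMR cycles, as itmr_rearm() does. */
static uint32_t
nsecs_to_cycles(uint64_t nsecs)
{
	uint64_t nsec_max = UINT64_MAX / nsec_cycle_ratio;

	if (nsecs > nsec_max)
		nsecs = nsec_max;
	return (nsecs * nsec_cycle_ratio) >> 32;
}

/*
 * The miss test from itmr_rearm(): t0 is ITMR read before writing the
 * new comparison value "target", t1 is ITMR read afterwards.  Returns
 * nonzero if the target had already passed by the time it was written,
 * i.e. the hardware comparison will not fire and a software trigger is
 * needed.
 */
static int
missed(uint32_t t0, uint32_t target, uint32_t t1)
{
	if (t0 <= target)			/* target did not wrap */
		return (target <= t1 || t1 < t0);
	return (target <= t1 && t1 < t0);	/* target wrapped past zero */
}

int
main(void)
{
	nsec_cycle_ratio = ITMR_FREQ * (1ULL << 32) / 1000000000;

	/* 1ms at 250 MHz is 250000 cycles. */
	printf("1ms -> %u cycles\n", (unsigned)nsecs_to_cycles(1000000));

	/* Counter raced past the target before it was written: miss. */
	printf("raced past target: %d\n", missed(100, 110, 150));

	/* Target still comfortably ahead of the counter: no miss. */
	printf("target still ahead: %d\n", missed(100, 5000, 150));

	return (0);
}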