From: cheloha
Date: Mon, 22 Aug 2022 00:35:06 +0000 (+0000)
Subject: mips64, octeon, loongson: trigger deferred clock interrupts from splx(9)
X-Git-Url: http://artulab.com/gitweb/?a=commitdiff_plain;h=85caa4b9a5f41b995dcf54ce01413cd20e8f7dcd;p=openbsd

mips64, octeon, loongson: trigger deferred clock interrupts from splx(9)

As with powerpc, powerpc64, and riscv64, on mips64 platforms we need
to isolate the clock interrupt schedule from the MD clock interrupt
code.  To do this, we need to stop deferring clock interrupt work
until the next tick and instead defer the work until we logically
unmask the clock interrupt from splx(9).

Add a boolean (ci_clock_deferred) to the cpu_info struct to note
whether we need to trigger the clock interrupt by hand, and then do
so from splx(9) by calling md_triggerclock().

Currently md_triggerclock is only ever set to cp0_trigger_int5().
The routine takes great care to ensure that INT5 has fired or will
fire before returning.

There are some loongson machines that use glxclk instead of CP0.
They can be switched to use CP0 later.

With input and advice from visa@ and miod@.  Compiled and extensively
tested by visa@ and miod@ on various octeon and loongson machines.
No issues seen on octeon machines.  miod@ saw some odd things on
loongson, but suggests that all issues are probably unrelated to this
patch.

Link: https://marc.info/?l=openbsd-tech&m=165929192702632&w=2

ok visa@, miod@
---

diff --git a/sys/arch/loongson/dev/bonito.c b/sys/arch/loongson/dev/bonito.c
index c9d17e3879c..c75ebe342e8 100644
--- a/sys/arch/loongson/dev/bonito.c
+++ b/sys/arch/loongson/dev/bonito.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: bonito.c,v 1.35 2021/03/11 11:16:57 jsg Exp $ */
+/* $OpenBSD: bonito.c,v 1.36 2022/08/22 00:35:07 cheloha Exp $ */
 /* $NetBSD: bonito_mainbus.c,v 1.11 2008/04/28 20:23:10 martin Exp $ */
 /* $NetBSD: bonito_pci.c,v 1.5 2008/04/28 20:23:28 martin Exp $ */
 
@@ -485,6 +485,11 @@ bonito_splx(int newipl)
 	/* Update masks to new ipl. Order highly important! */
 	ci->ci_ipl = newipl;
 	bonito_setintrmask(newipl);
+
+	/* Trigger deferred clock interrupt if it is now unmasked. */
+	if (ci->ci_clock_deferred && newipl < IPL_CLOCK)
+		md_triggerclock();
+
 	/* If we still have softints pending trigger processing. */
 	if (ci->ci_softpending != 0 && newipl < IPL_SOFTINT)
 		setsoftintr0();
diff --git a/sys/arch/loongson/loongson/loongson3_intr.c b/sys/arch/loongson/loongson/loongson3_intr.c
index 8d07fe4beab..bc215e9aeb4 100644
--- a/sys/arch/loongson/loongson/loongson3_intr.c
+++ b/sys/arch/loongson/loongson/loongson3_intr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: loongson3_intr.c,v 1.7 2018/02/24 11:42:31 visa Exp $ */
+/* $OpenBSD: loongson3_intr.c,v 1.8 2022/08/22 00:35:07 cheloha Exp $ */
 
 /*
  * Copyright (c) 2016 Visa Hankala
@@ -355,6 +355,10 @@ loongson3_splx(int newipl)
 	REGVAL(LS3_IRT_INTENSET(0)) =
 	    loongson3_intem & ~loongson3_imask[newipl];
 
+	/* Trigger deferred clock interrupt if it is now unmasked. */
+	if (ci->ci_clock_deferred && newipl < IPL_CLOCK)
+		md_triggerclock();
+
 	if (ci->ci_softpending != 0 && newipl < IPL_SOFTINT)
 		setsoftintr0();
 }
diff --git a/sys/arch/mips64/include/cpu.h b/sys/arch/mips64/include/cpu.h
index 451aa7861ba..170661b1b09 100644
--- a/sys/arch/mips64/include/cpu.h
+++ b/sys/arch/mips64/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.138 2022/01/28 16:20:09 visa Exp $ */
+/* $OpenBSD: cpu.h,v 1.139 2022/08/22 00:35:06 cheloha Exp $ */
 
 /*-
 * Copyright (c) 1992, 1993
@@ -178,11 +178,10 @@ struct cpu_info {
 	volatile int	ci_ipl;			/* software IPL */
 	uint32_t	ci_softpending;		/* pending soft interrupts */
 	int		ci_clock_started;
+	volatile int	ci_clock_deferred;	/* clock interrupt postponed */
 	u_int32_t	ci_cpu_counter_last;	/* last compare value loaded */
 	u_int32_t	ci_cpu_counter_interval; /* # of counter ticks/tick */
-	u_int32_t	ci_pendingticks;
-
 	struct pmap	*ci_curpmap;
 	uint		ci_intrdepth;		/* interrupt depth */
 #ifdef MULTIPROCESSOR
@@ -258,6 +257,7 @@ void smp_rendezvous_cpus(unsigned long, void (*)(void *), void *arg);
 #define CPU_BUSY_CYCLE()	do {} while (0)
 
 extern void (*md_startclock)(struct cpu_info *);
+extern void (*md_triggerclock)(void);
 
 void	cp0_calibrate(struct cpu_info *);
 unsigned int cpu_rnd_messybits(void);
@@ -447,6 +447,7 @@ register_t disableintr(void);
 register_t getsr(void);
 register_t setsr(register_t);
 
+uint32_t cp0_get_cause(void);
 u_int	cp0_get_count(void);
 register_t cp0_get_config(void);
 uint32_t cp0_get_config_1(void);
diff --git a/sys/arch/mips64/mips64/clock.c b/sys/arch/mips64/mips64/clock.c
index 796b45514c1..0eb1c16201b 100644
--- a/sys/arch/mips64/mips64/clock.c
+++ b/sys/arch/mips64/mips64/clock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: clock.c,v 1.45 2022/04/06 18:59:26 naddy Exp $ */
+/* $OpenBSD: clock.c,v 1.46 2022/08/22 00:35:06 cheloha Exp $ */
 
 /*
 * Copyright (c) 2001-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -60,6 +60,7 @@ const struct cfattach clock_ca = {
 };
 
 void	cp0_startclock(struct cpu_info *);
+void	cp0_trigger_int5(void);
 uint32_t cp0_int5(uint32_t, struct trapframe *);
 
 int
@@ -86,19 +87,20 @@ clockattach(struct device *parent, struct device *self, void *aux)
 	cp0_set_compare(cp0_get_count() - 1);
 
 	md_startclock = cp0_startclock;
+	md_triggerclock = cp0_trigger_int5;
 }
 
 /*
 * Interrupt handler for targets using the internal count register
 * as interval clock. Normally the system is run with the clock
 * interrupt always enabled. Masking is done here and if the clock
- * can not be run the tick is just counted and handled later when
- * the clock is logically unmasked again.
+ * cannot be run the tick is handled later when the clock is logically
+ * unmasked again.
 */
 uint32_t
 cp0_int5(uint32_t mask, struct trapframe *tf)
 {
-	u_int32_t clkdiff;
+	u_int32_t clkdiff, pendingticks = 0;
 	struct cpu_info *ci = curcpu();
 
 	/*
@@ -112,6 +114,17 @@ cp0_int5(uint32_t mask, struct trapframe *tf)
 		return CR_INT_5;
 	}
 
+	/*
+	 * If the clock interrupt is logically masked, defer all
+	 * work until it is logically unmasked from splx(9).
+	 */
+	if (tf->ipl >= IPL_CLOCK) {
+		ci->ci_clock_deferred = 1;
+		cp0_set_compare(cp0_get_count() - 1);
+		return CR_INT_5;
+	}
+	ci->ci_clock_deferred = 0;
+
 	/*
 	 * Count how many ticks have passed since the last clock interrupt...
*/ @@ -119,9 +132,9 @@ cp0_int5(uint32_t mask, struct trapframe *tf) while (clkdiff >= ci->ci_cpu_counter_interval) { ci->ci_cpu_counter_last += ci->ci_cpu_counter_interval; clkdiff = cp0_get_count() - ci->ci_cpu_counter_last; - ci->ci_pendingticks++; + pendingticks++; } - ci->ci_pendingticks++; + pendingticks++; ci->ci_cpu_counter_last += ci->ci_cpu_counter_interval; /* @@ -132,34 +145,61 @@ cp0_int5(uint32_t mask, struct trapframe *tf) clkdiff = cp0_get_count() - ci->ci_cpu_counter_last; if ((int)clkdiff >= 0) { ci->ci_cpu_counter_last += ci->ci_cpu_counter_interval; - ci->ci_pendingticks++; + pendingticks++; cp0_set_compare(ci->ci_cpu_counter_last); } /* - * Process clock interrupt unless it is currently masked. + * Process clock interrupt. */ - if (tf->ipl < IPL_CLOCK) { #ifdef MULTIPROCESSOR - register_t sr; + register_t sr; - sr = getsr(); - ENABLEIPI(); + sr = getsr(); + ENABLEIPI(); #endif - while (ci->ci_pendingticks) { - atomic_inc_long( - (unsigned long *)&cp0_clock_count.ec_count); - hardclock(tf); - ci->ci_pendingticks--; - } + while (pendingticks) { + atomic_inc_long((unsigned long *)&cp0_clock_count.ec_count); + hardclock(tf); + pendingticks--; + } #ifdef MULTIPROCESSOR - setsr(sr); + setsr(sr); #endif - } return CR_INT_5; /* Clock is always on 5 */ } +/* + * Trigger the clock interrupt. + * + * We need to spin until either (a) INT5 is pending or (b) the compare + * register leads the count register, i.e. we know INT5 will be pending + * very soon. + * + * To ensure we don't spin forever, double the compensatory offset + * added to the compare value every time we miss the count register. + * The initial offset of 16 cycles was chosen experimentally. It + * is the smallest power of two that doesn't require multiple loops + * to arm the timer on most Octeon hardware. + */ +void +cp0_trigger_int5(void) +{ + uint32_t compare, offset = 16; + int leading = 0; + register_t sr; + + sr = disableintr(); + while (!leading && !ISSET(cp0_get_cause(), CR_INT_5)) { + compare = cp0_get_count() + offset; + cp0_set_compare(compare); + leading = (int32_t)(compare - cp0_get_count()) > 0; + offset *= 2; + } + setsr(sr); +} + /* * Start the real-time and statistics clocks. Leave stathz 0 since there * are no other timers available. diff --git a/sys/arch/mips64/mips64/cp0access.S b/sys/arch/mips64/mips64/cp0access.S index cd2a34b6c6e..e04bd7a62b9 100644 --- a/sys/arch/mips64/mips64/cp0access.S +++ b/sys/arch/mips64/mips64/cp0access.S @@ -1,4 +1,4 @@ -/* $OpenBSD: cp0access.S,v 1.23 2021/05/01 16:11:11 visa Exp $ */ +/* $OpenBSD: cp0access.S,v 1.24 2022/08/22 00:35:06 cheloha Exp $ */ /* * Copyright (c) 2001-2003 Opsycon AB (www.opsycon.se / www.opsycon.com) @@ -198,3 +198,10 @@ LEAF(cpu_rnd_messybits, 0) j ra NOP END(cpu_rnd_messybits) + +LEAF(cp0_get_cause, 0) + MFC0 v0, COP_0_CAUSE_REG + MFC0_HAZARD + j ra + NOP +END(cp0_get_cause) diff --git a/sys/arch/mips64/mips64/mips64_machdep.c b/sys/arch/mips64/mips64/mips64_machdep.c index 6c7bab77939..77e452440a9 100644 --- a/sys/arch/mips64/mips64/mips64_machdep.c +++ b/sys/arch/mips64/mips64/mips64_machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: mips64_machdep.c,v 1.37 2021/05/01 16:11:11 visa Exp $ */ +/* $OpenBSD: mips64_machdep.c,v 1.38 2022/08/22 00:35:06 cheloha Exp $ */ /* * Copyright (c) 2009, 2010, 2012 Miodrag Vallat. 
@@ -219,6 +219,7 @@ tlb_asid_wrap(struct cpu_info *ci) */ void (*md_startclock)(struct cpu_info *); +void (*md_triggerclock)(void); extern todr_chip_handle_t todr_handle; diff --git a/sys/arch/octeon/dev/octcit.c b/sys/arch/octeon/dev/octcit.c index be7a5a93f0e..111c2b353b9 100644 --- a/sys/arch/octeon/dev/octcit.c +++ b/sys/arch/octeon/dev/octcit.c @@ -1,4 +1,4 @@ -/* $OpenBSD: octcit.c,v 1.12 2019/09/01 12:16:01 visa Exp $ */ +/* $OpenBSD: octcit.c,v 1.13 2022/08/22 00:35:07 cheloha Exp $ */ /* * Copyright (c) 2017, 2019 Visa Hankala @@ -489,6 +489,10 @@ octcit_splx(int newipl) (void)CIU3_RD_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 0))); } + /* Trigger deferred clock interrupt if it is now unmasked. */ + if (ci->ci_clock_deferred && newipl < IPL_CLOCK) + md_triggerclock(); + /* If we still have softints pending trigger processing. */ if (ci->ci_softpending != 0 && newipl < IPL_SOFTINT) setsoftintr0(); diff --git a/sys/arch/octeon/dev/octciu.c b/sys/arch/octeon/dev/octciu.c index 2fb43a95c53..8e7a5afb72e 100644 --- a/sys/arch/octeon/dev/octciu.c +++ b/sys/arch/octeon/dev/octciu.c @@ -1,4 +1,4 @@ -/* $OpenBSD: octciu.c,v 1.17 2019/09/01 12:16:01 visa Exp $ */ +/* $OpenBSD: octciu.c,v 1.18 2022/08/22 00:35:07 cheloha Exp $ */ /* * Copyright (c) 2000-2004 Opsycon AB (www.opsycon.se) @@ -588,6 +588,10 @@ octciu_splx(int newipl) scpu->scpu_ibank[2].en, scpu->scpu_intem[2] & ~scpu->scpu_imask[newipl][2]); + /* Trigger deferred clock interrupt if it is now unmasked. */ + if (ci->ci_clock_deferred && newipl < IPL_CLOCK) + md_triggerclock(); + /* If we still have softints pending trigger processing. */ if (ci->ci_softpending != 0 && newipl < IPL_SOFTINT) setsoftintr0();
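
Below is a minimal, userspace-runnable C sketch (not part of the
patch) of the handshake the diff implements: cp0_int5() defers its
work while the clock interrupt is logically masked, and splx(9) later
re-arms the timer by hand, retrying with a doubling offset until the
compare value leads the count.  All sim_* names, SIM_STEP, and the
IPL_CLOCK value here are hypothetical stand-ins for the CP0 registers
and MD hooks, and the sketch omits the Cause-register check and the
interrupt disabling that the real cp0_trigger_int5() performs.

#include <stdint.h>
#include <stdio.h>

#define IPL_CLOCK	8	/* stand-in for the real IPL value */
#define SIM_STEP	8	/* counter cycles consumed per read */

static uint32_t sim_count;		/* CP0 count stand-in */
static uint32_t sim_compare;		/* CP0 compare stand-in */
static volatile int clock_deferred;	/* ci_clock_deferred stand-in */

static uint32_t
sim_get_count(void)
{
	/* The counter keeps advancing between reads, like real hardware. */
	return sim_count += SIM_STEP;
}

/* Interrupt handler: defer all work while logically masked. */
static void
sim_int5(int ipl)
{
	if (ipl >= IPL_CLOCK) {
		clock_deferred = 1;
		sim_compare = sim_get_count() - 1;	/* push compare far away */
		return;
	}
	clock_deferred = 0;
	printf("hardclock() work runs at ipl %d\n", ipl);
}

/*
 * cp0_trigger_int5()-style arming: retry until compare leads count,
 * doubling the offset whenever the counter overtakes the new compare.
 */
static void
sim_triggerclock(void)
{
	uint32_t compare, offset = 16;
	int leading = 0;

	while (!leading) {
		compare = sim_get_count() + offset;
		sim_compare = compare;
		/* Signed difference copes with 32-bit counter wraparound. */
		leading = (int32_t)(compare - sim_get_count()) > 0;
		offset *= 2;
	}
	printf("timer armed: compare leads count by %d cycles\n",
	    (int32_t)(sim_compare - sim_count));
}

/* The splx(9) side: once the IPL drops below IPL_CLOCK, trigger by hand. */
static void
sim_splx(int newipl)
{
	if (clock_deferred && newipl < IPL_CLOCK)
		sim_triggerclock();
	sim_int5(newipl);	/* the re-armed interrupt is then delivered */
}

int
main(void)
{
	sim_int5(IPL_CLOCK);	/* interrupt arrives while masked: deferred */
	sim_splx(0);		/* splx(9) unmasks and re-arms the timer */
	return 0;
}

The signed subtraction is what keeps the leading test correct across
counter wraparound, and doubling the offset bounds the number of
retries, which is how the real routine guarantees it cannot spin
forever.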