From 8272ea32c72a9b6974df8a91bd1bc3c0e2aa883f Mon Sep 17 00:00:00 2001
From: cheloha
Date: Tue, 9 Aug 2022 04:49:08 +0000
Subject: [PATCH] riscv64: trigger deferred timer interrupts from splx(9)

In order to move riscv64 to a machine-independent clock interrupt
subsystem, the riscv64 clock interrupt code needs to function without
any specific knowledge of the clock interrupt schedule.

The easiest way to achieve this (as we just did with powerpc and
powerpc64) is, if the timer interrupt fires while the CPU is at or
above IPL_CLOCK, to defer clock interrupt work until the timer
interrupt is logically unmasked in splx(9).  In particular, trigger
the timer interrupt from plic_setipl() so the interrupt, if any, is
pending before we handle soft interrupts.

Because we're no longer deferring work until the next tick, we don't
need to count pending statclock ticks in struct cpu_info.

kettenis@ notes that the timer triggering code should be refactored
into more generic code when we add support for a non-plic(4) riscv64
machine.

Graciously fixed, compiled, and tested by jca@.

Link: https://marc.info/?l=openbsd-tech&m=165931635410276&w=2

ok kettenis@
---
 sys/arch/riscv64/dev/plic.c      |  7 ++++-
 sys/arch/riscv64/include/cpu.h   |  4 +--
 sys/arch/riscv64/riscv64/clock.c | 53 +++++++++++++++++---------------
 3 files changed, 37 insertions(+), 27 deletions(-)

diff --git a/sys/arch/riscv64/dev/plic.c b/sys/arch/riscv64/dev/plic.c
index 6a863fedba3..755fd9262e7 100644
--- a/sys/arch/riscv64/dev/plic.c
+++ b/sys/arch/riscv64/dev/plic.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: plic.c,v 1.10 2022/04/06 18:59:27 naddy Exp $ */
+/* $OpenBSD: plic.c,v 1.11 2022/08/09 04:49:08 cheloha Exp $ */
 
 /*
  * Copyright (c) 2020, Mars Li
@@ -27,6 +27,7 @@
 #include 
 #include 
 #include 
+#include <machine/sbi.h>
 #include "riscv64/dev/riscv_cpu_intc.h"
 
 #include 
@@ -557,6 +558,10 @@ plic_setipl(int new)
 	/* higher values are higher priority */
 	plic_set_threshold(ci->ci_cpuid, new);
 
+	/* trigger deferred timer interrupt if cpl is now low enough */
+	if (ci->ci_timer_deferred && new < IPL_CLOCK)
+		sbi_set_timer(0);
+
 	intr_restore(sie);
 }
 
diff --git a/sys/arch/riscv64/include/cpu.h b/sys/arch/riscv64/include/cpu.h
index 4c32f35fdff..a455820bb7c 100644
--- a/sys/arch/riscv64/include/cpu.h
+++ b/sys/arch/riscv64/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.12 2022/06/10 21:34:15 jca Exp $ */
+/* $OpenBSD: cpu.h,v 1.13 2022/08/09 04:49:08 cheloha Exp $ */
 
 /*
  * Copyright (c) 2019 Mike Larkin
@@ -92,7 +92,7 @@ struct cpu_info {
 	uint64_t		ci_lasttb;
 	uint64_t		ci_nexttimerevent;
 	uint64_t		ci_nextstatevent;
-	int			ci_statspending;
+	volatile int		ci_timer_deferred;
 
 	uint32_t		ci_cpl;
 	uint32_t		ci_ipending;
diff --git a/sys/arch/riscv64/riscv64/clock.c b/sys/arch/riscv64/riscv64/clock.c
index 15b2eaf8c5d..75643f998f5 100644
--- a/sys/arch/riscv64/riscv64/clock.c
+++ b/sys/arch/riscv64/riscv64/clock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: clock.c,v 1.3 2021/07/24 22:41:09 jca Exp $ */
+/* $OpenBSD: clock.c,v 1.4 2022/08/09 04:49:08 cheloha Exp $ */
 
 /*
  * Copyright (c) 2020 Mark Kettenis
@@ -21,6 +21,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -105,6 +106,17 @@
 	int nstats;
 	int s;
 
+	/*
+	 * If the clock interrupt is masked, defer all clock interrupt
+	 * work until the clock interrupt is unmasked from splx(9).
+	 */
+	if (ci->ci_cpl >= IPL_CLOCK) {
+		ci->ci_timer_deferred = 1;
+		sbi_set_timer(UINT64_MAX);
+		return 0;
+	}
+	ci->ci_timer_deferred = 0;
+
 	/*
 	 * Based on the actual time delay since the last clock interrupt,
 	 * we arrange for earlier interrupt next time.
@@ -132,31 +144,24 @@
 
 	sbi_set_timer(nextevent);
 
-	if (ci->ci_cpl >= IPL_CLOCK) {
-		ci->ci_statspending += nstats;
-	} else {
-		nstats += ci->ci_statspending;
-		ci->ci_statspending = 0;
-
-		s = splclock();
-		intr_enable();
-
-		/*
-		 * Do standard timer interrupt stuff.
-		 */
-		while (ci->ci_lasttb < prevtb) {
-			ci->ci_lasttb += tick_increment;
-			clock_count.ec_count++;
-			hardclock((struct clockframe *)frame);
-		}
-
-		while (nstats-- > 0)
-			statclock((struct clockframe *)frame);
-
-		intr_disable();
-		splx(s);
+	s = splclock();
+	intr_enable();
+
+	/*
+	 * Do standard timer interrupt stuff.
+	 */
+	while (ci->ci_lasttb < prevtb) {
+		ci->ci_lasttb += tick_increment;
+		clock_count.ec_count++;
+		hardclock((struct clockframe *)frame);
 	}
 
+	while (nstats-- > 0)
+		statclock((struct clockframe *)frame);
+
+	intr_disable();
+	splx(s);
+
 	return 0;
 }
 
-- 
2.20.1
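
For readers following along outside the OpenBSD tree, here is a minimal
user-space sketch of the deferral pattern the patch implements: the timer
handler refuses to do clock work while the clock interrupt is logically
masked, and the splx(9) analogue retriggers the timer once the level drops
below IPL_CLOCK.  The names used below (cpl, timer_deferred,
IPL_CLOCK_SKETCH, retrigger_timer(), splx_sketch(), clock_intr_sketch())
are simplified stand-ins, not the kernel's API; in the real code the
retrigger is sbi_set_timer(0) and "masking" is pushing the comparator out
to UINT64_MAX.

/*
 * Illustrative sketch only -- not part of the patch and not the kernel
 * API.  All identifiers here are hypothetical stand-ins for the per-CPU
 * state (ci_cpl, ci_timer_deferred) and for sbi_set_timer().
 */
#include <stdbool.h>
#include <stdio.h>

#define IPL_NONE_SKETCH		0
#define IPL_CLOCK_SKETCH	10

static int  cpl;		/* current interrupt priority level */
static bool timer_deferred;	/* clock work postponed while masked */

/* Stand-in for sbi_set_timer(0): make the timer fire again immediately. */
static void
retrigger_timer(void)
{
	printf("timer retriggered\n");
}

/* Timer interrupt handler: only do clock work if the clock is unmasked. */
static void
clock_intr_sketch(void)
{
	if (cpl >= IPL_CLOCK_SKETCH) {
		timer_deferred = true;	/* remember pending work and bail */
		return;
	}
	timer_deferred = false;
	printf("hardclock()/statclock() work runs here\n");
}

/* splx(9) analogue: lowering the IPL retriggers any deferred timer work. */
static void
splx_sketch(int new)
{
	cpl = new;
	if (timer_deferred && new < IPL_CLOCK_SKETCH)
		retrigger_timer();
}

int
main(void)
{
	cpl = IPL_CLOCK_SKETCH;		/* clock interrupt masked */
	clock_intr_sketch();		/* work is deferred, nothing runs */
	splx_sketch(IPL_NONE_SKETCH);	/* unmasking retriggers the timer */
	clock_intr_sketch();		/* deferred work now runs */
	return 0;
}

The point of retriggering from the splx(9) path rather than counting missed
ticks (the old ci_statspending approach) is that the deferred work runs as
soon as the clock interrupt is unmasked, instead of waiting for the next
scheduled tick.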