As with powerpc, powerpc64, and riscv64, on mips64 platforms we need
to isolate the clock interrupt schedule from the MD clock interrupt
code. To do this, we need to stop deferring clock interrupt work
until the next tick and instead defer the work until we logically
unmask the clock interrupt from splx(9).
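In sketch form, the handler-side half of this looks as follows
(simplified from the cp0_int5() hunk in the diff below; the real
handler also rearms the compare register and counts pending ticks):

    /* In the INT5 handler: if the clock is logically masked, defer. */
    if (tf->ipl >= IPL_CLOCK) {
            ci->ci_clock_deferred = 1;
            /* Writing Compare acks INT5; count - 1 pushes the next
               match a full counter period (~2^32 cycles) away. */
            cp0_set_compare(cp0_get_count() - 1);
            return CR_INT_5;
    }
    ci->ci_clock_deferred = 0;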
Add a boolean (ci_clock_deferred) to the cpu_info struct to note
whether we need to trigger the clock interrupt by hand, and then
do so from splx(9) by calling md_triggerclock().
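The splx(9) side is the same two-line hook, repeated in each
interrupt controller's splx path touched by the diff:

    /* Trigger deferred clock interrupt if it is now unmasked. */
    if (ci->ci_clock_deferred && newipl < IPL_CLOCK)
            md_triggerclock();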
Currently md_triggerclock is only ever set to cp0_trigger_int5(). The
routine takes great care to ensure that INT5 has fired or will fire
before returning.
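The heart of cp0_trigger_int5() is a bounded spin, shown here in
sketch form (the full routine in the diff below also disables
interrupts around the loop):

    uint32_t compare, offset = 16;
    int leading = 0;

    while (!leading && !ISSET(cp0_get_cause(), CR_INT_5)) {
            compare = cp0_get_count() + offset;
            cp0_set_compare(compare);
            leading = (int32_t)(compare - cp0_get_count()) > 0;
            offset *= 2;    /* missed the count? double the lead */
    }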
There are some loongson machines that use glxclk instead of CP0. They
can be switched to use CP0 later.
With input and advice from visa@ and miod@.
Compiled and extensively tested by visa@ and miod@ on various octeon
and loongson machines. No issues seen on octeon machines. miod@ saw
some odd things on loongson, but suggests that all issues are
probably unrelated to this patch.
Link: https://marc.info/?l=openbsd-tech&m=165929192702632&w=2
ok visa@, miod@
-/* $OpenBSD: bonito.c,v 1.35 2021/03/11 11:16:57 jsg Exp $ */
+/* $OpenBSD: bonito.c,v 1.36 2022/08/22 00:35:07 cheloha Exp $ */
/* $NetBSD: bonito_mainbus.c,v 1.11 2008/04/28 20:23:10 martin Exp $ */
/* $NetBSD: bonito_pci.c,v 1.5 2008/04/28 20:23:28 martin Exp $ */
/* Update masks to new ipl. Order highly important! */
ci->ci_ipl = newipl;
bonito_setintrmask(newipl);
+
+ /* Trigger deferred clock interrupt if it is now unmasked. */
+ if (ci->ci_clock_deferred && newipl < IPL_CLOCK)
+ md_triggerclock();
+
/* If we still have softints pending trigger processing. */
if (ci->ci_softpending != 0 && newipl < IPL_SOFTINT)
setsoftintr0();
-/* $OpenBSD: loongson3_intr.c,v 1.7 2018/02/24 11:42:31 visa Exp $ */
+/* $OpenBSD: loongson3_intr.c,v 1.8 2022/08/22 00:35:07 cheloha Exp $ */
/*
* Copyright (c) 2016 Visa Hankala
REGVAL(LS3_IRT_INTENSET(0)) =
loongson3_intem & ~loongson3_imask[newipl];
+ /* Trigger deferred clock interrupt if it is now unmasked. */
+ if (ci->ci_clock_deferred && newipl < IPL_CLOCK)
+ md_triggerclock();
+
if (ci->ci_softpending != 0 && newipl < IPL_SOFTINT)
setsoftintr0();
}
-/* $OpenBSD: cpu.h,v 1.138 2022/01/28 16:20:09 visa Exp $ */
+/* $OpenBSD: cpu.h,v 1.139 2022/08/22 00:35:06 cheloha Exp $ */
/*-
* Copyright (c) 1992, 1993
volatile int ci_ipl; /* software IPL */
uint32_t ci_softpending; /* pending soft interrupts */
int ci_clock_started;
+ volatile int ci_clock_deferred; /* clock interrupt postponed */
u_int32_t ci_cpu_counter_last; /* last compare value loaded */
u_int32_t ci_cpu_counter_interval; /* # of counter ticks/tick */
- u_int32_t ci_pendingticks;
-
struct pmap *ci_curpmap;
uint ci_intrdepth; /* interrupt depth */
#ifdef MULTIPROCESSOR
#define CPU_BUSY_CYCLE() do {} while (0)
extern void (*md_startclock)(struct cpu_info *);
+extern void (*md_triggerclock)(void);
void cp0_calibrate(struct cpu_info *);
unsigned int cpu_rnd_messybits(void);
register_t getsr(void);
register_t setsr(register_t);
+uint32_t cp0_get_cause(void);
u_int cp0_get_count(void);
register_t cp0_get_config(void);
uint32_t cp0_get_config_1(void);
-/* $OpenBSD: clock.c,v 1.45 2022/04/06 18:59:26 naddy Exp $ */
+/* $OpenBSD: clock.c,v 1.46 2022/08/22 00:35:06 cheloha Exp $ */
/*
* Copyright (c) 2001-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
};
void cp0_startclock(struct cpu_info *);
+void cp0_trigger_int5(void);
uint32_t cp0_int5(uint32_t, struct trapframe *);
int
cp0_set_compare(cp0_get_count() - 1);
md_startclock = cp0_startclock;
+ md_triggerclock = cp0_trigger_int5;
}
/*
* Interrupt handler for targets using the internal count register
* as interval clock. Normally the system is run with the clock
* interrupt always enabled. Masking is done here and if the clock
- * can not be run the tick is just counted and handled later when
- * the clock is logically unmasked again.
+ * cannot be run the tick is handled later when the clock is logically
+ * unmasked again.
*/
uint32_t
cp0_int5(uint32_t mask, struct trapframe *tf)
{
- u_int32_t clkdiff;
+ u_int32_t clkdiff, pendingticks = 0;
struct cpu_info *ci = curcpu();
/*
return CR_INT_5;
}
+ /*
+ * If the clock interrupt is logically masked, defer all
+ * work until it is logically unmasked from splx(9).
+ */
+ if (tf->ipl >= IPL_CLOCK) {
+ ci->ci_clock_deferred = 1;
+ cp0_set_compare(cp0_get_count() - 1);
+ return CR_INT_5;
+ }
+ ci->ci_clock_deferred = 0;
+
/*
* Count how many ticks have passed since the last clock interrupt...
*/
while (clkdiff >= ci->ci_cpu_counter_interval) {
ci->ci_cpu_counter_last += ci->ci_cpu_counter_interval;
clkdiff = cp0_get_count() - ci->ci_cpu_counter_last;
- ci->ci_pendingticks++;
+ pendingticks++;
}
- ci->ci_pendingticks++;
+ pendingticks++;
ci->ci_cpu_counter_last += ci->ci_cpu_counter_interval;
/*
clkdiff = cp0_get_count() - ci->ci_cpu_counter_last;
if ((int)clkdiff >= 0) {
ci->ci_cpu_counter_last += ci->ci_cpu_counter_interval;
- ci->ci_pendingticks++;
+ pendingticks++;
cp0_set_compare(ci->ci_cpu_counter_last);
}
/*
- * Process clock interrupt unless it is currently masked.
+ * Process clock interrupt.
*/
- if (tf->ipl < IPL_CLOCK) {
#ifdef MULTIPROCESSOR
- register_t sr;
+ register_t sr;
- sr = getsr();
- ENABLEIPI();
+ sr = getsr();
+ ENABLEIPI();
#endif
- while (ci->ci_pendingticks) {
- atomic_inc_long(
- (unsigned long *)&cp0_clock_count.ec_count);
- hardclock(tf);
- ci->ci_pendingticks--;
- }
+ while (pendingticks) {
+ atomic_inc_long((unsigned long *)&cp0_clock_count.ec_count);
+ hardclock(tf);
+ pendingticks--;
+ }
#ifdef MULTIPROCESSOR
- setsr(sr);
+ setsr(sr);
#endif
- }
return CR_INT_5; /* Clock is always on 5 */
}
+/*
+ * Trigger the clock interrupt.
+ *
+ * We need to spin until either (a) INT5 is pending or (b) the compare
+ * register leads the count register, i.e. we know INT5 will be pending
+ * very soon.
+ *
+ * To ensure we don't spin forever, double the compensatory offset
+ * added to the compare value every time we miss the count register.
+ * The initial offset of 16 cycles was chosen experimentally. It
+ * is the smallest power of two that doesn't require multiple loops
+ * to arm the timer on most Octeon hardware.
+ */
+void
+cp0_trigger_int5(void)
+{
+ uint32_t compare, offset = 16;
+ int leading = 0;
+ register_t sr;
+
+ sr = disableintr();
+ while (!leading && !ISSET(cp0_get_cause(), CR_INT_5)) {
+ compare = cp0_get_count() + offset;
+ cp0_set_compare(compare);
+ leading = (int32_t)(compare - cp0_get_count()) > 0;
+ offset *= 2;
+ }
+ setsr(sr);
+}
+
/*
* Start the real-time and statistics clocks. Leave stathz 0 since there
* are no other timers available.
-/* $OpenBSD: cp0access.S,v 1.23 2021/05/01 16:11:11 visa Exp $ */
+/* $OpenBSD: cp0access.S,v 1.24 2022/08/22 00:35:06 cheloha Exp $ */
/*
* Copyright (c) 2001-2003 Opsycon AB (www.opsycon.se / www.opsycon.com)
j ra
NOP
END(cpu_rnd_messybits)
+
+LEAF(cp0_get_cause, 0)
+ MFC0 v0, COP_0_CAUSE_REG
+ MFC0_HAZARD
+ j ra
+ NOP
+END(cp0_get_cause)
-/* $OpenBSD: mips64_machdep.c,v 1.37 2021/05/01 16:11:11 visa Exp $ */
+/* $OpenBSD: mips64_machdep.c,v 1.38 2022/08/22 00:35:06 cheloha Exp $ */
/*
* Copyright (c) 2009, 2010, 2012 Miodrag Vallat.
*/
void (*md_startclock)(struct cpu_info *);
+void (*md_triggerclock)(void);
extern todr_chip_handle_t todr_handle;
-/* $OpenBSD: octcit.c,v 1.12 2019/09/01 12:16:01 visa Exp $ */
+/* $OpenBSD: octcit.c,v 1.13 2022/08/22 00:35:07 cheloha Exp $ */
/*
* Copyright (c) 2017, 2019 Visa Hankala
(void)CIU3_RD_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 0)));
}
+ /* Trigger deferred clock interrupt if it is now unmasked. */
+ if (ci->ci_clock_deferred && newipl < IPL_CLOCK)
+ md_triggerclock();
+
/* If we still have softints pending trigger processing. */
if (ci->ci_softpending != 0 && newipl < IPL_SOFTINT)
setsoftintr0();
-/* $OpenBSD: octciu.c,v 1.17 2019/09/01 12:16:01 visa Exp $ */
+/* $OpenBSD: octciu.c,v 1.18 2022/08/22 00:35:07 cheloha Exp $ */
/*
* Copyright (c) 2000-2004 Opsycon AB (www.opsycon.se)
scpu->scpu_ibank[2].en,
scpu->scpu_intem[2] & ~scpu->scpu_imask[newipl][2]);
+ /* Trigger deferred clock interrupt if it is now unmasked. */
+ if (ci->ci_clock_deferred && newipl < IPL_CLOCK)
+ md_triggerclock();
+
/* If we still have softints pending trigger processing. */
if (ci->ci_softpending != 0 && newipl < IPL_SOFTINT)
setsoftintr0();