sure clock interrupts do not attempt to acquire it.
This will also eventually allow for IPL_MPSAFE interrupts on alpha.
Tested by dlg@ and me.
-/* $OpenBSD: interrupt.c,v 1.34 2014/11/18 20:51:00 krw Exp $ */
+/* $OpenBSD: interrupt.c,v 1.35 2015/05/19 20:28:14 miod Exp $ */
/* $NetBSD: interrupt.c,v 1.46 2000/06/03 20:47:36 thorpej Exp $ */
/*-
splx(s);
}
+#ifdef unused
u_long
scb_alloc(void (*func)(void *, u_long), void *arg)
{
return (SCB_ALLOC_FAILED);
}
+#endif
void
scb_free(u_long vec)
KDASSERT(a1 >= SCB_IOVECBASE && a1 < SCB_SIZE);
atomic_add_ulong(&ci->ci_intrdepth, 1);
-#if defined(MULTIPROCESSOR)
- /*
- * XXX Need to support IPL_MPSAFE eventually. Acquiring the
- * XXX kernel lock could be done deeper, as most of the
- * XXX scb handlers end up invoking
- * XXX alpha_shared_intr_dispatch().
- */
- __mp_lock(&kernel_lock);
-#endif
atomic_add_int(&uvmexp.intrs, 1);
-
scb = &scb_iovectab[SCB_VECTOIDX(a1 - SCB_IOVECBASE)];
(*scb->scb_func)(scb->scb_arg, a1);
-#if defined(MULTIPROCESSOR)
- __mp_unlock(&kernel_lock);
-#endif
atomic_sub_ulong(&ci->ci_intrdepth, 1);
break;
}
-/* $OpenBSD: shared_intr.c,v 1.20 2014/12/09 06:58:28 doug Exp $ */
+/* $OpenBSD: shared_intr.c,v 1.21 2015/05/19 20:28:14 miod Exp $ */
/* $NetBSD: shared_intr.c,v 1.13 2000/03/19 01:46:18 thorpej Exp $ */
/*
handled = 0;
TAILQ_FOREACH(ih, &intr[num].intr_q, ih_q) {
+#if defined(MULTIPROCESSOR)
+ /* XXX Need to support IPL_MPSAFE eventually. */
+ if (ih->ih_level < IPL_CLOCK)
+ __mp_lock(&kernel_lock);
+#endif
/*
* The handler returns one of three values:
* 0: This interrupt wasn't for me.
rv = (*ih->ih_fn)(ih->ih_arg);
if (rv)
ih->ih_count.ec_count++;
+#if defined(MULTIPROCESSOR)
+ if (ih->ih_level < IPL_CLOCK)
+ __mp_unlock(&kernel_lock);
+#endif
handled = handled || (rv != 0);
if (intr_shared_edge == 0 && rv == 1)
break;
-/* $OpenBSD: tc_3000_300.c,v 1.17 2010/09/22 12:36:32 miod Exp $ */
+/* $OpenBSD: tc_3000_300.c,v 1.18 2015/05/19 20:28:14 miod Exp $ */
/* $NetBSD: tc_3000_300.c,v 1.26 2001/07/27 00:25:21 thorpej Exp $ */
/*
struct tcintr {
int (*tci_func)(void *);
void *tci_arg;
+ int tci_level;
struct evcount tci_count;
} tc_3000_300_intr[TC_3000_300_NCOOKIES];
for (i = 0; i < TC_3000_300_NCOOKIES; i++) {
tc_3000_300_intr[i].tci_func = tc_3000_300_intrnull;
tc_3000_300_intr[i].tci_arg = (void *)i;
+ tc_3000_300_intr[i].tci_level = IPL_HIGH;
}
}
tc_3000_300_intr[dev].tci_func = func;
tc_3000_300_intr[dev].tci_arg = arg;
+ tc_3000_300_intr[dev].tci_level = level;
if (name != NULL)
evcount_attach(&tc_3000_300_intr[dev].tci_count, name, NULL);
tc_3000_300_intr[dev].tci_func = tc_3000_300_intrnull;
tc_3000_300_intr[dev].tci_arg = (void *)dev;
+ tc_3000_300_intr[dev].tci_level = IPL_HIGH;
if (name != NULL)
evcount_detach(&tc_3000_300_intr[dev].tci_count);
}
u_int32_t tcir, ioasicir, ioasicimr;
int ifound;
-#ifdef DIAGNOSTIC
- int s;
- if (vec != 0x800)
- panic("INVALID ASSUMPTION: vec 0x%lx, not 0x800", vec);
- s = splhigh();
- if (s != ALPHA_PSL_IPL_IO)
- panic("INVALID ASSUMPTION: IPL %d, not %d", s,
- ALPHA_PSL_IPL_IO);
- splx(s);
-#endif
-
do {
tc_syncbus();
ifound = 0;
+#ifdef MULTIPROCESSOR
+#define INTRLOCK(slot) \
+ if (tc_3000_300_intr[slot].tci_level < IPL_CLOCK) \
+ __mp_lock(&kernel_lock)
+#define INTRUNLOCK(slot) \
+ if (tc_3000_300_intr[slot].tci_level < IPL_CLOCK) \
+ __mp_unlock(&kernel_lock)
+#else
+#define INTRLOCK(slot) do { } while (0)
+#define INTRUNLOCK(slot) do { } while (0)
+#endif
#define CHECKINTR(slot, flag) \
if (flag) { \
ifound = 1; \
- tc_3000_300_intr[slot].tci_count.ec_count++; \
+ INTRLOCK(slot); \
(*tc_3000_300_intr[slot].tci_func) \
(tc_3000_300_intr[slot].tci_arg); \
+ tc_3000_300_intr[slot].tci_count.ec_count++; \
+ INTRUNLOCK(slot); \
}
+
/* Do them in order of priority; highest slot # first. */
CHECKINTR(TC_3000_300_DEV_CXTURBO,
tcir & TC_3000_300_IR_CXTURBO);
ioasicir & IOASIC_INTR_300_OPT1);
CHECKINTR(TC_3000_300_DEV_OPT0,
ioasicir & IOASIC_INTR_300_OPT0);
+
+#undef INTRUNLOCK
+#undef INTRLOCK
#undef CHECKINTR
#ifdef DIAGNOSTIC
#define PRINTINTR(msg, bits) \
if (tcir & bits) \
printf(msg);
+
PRINTINTR("BCache tag parity error\n",
TC_3000_300_IR_BCTAGPARITY);
PRINTINTR("TC overrun error\n", TC_3000_300_IR_TCOVERRUN);
PRINTINTR("Bcache parity error\n",
TC_3000_300_IR_BCACHEPARITY);
PRINTINTR("Memory parity error\n", TC_3000_300_IR_MEMPARITY);
+
#undef PRINTINTR
#endif
} while (ifound);
-/* $OpenBSD: tc_3000_500.c,v 1.18 2010/09/22 12:36:32 miod Exp $ */
+/* $OpenBSD: tc_3000_500.c,v 1.19 2015/05/19 20:28:14 miod Exp $ */
/* $NetBSD: tc_3000_500.c,v 1.24 2001/07/27 00:25:21 thorpej Exp $ */
/*
struct tcintr {
int (*tci_func)(void *);
void *tci_arg;
+ int tci_level;
struct evcount tci_count;
} tc_3000_500_intr[TC_3000_500_NCOOKIES];
for (i = 0; i < TC_3000_500_NCOOKIES; i++) {
tc_3000_500_intr[i].tci_func = tc_3000_500_intrnull;
tc_3000_500_intr[i].tci_arg = (void *)i;
+ tc_3000_500_intr[i].tci_level = IPL_HIGH;
}
}
tc_3000_500_intr[dev].tci_func = func;
tc_3000_500_intr[dev].tci_arg = arg;
+ tc_3000_500_intr[dev].tci_level = level;
if (name != NULL)
evcount_attach(&tc_3000_500_intr[dev].tci_count, name, NULL);
tc_3000_500_intr[dev].tci_func = tc_3000_500_intrnull;
tc_3000_500_intr[dev].tci_arg = (void *)dev;
+ tc_3000_500_intr[dev].tci_level = IPL_HIGH;
if (name != NULL)
evcount_detach(&tc_3000_500_intr[dev].tci_count);
}
u_int32_t ir;
int ifound;
-#ifdef DIAGNOSTIC
- int s;
- if (vec != 0x800)
- panic("INVALID ASSUMPTION: vec 0x%lx, not 0x800", vec);
- s = splhigh();
- if (s != ALPHA_PSL_IPL_IO)
- panic("INVALID ASSUMPTION: IPL %d, not %d", s,
- ALPHA_PSL_IPL_IO);
- splx(s);
-#endif
-
do {
tc_syncbus();
ir = *(volatile u_int32_t *)TC_3000_500_IR_CLEAR;
ifound = 0;
+#ifdef MULTIPROCESSOR
+#define INTRLOCK(slot) \
+ if (tc_3000_500_intr[slot].tci_level < IPL_CLOCK) \
+ __mp_lock(&kernel_lock)
+#define INTRUNLOCK(slot) \
+ if (tc_3000_500_intr[slot].tci_level < IPL_CLOCK) \
+ __mp_unlock(&kernel_lock)
+#else
+#define INTRLOCK(slot) do { } while (0)
+#define INTRUNLOCK(slot) do { } while (0)
+#endif
#define CHECKINTR(slot) \
if (ir & tc_3000_500_intrbits[slot]) { \
ifound = 1; \
- tc_3000_500_intr[slot].tci_count.ec_count++; \
+ INTRLOCK(slot); \
(*tc_3000_500_intr[slot].tci_func) \
(tc_3000_500_intr[slot].tci_arg); \
+ tc_3000_500_intr[slot].tci_count.ec_count++; \
+ INTRUNLOCK(slot); \
}
+
/* Do them in order of priority; highest slot # first. */
CHECKINTR(TC_3000_500_DEV_CXTURBO);
CHECKINTR(TC_3000_500_DEV_IOASIC);
CHECKINTR(TC_3000_500_DEV_OPT2);
CHECKINTR(TC_3000_500_DEV_OPT1);
CHECKINTR(TC_3000_500_DEV_OPT0);
+
+#undef INTRUNLOCK
+#undef INTRLOCK
#undef CHECKINTR
#ifdef DIAGNOSTIC
#define PRINTINTR(msg, bits) \
if (ir & bits) \
printf(msg);
+
PRINTINTR("Second error occurred\n", TC_3000_500_IR_ERR2);
PRINTINTR("DMA buffer error\n", TC_3000_500_IR_DMABE);
PRINTINTR("DMA cross 2K boundary\n", TC_3000_500_IR_DMA2K);
PRINTINTR("DMA scatter/gather invalid\n", TC_3000_500_IR_DMASG);
PRINTINTR("Scatter/gather parity error\n",
TC_3000_500_IR_SGPAR);
+
#undef PRINTINTR
#endif
} while (ifound);