the secondary CPUs receive clock interrupts. Based on diffs from drahn@.
ok patrick@
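
To make the shape of the change concrete, here is a rough sketch of the
startup path being wired up (names taken from the hunks below; the
surrounding cpu_init_secondary() context is assumed). Each hatching
secondary CPU now does roughly:

	s = splhigh();
	arm_intr_cpu_enable();	/* per-cpu controller setup (ic_cpu_enable hook) */
	cpu_startclock();	/* per-cpu clock via the mpstartclock hook */
	...
	spllower(IPL_NONE);

To support this, the interrupt controllers grow ic_route and
ic_cpu_enable methods and gain IPI support (NOP and DDB).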
-/* $OpenBSD: cpu.c,v 1.12 2018/01/28 13:17:45 kettenis Exp $ */
+/* $OpenBSD: cpu.c,v 1.13 2018/01/31 10:52:12 kettenis Exp $ */
/*
* Copyright (c) 2016 Dale Rahn <drahn@dalerahn.com>
WRITE_SPECIALREG(tcr_el1, tcr);
s = splhigh();
-#ifdef notyet
arm_intr_cpu_enable();
cpu_startclock();
-#endif
nanouptime(&ci->ci_schedstate.spc_runtime);
atomic_setbits_int(&ci->ci_flags, CPUF_RUNNING);
__asm volatile("dsb sy; sev");
-#ifdef notyet
spllower(IPL_NONE);
+#ifdef notyet
SCHED_LOCK(s);
cpu_switchto(NULL, sched_chooseproc());
#else
-/* $OpenBSD: intr.c,v 1.9 2018/01/28 13:17:45 kettenis Exp $ */
+/* $OpenBSD: intr.c,v 1.10 2018/01/31 10:52:12 kettenis Exp $ */
/*
* Copyright (c) 2011 Dale Rahn <drahn@openbsd.org>
*
ic->ic_route(ih->ih_ih, enable, ci);
}
+void
+arm_intr_cpu_enable(void)
+{
+ struct interrupt_controller *ic;
+
+ LIST_FOREACH(ic, &interrupt_controllers, ic_list)
+ if (ic->ic_cpu_enable)
+ ic->ic_cpu_enable();
+}
+
int
arm_dflt_splraise(int newcpl)
{
arm_clock_func.initclocks();
}
+void
+cpu_startclock(void)
+{
+ if (arm_clock_func.mpstartclock == NULL)
+ panic("startclock function not initialized yet");
+
+ arm_clock_func.mpstartclock();
+}
+
void
arm_dflt_delay(u_int usecs)
{
-/* $OpenBSD: agintc.c,v 1.6 2018/01/12 22:20:28 kettenis Exp $ */
+/* $OpenBSD: agintc.c,v 1.7 2018/01/31 10:52:12 kettenis Exp $ */
/*
* Copyright (c) 2007, 2009, 2011, 2017 Dale Rahn <drahn@dalerahn.com>
*
#define GICR_WAKER_CHILDRENASLEEP (1 << 2)
#define GICR_WAKER_PROCESSORSLEEP (1 << 1)
#define GICR_WAKER_X0 (1 << 0)
+#define GICR_IGROUP0 0x10080
#define GICR_ISENABLE0 0x10100
#define GICR_ICENABLE0 0x10180
#define GICR_ISPENDR0 0x10200
int sc_ncells;
int sc_num_redist;
struct interrupt_controller sc_ic;
+ int sc_ipi_num[2]; /* id for NOP and DDB ipi */
+ int sc_ipi_reason[MAX_CORES]; /* NOP or DDB caused */
+ void *sc_ipi_irq[2]; /* irqhandle for each ipi */
};
struct agintc_softc *agintc_sc;
int iq_irq; /* IRQ to mask while handling */
int iq_levels; /* IPL_*'s this IRQ has */
int iq_ist; /* share type */
+ int iq_route;
};
int agintc_match(struct device *, void *, void *);
void agintc_intr_disable(struct agintc_softc *, int);
void agintc_route(struct agintc_softc *, int, int,
struct cpu_info *);
+void agintc_route_irq(void *, int, struct cpu_info *);
void agintc_wait_rwp(struct agintc_softc *sc);
void agintc_r_wait_rwp(struct agintc_softc *sc);
uint32_t agintc_r_ictlr(void);
+int agintc_ipi_ddb(void *v);
+int agintc_ipi_nop(void *v);
+int agintc_ipi_combined(void *);
+void agintc_send_ipi(struct cpu_info *, int);
+
struct cfattach agintc_ca = {
sizeof (struct agintc_softc), agintc_match, agintc_attach
};
int psw;
int offset, nredist;
int grp1enable;
+#ifdef MULTIPROCESSOR
+ int nipi, ipiirq[2];
+#endif
psw = disable_interrupts();
arm_init_smask();
TAILQ_INIT(&sc->sc_agintc_handler[i].iq_list);
/* set priority to IPL_HIGH until configure lowers to desired IPL */
- agintc_setipl(IPL_HIGH);
+ agintc_setipl(IPL_HIGH);
/* initialize all interrupts as disabled */
agintc_calc_mask();
__asm volatile("msr "STR(ICC_BPR1)", %x0" :: "r"(0));
__asm volatile("msr "STR(ICC_IGRPEN1)", %x0" :: "r"(grp1enable));
+#ifdef MULTIPROCESSOR
+ /* setup IPI interrupts */
+
+ /*
+	 * Ideally we want two IPI interrupts, one for NOP and one for
+	 * DDB. However, we can survive with only one; it is possible
+	 * that most are not available to the non-secure OS.
+ */
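+	/*
+	 * Probe by flipping an implemented priority bit: if the value
+	 * read back is unchanged, the register is not writable from the
+	 * non-secure world and that SGI is unusable.
+	 */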
+ nipi = 0;
+ for (i = 0; i < 16; i++) {
+ int hwcpu = sc->sc_cpuremap[cpu_number()];
+ int reg, oldreg;
+
+ oldreg = bus_space_read_1(sc->sc_iot, sc->sc_r_ioh[hwcpu],
+ GICR_IPRIORITYR(i));
+ bus_space_write_1(sc->sc_iot, sc->sc_r_ioh[hwcpu],
+ GICR_IPRIORITYR(i), oldreg ^ 0x20);
+
+ /* if this interrupt is not usable, pri will be unmodified */
+ reg = bus_space_read_1(sc->sc_iot, sc->sc_r_ioh[hwcpu],
+ GICR_IPRIORITYR(i));
+ if (reg == oldreg)
+ continue;
+
+ /* return to original value, will be set when used */
+ bus_space_write_1(sc->sc_iot, sc->sc_r_ioh[hwcpu],
+ GICR_IPRIORITYR(i), oldreg);
+
+ if (nipi == 0)
+ printf(" ipi: %d", i);
+ else
+ printf(", %d", i);
+ ipiirq[nipi++] = i;
+ if (nipi == 2)
+ break;
+ }
+
+ if (nipi == 0)
+ panic("no irq available for IPI");
+
+ switch (nipi) {
+ case 1:
+ sc->sc_ipi_irq[0] = agintc_intr_establish(ipiirq[0],
+ IPL_IPI|IPL_MPSAFE, agintc_ipi_combined, sc, "ipi");
+ sc->sc_ipi_num[ARM_IPI_NOP] = ipiirq[0];
+ sc->sc_ipi_num[ARM_IPI_DDB] = ipiirq[0];
+ break;
+ case 2:
+ sc->sc_ipi_irq[0] = agintc_intr_establish(ipiirq[0],
+ IPL_IPI|IPL_MPSAFE, agintc_ipi_nop, sc, "ipinop");
+ sc->sc_ipi_num[ARM_IPI_NOP] = ipiirq[0];
+ sc->sc_ipi_irq[1] = agintc_intr_establish(ipiirq[1],
+ IPL_IPI|IPL_MPSAFE, agintc_ipi_ddb, sc, "ipiddb");
+ sc->sc_ipi_num[ARM_IPI_DDB] = ipiirq[1];
+ break;
+ default:
+		panic("unexpected number of IPIs: %d", nipi);
+ }
+
+ intr_send_ipi_func = agintc_send_ipi;
+#endif
+
+ printf("\n");
+
sc->sc_ic.ic_node = faa->fa_node;
sc->sc_ic.ic_cookie = self;
sc->sc_ic.ic_establish = agintc_intr_establish_fdt;
sc->sc_ic.ic_disestablish = agintc_intr_disestablish;
+ sc->sc_ic.ic_route = agintc_route_irq;
+ sc->sc_ic.ic_cpu_enable = agintc_cpuinit;
arm_intr_register_fdt(&sc->sc_ic);
restore_interrupts(psw);
mpidr, affinity);
for (i = 0; i < sc->sc_num_redist; i++)
printf("rdist%d: %016llx\n", i, sc->sc_affinity[i]);
- panic("failed to indentify cpunumber %d \n", ci->ci_cpuid);
+	panic("failed to identify cpu number %d", ci->ci_cpuid);
}
waker = bus_space_read_4(sc->sc_iot, sc->sc_r_ioh[hwcpu],
bus_space_write_4(sc->sc_iot, sc->sc_r_ioh[hwcpu],
GICR_IPRIORITYR(i), ~0);
}
+
+ if (sc->sc_ipi_irq[0] != NULL)
+ agintc_route_irq(sc->sc_ipi_irq[0], IRQ_ENABLE, curcpu());
+ if (sc->sc_ipi_irq[1] != NULL)
+ agintc_route_irq(sc->sc_ipi_irq[1], IRQ_ENABLE, curcpu());
+
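+	/* unmask all priorities and enable group 1 on this cpu */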
+ __asm volatile("msr "STR(ICC_PMR)", %x0" :: "r"(0xff));
+ __asm volatile("msr "STR(ICC_BPR1)", %x0" :: "r"(0));
+ __asm volatile("msr "STR(ICC_IGRPEN1)", %x0" :: "r"(1));
+ enable_interrupts();
}
void
prival = ((NIPL - new) << 4);
__asm volatile("msr "STR(ICC_PMR)", %x0" : : "r" (prival));
+ __isb();
+
restore_interrupts(psw);
}
struct cpu_info *ci = curcpu();
int hwcpu = sc->sc_cpuremap[ci->ci_cpuid];
int bit = 1 << IRQ_TO_REG32BIT(irq);
+ uint32_t enable;
if (irq >= 32) {
bus_space_write_4(sc->sc_iot, sc->sc_d_ioh,
} else {
bus_space_write_4(sc->sc_iot, sc->sc_r_ioh[hwcpu],
GICR_ISENABLE0, bit);
+		/* enable group1 as well */
+		enable = bus_space_read_4(sc->sc_iot, sc->sc_r_ioh[hwcpu],
+		    GICR_IGROUP0);
+		enable |= 1 << IRQ_TO_REG32BIT(irq);
+		bus_space_write_4(sc->sc_iot, sc->sc_r_ioh[hwcpu],
+		    GICR_IGROUP0, enable);
}
}
return irq;
}
+void
+agintc_route_irq(void *v, int enable, struct cpu_info *ci)
+{
+ struct agintc_softc *sc = agintc_sc;
+ struct intrhand *ih = v;
+
+ if (enable) {
+ agintc_set_priority(sc, ih->ih_irq,
+ sc->sc_agintc_handler[ih->ih_irq].iq_irq);
+ agintc_route(sc, ih->ih_irq, IRQ_ENABLE, ci);
+ agintc_intr_enable(sc, ih->ih_irq);
+ }
+}
+
void
agintc_route(struct agintc_softc *sc, int irq, int enable, struct cpu_info *ci)
{
ih = malloc(sizeof *ih, M_DEVBUF, M_WAITOK);
ih->ih_func = func;
ih->ih_arg = arg;
- ih->ih_ipl = level;
- ih->ih_flags = 0;
+ ih->ih_ipl = level & IPL_IRQMASK;
+ ih->ih_flags = level & IPL_FLAGMASK;
ih->ih_irq = irqno;
ih->ih_name = name;
} while (--count && (v & GICD_CTLR_RWP));
if (count == 0)
- panic("%s: RWP timed out 0x08%x\n", __func__, v);
+		panic("%s: RWP timed out 0x%08x", __func__, v);
}
void
} while (--count && (v & GICR_CTLR_RWP));
if (count == 0)
- panic("%s: RWP timed out 0x08%x\n", __func__, v);
+		panic("%s: RWP timed out 0x%08x", __func__, v);
+}
+
+#ifdef MULTIPROCESSOR
+int
+agintc_ipi_ddb(void *v)
+{
+ /* XXX */
+ db_enter();
+ return 1;
+}
+
+int
+agintc_ipi_nop(void *v)
+{
+ /* Nothing to do here, just enough to wake up from WFI */
+ return 1;
+}
+
+int
+agintc_ipi_combined(void *v)
+{
+ struct agintc_softc *sc = v;
+
+ if (sc->sc_ipi_reason[cpu_number()] == ARM_IPI_DDB) {
+ sc->sc_ipi_reason[cpu_number()] = ARM_IPI_NOP;
+ return agintc_ipi_ddb(v);
+ } else {
+ return agintc_ipi_nop(v);
+ }
}
void
-agintc_send_ipi(int sgi, int targetmask)
+agintc_send_ipi(struct cpu_info *ci, int id)
{
- int val = (sgi << 24) | (targetmask);
+ struct agintc_softc *sc = agintc_sc;
+ uint64_t sendmask;
+
+ if (ci == curcpu() && id == ARM_IPI_NOP)
+ return;
- __asm volatile("msr "STR(ICC_SGI1R)", %x0" ::"r" (val));
+ /* never overwrite IPI_DDB with IPI_NOP */
+ if (id == ARM_IPI_DDB)
+ sc->sc_ipi_reason[ci->ci_cpuid] = id;
+
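+	/*
+	 * ICC_SGI1R_EL1 layout: Aff3 in bits 55:48, Aff2 in 39:32,
+	 * INTID in 27:24, Aff1 in 23:16, TargetList in 15:0.
+	 */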
+	/* will only send to 1 cpu */
+	sendmask = (sc->sc_affinity[ci->ci_cpuid] & 0xff000000) << 24;
+	sendmask |= (sc->sc_affinity[ci->ci_cpuid] & 0x00ff0000) << 16;
+	sendmask |= (sc->sc_affinity[ci->ci_cpuid] & 0x0000ff00) << 8;
+	sendmask |= 1 << (sc->sc_affinity[ci->ci_cpuid] & 0x0000000f);
+	sendmask |= (sc->sc_ipi_num[id] << 24);
+
+ __asm volatile ("msr " STR(ICC_SGI1R)", %x0" ::"r"(sendmask));
}
+#endif
-/* $OpenBSD: agtimer.c,v 1.8 2017/03/26 18:27:55 drahn Exp $ */
+/* $OpenBSD: agtimer.c,v 1.9 2018/01/31 10:52:12 kettenis Exp $ */
/*
* Copyright (c) 2011 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2013 Patrick Wildt <patrick@blueri.se>
struct evcount sc_clk_count;
struct evcount sc_stat_count;
#endif
+ void *sc_ih;
};
int agtimer_match(struct device *, void *, void *);
pc->pc_ticks_err_sum = 0;
	/* configure virtual timer interrupt */
- arm_intr_establish_fdt_idx(sc->sc_node, 2, IPL_CLOCK,
- agtimer_intr, NULL, "tick");
+ sc->sc_ih = arm_intr_establish_fdt_idx(sc->sc_node, 2,
+ IPL_CLOCK|IPL_MPSAFE, agtimer_intr, NULL, "tick");
next = agtimer_readcnt64() + sc->sc_ticks_per_intr;
pc->pc_nexttickevent = pc->pc_nextstatevent = next;
nextevent = agtimer_readcnt64() + sc->sc_ticks_per_intr;
pc->pc_nexttickevent = pc->pc_nextstatevent = nextevent;
+ arm_intr_route(sc->sc_ih, 1, curcpu());
+
reg = agtimer_get_ctrl();
reg &= ~GTIMER_CNTV_CTL_IMASK;
reg |= GTIMER_CNTV_CTL_ENABLE;
-/* $OpenBSD: ampintc.c,v 1.11 2018/01/12 22:20:28 kettenis Exp $ */
+/* $OpenBSD: ampintc.c,v 1.12 2018/01/31 10:52:12 kettenis Exp $ */
/*
* Copyright (c) 2007,2009,2011 Dale Rahn <drahn@openbsd.org>
*
struct ampintc_softc {
struct simplebus_softc sc_sbus;
- struct intrq *sc_ampintc_handler;
+ struct intrq *sc_handler;
int sc_nintr;
bus_space_tag_t sc_iot;
bus_space_handle_t sc_d_ioh, sc_p_ioh;
uint8_t sc_cpu_mask[ICD_ICTR_CPU_M + 1];
struct evcount sc_spur;
struct interrupt_controller sc_ic;
+ int sc_ipi_reason[ICD_ICTR_CPU_M + 1];
+ int sc_ipi_num[2];
};
struct ampintc_softc *ampintc;
int ampintc_match(struct device *, void *, void *);
void ampintc_attach(struct device *, struct device *, void *);
+void ampintc_cpuinit(void);
int ampintc_spllower(int);
void ampintc_splx(int);
int ampintc_splraise(int);
void ampintc_intr_disable(int);
void ampintc_intr_config(int, int);
void ampintc_route(int, int, struct cpu_info *);
+void ampintc_route_irq(void *, int, struct cpu_info *);
+
+int ampintc_ipi_combined(void *);
+int ampintc_ipi_nop(void *);
+int ampintc_ipi_ddb(void *);
+void ampintc_send_ipi(struct cpu_info *, int);
struct cfattach ampintc_ca = {
sizeof (struct ampintc_softc), ampintc_match, ampintc_attach
struct fdt_attach_args *faa = aux;
int i, nintr, ncpu;
uint32_t ictr;
+#ifdef MULTIPROCESSOR
+ int nipi, ipiirq[2];
+#endif
ampintc = sc;
/* XXX - check power saving bit */
- sc->sc_ampintc_handler = mallocarray(nintr,
- sizeof(*sc->sc_ampintc_handler), M_DEVBUF, M_ZERO | M_NOWAIT);
+ sc->sc_handler = mallocarray(nintr, sizeof(*sc->sc_handler), M_DEVBUF,
+ M_ZERO | M_NOWAIT);
for (i = 0; i < nintr; i++) {
- TAILQ_INIT(&sc->sc_ampintc_handler[i].iq_list);
+ TAILQ_INIT(&sc->sc_handler[i].iq_list);
}
ampintc_setipl(IPL_HIGH); /* XXX ??? */
arm_set_intr_handler(ampintc_splraise, ampintc_spllower, ampintc_splx,
ampintc_setipl, ampintc_irq_handler);
+#ifdef MULTIPROCESSOR
+ /* setup IPI interrupts */
+
+ /*
+	 * Ideally we want two IPI interrupts, one for NOP and one for
+	 * DDB. However, we can survive with only one; it is possible
+	 * that most are not available to the non-secure OS.
+ */
+ nipi = 0;
+ for (i = 0; i < 16; i++) {
+ int reg, oldreg;
+
+ oldreg = bus_space_read_1(sc->sc_iot, sc->sc_d_ioh,
+ ICD_IPRn(i));
+ bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(i),
+ oldreg ^ 0x20);
+
+		/* if this interrupt is not usable, pri will be unmodified */
+ reg = bus_space_read_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(i));
+ if (reg == oldreg)
+ continue;
+
+ /* return to original value, will be set when used */
+ bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(i),
+ oldreg);
+
+ if (nipi == 0)
+ printf(" ipi: %d", i);
+ else
+ printf(", %d", i);
+ ipiirq[nipi++] = i;
+ if (nipi == 2)
+ break;
+ }
+
+ if (nipi == 0)
+		panic("no irq available for IPI");
+
+ switch (nipi) {
+ case 1:
+ ampintc_intr_establish(ipiirq[0], IST_EDGE_RISING,
+ IPL_IPI|IPL_MPSAFE, ampintc_ipi_combined, sc, "ipi");
+ sc->sc_ipi_num[ARM_IPI_NOP] = ipiirq[0];
+ sc->sc_ipi_num[ARM_IPI_DDB] = ipiirq[0];
+ break;
+ case 2:
+ ampintc_intr_establish(ipiirq[0], IST_EDGE_RISING,
+ IPL_IPI|IPL_MPSAFE, ampintc_ipi_nop, sc, "ipinop");
+ sc->sc_ipi_num[ARM_IPI_NOP] = ipiirq[0];
+ ampintc_intr_establish(ipiirq[1], IST_EDGE_RISING,
+ IPL_IPI|IPL_MPSAFE, ampintc_ipi_ddb, sc, "ipiddb");
+ sc->sc_ipi_num[ARM_IPI_DDB] = ipiirq[1];
+ break;
+ default:
+		panic("unexpected number of IPIs: %d", nipi);
+ }
+
+ intr_send_ipi_func = ampintc_send_ipi;
+#endif
+
/* enable interrupts */
bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_DCR, 3);
bus_space_write_4(sc->sc_iot, sc->sc_p_ioh, ICPICR, 1);
sc->sc_ic.ic_cookie = self;
sc->sc_ic.ic_establish = ampintc_intr_establish_fdt;
sc->sc_ic.ic_disestablish = ampintc_intr_disestablish;
+ sc->sc_ic.ic_route = ampintc_route_irq;
+ sc->sc_ic.ic_cpu_enable = ampintc_cpuinit;
arm_intr_register_fdt(&sc->sc_ic);
/* attach GICv2M frame controller */
for (irq = 0; irq < sc->sc_nintr; irq++) {
int max = IPL_NONE;
int min = IPL_HIGH;
- TAILQ_FOREACH(ih, &sc->sc_ampintc_handler[irq].iq_list,
- ih_list) {
+ TAILQ_FOREACH(ih, &sc->sc_handler[irq].iq_list, ih_list) {
if (ih->ih_ipl > max)
max = ih->ih_ipl;
min = ih->ih_ipl;
}
- if (sc->sc_ampintc_handler[irq].iq_irq == max) {
+ if (sc->sc_handler[irq].iq_irq == max) {
continue;
}
- sc->sc_ampintc_handler[irq].iq_irq = max;
+ sc->sc_handler[irq].iq_irq = max;
if (max == IPL_NONE)
min = IPL_NONE;
bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPTRn(irq), val);
}
+void
+ampintc_cpuinit(void)
+{
+ struct ampintc_softc *sc = ampintc;
+ int i;
+
+ /* XXX - this is the only cpu specific call to set this */
+ if (sc->sc_cpu_mask[cpu_number()] == 0) {
+ for (i = 0; i < 32; i++) {
+ int cpumask =
+ bus_space_read_1(sc->sc_iot, sc->sc_d_ioh,
+ ICD_IPTRn(i));
+
+ if (cpumask != 0) {
+ sc->sc_cpu_mask[cpu_number()] = cpumask;
+ break;
+ }
+ }
+ }
+
+ if (sc->sc_cpu_mask[cpu_number()] == 0)
+ panic("could not determine cpu target mask");
+}
+
+void
+ampintc_route_irq(void *v, int enable, struct cpu_info *ci)
+{
+ struct ampintc_softc *sc = ampintc;
+ struct intrhand *ih = v;
+
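+	/* enable our cpu interface and reset the irq configuration register */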
+ bus_space_write_4(sc->sc_iot, sc->sc_p_ioh, ICPICR, 1);
+ bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICRn(ih->ih_irq), 0);
+ if (enable) {
+ ampintc_set_priority(ih->ih_irq,
+ sc->sc_handler[ih->ih_irq].iq_irq);
+ ampintc_intr_enable(ih->ih_irq);
+ }
+
+ ampintc_route(ih->ih_irq, enable, ci);
+}
+
void
ampintc_irq_handler(void *frame)
{
if (irq >= sc->sc_nintr)
return;
- pri = sc->sc_ampintc_handler[irq].iq_irq;
+ pri = sc->sc_handler[irq].iq_irq;
s = ampintc_splraise(pri);
- TAILQ_FOREACH(ih, &sc->sc_ampintc_handler[irq].iq_list, ih_list) {
+ TAILQ_FOREACH(ih, &sc->sc_handler[irq].iq_list, ih_list) {
#ifdef MULTIPROCESSOR
int need_lock;
panic("ampintc_intr_establish: bogus irqnumber %d: %s",
irqno, name);
+ if (irqno < 16) {
+ /* SGI are only EDGE */
+ type = IST_EDGE_RISING;
+ } else if (irqno < 32) {
+ /* PPI are only LEVEL */
+ type = IST_LEVEL_HIGH;
+ }
+
ih = malloc(sizeof(*ih), M_DEVBUF, M_WAITOK);
ih->ih_func = func;
ih->ih_arg = arg;
- ih->ih_ipl = level;
- ih->ih_flags = 0;
+ ih->ih_ipl = level & IPL_IRQMASK;
+ ih->ih_flags = level & IPL_FLAGMASK;
ih->ih_irq = irqno;
ih->ih_name = name;
psw = disable_interrupts();
- TAILQ_INSERT_TAIL(&sc->sc_ampintc_handler[irqno].iq_list, ih, ih_list);
+ TAILQ_INSERT_TAIL(&sc->sc_handler[irqno].iq_list, ih, ih_list);
if (name != NULL)
evcount_attach(&ih->ih_count, name, &ih->ih_irq);
psw = disable_interrupts();
- TAILQ_REMOVE(&sc->sc_ampintc_handler[ih->ih_irq].iq_list, ih, ih_list);
+ TAILQ_REMOVE(&sc->sc_handler[ih->ih_irq].iq_list, ih, ih_list);
if (ih->ih_name != NULL)
evcount_detach(&ih->ih_count);
free(ih, M_DEVBUF, sizeof(*ih));
ampintc_intr_disestablish(*(void **)cookie);
*(void **)cookie = NULL;
}
+
+#ifdef MULTIPROCESSOR
+int
+ampintc_ipi_ddb(void *v)
+{
+ /* XXX */
+ db_enter();
+ return 1;
+}
+
+int
+ampintc_ipi_nop(void *v)
+{
+ /* Nothing to do here, just enough to wake up from WFI */
+ return 1;
+}
+
+int
+ampintc_ipi_combined(void *v)
+{
+ struct ampintc_softc *sc = (struct ampintc_softc *)v;
+
+ if (sc->sc_ipi_reason[cpu_number()] == ARM_IPI_DDB) {
+ sc->sc_ipi_reason[cpu_number()] = ARM_IPI_NOP;
+ return ampintc_ipi_ddb(v);
+ } else {
+ return ampintc_ipi_nop(v);
+ }
+}
+
+void
+ampintc_send_ipi(struct cpu_info *ci, int id)
+{
+ struct ampintc_softc *sc = ampintc;
+ int sendmask;
+
+ if (ci == curcpu() && id == ARM_IPI_NOP)
+ return;
+
+ /* never overwrite IPI_DDB with IPI_NOP */
+ if (id == ARM_IPI_DDB)
+ sc->sc_ipi_reason[ci->ci_cpuid] = id;
+
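+	/* ICD_SGIR: CPUTargetList in bits 23:16, SGIINTID in bits 3:0 */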
+ /* currently will only send to one cpu */
+ sendmask = 1 << (16 + ci->ci_cpuid);
+ sendmask |= sc->sc_ipi_num[id];
+
+ bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_SGIR, sendmask);
+}
+#endif
-/* $OpenBSD: bcm2836_intr.c,v 1.4 2018/01/12 22:20:28 kettenis Exp $ */
+/* $OpenBSD: bcm2836_intr.c,v 1.5 2018/01/31 10:52:12 kettenis Exp $ */
/*
* Copyright (c) 2007,2009 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2015 Patrick Wildt <patrick@blueri.se>
#define ARM_LOCAL_INT_MAILBOX(n) (0x50 + (n) * 4)
#define ARM_LOCAL_INT_PENDING(n) (0x60 + (n) * 4)
#define ARM_LOCAL_INT_PENDING_MASK 0x0f
+#define ARM_LOCAL_INT_MAILBOX_SET(n) (0x80 + (n) * 16)
+#define ARM_LOCAL_INT_MAILBOX_CLR(n) (0xc0 + (n) * 16)
#define BANK0_START 64
#define BANK0_END (BANK0_START + 32 - 1)
#define IRQ_BANK2(n) ((n) - BANK2_START)
#define IRQ_LOCAL(n) ((n) - LOCAL_START)
+#define ARM_LOCAL_IRQ_MAILBOX(n) (4 + (n))
+
#define INTC_NIRQ 128
#define INTC_NBANK 4
int ih_ipl; /* IPL_* */
int ih_flags;
int ih_irq; /* IRQ number */
- struct evcount ih_count;
- char *ih_name;
+ struct evcount ih_count; /* interrupt counter */
+ char *ih_name; /* device name */
};
struct intrsource {
struct device sc_dev;
struct intrsource sc_bcm_intc_handler[INTC_NIRQ];
uint32_t sc_bcm_intc_imask[INTC_NBANK][NIPL];
+ int32_t sc_localcoremask[MAXCPUS];
bus_space_tag_t sc_iot;
bus_space_handle_t sc_ioh;
bus_space_handle_t sc_lioh;
void *, char *);
void bcm_intc_intr_disestablish(void *);
void bcm_intc_irq_handler(void *);
+void bcm_intc_intr_route(void *, int, struct cpu_info *);
+void bcm_intc_handle_ipi(void);
+void bcm_intc_send_ipi(struct cpu_info *, int);
struct cfattach bcmintc_ca = {
sizeof (struct bcm_intc_softc), bcm_intc_match, bcm_intc_attach
*/
node = OF_finddevice("/soc/local_intc");
if (node == -1)
- panic("%s: can't find ARM control logic\n", __func__);
+ panic("%s: can't find ARM control logic", __func__);
if (OF_getpropintarray(node, "reg", reg, sizeof(reg)) != sizeof(reg))
- panic("%s: can't map ARM control logic\n", __func__);
+ panic("%s: can't map ARM control logic", __func__);
if (bus_space_map(sc->sc_iot, reg[0], reg[1], 0, &sc->sc_lioh))
panic("%s: bus_space_map failed!", __func__);
sc->sc_intc.ic_cookie = sc;
sc->sc_intc.ic_establish = bcm_intc_intr_establish_fdt;
sc->sc_intc.ic_disestablish = bcm_intc_intr_disestablish;
+ sc->sc_intc.ic_route = bcm_intc_intr_route;
arm_intr_register_fdt(&sc->sc_intc);
sc->sc_l1_intc.ic_node = node;
sc->sc_l1_intc.ic_cookie = sc;
sc->sc_l1_intc.ic_establish = l1_intc_intr_establish_fdt;
sc->sc_l1_intc.ic_disestablish = bcm_intc_intr_disestablish;
+ sc->sc_l1_intc.ic_route = bcm_intc_intr_route;
arm_intr_register_fdt(&sc->sc_l1_intc);
+ intr_send_ipi_func = bcm_intc_send_ipi;
+
bcm_intc_setipl(IPL_HIGH); /* XXX ??? */
enable_interrupts();
}
{
struct cpu_info *ci = curcpu();
struct bcm_intc_softc *sc = bcm_intc;
- int i, psw;
+ int psw;
psw = disable_interrupts();
ci->ci_cpl = new;
- bus_space_write_4(sc->sc_iot, sc->sc_ioh, INTC_DISABLE_BANK0,
- 0xffffffff);
- bus_space_write_4(sc->sc_iot, sc->sc_ioh, INTC_DISABLE_BANK1,
- 0xffffffff);
- bus_space_write_4(sc->sc_iot, sc->sc_ioh, INTC_DISABLE_BANK2,
- 0xffffffff);
- bus_space_write_4(sc->sc_iot, sc->sc_ioh, INTC_ENABLE_BANK0,
- sc->sc_bcm_intc_imask[0][new]);
- bus_space_write_4(sc->sc_iot, sc->sc_ioh, INTC_ENABLE_BANK1,
- sc->sc_bcm_intc_imask[1][new]);
- bus_space_write_4(sc->sc_iot, sc->sc_ioh, INTC_ENABLE_BANK2,
- sc->sc_bcm_intc_imask[2][new]);
- /* XXX: SMP */
- for (i = 0; i < 4; i++)
- bus_space_write_4(sc->sc_iot, sc->sc_lioh,
- ARM_LOCAL_INT_TIMER(i), sc->sc_bcm_intc_imask[3][new]);
+ if (cpu_number() == 0) {
+ bus_space_write_4(sc->sc_iot, sc->sc_ioh, INTC_DISABLE_BANK0,
+ 0xffffffff);
+ bus_space_write_4(sc->sc_iot, sc->sc_ioh, INTC_DISABLE_BANK1,
+ 0xffffffff);
+ bus_space_write_4(sc->sc_iot, sc->sc_ioh, INTC_DISABLE_BANK2,
+ 0xffffffff);
+ bus_space_write_4(sc->sc_iot, sc->sc_ioh, INTC_ENABLE_BANK0,
+ sc->sc_bcm_intc_imask[0][new]);
+ bus_space_write_4(sc->sc_iot, sc->sc_ioh, INTC_ENABLE_BANK1,
+ sc->sc_bcm_intc_imask[1][new]);
+ bus_space_write_4(sc->sc_iot, sc->sc_ioh, INTC_ENABLE_BANK2,
+ sc->sc_bcm_intc_imask[2][new]);
+ }
+ /* timer for current core */
+ bus_space_write_4(sc->sc_iot, sc->sc_lioh,
+ ARM_LOCAL_INT_TIMER(cpu_number()),
+ sc->sc_bcm_intc_imask[3][ci->ci_cpl] &
+ sc->sc_localcoremask[cpu_number()]);
restore_interrupts(psw);
}
} while (IS_IRQ_BANK0(irq));
}
if (IS_IRQ_LOCAL(irq)) {
- /* XXX: SMP */
pending = bus_space_read_4(sc->sc_iot, sc->sc_lioh,
- ARM_LOCAL_INT_PENDING(0));
+ ARM_LOCAL_INT_PENDING(cpu_number()));
pending &= ARM_LOCAL_INT_PENDING_MASK;
if (pending != 0) do {
if (pending & (1 << IRQ_LOCAL(irq)))
void
bcm_intc_irq_handler(void *frame)
{
- int irq = -1;
+ int irq = (cpu_number() == 0 ? 0 : LOCAL_START) - 1;
- while ((irq = bcm_intc_get_next_irq(irq)) != -1)
+ while ((irq = bcm_intc_get_next_irq(irq)) != -1) {
+#ifdef MULTIPROCESSOR
+ if (irq == ARM_LOCAL_IRQ_MAILBOX(cpu_number())) {
+ bcm_intc_handle_ipi();
+ continue;
+ }
+#endif
bcm_intc_call_handler(irq, frame);
+ }
}
void *
ih = malloc(sizeof *ih, M_DEVBUF, M_WAITOK);
ih->ih_fun = func;
ih->ih_arg = arg;
- ih->ih_ipl = level;
- ih->ih_flags = 0;
+ ih->ih_ipl = level & IPL_IRQMASK;
+ ih->ih_flags = level & IPL_FLAGMASK;
ih->ih_irq = irqno;
ih->ih_name = name;
+ if (IS_IRQ_LOCAL(irqno))
+ sc->sc_localcoremask[0] |= (1 << IRQ_LOCAL(irqno));
+
TAILQ_INSERT_TAIL(&sc->sc_bcm_intc_handler[irqno].is_list, ih, ih_list);
if (name != NULL)
free(ih, M_DEVBUF, 0);
restore_interrupts(psw);
}
+
+void
+bcm_intc_intr_route(void *cookie, int enable, struct cpu_info *ci)
+{
+ struct bcm_intc_softc *sc = bcm_intc;
+ struct intrhand *ih = cookie;
+ int lirq = IRQ_LOCAL(ih->ih_irq);
+
+ if (enable)
+ sc->sc_localcoremask[ci->ci_cpuid] |= (1 << lirq);
+ else
+ sc->sc_localcoremask[ci->ci_cpuid] &= ~(1 << lirq);
+
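+	/* update the hardware mask immediately if routing for this core */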
+ if (ci == curcpu()) {
+ bus_space_write_4(sc->sc_iot, sc->sc_lioh,
+ ARM_LOCAL_INT_TIMER(cpu_number()),
+ sc->sc_bcm_intc_imask[3][ci->ci_cpl] &
+ sc->sc_localcoremask[cpu_number()]);
+#ifdef MULTIPROCESSOR
+ bus_space_write_4(sc->sc_iot, sc->sc_lioh,
+ ARM_LOCAL_INT_MAILBOX(cpu_number()),
+ sc->sc_bcm_intc_imask[3][ci->ci_cpl] &
+ sc->sc_localcoremask[cpu_number()]);
+#endif
+ }
+}
+
+void
+bcm_intc_handle_ipi(void)
+{
+ struct bcm_intc_softc *sc = bcm_intc;
+ int cpuno = cpu_number();
+ uint32_t mbox_val;
+ int ipi;
+
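+	/* MAILBOX_CLR reads the pending bits; writing a bit back clears it */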
+ mbox_val = bus_space_read_4(sc->sc_iot, sc->sc_lioh,
+ ARM_LOCAL_INT_MAILBOX_CLR(cpuno));
+ ipi = ffs(mbox_val) - 1;
+ bus_space_write_4(sc->sc_iot, sc->sc_lioh,
+ ARM_LOCAL_INT_MAILBOX_CLR(cpuno), 1 << ipi);
+ switch (ipi) {
+ case ARM_IPI_DDB:
+ /* XXX */
+ db_enter();
+ break;
+ case ARM_IPI_NOP:
+ break;
+ }
+}
+
+void
+bcm_intc_send_ipi(struct cpu_info *ci, int id)
+{
+ struct bcm_intc_softc *sc = bcm_intc;
+
+ __asm volatile("dsb sy"); /* XXX */
+
+ bus_space_write_4(sc->sc_iot, sc->sc_lioh,
+ ARM_LOCAL_INT_MAILBOX_SET(ci->ci_cpuid), 1 << id);
+}
-/* $OpenBSD: intr.h,v 1.8 2018/01/28 13:17:45 kettenis Exp $ */
+/* $OpenBSD: intr.h,v 1.9 2018/01/31 10:52:12 kettenis Exp $ */
/*
* Copyright (c) 2001-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
#define IPL_MPFLOOR IPL_TTY
/* Interrupt priority 'flags'. */
-#define IPL_MPSAFE 0 /* no "mpsafe" interrupts */
+#define IPL_IRQMASK 0xf /* priority only */
+#define IPL_FLAGMASK	0xf00	/* flags only */
+#define IPL_MPSAFE 0x100 /* 'mpsafe' interrupt, no kernel lock */
/* Interrupt sharing types. */
#define IST_NONE 0 /* none */
int (*)(void *), void *, char *);
void (*ic_disestablish)(void *);
void (*ic_route)(void *, int, struct cpu_info *);
+ void (*ic_cpu_enable)(void);
LIST_ENTRY(interrupt_controller) ic_list;
uint32_t ic_phandle;
int (*)(void *), void *, char *);
void arm_intr_disestablish_fdt(void *);
void arm_intr_route(void *, int, struct cpu_info *);
-
+void arm_intr_cpu_enable(void);
void *arm_intr_parent_establish_fdt(void *, int *, int,
int (*)(void *), void *, char *);
void arm_intr_parent_disestablish_fdt(void *);