-/* $OpenBSD: hypervisor.h,v 1.1 2008/03/08 19:15:56 kettenis Exp $ */
+/* $OpenBSD: hypervisor.h,v 1.2 2008/07/21 13:30:04 art Exp $ */
/*
* Copyright (c) 2008 Mark Kettenis
int64_t hv_cpu_mondo_send(uint64_t ncpus, paddr_t cpulist, paddr_t data);
int64_t hv_cpu_myid(uint64_t *cpuid);
+void hv_cpu_yield(void);
+
/*
* MMU services
*/
-/* $OpenBSD: cpu.c,v 1.44 2008/07/12 14:26:07 kettenis Exp $ */
+/* $OpenBSD: cpu.c,v 1.45 2008/07/21 13:30:05 art Exp $ */
/* $NetBSD: cpu.c,v 1.13 2001/05/26 21:27:15 chs Exp $ */
/*
if (ci->ci_curproc != NULL)
aston(ci->ci_curproc);
}
+
+/*
+ * Idle loop.
+ *
+ * We disable and re-enable interrupts on every cycle of the idle loop.
+ * hv_cpu_yield doesn't actually re-enable interrupts; it just returns
+ * when an interrupt would have happened, so it is our responsibility to
+ * unblock interrupts ourselves.
+ */
+
+/*
+ * cpu_idle_enter: called on entry to the idle loop.  On sun4v, block
+ * interrupt delivery (clear PSTATE_IE) so a wakeup interrupt cannot be
+ * taken between the idle check and the hv_cpu_yield in cpu_idle_cycle;
+ * pending interrupts still wake the yield.  No-op on non-sun4v.
+ */
+void
+cpu_idle_enter(void)
+{
+ if (CPU_ISSUN4V) {
+ sparc_wrpr(pstate, sparc_rdpr(pstate) & ~PSTATE_IE, 0);
+ }
+}
+
+/*
+ * cpu_idle_cycle: one pass of the idle loop.  hv_cpu_yield() returns
+ * when an interrupt would have happened but does not deliver it, so we
+ * briefly set PSTATE_IE to let the pending interrupt be taken, then
+ * clear it again so the next pass enters the yield with interrupts
+ * blocked, as cpu_idle_enter established.
+ */
+void
+cpu_idle_cycle(void)
+{
+ if (CPU_ISSUN4V) {
+ hv_cpu_yield();
+ /* Open a window for the pending interrupt to be delivered... */
+ sparc_wrpr(pstate, sparc_rdpr(pstate) | PSTATE_IE, 0);
+ /* ...then block again for the next yield. */
+ sparc_wrpr(pstate, sparc_rdpr(pstate) & ~PSTATE_IE, 0);
+ }
+}
+
+/*
+ * cpu_idle_leave: called on exit from the idle loop.  On sun4v, undo
+ * cpu_idle_enter by re-enabling interrupt delivery (set PSTATE_IE).
+ * No-op on non-sun4v.
+ */
+void
+cpu_idle_leave(void)
+{
+ if (CPU_ISSUN4V) {
+ sparc_wrpr(pstate, sparc_rdpr(pstate) | PSTATE_IE, 0);
+ }
+}
-/* $OpenBSD: hvcall.S,v 1.1 2008/03/08 19:15:56 kettenis Exp $ */
+/* $OpenBSD: hvcall.S,v 1.2 2008/07/21 13:30:05 art Exp $ */
/*
* Copyright (c) 2008 Mark Kettenis
ta FAST_TRAP
retl
stx %o1, [%g5]
+
+/*
+ * hv_cpu_yield: issue the sun4v CPU_YIELD fast trap, suspending this
+ * virtual CPU until the hypervisor has work for it (e.g. a pending
+ * interrupt).  Declared void in hypervisor.h; any status in %o0 is
+ * ignored by callers.  nop fills the retl delay slot.
+ */
+ENTRY(hv_cpu_yield)
+ mov CPU_YIELD, %o5
+ ta FAST_TRAP
+ retl
+ nop
-/* $OpenBSD: locore.s,v 1.146 2008/07/12 15:05:51 kettenis Exp $ */
+/* $OpenBSD: locore.s,v 1.147 2008/07/21 13:30:05 art Exp $ */
/* $NetBSD: locore.s,v 1.137 2001/08/13 06:10:10 jdolecek Exp $ */
/*
ret
restore
-ENTRY(cpu_idle_enter)
- retl
- nop
-
-ENTRY(cpu_idle_cycle)
- retl
- nop
-
-ENTRY(cpu_idle_leave)
- retl
- nop
-
/*
* Snapshot the current process so that stack frames are up to date.
* Only used just before a crash dump.