-/* $OpenBSD: pmap.c,v 1.76 2015/05/02 20:50:08 miod Exp $ */
+/* $OpenBSD: pmap.c,v 1.77 2015/06/23 19:50:48 miod Exp $ */
/* $NetBSD: pmap.c,v 1.154 2000/12/07 22:18:55 thorpej Exp $ */
/*-
struct pmap_tlb_shootdown_q {
TAILQ_HEAD(, pmap_tlb_shootdown_job) pq_head;
TAILQ_HEAD(, pmap_tlb_shootdown_job) pq_free;
- int pq_pte; /* aggregate PTE bits */
+ int pq_pte; /* aggregate low PTE bits */
int pq_tbia; /* pending global flush */
struct mutex pq_mtx; /* queue lock */
struct pmap_tlb_shootdown_job pq_jobs[PMAP_TLB_SHOOTDOWN_MAXJOBS];
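For orientation, here is a minimal, self-contained sketch of how a queue with these fields is typically consumed on the receiving CPU. It is not the kernel code: struct job, struct shootdown_q, drain_queue(), tlb_invalidate_all() and tlb_invalidate_one() are hypothetical stand-ins. It illustrates the invariant that the hunks further below appear to restore: the pending-global-flush flag short-circuits the per-job invalidations, and the aggregate PTE bits are cleared exactly once per drain, on either path.

#include <sys/queue.h>

/* Hypothetical stand-ins for the real pmap structures and TLB primitives. */
struct job {
	TAILQ_ENTRY(job) j_list;
	unsigned long j_va;			/* virtual address to flush */
};

struct shootdown_q {
	TAILQ_HEAD(, job) q_head;		/* pending jobs */
	TAILQ_HEAD(, job) q_free;		/* free job slots */
	int q_pte;				/* aggregate low PTE bits */
	int q_tbia;				/* pending global flush */
};

static void tlb_invalidate_all(void) { /* stand-in for a global TLB flush */ }
static void tlb_invalidate_one(unsigned long va) { (void)va; /* stand-in */ }

/*
 * Drain the queue on the receiving CPU: either one global flush, or one
 * invalidation per queued job.  The aggregate PTE bits are reset exactly
 * once, after the queue is empty, whichever path was taken.
 */
static void
drain_queue(struct shootdown_q *q)
{
	struct job *j;

	if (q->q_tbia) {
		tlb_invalidate_all();
		q->q_tbia = 0;
		/* The per-page jobs are now redundant; just recycle them. */
		while ((j = TAILQ_FIRST(&q->q_head)) != NULL) {
			TAILQ_REMOVE(&q->q_head, j, j_list);
			TAILQ_INSERT_TAIL(&q->q_free, j, j_list);
		}
	} else {
		while ((j = TAILQ_FIRST(&q->q_head)) != NULL) {
			TAILQ_REMOVE(&q->q_head, j, j_list);
			tlb_invalidate_one(j->j_va);
			TAILQ_INSERT_TAIL(&q->q_free, j, j_list);
		}
	}
	q->q_pte = 0;		/* reset aggregate bits on both paths */
}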
#ifdef DEBUG /* These checks are more expensive */
if (!pmap_pte_v(pte))
panic("pmap_emulate_reference: invalid pte");
+#ifndef MULTIPROCESSOR
+ /*
+ * Quoting the Alpha ARM 14.3.1.4/5/6:
+ * ``The Translation Buffer may reload and cache the old PTE value
+ * between the time the FOR (resp. FOW, FOE) fault invalidates the
+ * old value from the Translation Buffer and the time software
+ * updates the PTE in memory. Software that depends on the
+ * processor-provided invalidate must thus be prepared to take
+ * another FOR (resp. FOW, FOE) fault on a page after clearing the
+ * page's PTE<FOR(resp. FOW, FOE)> bit. The second fault will
+ * invalidate the stale PTE from the Translation Buffer, and the
+ * processor cannot load another stale copy. Thus, in the worst case,
+ * a multiprocessor system will take an initial FOR (resp. FOW, FOE)
+ * fault and then an additional FOR (resp. FOW, FOE) fault on each
+ * processor. In practice, even a single repetition is unlikely.''
+ *
+	 * In practice, however, spurious faults on the other processors do
+	 * happen, at least on fast 21264 or better processors.
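+	 * Such a late spurious fault can find the fault-on bit already
+	 * cleared, so the sanity checks below would panic falsely on
+	 * MULTIPROCESSOR kernels and are compiled out there.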
+ */
if (type == ALPHA_MMCSR_FOW) {
- if (!(*pte & (user ? PG_UWE : PG_UWE | PG_KWE)))
- panic("pmap_emulate_reference: write but unwritable");
- if (!(*pte & PG_FOW))
- panic("pmap_emulate_reference: write but not FOW");
+ if (!(*pte & (user ? PG_UWE : PG_UWE | PG_KWE))) {
+ panic("pmap_emulate_reference(%d,%d): "
+ "write but unwritable pte 0x%lx",
+ user, type, *pte);
+ }
+ if (!(*pte & PG_FOW)) {
+ panic("pmap_emulate_reference(%d,%d): "
+ "write but not FOW pte 0x%lx",
+ user, type, *pte);
+ }
} else {
- if (!(*pte & (user ? PG_URE : PG_URE | PG_KRE)))
- panic("pmap_emulate_reference: !write but unreadable");
- if (!(*pte & (PG_FOR | PG_FOE)))
- panic("pmap_emulate_reference: !write but not FOR|FOE");
+ if (!(*pte & (user ? PG_URE : PG_URE | PG_KRE))) {
+ panic("pmap_emulate_reference(%d,%d): "
+ "!write but unreadable pte 0x%lx",
+ user, type, *pte);
+ }
+ if (!(*pte & (PG_FOR | PG_FOE))) {
+ panic("pmap_emulate_reference(%d,%d): "
+ "!write but not FOR|FOE pte 0x%lx",
+ user, type, *pte);
+ }
}
+#endif /* MULTIPROCESSOR */
/* Other diagnostics? */
#endif
pa = pmap_pte_pa(pte);
pg = PHYS_TO_VM_PAGE(pa);
#ifdef DIAGNOSTIC
- if (pg == NULL)
+ if (pg == NULL) {
panic("pmap_emulate_reference(%p, 0x%lx, %d, %d): "
- "pa 0x%lx not managed", p, v, user, type, pa);
+ "pa 0x%lx (pte %p 0x%08lx) not managed",
+ p, v, user, type, pa, pte, *pte);
+ }
#endif
/*
pj->pj_pmap->pm_cpus & cpu_mask, cpu_id);
pmap_tlb_shootdown_job_put(pq, pj);
}
-
- pq->pq_pte = 0;
}
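+	/*
+	 * Reset the aggregate PTE bits before dropping the queue lock,
+	 * no matter which path above processed the queue, instead of
+	 * only after walking the job list.
+	 */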
+ pq->pq_pte = 0;
PSJQ_UNLOCK(pq, s);
}
TAILQ_REMOVE(&pq->pq_head, pj, pj_list);
pmap_tlb_shootdown_job_put(pq, pj);
}
- pq->pq_pte = 0;
}
/*