-/* $OpenBSD: pmap.c,v 1.89 2015/04/08 14:02:43 kettenis Exp $ */
+/* $OpenBSD: pmap.c,v 1.90 2015/04/09 16:56:52 kettenis Exp $ */
/* $NetBSD: pmap.c,v 1.107 2001/08/31 16:47:41 eeh Exp $ */
#undef NO_VCACHE /* Don't forget the locked TLB in dostart */
/*
/* Set up a pool for additional pvlist structures */
pool_init(&pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pv_entry", NULL);
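+	/*
+	 * Run the pv pool's internal lock at IPL_VM so pv entries can
+	 * be allocated and freed from pmap paths protected at or below
+	 * that level.
+	 */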
+ pool_setipl(&pv_pool, IPL_VM);
pool_init(&pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
&pool_allocator_nointr);
}
pm = pool_get(&pmap_pool, PR_WAITOK | PR_ZERO);
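+	/* pm_mtx guards this pmap's pseg tables and resident_count. */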
+ mtx_init(&pm->pm_mtx, IPL_VM);
pm->pm_refs = 1;
pmap_get_page(&pm->pm_physaddr, "pmap_create", pm);
pm->pm_segs = (int64_t *)(u_long)pm->pm_physaddr;
void
pmap_reference(struct pmap *pm)
{
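+	/* The reference count is updated atomically; no pm_mtx needed. */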
- pm->pm_refs++;
+ atomic_inc_int(&pm->pm_refs);
}
/*
void
pmap_destroy(struct pmap *pm)
{
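+	/*
+	 * atomic_dec_int_nv() returns the new value, so exactly one
+	 * caller sees zero and tears the pmap down.
+	 */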
- if (--pm->pm_refs == 0) {
+ if (atomic_dec_int_nv(&pm->pm_refs) == 0) {
pmap_release(pm);
pool_put(&pmap_pool, pm);
}
void
pmap_release(struct pmap *pm)
{
- int i, j, k, s;
+ int i, j, k;
paddr_t *pdir, *ptbl, tmp;
#ifdef DIAGNOSTIC
panic("pmap_release: releasing pmap_kernel()");
#endif
- s=splvm();
+ mtx_enter(&pm->pm_mtx);
for(i=0; i<STSZ; i++) {
paddr_t psegentp = (paddr_t)(u_long)&pm->pm_segs[i];
if((pdir = (paddr_t *)(u_long)ldxa((vaddr_t)psegentp,
tmp = (paddr_t)(u_long)pm->pm_segs;
pm->pm_segs = NULL;
pmap_free_page(tmp, pm);
- splx(s);
+ mtx_leave(&pm->pm_mtx);
ctx_free(pm);
}
{
pte_t tte;
paddr_t pg;
- int s, aliased = 0;
+ int aliased = 0;
pv_entry_t pv = NULL;
int size = 0; /* PMAP_SZ_TO_TTE(pa); */
boolean_t wired = (flags & PMAP_WIRED) != 0;
/*
* XXXX If a mapping at this address already exists, remove it.
*/
- s = splvm();
+ mtx_enter(&pm->pm_mtx);
tte.data = pseg_get(pm, va);
if (tte.data & TLB_V) {
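+		/*
+		 * Drop pm_mtx across pmap_remove(), which takes the
+		 * same mutex itself, then look the pseg up again.
+		 */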
+ mtx_leave(&pm->pm_mtx);
pmap_remove(pm, va, va + NBPG-1);
+ mtx_enter(&pm->pm_mtx);
tte.data = pseg_get(pm, va);
}
*/
pv = pa_to_pvh(pa);
if (pv != NULL) {
- aliased = (pv->pv_va&(PV_ALIAS|PV_NVC));
+ struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
+
+ mtx_enter(&pg->mdpage.pvmtx);
+ aliased = (pv->pv_va & (PV_ALIAS|PV_NVC));
#ifdef DIAGNOSTIC
if ((flags & PROT_MASK) & ~prot)
panic("pmap_enter: access_type exceeds prot");
if (flags & PROT_WRITE)
pv->pv_va |= PV_MOD;
pv->pv_va |= pmap_tte2flags(tte.data);
+ mtx_leave(&pg->mdpage.pvmtx);
} else {
aliased = 0;
}
- if (pa & PMAP_NVC) aliased = 1;
+ if (pa & PMAP_NVC)
+ aliased = 1;
#ifdef NO_VCACHE
aliased = 1; /* Disable D$ */
#endif
if (!pmap_get_page(&pg, NULL, pm)) {
if ((flags & PMAP_CANFAIL) == 0)
panic("pmap_enter: no memory");
- splx(s);
+ mtx_leave(&pm->pm_mtx);
return (ENOMEM);
}
}
if (pv)
pmap_enter_pv(pm, va, pa);
pm->pm_stats.resident_count++;
- splx(s);
+ mtx_leave(&pm->pm_mtx);
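+	/* The TSB/TLB invalidation below runs without pm_mtx held. */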
if (pm->pm_ctx || pm == pmap_kernel()) {
tsb_invalidate(pm->pm_ctx, va);
void
pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva)
{
- int s, flush = 0;
+ int flush = 0;
int64_t data;
vaddr_t flushva = va;
KDASSERT(pm != pmap_kernel() || endva < INTSTACK || va > EINTSTACK);
KDASSERT(pm != pmap_kernel() || endva < kdata || va > ekdata);
- s = splvm();
+ mtx_enter(&pm->pm_mtx);
/* Now do the real work */
while (va < endva) {
flush |= 1;
/* First remove it from the pv_table */
- entry = (data&TLB_PA_MASK);
+ entry = (data & TLB_PA_MASK);
pv = pa_to_pvh(entry);
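+		/* pmap_remove_pv() saves the REF/MOD bits under pvmtx. */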
- if (pv != NULL) {
- /* Save REF/MOD info */
- pv->pv_va |= pmap_tte2flags(data);
+ if (pv != NULL)
pmap_remove_pv(pm, va, entry);
- }
/* We need to flip the valid bit and clear the access statistics. */
if (pseg_set(pm, va, 0, 0)) {
printf("pmap_remove: gotten pseg empty!\n");
}
va += NBPG;
}
- splx(s);
+ mtx_leave(&pm->pm_mtx);
if (flush) {
cache_flush_virt(flushva, endva - flushva);
}
void
pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
- int s;
paddr_t pa;
pv_entry_t pv;
int64_t data;
return;
}
- s = splvm();
+ mtx_enter(&pm->pm_mtx);
sva = sva & ~PGOFSET;
while (sva < eva) {
/*
sva < roundup(ekdata, 4*MEG)) {
prom_printf("pmap_protect: va=%08x in locked TLB\r\n", sva);
OF_enter();
- splx(s);
+ mtx_leave(&pm->pm_mtx);
return;
}
if (((data = pseg_get(pm, sva))&TLB_V) /*&& ((data&TLB_TSB_LOCK) == 0)*/) {
- pa = data&TLB_PA_MASK;
+ pa = data & TLB_PA_MASK;
pv = pa_to_pvh(pa);
if (pv != NULL) {
+ struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
+
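+			/*
+			 * Lock order: pm_mtx is taken before the
+			 * per-page pvmtx, here as in pmap_enter().
+			 */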
/* Save REF/MOD info */
+ mtx_enter(&pg->mdpage.pvmtx);
pv->pv_va |= pmap_tte2flags(data);
+ mtx_leave(&pg->mdpage.pvmtx);
}
/* Just do the pmap and TSB, not the pv_list */
if (CPU_ISSUN4V) {
}
sva += NBPG;
}
- splx(s);
+ mtx_leave(&pm->pm_mtx);
}
/*
int s;
s = splvm();
- pa = (pseg_get(pm, va)&TLB_PA_MASK)+(va&PGOFSET);
+ pa = (pseg_get(pm, va) & TLB_PA_MASK) + (va & PGOFSET);
splx(s);
}
if (pa == 0)
paddr_t pa = VM_PAGE_TO_PHYS(pg);
int changed = 0;
pv_entry_t pv;
- int s;
/* Clear all mappings */
- s = splvm();
+ mtx_enter(&pg->mdpage.pvmtx);
pv = pa_to_pvh(pa);
if (pv->pv_va & PV_MOD)
changed |= 1;
dcache_flush_page(pa);
}
}
- splx(s);
+ mtx_leave(&pg->mdpage.pvmtx);
return (changed);
}
paddr_t pa = VM_PAGE_TO_PHYS(pg);
int changed = 0;
pv_entry_t pv;
- int s;
/* Clear all references */
- s = splvm();
+ mtx_enter(&pg->mdpage.pvmtx);
pv = pa_to_pvh(pa);
if (pv->pv_va & PV_REF)
changed = 1;
}
/* Stupid here will take a cache hit even on unmapped pages 8^( */
dcache_flush_page(VM_PAGE_TO_PHYS(pg));
- splx(s);
+ mtx_leave(&pg->mdpage.pvmtx);
return (changed);
}
{
pv_entry_t pv, npv;
int mod = 0;
- int s;
/* Check if any mapping has been modified */
- s = splvm();
+ mtx_enter(&pg->mdpage.pvmtx);
pv = &pg->mdpage.pvent;
if (pv->pv_va & PV_MOD)
mod = 1;
/* Save modify info */
if (mod)
pv->pv_va |= PV_MOD;
- splx(s);
+ mtx_leave(&pg->mdpage.pvmtx);
return (mod);
}
{
pv_entry_t pv, npv;
int ref = 0;
- int s;
/* Check if any mapping has been referenced */
- s = splvm();
+ mtx_enter(&pg->mdpage.pvmtx);
pv = &pg->mdpage.pvent;
if (pv->pv_va & PV_REF)
ref = 1;
/* Save ref info */
if (ref)
pv->pv_va |= PV_REF;
- splx(s);
+ mtx_leave(&pg->mdpage.pvmtx);
return (ref);
}
pmap_unwire(struct pmap *pmap, vaddr_t va)
{
int64_t data;
- int s;
if (pmap == NULL)
return;
OF_enter();
return;
}
- s = splvm();
- data = pseg_get(pmap, va&PV_VAMASK);
+ mtx_enter(&pmap->pm_mtx);
+ data = pseg_get(pmap, va & PV_VAMASK);
if (CPU_ISSUN4V)
data &= ~SUN4V_TLB_TSB_LOCK;
else
data &= ~SUN4U_TLB_TSB_LOCK;
- if (pseg_set(pmap, va&PV_VAMASK, data, 0)) {
+ if (pseg_set(pmap, va & PV_VAMASK, data, 0)) {
printf("pmap_unwire: gotten pseg empty!\n");
Debugger();
/* panic? */
}
- splx(s);
+ mtx_leave(&pmap->pm_mtx);
}
/*
paddr_t pa = VM_PAGE_TO_PHYS(pg);
pv_entry_t pv;
int64_t data, clear, set;
- int s;
if (prot & PROT_WRITE)
return;
}
pv = pa_to_pvh(pa);
- s = splvm();
+ mtx_enter(&pg->mdpage.pvmtx);
if (pv->pv_pmap != NULL) {
for (; pv; pv = pv->pv_next) {
data = pseg_get(pv->pv_pmap, pv->pv_va & PV_VAMASK);
}
}
}
- splx(s);
+ mtx_leave(&pg->mdpage.pvmtx);
} else {
- pv_entry_t npv, firstpv;
+ pv_entry_t firstpv;
/* remove mappings */
- firstpv = pv = pa_to_pvh(pa);
- s = splvm();
+ firstpv = pa_to_pvh(pa);
+ mtx_enter(&pg->mdpage.pvmtx);
/* First remove the entire list of continuation pv's */
- for (npv = pv->pv_next; npv; npv = pv->pv_next) {
- /* We're removing npv from pv->pv_next */
- data = pseg_get(npv->pv_pmap, npv->pv_va & PV_VAMASK);
+ while ((pv = firstpv->pv_next) != NULL) {
+ data = pseg_get(pv->pv_pmap, pv->pv_va & PV_VAMASK);
/* Save REF/MOD info */
firstpv->pv_va |= pmap_tte2flags(data);
/* Clear mapping */
- if (pseg_set(npv->pv_pmap, npv->pv_va & PV_VAMASK, 0, 0)) {
+ if (pseg_set(pv->pv_pmap, pv->pv_va & PV_VAMASK, 0, 0)) {
printf("pmap_page_protect: gotten pseg empty!\n");
Debugger();
/* panic? */
}
- if (npv->pv_pmap->pm_ctx || npv->pv_pmap == pmap_kernel()) {
- tsb_invalidate(npv->pv_pmap->pm_ctx,
- (npv->pv_va & PV_VAMASK));
- tlb_flush_pte(npv->pv_va & PV_VAMASK, npv->pv_pmap->pm_ctx);
+ if (pv->pv_pmap->pm_ctx || pv->pv_pmap == pmap_kernel()) {
+ tsb_invalidate(pv->pv_pmap->pm_ctx,
+ (pv->pv_va & PV_VAMASK));
+ tlb_flush_pte(pv->pv_va & PV_VAMASK, pv->pv_pmap->pm_ctx);
}
- npv->pv_pmap->pm_stats.resident_count--;
+ pv->pv_pmap->pm_stats.resident_count--;
/* free the pv */
- pv->pv_next = npv->pv_next;
- pool_put(&pv_pool, npv);
+ firstpv->pv_next = pv->pv_next;
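+			/*
+			 * Unlink the entry first, then drop pvmtx
+			 * around pool_put() rather than hold the page
+			 * mutex into the pool layer; the loop condition
+			 * re-reads firstpv->pv_next, so concurrent list
+			 * changes are handled.
+			 */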
+ mtx_leave(&pg->mdpage.pvmtx);
+ pool_put(&pv_pool, pv);
+ mtx_enter(&pg->mdpage.pvmtx);
}
pv = firstpv;
/* Save REF/MOD info */
pv->pv_va |= pmap_tte2flags(data);
- if (pseg_set(pv->pv_pmap, pv->pv_va&PV_VAMASK, 0, 0)) {
+ if (pseg_set(pv->pv_pmap, pv->pv_va & PV_VAMASK, 0, 0)) {
printf("pmap_page_protect: gotten pseg empty!\n");
Debugger();
/* panic? */
pv->pv_pmap->pm_ctx);
}
pv->pv_pmap->pm_stats.resident_count--;
- npv = pv->pv_next;
+ KASSERT(pv->pv_next == NULL);
/* dump the first pv */
- if (npv) {
- /* First save mod/ref bits */
- pv->pv_va |= (npv->pv_va & PV_MASK);
- pv->pv_next = npv->pv_next;
- pv->pv_pmap = npv->pv_pmap;
- pool_put(&pv_pool, npv);
- } else {
- pv->pv_pmap = NULL;
- pv->pv_next = NULL;
- }
+ pv->pv_pmap = NULL;
}
dcache_flush_page(pa);
- splx(s);
+ mtx_leave(&pg->mdpage.pvmtx);
}
/* We should really only flush the pages we demapped. */
}
void
pmap_enter_pv(struct pmap *pmap, vaddr_t va, paddr_t pa)
{
- pv_entry_t pv, npv;
- int s;
+ pv_entry_t pv, npv = NULL;
+ struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
pv = pa_to_pvh(pa);
- s = splvm();
+ mtx_enter(&pg->mdpage.pvmtx);
+
+retry:
if (pv->pv_pmap == NULL) {
/*
* No entries yet, use header as the first entry
PV_SETVA(pv, va);
pv->pv_pmap = pmap;
pv->pv_next = NULL;
- } else {
- if (!(pv->pv_va & PV_ALIAS)) {
- /*
- * There is at least one other VA mapping this page.
- * Check if they are cache index compatible. If not
- * remove all mappings, flush the cache and set page
- * to be mapped uncached. Caching will be restored
- * when pages are mapped compatible again.
- * XXX - caching is not currently being restored, but
- * XXX - I haven't seen the pages uncached since
- * XXX - using pmap_prefer(). mhitch
- */
- if ((pv->pv_va ^ va) & VA_ALIAS_MASK) {
- pv->pv_va |= PV_ALIAS;
- pmap_page_cache(pmap, pa, 0);
- }
- }
+ mtx_leave(&pg->mdpage.pvmtx);
+ if (npv)
+ pool_put(&pv_pool, npv);
+ return;
+ }
+
+ if (npv == NULL) {
+ mtx_leave(&pg->mdpage.pvmtx);
+ npv = pool_get(&pv_pool, PR_NOWAIT);
+ if (npv == NULL)
+ panic("%s: no pv entries available", __func__);
+ mtx_enter(&pg->mdpage.pvmtx);
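+		/* The list may have changed while unlocked; recheck. */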
+ goto retry;
+ }
+
+ if (!(pv->pv_va & PV_ALIAS)) {
/*
* There is at least one other VA mapping this page.
- * Place this entry after the header.
- *
- * Note: the entry may already be in the table if
- * we are only changing the protection bits.
+ * Check if they are cache index compatible. If not
+ * remove all mappings, flush the cache and set page
+ * to be mapped uncached. Caching will be restored
+ * when pages are mapped compatible again.
+ * XXX - caching is not currently being restored, but
+ * XXX - I haven't seen the pages uncached since
+ * XXX - using pmap_prefer(). mhitch
*/
- for (npv = pv; npv; npv = npv->pv_next) {
- if (pmap == npv->pv_pmap && PV_MATCH(npv, va)) {
- goto found;
- }
+ if ((pv->pv_va ^ va) & VA_ALIAS_MASK) {
+ pv->pv_va |= PV_ALIAS;
+ pmap_page_cache(pmap, pa, 0);
}
- /*
- * XXXXX can this cause us to recurse forever?
- */
- npv = pool_get(&pv_pool, PR_NOWAIT);
- if (npv == NULL)
- panic("pmap_enter: new pv malloc() failed");
- npv->pv_va = va&PV_VAMASK;
- npv->pv_pmap = pmap;
- npv->pv_next = pv->pv_next;
- pv->pv_next = npv;
-found:
- ;
}
- splx(s);
+
+ /*
+ * There is at least one other VA mapping this page.
+ * Place this entry after the header.
+ */
+ npv->pv_va = va & PV_VAMASK;
+ npv->pv_pmap = pmap;
+ npv->pv_next = pv->pv_next;
+ pv->pv_next = npv;
+
+ mtx_leave(&pg->mdpage.pvmtx);
}
/*
void
pmap_remove_pv(struct pmap *pmap, vaddr_t va, paddr_t pa)
{
- pv_entry_t pv, opv, npv;
+ pv_entry_t pv, opv, npv = NULL;
+ struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
int64_t data = 0LL;
- int s;
- /*
- * Remove page from the PV table (raise IPL since we
- * may be called at interrupt time).
- */
opv = pv = pa_to_pvh(pa);
- s = splvm();
+ mtx_enter(&pg->mdpage.pvmtx);
/*
* If it is the first entry on the list, it is actually
pv->pv_va = (pv->pv_va & PV_MASK) | npv->pv_va;
pv->pv_next = npv->pv_next;
pv->pv_pmap = npv->pv_pmap;
- pool_put(&pv_pool, npv);
} else {
pv->pv_pmap = NULL;
pv->pv_next = NULL;
* Sometimes UVM gets confused and calls pmap_remove() instead
* of pmap_kremove()
*/
- splx(s);
+ mtx_leave(&pg->mdpage.pvmtx);
return;
found:
pv->pv_next = npv->pv_next;
* alias that may have occurred. However, that's a complicated
* operation involving multiple scans of the pv list.
*/
- pool_put(&pv_pool, npv);
}
/* Save REF/MOD info */
/* Check to see if the alias went away */
if (opv->pv_va & PV_ALIAS) {
opv->pv_va &= ~PV_ALIAS;
- for (npv = opv; npv; npv = npv->pv_next) {
- if ((npv->pv_va ^ opv->pv_va) & VA_ALIAS_MASK) {
+ for (pv = opv; pv; pv = pv->pv_next) {
+ if ((pv->pv_va ^ opv->pv_va) & VA_ALIAS_MASK) {
opv->pv_va |= PV_ALIAS;
}
}
if (!(opv->pv_va & PV_ALIAS))
pmap_page_cache(pmap, pa, 1);
}
- splx(s);
+
+ mtx_leave(&pg->mdpage.pvmtx);
+
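+	/* Free the detached pv entry only after the mutex is dropped. */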
+ if (npv)
+ pool_put(&pv_pool, npv);
}
/*
pmap_page_cache(struct pmap *pm, paddr_t pa, int mode)
{
pv_entry_t pv;
- int s;
+ struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
if (CPU_ISSUN4US || CPU_ISSUN4V)
return;
pv = pa_to_pvh(pa);
if (pv == NULL)
return;
- s = splvm();
+
+ MUTEX_ASSERT_LOCKED(&pg->mdpage.pvmtx);
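+	/* Both callers, pmap_enter_pv() and pmap_remove_pv(), hold it. */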
while (pv) {
vaddr_t va;
pv = pv->pv_next;
}
-
- splx(s);
}
int