-/* $OpenBSD: pmap.c,v 1.73 2014/11/16 12:30:52 deraadt Exp $ */
+/* $OpenBSD: pmap.c,v 1.74 2014/12/17 15:23:40 deraadt Exp $ */
/* $NetBSD: pmap.c,v 1.154 2000/12/07 22:18:55 thorpej Exp $ */
/*-
pmap_kernel()->pm_asni[i].pma_asngen =
pmap_asn_info[i].pma_asngen;
}
- simple_lock_init(&pmap_kernel()->pm_slock);
TAILQ_INSERT_TAIL(&pmap_all_pmaps, pmap_kernel(), pm_list);
#if defined(MULTIPROCESSOR)
/* XXX Locking? */
pmap->pm_asni[i].pma_asngen = pmap_asn_info[i].pma_asngen;
}
- simple_lock_init(&pmap->pm_slock);
for (;;) {
mtx_enter(&pmap_growkernel_mtx);
printf("pmap_destroy(%p)\n", pmap);
#endif
- PMAP_LOCK(pmap);
refs = --pmap->pm_count;
- PMAP_UNLOCK(pmap);
-
if (refs > 0)
return;
printf("pmap_reference(%p)\n", pmap);
#endif
- PMAP_LOCK(pmap);
pmap->pm_count++;
- PMAP_UNLOCK(pmap);
}
/*
*/
if (pmap == pmap_kernel()) {
PMAP_MAP_TO_HEAD_LOCK();
- PMAP_LOCK(pmap);
KASSERT(dowired == TRUE);
sva += PAGE_SIZE;
}
- PMAP_UNLOCK(pmap);
PMAP_MAP_TO_HEAD_UNLOCK();
if (needisync)
#endif
PMAP_MAP_TO_HEAD_LOCK();
- PMAP_LOCK(pmap);
/*
* If we're already referencing the kernel_lev1map, there
PMAP_SYNC_ISTREAM_USER(pmap);
out:
- PMAP_UNLOCK(pmap);
PMAP_MAP_TO_HEAD_UNLOCK();
}
case PROT_READ:
PMAP_HEAD_TO_MAP_LOCK();
for (pv = pg->mdpage.pvh_list; pv != NULL; pv = pv->pv_next) {
- PMAP_LOCK(pv->pv_pmap);
if (*pv->pv_pte & (PG_KWE | PG_UWE)) {
*pv->pv_pte &= ~(PG_KWE | PG_UWE);
PMAP_INVALIDATE_TLB(pv->pv_pmap, pv->pv_va,
PMAP_TLB_SHOOTDOWN(pv->pv_pmap, pv->pv_va,
pmap_pte_asm(pv->pv_pte));
}
- PMAP_UNLOCK(pv->pv_pmap);
}
PMAP_HEAD_TO_MAP_UNLOCK();
PMAP_TLB_SHOOTNOW();
nextpv = pv->pv_next;
pmap = pv->pv_pmap;
- PMAP_LOCK(pmap);
#ifdef DEBUG
if (pmap_pte_v(pmap_l2pte(pv->pv_pmap, pv->pv_va, NULL)) == 0 ||
pmap_pte_pa(pv->pv_pte) != pa)
else
PMAP_SYNC_ISTREAM_USER(pmap);
}
- PMAP_UNLOCK(pmap);
}
if (needkisync)
return;
}
- PMAP_LOCK(pmap);
-
bits = pte_prot(pmap, prot);
isactive = PMAP_ISACTIVE(pmap, cpu_id);
if (prot & PROT_EXEC)
PMAP_SYNC_ISTREAM(pmap);
-
- PMAP_UNLOCK(pmap);
}
/*
}
PMAP_MAP_TO_HEAD_LOCK();
- PMAP_LOCK(pmap);
if (pmap == pmap_kernel()) {
#ifdef DIAGNOSTIC
PMAP_SYNC_ISTREAM(pmap);
out:
- PMAP_UNLOCK(pmap);
PMAP_MAP_TO_HEAD_UNLOCK();
return error;
printf("pmap_unwire(%p, %lx)\n", pmap, va);
#endif
- PMAP_LOCK(pmap);
-
pte = pmap_l3pte(pmap, va, NULL);
#ifdef DIAGNOSTIC
if (pte == NULL || pmap_pte_v(pte) == 0)
"didn't change!\n", pmap, va);
}
#endif
-
- PMAP_UNLOCK(pmap);
}
/*
goto out_nolock;
}
- PMAP_LOCK(pmap);
-
l1pte = pmap_l1pte(pmap, va);
if (pmap_pte_v(l1pte) == 0)
goto out;
*pap = pa;
rv = TRUE;
out:
- PMAP_UNLOCK(pmap);
out_nolock:
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW) {
for (pv = pg->mdpage.pvh_list; pv != NULL; pv = pv->pv_next) {
va = pv->pv_va;
- PMAP_LOCK(pv->pv_pmap);
-
pte = pv->pv_pte;
npte = (*pte | set) & mask;
if (*pte != npte) {
PMAP_TLB_SHOOTDOWN(pv->pv_pmap, va,
hadasm ? PG_ASM : 0);
}
- PMAP_UNLOCK(pv->pv_pmap);
}
PMAP_TLB_SHOOTNOW();
pt_entry_t faultoff, *pte;
struct vm_page *pg;
paddr_t pa;
- boolean_t didlock = FALSE;
boolean_t exec = FALSE;
cpuid_t cpu_id = cpu_number();
panic("pmap_emulate_reference: bad p_vmspace");
#endif
pmap = p->p_vmspace->vm_map.pmap;
- PMAP_LOCK(pmap);
- didlock = TRUE;
pte = pmap_l3pte(pmap, v, NULL);
/*
* We'll unlock below where we're done with the PTE.
}
exec = pmap_pte_exec(pte);
if (!exec && type == ALPHA_MMCSR_FOE) {
- if (didlock)
- PMAP_UNLOCK(pmap);
return (1);
}
#ifdef DEBUG
#endif
pa = pmap_pte_pa(pte);
- /*
- * We're now done with the PTE. If it was a user pmap, unlock
- * it now.
- */
- if (didlock)
- PMAP_UNLOCK(pmap);
-
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW)
printf("\tpa = 0x%lx\n", pa);
if (pm == pmap_kernel())
continue;
- PMAP_LOCK(pm);
KDASSERT(pm->pm_lev1map != kernel_lev1map);
pm->pm_lev1map[l1idx] = pte;
- PMAP_UNLOCK(pm);
}
mtx_leave(&pmap_all_pmaps_mtx);
}
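
The pmap.c hunks above delete the per-pmap simple_lock calls outright, while the global pmap state keeps real mutexes (pmap_growkernel_mtx and pmap_all_pmaps_mtx in the surviving context). The fragment below is an illustrative sketch of that remaining pattern, not code from the commit; pmap_list_insert() and the IPL choice are hypothetical.

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/mutex.h>
	#include <sys/queue.h>
	#include <machine/pmap.h>	/* struct pmap, pm_list (the header patched below) */

	/* Stand-ins for the globals pmap.c already defines. */
	struct mutex pmap_all_pmaps_mtx = MUTEX_INITIALIZER(IPL_NONE);
	TAILQ_HEAD(, pmap) pmap_all_pmaps = TAILQ_HEAD_INITIALIZER(pmap_all_pmaps);

	/* Hypothetical helper: the global pmap list is still serialized by a mutex. */
	static void
	pmap_list_insert(struct pmap *pmap)
	{
		mtx_enter(&pmap_all_pmaps_mtx);
		TAILQ_INSERT_TAIL(&pmap_all_pmaps, pmap, pm_list);
		mtx_leave(&pmap_all_pmaps_mtx);
	}

The per-pmap pm_slock, by contrast, is dropped rather than converted, presumably because simple_lock() provided no exclusion to lose.
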
-/* $OpenBSD: pmap.h,v 1.32 2014/01/30 18:16:41 miod Exp $ */
+/* $OpenBSD: pmap.h,v 1.33 2014/12/17 15:23:42 deraadt Exp $ */
/* $NetBSD: pmap.h,v 1.37 2000/11/19 03:16:35 thorpej Exp $ */
/*-
TAILQ_ENTRY(pmap) pm_list; /* list of all pmaps */
pt_entry_t *pm_lev1map; /* level 1 map */
int pm_count; /* pmap reference count */
- struct simplelock pm_slock; /* lock on pmap */
struct pmap_statistics pm_stats; /* pmap statistics */
unsigned long pm_cpus; /* mask of CPUs using pmap */
unsigned long pm_needisync; /* mask of CPUs needing isync */
return (&lev3map[l3pte_index(v)]);
}
-/*
- * Macros for locking pmap structures.
- *
- * Note that we if we access the kernel pmap in interrupt context, it
- * is only to update statistics. Since stats are updated using atomic
- * operations, locking the kernel pmap is not necessary. Therefore,
- * it is not necessary to block interrupts when locking pmap structures.
- */
-#define PMAP_LOCK(pmap) simple_lock(&(pmap)->pm_slock)
-#define PMAP_UNLOCK(pmap) simple_unlock(&(pmap)->pm_slock)
-
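
The comment block removed above carries the reasoning behind the old macros: the kernel pmap is touched from interrupt context only to update statistics, and those updates are atomic, so neither a pmap lock nor blocked interrupts are needed around them. Below is a hedged illustration of that idea using the generic <sys/atomic.h> interface; pmap_stat_add() is a hypothetical helper, not the macro pmap.c actually uses.

	#include <sys/types.h>
	#include <sys/atomic.h>

	/*
	 * Bump a pmap statistic (a plain long in struct pmap_statistics)
	 * with an atomic read-modify-write.  A concurrent interrupt-context
	 * update of the kernel pmap's counters then needs no lock and no
	 * spl dance around it.
	 */
	static inline void
	pmap_stat_add(long *statp, long v)
	{
		atomic_add_long((unsigned long *)statp, v);
	}
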
/*
* Macro for processing deferred I-stream synchronization.
*