-/* $OpenBSD: pmap.c,v 1.48 2018/02/17 22:33:00 kettenis Exp $ */
+/* $OpenBSD: pmap.c,v 1.49 2018/02/20 23:45:24 kettenis Exp $ */
/*
* Copyright (c) 2008-2009,2014-2016 Dale Rahn <drahn@dalerahn.com>
*
int pmap_cnt_avail, pmap_cnt_allocated;
uint64_t pmap_avail_kvo;
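+/*
+ * Per-pmap locking helpers.  The kernel pmap is never locked through
+ * pm_mtx, so these turn into no-ops for pmap_kernel().
+ */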
+static inline void
+pmap_lock(struct pmap *pmap)
+{
+ if (pmap != pmap_kernel())
+ mtx_enter(&pmap->pm_mtx);
+}
+
+static inline void
+pmap_unlock(struct pmap *pmap)
+{
+ if (pmap != pmap_kernel())
+ mtx_leave(&pmap->pm_mtx);
+}
/* virtual to physical helpers */
static inline void *
pmap_vp_page_alloc(struct pool *pp, int flags, int *slowdown)
{
struct kmem_dyn_mode kd = KMEM_DYN_INITIALIZER;
- void *v;
kd.kd_waitok = ISSET(flags, PR_WAITOK);
+ kd.kd_trylock = ISSET(flags, PR_NOWAIT);
kd.kd_slowdown = slowdown;
- KERNEL_LOCK();
- v = km_alloc(pp->pr_pgsize, &kv_any, &kp_dirty, &kd);
- KERNEL_UNLOCK();
-
- return v;
+ return km_alloc(pp->pr_pgsize, &kv_any, &kp_dirty, &kd);
}
void
pmap_vp_page_free(struct pool *pp, void *v)
{
- KERNEL_LOCK();
km_free(v, pp->pr_pgsize, &kv_any, &kp_dirty);
- KERNEL_UNLOCK();
}
u_int32_t PTED_MANAGED(struct pte_desc *pted);
if (__predict_false(!pmap_initialized))
return;
+ mtx_enter(&pg->mdpage.pv_mtx);
LIST_INSERT_HEAD(&(pg->mdpage.pv_list), pted, pted_pv_list);
pted->pted_va |= PTED_VA_MANAGED_M;
+ mtx_leave(&pg->mdpage.pv_mtx);
}
void
pmap_remove_pv(struct pte_desc *pted)
{
+ struct vm_page *pg = PHYS_TO_VM_PAGE(pted->pted_pte & PTE_RPGN);
+
+ mtx_enter(&pg->mdpage.pv_mtx);
LIST_REMOVE(pted, pted_pv_list);
+ mtx_leave(&pg->mdpage.pv_mtx);
}
int
{
struct pte_desc *pted;
struct vm_page *pg;
- int s, error;
+ int error;
int cache = PMAP_CACHE_WB;
int need_sync = 0;
cache = PMAP_CACHE_DEV;
pg = PHYS_TO_VM_PAGE(pa);
- /* MP - Acquire lock for this pmap */
-
- s = splvm();
+ pmap_lock(pm);
pted = pmap_vp_lookup(pm, va, NULL);
if (pted && PTED_VALID(pted)) {
pmap_remove_pted(pm, pted);
error = 0;
out:
- splx(s);
- /* MP - free pmap lock */
+ pmap_unlock(pm);
return error;
}
struct pte_desc *pted;
vaddr_t va;
+ pmap_lock(pm);
for (va = sva; va < eva; va += PAGE_SIZE) {
		pted = pmap_vp_lookup(pm, va, NULL);
		if (pted == NULL)
			continue;
		if (PTED_VALID(pted))
pmap_remove_pted(pm, pted);
}
+ pmap_unlock(pm);
}
/*
void
pmap_remove_pted(pmap_t pm, struct pte_desc *pted)
{
- int s;
-
- s = splvm();
pm->pm_stats.resident_count--;
	if (pted->pted_va & PTED_VA_WIRED_M) {
		pm->pm_stats.wired_count--;
		pted->pted_va &= ~PTED_VA_WIRED_M;
	}
- pted->pted_pte = 0;
-
if (PTED_MANAGED(pted))
pmap_remove_pv(pted);
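+	/*
+	 * pmap_remove_pv() looks up the vm_page from pted_pte, so the
+	 * PTE fields are cleared only after the pv entry is gone.
+	 */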
+ pted->pted_pte = 0;
+ pted->pted_va = 0;
+
if (pm != pmap_kernel())
pool_put(&pmap_pted_pool, pted);
- splx(s);
}
{
pmap_t pm = pmap_kernel();
struct pte_desc *pted;
- int s;
-
- /* MP - lock pmap. */
- s = splvm();
pted = pmap_vp_lookup(pm, va, NULL);
pmap_pte_insert(pted);
ttlb_flush(pm, va & ~PAGE_MASK);
-
- splx(s);
}
void
pmap_t pmap;
pmap = pool_get(&pmap_pmap_pool, PR_WAITOK | PR_ZERO);
+
+ mtx_init(&pmap->pm_mtx, IPL_VM);
+
pmap_pinit(pmap);
if (pmap_vp_poolcache == 0) {
pool_setlowat(&pmap_vp_pool, 20);
void
pmap_reference(pmap_t pm)
{
- pm->pm_refs++;
+ atomic_inc_int(&pm->pm_refs);
}
/*
{
int refs;
- refs = --pm->pm_refs;
+ refs = atomic_dec_int_nv(&pm->pm_refs);
if (refs > 0)
return;
void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
- int s;
struct pte_desc *pted;
+ struct pmap *pm;
- /* need to lock for this pv */
- s = splvm();
-
- if (prot == PROT_NONE) {
- while (!LIST_EMPTY(&(pg->mdpage.pv_list))) {
- pted = LIST_FIRST(&(pg->mdpage.pv_list));
- pmap_remove_pted(pted->pted_pmap, pted);
+ if (prot != PROT_NONE) {
+ mtx_enter(&pg->mdpage.pv_mtx);
+ LIST_FOREACH(pted, &(pg->mdpage.pv_list), pted_pv_list) {
+ pmap_page_ro(pted->pted_pmap, pted->pted_va, prot);
}
- /* page is being reclaimed, sync icache next use */
- atomic_clearbits_int(&pg->pg_flags, PG_PMAP_EXE);
- splx(s);
- return;
+		mtx_leave(&pg->mdpage.pv_mtx);
+		return;
}
- LIST_FOREACH(pted, &(pg->mdpage.pv_list), pted_pv_list) {
- pmap_page_ro(pted->pted_pmap, pted->pted_va, prot);
+ mtx_enter(&pg->mdpage.pv_mtx);
+ while ((pted = LIST_FIRST(&(pg->mdpage.pv_list))) != NULL) {
+ pmap_reference(pted->pted_pmap);
+ pm = pted->pted_pmap;
+ mtx_leave(&pg->mdpage.pv_mtx);
+
+ pmap_lock(pm);
+
+ /*
+ * We dropped the pvlist lock before grabbing the pmap
+ * lock to avoid lock ordering problems. This means
+ * we have to check the pvlist again since somebody
+ * else might have modified it. All we care about is
+ * that the pvlist entry matches the pmap we just
+ * locked. If it doesn't, unlock the pmap and try
+ * again.
+ */
+ mtx_enter(&pg->mdpage.pv_mtx);
+ pted = LIST_FIRST(&(pg->mdpage.pv_list));
+ if (pted == NULL || pted->pted_pmap != pm) {
+ mtx_leave(&pg->mdpage.pv_mtx);
+ pmap_unlock(pm);
+ pmap_destroy(pm);
+ mtx_enter(&pg->mdpage.pv_mtx);
+ continue;
+ }
+ mtx_leave(&pg->mdpage.pv_mtx);
+
+ pmap_remove_pted(pm, pted);
+ pmap_unlock(pm);
+ pmap_destroy(pm);
+
+ mtx_enter(&pg->mdpage.pv_mtx);
}
- splx(s);
+ /* page is being reclaimed, sync icache next use */
+ atomic_clearbits_int(&pg->pg_flags, PG_PMAP_EXE);
+ mtx_leave(&pg->mdpage.pv_mtx);
}
void
pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
- int s;
-
if (prot & (PROT_READ | PROT_EXEC)) {
- s = splvm();
+ pmap_lock(pm);
while (sva < eva) {
pmap_page_ro(pm, sva, 0);
sva += PAGE_SIZE;
}
- splx(s);
+ pmap_unlock(pm);
return;
}
pmap_remove(pm, sva, eva);
pool_init(&pmap_pted_pool, sizeof(struct pte_desc), 0, IPL_VM, 0,
"pted", NULL);
pool_setlowat(&pmap_pted_pool, 20);
- pool_init(&pmap_vp_pool, sizeof(struct pmapvp0), PAGE_SIZE, IPL_VM,
- PR_WAITOK, "vp", &pmap_vp_allocator);
+ pool_init(&pmap_vp_pool, sizeof(struct pmapvp0), PAGE_SIZE, IPL_VM, 0,
+ "vp", &pmap_vp_allocator);
/* pool_setlowat(&pmap_vp_pool, 20); */
pmap_initialized = 1;
{
struct pte_desc *pted;
uint64_t *pl3 = NULL;
- int s;
- s = splvm();
-
- pg->pg_flags &= ~PG_PMAP_MOD;
+ atomic_clearbits_int(&pg->pg_flags, PG_PMAP_MOD);
+ mtx_enter(&pg->mdpage.pv_mtx);
LIST_FOREACH(pted, &(pg->mdpage.pv_list), pted_pv_list) {
		if (pmap_vp_lookup(pted->pted_pmap,
		    pted->pted_va & ~PAGE_MASK, &pl3) == NULL)
panic("failed to look up pte\n");
ttlb_flush(pted->pted_pmap, pted->pted_va & ~PAGE_MASK);
}
- splx(s);
+ mtx_leave(&pg->mdpage.pv_mtx);
return 0;
}
pmap_clear_reference(struct vm_page *pg)
{
struct pte_desc *pted;
- int s;
- s = splvm();
-
- pg->pg_flags &= ~PG_PMAP_REF;
+ atomic_clearbits_int(&pg->pg_flags, PG_PMAP_REF);
+ mtx_enter(&pg->mdpage.pv_mtx);
LIST_FOREACH(pted, &(pg->mdpage.pv_list), pted_pv_list) {
pted->pted_pte &= ~PROT_MASK;
pmap_pte_insert(pted);
ttlb_flush(pted->pted_pmap, pted->pted_va & ~PAGE_MASK);
}
- splx(s);
+ mtx_leave(&pg->mdpage.pv_mtx);
return 0;
}