From e4632938fbbd2e01a29e1e3b1462ce28a93fc447 Mon Sep 17 00:00:00 2001
From: kettenis
Date: Thu, 9 Apr 2015 16:56:52 +0000
Subject: [PATCH] Make the sparc64 pmap (more) mpsafe by protecting both the
 pmap itself and the pv lists with a mutex.

Some minor code adjustments to prevent holding locks too long.

This should make pmap_enter(9), pmap_remove(9) and pmap_page_protect(9)
safe to use without holding the kernel lock.
---
 sys/arch/sparc64/include/pmap.h |   4 +
 sys/arch/sparc64/sparc64/pmap.c | 255 ++++++++++++++++----------------
 2 files changed, 128 insertions(+), 131 deletions(-)

diff --git a/sys/arch/sparc64/include/pmap.h b/sys/arch/sparc64/include/pmap.h
index c22ae4a0891..646ce3aa154 100644
--- a/sys/arch/sparc64/include/pmap.h
+++ b/sys/arch/sparc64/include/pmap.h
@@ -39,6 +39,7 @@
 #ifdef _KERNEL
 #include
 #endif
+#include <sys/mutex.h>
 #endif
 
 /*
@@ -108,6 +109,7 @@ struct page_size_map {
 extern struct page_size_map page_size_map[];
 
 struct pmap {
+	struct mutex pm_mtx;
 	int pm_ctx;		/* Current context */
 	int pm_refs;		/* ref count */
 	/*
@@ -201,10 +203,12 @@ typedef struct pv_entry {
 /* PV flags encoded in the low bits of the VA of the first pv_entry */
 
 struct vm_page_md {
+	struct mutex pvmtx;
 	struct pv_entry pvent;
 };
 
 #define VM_MDPAGE_INIT(pg) do {				\
+	mtx_init(&(pg)->mdpage.pvmtx, IPL_VM);		\
 	(pg)->mdpage.pvent.pv_next = NULL;		\
 	(pg)->mdpage.pvent.pv_pmap = NULL;		\
 	(pg)->mdpage.pvent.pv_va = 0;			\
diff --git a/sys/arch/sparc64/sparc64/pmap.c b/sys/arch/sparc64/sparc64/pmap.c
index 9a15512683f..25f4a157764 100644
--- a/sys/arch/sparc64/sparc64/pmap.c
+++ b/sys/arch/sparc64/sparc64/pmap.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: pmap.c,v 1.89 2015/04/08 14:02:43 kettenis Exp $	*/
+/*	$OpenBSD: pmap.c,v 1.90 2015/04/09 16:56:52 kettenis Exp $	*/
 /*	$NetBSD: pmap.c,v 1.107 2001/08/31 16:47:41 eeh Exp $	*/
 #undef	NO_VCACHE /* Don't forget the locked TLB in dostart */
 /*
@@ -1380,6 +1380,7 @@ pmap_init(void)
 
 	/* Setup a pool for additional pvlist structures */
 	pool_init(&pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pv_entry", NULL);
+	pool_setipl(&pv_pool, IPL_VM);
 	pool_init(&pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
 	    &pool_allocator_nointr);
 }
@@ -1457,6 +1458,7 @@ pmap_create(void)
 
 	pm = pool_get(&pmap_pool, PR_WAITOK | PR_ZERO);
 
+	mtx_init(&pm->pm_mtx, IPL_VM);
 	pm->pm_refs = 1;
 	pmap_get_page(&pm->pm_physaddr, "pmap_create", pm);
 	pm->pm_segs = (int64_t *)(u_long)pm->pm_physaddr;
@@ -1471,7 +1473,7 @@ pmap_create(void)
 void
 pmap_reference(struct pmap *pm)
 {
-	pm->pm_refs++;
+	atomic_inc_int(&pm->pm_refs);
 }
 
 /*
@@ -1481,7 +1483,7 @@ pmap_reference(struct pmap *pm)
 void
 pmap_destroy(struct pmap *pm)
 {
-	if (--pm->pm_refs == 0) {
+	if (atomic_dec_int_nv(&pm->pm_refs) == 0) {
 		pmap_release(pm);
 		pool_put(&pmap_pool, pm);
 	}
@@ -1494,7 +1496,7 @@ pmap_destroy(struct pmap *pm)
 void
 pmap_release(struct pmap *pm)
 {
-	int i, j, k, s;
+	int i, j, k;
 	paddr_t *pdir, *ptbl, tmp;
 
 #ifdef DIAGNOSTIC
@@ -1502,7 +1504,7 @@ pmap_release(struct pmap *pm)
 		panic("pmap_release: releasing pmap_kernel()");
 #endif
 
-	s=splvm();
+	mtx_enter(&pm->pm_mtx);
 	for(i=0; i<STSZ; i++) {
 		paddr_t psegentp = (paddr_t)(u_long)&pm->pm_segs[i];
 		if((pdir = (paddr_t *)(u_long)ldxa((vaddr_t)psegentp,
@@ -1539,7 +1541,7 @@ pmap_release(struct pmap *pm)
 	tmp = (paddr_t)(u_long)pm->pm_segs;
 	pm->pm_segs = NULL;
 	pmap_free_page(tmp, pm);
-	splx(s);
+	mtx_leave(&pm->pm_mtx);
 
 	ctx_free(pm);
 }
@@ -1781,7 +1783,7 @@ pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
 {
 	pte_t tte;
 	paddr_t pg;
-	int s, aliased = 0;
+	int aliased = 0;
 	pv_entry_t pv = NULL;
 	int size = 0; /* PMAP_SZ_TO_TTE(pa); */
 	boolean_t wired = (flags & PMAP_WIRED) != 0;
@@ -1795,10 +1797,12 @@ pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
 	/*
	 * XXXX If a mapping at this address already exists, remove it.
	 */
-	s = splvm();
+	mtx_enter(&pm->pm_mtx);
 	tte.data = pseg_get(pm, va);
 	if (tte.data & TLB_V) {
+		mtx_leave(&pm->pm_mtx);
 		pmap_remove(pm, va, va + NBPG-1);
+		mtx_enter(&pm->pm_mtx);
 		tte.data = pseg_get(pm, va);
 	}
 
@@ -1807,7 +1811,10 @@ pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
	 */
 	pv = pa_to_pvh(pa);
 	if (pv != NULL) {
-		aliased = (pv->pv_va&(PV_ALIAS|PV_NVC));
+		struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
+
+		mtx_enter(&pg->mdpage.pvmtx);
+		aliased = (pv->pv_va & (PV_ALIAS|PV_NVC));
 #ifdef DIAGNOSTIC
 		if ((flags & PROT_MASK) & ~prot)
 			panic("pmap_enter: access_type exceeds prot");
@@ -1818,10 +1825,12 @@ pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
 		if (flags & PROT_WRITE)
 			pv->pv_va |= PV_MOD;
 		pv->pv_va |= pmap_tte2flags(tte.data);
+		mtx_leave(&pg->mdpage.pvmtx);
 	} else {
 		aliased = 0;
 	}
-	if (pa & PMAP_NVC) aliased = 1;
+	if (pa & PMAP_NVC)
+		aliased = 1;
 #ifdef NO_VCACHE
 	aliased = 1; /* Disable D$ */
 #endif
@@ -1854,7 +1863,7 @@ pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
 		if (!pmap_get_page(&pg, NULL, pm)) {
 			if ((flags & PMAP_CANFAIL) == 0)
 				panic("pmap_enter: no memory");
-			splx(s);
+			mtx_leave(&pm->pm_mtx);
 			return (ENOMEM);
 		}
 	}
@@ -1862,7 +1871,7 @@ pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
 	if (pv)
 		pmap_enter_pv(pm, va, pa);
 	pm->pm_stats.resident_count++;
-	splx(s);
+	mtx_leave(&pm->pm_mtx);
 
 	if (pm->pm_ctx || pm == pmap_kernel()) {
 		tsb_invalidate(pm->pm_ctx, va);
@@ -1882,7 +1891,7 @@ pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
 void
 pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva)
 {
-	int s, flush = 0;
+	int flush = 0;
 	int64_t data;
 	vaddr_t flushva = va;
 
@@ -1894,7 +1903,7 @@ pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva)
 	KDASSERT(pm != pmap_kernel() || endva < INTSTACK || va > EINTSTACK);
 	KDASSERT(pm != pmap_kernel() || endva < kdata || va > ekdata);
 
-	s = splvm();
+	mtx_enter(&pm->pm_mtx);
 
 	/* Now do the real work */
 	while (va < endva) {
@@ -1913,13 +1922,10 @@ pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva)
 			flush |= 1;
 			/* First remove it from the pv_table */
-			entry = (data&TLB_PA_MASK);
+			entry = (data & TLB_PA_MASK);
 			pv = pa_to_pvh(entry);
-			if (pv != NULL) {
-				/* Save REF/MOD info */
-				pv->pv_va |= pmap_tte2flags(data);
+			if (pv != NULL)
 				pmap_remove_pv(pm, va, entry);
-			}
 		/* We need to flip the valid bit and clear the access statistics.
		 */
 		if (pseg_set(pm, va, 0, 0)) {
 			printf("pmap_remove: gotten pseg empty!\n");
@@ -1935,7 +1941,7 @@ pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva)
 		}
 		va += NBPG;
 	}
-	splx(s);
+	mtx_leave(&pm->pm_mtx);
 	if (flush) {
 		cache_flush_virt(flushva, endva - flushva);
 	}
@@ -1947,7 +1953,6 @@ pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva)
 void
 pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
 {
-	int s;
 	paddr_t pa;
 	pv_entry_t pv;
 	int64_t data;
@@ -1964,7 +1969,7 @@ pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
 		return;
 	}
 
-	s = splvm();
+	mtx_enter(&pm->pm_mtx);
 	sva = sva & ~PGOFSET;
 	while (sva < eva) {
 		/*
@@ -1974,16 +1979,20 @@ pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
 		    sva < roundup(ekdata, 4*MEG)) {
 			prom_printf("pmap_protect: va=%08x in locked TLB\r\n", sva);
 			OF_enter();
-			splx(s);
+			mtx_leave(&pm->pm_mtx);
 			return;
 		}
 
 		if (((data = pseg_get(pm, sva))&TLB_V) /*&& ((data&TLB_TSB_LOCK) == 0)*/) {
-			pa = data&TLB_PA_MASK;
+			pa = data & TLB_PA_MASK;
 			pv = pa_to_pvh(pa);
 			if (pv != NULL) {
+				struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
+
 				/* Save REF/MOD info */
+				mtx_enter(&pg->mdpage.pvmtx);
 				pv->pv_va |= pmap_tte2flags(data);
+				mtx_leave(&pg->mdpage.pvmtx);
 			}
 			/* Just do the pmap and TSB, not the pv_list */
 			if (CPU_ISSUN4V) {
@@ -2011,7 +2020,7 @@ pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
 		}
 		sva += NBPG;
 	}
-	splx(s);
+	mtx_leave(&pm->pm_mtx);
 }
 
 /*
@@ -2036,7 +2045,7 @@ pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap)
 		int s;
 
 		s = splvm();
-		pa = (pseg_get(pm, va)&TLB_PA_MASK)+(va&PGOFSET);
+		pa = (pseg_get(pm, va) & TLB_PA_MASK) + (va & PGOFSET);
 		splx(s);
 	}
 	if (pa == 0)
@@ -2230,10 +2239,9 @@ pmap_clear_modify(struct vm_page *pg)
 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
 	int changed = 0;
 	pv_entry_t pv;
-	int s;
 
 	/* Clear all mappings */
-	s = splvm();
+	mtx_enter(&pg->mdpage.pvmtx);
 	pv = pa_to_pvh(pa);
 	if (pv->pv_va & PV_MOD)
 		changed |= 1;
@@ -2274,7 +2282,7 @@ pmap_clear_modify(struct vm_page *pg)
 			dcache_flush_page(pa);
 		}
 	}
-	splx(s);
+	mtx_leave(&pg->mdpage.pvmtx);
 
 	return (changed);
 }
@@ -2285,10 +2293,9 @@ pmap_clear_reference(struct vm_page *pg)
 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
 	int changed = 0;
 	pv_entry_t pv;
-	int s;
 
 	/* Clear all references */
-	s = splvm();
+	mtx_enter(&pg->mdpage.pvmtx);
 	pv = pa_to_pvh(pa);
 	if (pv->pv_va & PV_REF)
 		changed = 1;
@@ -2328,7 +2335,7 @@ pmap_clear_reference(struct vm_page *pg)
 	}
 	/* Stupid here will take a cache hit even on unmapped pages 8^( */
 	dcache_flush_page(VM_PAGE_TO_PHYS(pg));
-	splx(s);
+	mtx_leave(&pg->mdpage.pvmtx);
 
 	return (changed);
 }
@@ -2338,10 +2345,9 @@ pmap_is_modified(struct vm_page *pg)
 {
 	pv_entry_t pv, npv;
 	int mod = 0;
-	int s;
 
 	/* Check if any mapping has been modified */
-	s = splvm();
+	mtx_enter(&pg->mdpage.pvmtx);
 	pv = &pg->mdpage.pvent;
 	if (pv->pv_va & PV_MOD)
 		mod = 1;
@@ -2361,7 +2367,7 @@ pmap_is_modified(struct vm_page *pg)
 	/* Save modify info */
 	if (mod)
 		pv->pv_va |= PV_MOD;
-	splx(s);
+	mtx_leave(&pg->mdpage.pvmtx);
 
 	return (mod);
 }
@@ -2371,10 +2377,9 @@ pmap_is_referenced(struct vm_page *pg)
 {
 	pv_entry_t pv, npv;
 	int ref = 0;
-	int s;
 
 	/* Check if any mapping has been referenced */
-	s = splvm();
+	mtx_enter(&pg->mdpage.pvmtx);
 	pv = &pg->mdpage.pvent;
 	if (pv->pv_va & PV_REF)
 		ref = 1;
@@ -2394,7 +2399,7 @@ pmap_is_referenced(struct vm_page *pg)
 	/* Save ref info */
 	if (ref)
 		pv->pv_va |= PV_REF;
-	splx(s);
+	mtx_leave(&pg->mdpage.pvmtx);
 
 	return (ref);
 }
@@ -2410,7 +2415,6 @@ void
 pmap_unwire(struct pmap *pmap, vaddr_t va)
 {
 	int64_t data;
-	int s;
 
 	if (pmap == NULL)
 		return;
@@ -2424,20 +2428,20 @@ pmap_unwire(struct pmap *pmap, vaddr_t va)
 		OF_enter();
 		return;
 	}
-	s = splvm();
-	data = pseg_get(pmap, va&PV_VAMASK);
+	mtx_enter(&pmap->pm_mtx);
+	data = pseg_get(pmap, va & PV_VAMASK);
 
 	if (CPU_ISSUN4V)
 		data &= ~SUN4V_TLB_TSB_LOCK;
 	else
 		data &= ~SUN4U_TLB_TSB_LOCK;
-	if (pseg_set(pmap, va&PV_VAMASK, data, 0)) {
+	if (pseg_set(pmap, va & PV_VAMASK, data, 0)) {
 		printf("pmap_unwire: gotten pseg empty!\n");
 		Debugger();
 		/* panic? */
 	}
-	splx(s);
+	mtx_leave(&pmap->pm_mtx);
 }
 
 /*
@@ -2451,7 +2455,6 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
 	pv_entry_t pv;
 	int64_t data, clear, set;
-	int s;
 
 	if (prot & PROT_WRITE)
 		return;
@@ -2477,7 +2480,7 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
 		}
 
 		pv = pa_to_pvh(pa);
-		s = splvm();
+		mtx_enter(&pg->mdpage.pvmtx);
 		if (pv->pv_pmap != NULL) {
 			for (; pv; pv = pv->pv_next) {
 				data = pseg_get(pv->pv_pmap, pv->pv_va & PV_VAMASK);
@@ -2500,38 +2503,39 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
 				}
 			}
 		}
-		splx(s);
+		mtx_leave(&pg->mdpage.pvmtx);
 	} else {
-		pv_entry_t npv, firstpv;
+		pv_entry_t firstpv;
 		/* remove mappings */
-		firstpv = pv = pa_to_pvh(pa);
-		s = splvm();
+
+		firstpv = pa_to_pvh(pa);
+		mtx_enter(&pg->mdpage.pvmtx);
 
 		/* First remove the entire list of continuation pv's*/
-		for (npv = pv->pv_next; npv; npv = pv->pv_next) {
-			/* We're removing npv from pv->pv_next */
-			data = pseg_get(npv->pv_pmap, npv->pv_va & PV_VAMASK);
+		while ((pv = firstpv->pv_next) != NULL) {
+			data = pseg_get(pv->pv_pmap, pv->pv_va & PV_VAMASK);
 			/* Save REF/MOD info */
 			firstpv->pv_va |= pmap_tte2flags(data);
 			/* Clear mapping */
-			if (pseg_set(npv->pv_pmap, npv->pv_va & PV_VAMASK, 0, 0)) {
+			if (pseg_set(pv->pv_pmap, pv->pv_va & PV_VAMASK, 0, 0)) {
 				printf("pmap_page_protect: gotten pseg empty!\n");
 				Debugger();
 				/* panic? */
 			}
-			if (npv->pv_pmap->pm_ctx || npv->pv_pmap == pmap_kernel()) {
-				tsb_invalidate(npv->pv_pmap->pm_ctx,
-				    (npv->pv_va & PV_VAMASK));
-				tlb_flush_pte(npv->pv_va & PV_VAMASK, npv->pv_pmap->pm_ctx);
+			if (pv->pv_pmap->pm_ctx || pv->pv_pmap == pmap_kernel()) {
+				tsb_invalidate(pv->pv_pmap->pm_ctx,
+				    (pv->pv_va & PV_VAMASK));
+				tlb_flush_pte(pv->pv_va & PV_VAMASK, pv->pv_pmap->pm_ctx);
 			}
-			npv->pv_pmap->pm_stats.resident_count--;
+			pv->pv_pmap->pm_stats.resident_count--;
 			/* free the pv */
-			pv->pv_next = npv->pv_next;
-			pool_put(&pv_pool, npv);
+			firstpv->pv_next = pv->pv_next;
+			mtx_leave(&pg->mdpage.pvmtx);
+			pool_put(&pv_pool, pv);
+			mtx_enter(&pg->mdpage.pvmtx);
 		}
 
 		pv = firstpv;
@@ -2542,7 +2546,7 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
 
 		/* Save REF/MOD info */
 		pv->pv_va |= pmap_tte2flags(data);
-		if (pseg_set(pv->pv_pmap, pv->pv_va&PV_VAMASK, 0, 0)) {
+		if (pseg_set(pv->pv_pmap, pv->pv_va & PV_VAMASK, 0, 0)) {
 			printf("pmap_page_protect: gotten pseg empty!\n");
 			Debugger();
 			/* panic? */
@@ -2554,21 +2558,12 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
 				pv->pv_pmap->pm_ctx);
 		}
 		pv->pv_pmap->pm_stats.resident_count--;
-		npv = pv->pv_next;
+		KASSERT(pv->pv_next == NULL);
 
 		/* dump the first pv */
-		if (npv) {
-			/* First save mod/ref bits */
-			pv->pv_va |= (npv->pv_va & PV_MASK);
-			pv->pv_next = npv->pv_next;
-			pv->pv_pmap = npv->pv_pmap;
-			pool_put(&pv_pool, npv);
-		} else {
-			pv->pv_pmap = NULL;
-			pv->pv_next = NULL;
-		}
+		pv->pv_pmap = NULL;
 		dcache_flush_page(pa);
-		splx(s);
+		mtx_leave(&pg->mdpage.pvmtx);
 	}
 	/* We should really only flush the pages we demapped. */
 }
@@ -2657,11 +2652,13 @@ ctx_free(struct pmap *pm)
 void
 pmap_enter_pv(struct pmap *pmap, vaddr_t va, paddr_t pa)
 {
-	pv_entry_t pv, npv;
-	int s;
+	pv_entry_t pv, npv = NULL;
+	struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
 
 	pv = pa_to_pvh(pa);
-	s = splvm();
+	mtx_enter(&pg->mdpage.pvmtx);
+
+retry:
 	if (pv->pv_pmap == NULL) {
 		/*
		 * No entries yet, use header as the first entry
@@ -2669,49 +2666,48 @@ pmap_enter_pv(struct pmap *pmap, vaddr_t va, paddr_t pa)
 		PV_SETVA(pv, va);
 		pv->pv_pmap = pmap;
 		pv->pv_next = NULL;
-	} else {
-		if (!(pv->pv_va & PV_ALIAS)) {
-			/*
-			 * There is at least one other VA mapping this page.
-			 * Check if they are cache index compatible. If not
-			 * remove all mappings, flush the cache and set page
-			 * to be mapped uncached. Caching will be restored
-			 * when pages are mapped compatible again.
-			 * XXX - caching is not currently being restored, but
-			 * XXX - I haven't seen the pages uncached since
-			 * XXX - using pmap_prefer(). mhitch
-			 */
-			if ((pv->pv_va ^ va) & VA_ALIAS_MASK) {
-				pv->pv_va |= PV_ALIAS;
-				pmap_page_cache(pmap, pa, 0);
-			}
-		}
+		mtx_leave(&pg->mdpage.pvmtx);
+		if (npv)
+			pool_put(&pv_pool, npv);
+		return;
+	}
+
+	if (npv == NULL) {
+		mtx_leave(&pg->mdpage.pvmtx);
+		npv = pool_get(&pv_pool, PR_NOWAIT);
+		if (npv == NULL)
+			panic("%s: no pv entries available", __func__);
+		mtx_enter(&pg->mdpage.pvmtx);
+		goto retry;
+	}
+
+	if (!(pv->pv_va & PV_ALIAS)) {
 		/*
		 * There is at least one other VA mapping this page.
-		 * Place this entry after the header.
-		 *
-		 * Note: the entry may already be in the table if
-		 * we are only changing the protection bits.
+		 * Check if they are cache index compatible. If not
+		 * remove all mappings, flush the cache and set page
+		 * to be mapped uncached. Caching will be restored
+		 * when pages are mapped compatible again.
+		 * XXX - caching is not currently being restored, but
+		 * XXX - I haven't seen the pages uncached since
+		 * XXX - using pmap_prefer(). mhitch
		 */
-		for (npv = pv; npv; npv = npv->pv_next) {
-			if (pmap == npv->pv_pmap && PV_MATCH(npv, va)) {
-				goto found;
-			}
+		if ((pv->pv_va ^ va) & VA_ALIAS_MASK) {
+			pv->pv_va |= PV_ALIAS;
+			pmap_page_cache(pmap, pa, 0);
 		}
-		/*
-		 * XXXXX can this cause us to recurse forever?
-		 */
-		npv = pool_get(&pv_pool, PR_NOWAIT);
-		if (npv == NULL)
-			panic("pmap_enter: new pv malloc() failed");
-		npv->pv_va = va&PV_VAMASK;
-		npv->pv_pmap = pmap;
-		npv->pv_next = pv->pv_next;
-		pv->pv_next = npv;
-found:
-		;
 	}
-	splx(s);
+
+	/*
	 * There is at least one other VA mapping this page.
	 * Place this entry after the header.
	 */
+	npv->pv_va = va & PV_VAMASK;
+	npv->pv_pmap = pmap;
+	npv->pv_next = pv->pv_next;
+	pv->pv_next = npv;
+
+	mtx_leave(&pg->mdpage.pvmtx);
 }
 
 /*
@@ -2720,16 +2716,12 @@ found:
 void
 pmap_remove_pv(struct pmap *pmap, vaddr_t va, paddr_t pa)
 {
-	pv_entry_t pv, opv, npv;
+	pv_entry_t pv, opv, npv = NULL;
+	struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
 	int64_t data = 0LL;
-	int s;
 
-	/*
-	 * Remove page from the PV table (raise IPL since we
-	 * may be called at interrupt time).
-	 */
 	opv = pv = pa_to_pvh(pa);
-	s = splvm();
+	mtx_enter(&pg->mdpage.pvmtx);
 
 	/*
	 * If it is the first entry on the list, it is actually
@@ -2746,7 +2738,6 @@ pmap_remove_pv(struct pmap *pmap, vaddr_t va, paddr_t pa)
 			pv->pv_va = (pv->pv_va & PV_MASK) | npv->pv_va;
 			pv->pv_next = npv->pv_next;
 			pv->pv_pmap = npv->pv_pmap;
-			pool_put(&pv_pool, npv);
 		} else {
 			pv->pv_pmap = NULL;
 			pv->pv_next = NULL;
@@ -2762,7 +2753,7 @@ pmap_remove_pv(struct pmap *pmap, vaddr_t va, paddr_t pa)
		 * Sometimes UVM gets confused and calls pmap_remove() instead
		 * of pmap_kremove()
		 */
-		splx(s);
+		mtx_leave(&pg->mdpage.pvmtx);
 		return;
 found:
 	pv->pv_next = npv->pv_next;
@@ -2777,7 +2768,6 @@ found:
	 * alias that may have occurred. However, that's a complicated
	 * operation involving multiple scans of the pv list.
	 */
-	pool_put(&pv_pool, npv);
 	}
 
 	/* Save REF/MOD info */
@@ -2786,15 +2776,19 @@ found:
 	/* Check to see if the alias went away */
 	if (opv->pv_va & PV_ALIAS) {
 		opv->pv_va &= ~PV_ALIAS;
-		for (npv = opv; npv; npv = npv->pv_next) {
-			if ((npv->pv_va ^ opv->pv_va) & VA_ALIAS_MASK) {
+		for (pv = opv; pv; pv = pv->pv_next) {
+			if ((pv->pv_va ^ opv->pv_va) & VA_ALIAS_MASK) {
 				opv->pv_va |= PV_ALIAS;
 			}
 		}
 		if (!(opv->pv_va & PV_ALIAS))
 			pmap_page_cache(pmap, pa, 1);
 	}
-	splx(s);
+
+	mtx_leave(&pg->mdpage.pvmtx);
+
+	if (npv)
+		pool_put(&pv_pool, npv);
 }
 
 /*
@@ -2806,7 +2800,7 @@ void
 pmap_page_cache(struct pmap *pm, paddr_t pa, int mode)
 {
 	pv_entry_t pv;
-	int s;
+	struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
 
 	if (CPU_ISSUN4US || CPU_ISSUN4V)
 		return;
@@ -2814,7 +2808,8 @@ pmap_page_cache(struct pmap *pm, paddr_t pa, int mode)
 	pv = pa_to_pvh(pa);
 	if (pv == NULL)
 		return;
-	s = splvm();
+
+	MUTEX_ASSERT_LOCKED(&pg->mdpage.pvmtx);
 
 	while (pv) {
 		vaddr_t va;
@@ -2854,8 +2849,6 @@ pmap_page_cache(struct pmap *pm, paddr_t pa, int mode)
 
 		pv = pv->pv_next;
 	}
-
-	splx(s);
 }
 
 int
-- 
2.20.1
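
A note on the reference counting above: pmap_reference() and pmap_destroy() switch from a plain pm_refs++/-- under splvm() to the kernel's atomic_inc_int()/atomic_dec_int_nv() primitives, so a pmap can be referenced and released without taking its mutex. The standalone sketch below shows the same pattern in portable C11 atomics; the struct and function names are invented for illustration and are not part of the patch.

	/*
	 * Hypothetical userland illustration of the refcount idiom used by
	 * pmap_reference()/pmap_destroy().  C11 atomics stand in for the
	 * kernel's atomic_inc_int()/atomic_dec_int_nv().
	 */
	#include <stdatomic.h>
	#include <stdlib.h>

	struct object {
		atomic_uint refs;		/* starts at 1 for the creator */
	};

	static void
	object_release(struct object *o)
	{
		free(o);			/* stands in for pmap_release() + pool_put() */
	}

	void
	object_reference(struct object *o)
	{
		atomic_fetch_add(&o->refs, 1);	/* like atomic_inc_int(&pm->pm_refs) */
	}

	void
	object_destroy(struct object *o)
	{
		/*
		 * atomic_dec_int_nv() returns the new value, so only the
		 * thread that drops the count to zero frees the object.
		 */
		if (atomic_fetch_sub(&o->refs, 1) == 1)
			object_release(o);
	}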
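
The reworked pmap_enter_pv() is the clearest example of "not holding locks too long": pool_get(9) may not be called with the pv mutex held, so the function drops the mutex, allocates a pv entry, relocks, and re-examines the list before using (or discarding) the new entry, since another CPU may have changed the list in the meantime. Below is a minimal userland analog of that drop-allocate-relock-retry pattern, using pthreads and malloc() in place of mutex(9) and pool_get(9); all identifiers are hypothetical and this is a sketch of the pattern, not the kernel code.

	/*
	 * Sketch of the "drop the lock to allocate, then re-check" pattern
	 * from pmap_enter_pv().  pthread_mutex_t stands in for struct mutex,
	 * malloc() for pool_get(9); names are invented for the example.
	 */
	#include <pthread.h>
	#include <stdlib.h>

	struct entry {
		struct entry	*next;
		unsigned long	 va;
	};

	struct pvhead {
		pthread_mutex_t	 mtx;
		int		 in_use;	/* is the embedded header entry valid? */
		struct entry	 hdr;		/* first mapping lives in the header */
	};

	int
	pvhead_insert(struct pvhead *h, unsigned long va)
	{
		struct entry *npv = NULL;

		pthread_mutex_lock(&h->mtx);
	retry:
		if (!h->in_use) {
			/* No mappings yet: the embedded header entry is enough. */
			h->hdr.va = va;
			h->hdr.next = NULL;
			h->in_use = 1;
			pthread_mutex_unlock(&h->mtx);
			free(npv);	/* allocated but lost the race; give it back */
			return 0;
		}
		if (npv == NULL) {
			/*
			 * A continuation entry is needed, but allocating while
			 * holding the mutex could sleep, so drop the lock,
			 * allocate, relock and re-examine the list from scratch.
			 */
			pthread_mutex_unlock(&h->mtx);
			if ((npv = malloc(sizeof(*npv))) == NULL)
				return -1;
			pthread_mutex_lock(&h->mtx);
			goto retry;
		}
		/* Chain the new entry right behind the embedded header. */
		npv->va = va;
		npv->next = h->hdr.next;
		h->hdr.next = npv;
		pthread_mutex_unlock(&h->mtx);
		return 0;
	}

pmap_page_protect() in the patch uses the same trick in the other direction: it unlinks a continuation entry, releases the pv mutex around pool_put(9), re-enters the mutex, and restarts the walk from the list head.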