-/* $OpenBSD: pmap7.c,v 1.33 2016/08/06 16:46:25 kettenis Exp $ */
+/* $OpenBSD: pmap7.c,v 1.34 2016/08/08 09:06:47 kettenis Exp $ */
/* $NetBSD: pmap.c,v 1.147 2004/01/18 13:03:50 scw Exp $ */
/*
ptep = &l2b->l2b_kva[l2pte_index(va)];
pte = *ptep;
- /* XXX redundant with PTE_SYNC_RANGE() ? */
- cpu_idcache_wbinv_range(va, PAGE_SIZE);
- cpu_sdcache_wbinv_range(va, pte & L2_S_FRAME, PAGE_SIZE);
if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
*ptep = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
PTE_SYNC(ptep);
cpu_tlb_flushD_SE(va);
cpu_cpwait();
+
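+ /*
+  * The page may still have stale lines in the data and
+  * secondary caches from its previous cache mode; clean
+  * and invalidate them now that the PTE has been updated.
+  */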
+ cpu_dcache_wbinv_range(va, PAGE_SIZE);
+ cpu_sdcache_wbinv_range(va, pa, PAGE_SIZE);
}
/*
{
pmap_t pm;
struct pv_entry *pv;
- boolean_t wb = FALSE;
/*
* To save time, we are only walking the pv list if an I$ invalidation
if (PV_BEEN_EXECD(pv->pv_flags))
cpu_icache_sync_range(pv->pv_va, PAGE_SIZE);
-
- /*
- * If we have not written back that page yet, do this
- * now while we still have a valid mapping for it.
- */
- if (!wb) {
- cpu_dcache_wb_range(pv->pv_va, PAGE_SIZE);
- cpu_sdcache_wb_range(pv->pv_va,
- VM_PAGE_TO_PHYS(pg), PAGE_SIZE);
- wb = TRUE;
- }
}
}
-
- /*
- * If there is no active mapping left, or we did not bother checking
- * for one, this does not mean the page doesn't have stale data. Map
- * it in a working page and writeback.
- */
- if (!wb) {
- *cwb_pte = L2_S_PROTO | VM_PAGE_TO_PHYS(pg) |
- L2_S_PROT(PTE_KERNEL, PROT_WRITE) | pte_l2_s_cache_mode;
- PTE_SYNC(cwb_pte);
- cpu_tlb_flushD_SE(cwbp);
- cpu_cpwait();
- cpu_dcache_wb_range(cwbp, PAGE_SIZE);
- cpu_sdcache_wb_range(cwbp, VM_PAGE_TO_PHYS(pg), PAGE_SIZE);
- }
}
/*
(pm == curpm || pm == pmap_kernel())) {
if (PV_BEEN_EXECD(pv->pv_flags))
cpu_icache_sync_range(pv->pv_va, PAGE_SIZE);
- if (flush == FALSE) {
- paddr_t pa;
- cpu_dcache_wb_range(pv->pv_va,
- PAGE_SIZE);
- if (pmap_extract(pm, (vaddr_t)pv->pv_va,
- &pa))
- cpu_sdcache_wb_range(pv->pv_va,
- pa, PAGE_SIZE);
- }
flush = TRUE;
}
oflags = pmap_modify_pv(pg, pm, va,
PVF_WRITE | PVF_EXEC | PVF_WIRED |
PVF_MOD | PVF_REF, nflags);
-
- /*
- * We may need to flush the cache if we're
- * doing rw-ro...
- */
- if ((oflags & PVF_NC) == 0 &&
- l2pte_is_writeable(opte, pm) &&
- (prot & PROT_WRITE) == 0) {
- cpu_dcache_wb_range(va, PAGE_SIZE);
- cpu_sdcache_wb_range(va, opte & L2_S_FRAME,
- PAGE_SIZE);
- }
} else {
/*
* New mapping, or changing the backing page
ptep = &l2b->l2b_kva[l2pte_index(va)];
opte = *ptep;
- if (l2pte_valid(opte)) {
- cpu_dcache_wb_range(va, PAGE_SIZE);
- cpu_sdcache_wb_range(va, opte & L2_S_FRAME, PAGE_SIZE);
- } else
if (opte == 0)
l2b->l2b_occupancy++;
while (va < next_bucket) {
opte = *ptep;
- if (l2pte_valid(opte)) {
- cpu_dcache_wb_range(va, PAGE_SIZE);
- cpu_sdcache_wb_range(va, opte & L2_S_FRAME,
- PAGE_SIZE);
- }
if (opte != 0) { /* !! not l2pte_valid */
*ptep = L2_TYPE_INV;
PTE_SYNC(ptep);
/*
* Copy the kernel's L1 entries to each new L1.
*/
- if (pmap_initialized) {
+ if (pmap_initialized)
memcpy(l1pt, pmap_kernel()->pm_l1->l1_kva, L1_TABLE_SIZE);
-/* XXX overkill? */
- cpu_dcache_wb_range((vaddr_t)l1pt, L1_TABLE_SIZE);
- }
if (pmap_extract(pmap_kernel(), (vaddr_t)l1pt,
&l1->l1_physaddr) == FALSE)