From 1e7d0b9f1dc277cda5e619c8498584b3b2141865 Mon Sep 17 00:00:00 2001
From: mpi
Date: Thu, 22 Jan 2015 17:55:45 +0000
Subject: [PATCH] Let powerpc's bus_space(9) use the same pmap and uvm
 interfaces as the other archs.

Specify the caching policy by passing PMAP_* flags to pmap_kenter_pa(),
like the majority of our archs do, and kill pmap_kenter_cache().

Spread some pmap_update() calls along the way.

While here, remove the unused flag argument from pmap_fill_pte().

Finally, convert the bus map/unmap functions to km_alloc/free() instead
of uvm_km_valloc/free().

Inputs from kettenis@ and miod@, ok miod@
---
 sys/arch/powerpc/include/pmap.h      | 12 ++---
 sys/arch/powerpc/powerpc/bus_space.c | 81 +++++++++++++---------------
 sys/arch/powerpc/powerpc/pmap.c      | 72 ++++++++++++-------------
 3 files changed, 76 insertions(+), 89 deletions(-)
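
A note for readers, kept below the fold so git-am ignores it: the sketch
that follows is illustrative only and is not part of the diff.
example_map_device() is a made-up helper; it shows the shape a mapping
path takes under the new convention, with the caching policy riding in
the low bits of the physical address handed to pmap_kenter_pa(), and a
single pmap_update() flushing the batch, as bus_mem_add_mapping() does
after this change:

	/* Illustrative sketch only -- not part of the diff. */
	vaddr_t
	example_map_device(paddr_t pa, psize_t size, int cacheable)
	{
		paddr_t spa = trunc_page(pa);
		psize_t off = pa - spa;
		psize_t len = round_page(size + off);
		vaddr_t va, sva;
		int pmapflags;

		/* The MD caching flags travel in the pa argument. */
		if (cacheable)
			pmapflags = PMAP_WT;		/* write-through */
		else
			pmapflags = PMAP_NOCACHE;	/* cache-inhibited */

		sva = va = (vaddr_t)km_alloc(len, &kv_any, &kp_none,
		    &kd_nowait);
		if (va == 0)
			return (0);

		for (; len > 0; len -= PAGE_SIZE) {
			pmap_kenter_pa(va, spa | pmapflags,
			    PROT_READ | PROT_WRITE);
			spa += PAGE_SIZE;
			va += PAGE_SIZE;
		}
		/* Make the whole batch of mappings visible at once. */
		pmap_update(pmap_kernel());

		return (sva + off);
	}

The unmap side mirrors it: pmap_kremove(sva, len), then
pmap_update(pmap_kernel()), then km_free((void *)sva, len, &kv_any,
&kp_none).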
diff --git a/sys/arch/powerpc/include/pmap.h b/sys/arch/powerpc/include/pmap.h
index 3f1db055a1d..74d188675d0 100644
--- a/sys/arch/powerpc/include/pmap.h
+++ b/sys/arch/powerpc/include/pmap.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: pmap.h,v 1.51 2015/01/20 17:04:21 mpi Exp $	*/
+/*	$OpenBSD: pmap.h,v 1.52 2015/01/22 17:55:45 mpi Exp $	*/
 /*	$NetBSD: pmap.h,v 1.1 1996/09/30 16:34:29 ws Exp $	*/
 
 /*-
@@ -71,9 +71,6 @@ typedef u_int sr_t;
 #define VP_IDX2_MASK	(VP_IDX2_SIZE-1)
 #define VP_IDX2_POS	12
 
-/* functions used by the bus layer for device accesses */
-void pmap_kenter_cache(vaddr_t va, paddr_t pa, vm_prot_t prot, int cacheable);
-
 /* cache flags */
 #define PMAP_CACHE_DEFAULT	0	/* WB cache managed mem, devices not */
 #define PMAP_CACHE_CI		1	/* cache inhibit */
@@ -158,12 +155,11 @@ int reserve_dumppages(caddr_t p);
 #define PG_PMAP_EXE	PG_PMAP2
 
 /*
- * MD flags to pmap_enter:
+ * MD flags that we use for pmap_enter (in the pa):
  */
-
-/* to get just the pa from params to pmap_enter */
-#define PMAP_PA_MASK	~((paddr_t)PAGE_MASK)
+#define PMAP_PA_MASK	~((paddr_t)PAGE_MASK)	/* to remove the flags */
 #define PMAP_NOCACHE	0x1		/* map uncached */
+#define PMAP_WT		0x2		/* map write-through */
 
 #endif /* _KERNEL */
diff --git a/sys/arch/powerpc/powerpc/bus_space.c b/sys/arch/powerpc/powerpc/bus_space.c
index 8aee3581309..e87d44c5acf 100644
--- a/sys/arch/powerpc/powerpc/bus_space.c
+++ b/sys/arch/powerpc/powerpc/bus_space.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: bus_space.c,v 1.1 2015/01/20 18:34:00 mpi Exp $	*/
+/*	$OpenBSD: bus_space.c,v 1.2 2015/01/22 17:55:46 mpi Exp $	*/
 /*	$NetBSD: machdep.c,v 1.4 1996/10/16 19:33:11 ws Exp $	*/
 
 /*
@@ -46,7 +46,6 @@ extern struct extent *devio_ex;
 
 int bus_mem_add_mapping(bus_addr_t, bus_size_t, int, bus_space_handle_t *);
 bus_addr_t bus_space_unmap_p(bus_space_tag_t, bus_space_handle_t, bus_size_t);
-void bus_space_unmap(bus_space_tag_t, bus_space_handle_t, bus_size_t);
 
 /* BUS functions */
 
@@ -94,28 +93,27 @@ bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size)
 	bus_size_t off, len;
 	bus_addr_t bpa;
 
-	/* should this verify that the proper size is freed? */
 	sva = trunc_page(bsh);
 	off = bsh - sva;
-	len = size+off;
+	len = round_page(size+off);
 
 	if (pmap_extract(pmap_kernel(), sva, &bpa) == TRUE) {
-		if (extent_free(devio_ex, bpa | (bsh & PAGE_MASK), size, EX_NOWAIT | (ppc_malloc_ok ? EX_MALLOCOK : 0)))
+		if (extent_free(devio_ex, bpa | (bsh & PAGE_MASK), size,
+		    EX_NOWAIT | (ppc_malloc_ok ? EX_MALLOCOK : 0))) {
 			printf("bus_space_map: pa 0x%lx, size 0x%lx\n",
 			    bpa, size);
 			printf("bus_space_map: can't free region\n");
 		}
 	}
+
+	pmap_kremove(sva, len);
+	pmap_update(pmap_kernel());
+
 	/* do not free memory which was stolen from the vm system */
 	if (ppc_malloc_ok &&
 	    ((sva >= VM_MIN_KERNEL_ADDRESS) && (sva < VM_MAX_KERNEL_ADDRESS)))
-		uvm_km_free(kernel_map, sva, len);
-	else {
-		pmap_remove(pmap_kernel(), sva, sva + len);
-		pmap_update(pmap_kernel());
-	}
+		km_free((void *)sva, len, &kv_any, &kp_none);
 }
 
 paddr_t
@@ -143,48 +141,46 @@ bus_mem_add_mapping(bus_addr_t bpa, bus_size_t size, int flags,
 {
 	bus_addr_t vaddr;
 	bus_addr_t spa, epa;
-	bus_size_t off;
-	int len;
+	bus_size_t off, len;
+	int pmapflags;
 
 	spa = trunc_page(bpa);
 	epa = bpa + size;
 	off = bpa - spa;
-	len = size+off;
+	len = round_page(size+off);
 
-#if 0
-	if (epa <= spa) {
+#ifdef DIAGNOSTIC
+	if (epa <= spa && epa != 0)
 		panic("bus_mem_add_mapping: overflow");
-	}
 #endif
 
-	if (ppc_malloc_ok == 0) {
-		bus_size_t alloc_size;
+	if (ppc_malloc_ok == 0) {
 		/* need to steal vm space before kernel vm is initialized */
-		alloc_size = round_page(len);
-
 		vaddr = VM_MIN_KERNEL_ADDRESS + ppc_kvm_stolen;
-		ppc_kvm_stolen += alloc_size;
+		ppc_kvm_stolen += len;
 		if (ppc_kvm_stolen > PPC_SEGMENT_LENGTH) {
 			panic("ppc_kvm_stolen, out of space");
 		}
 	} else {
-		vaddr = uvm_km_valloc(kernel_map, len);
+		vaddr = (vaddr_t)km_alloc(len, &kv_any, &kp_none, &kd_nowait);
 		if (vaddr == 0)
 			return (ENOMEM);
 	}
 
 	*bshp = vaddr + off;
-#ifdef DEBUG_BUS_MEM_ADD_MAPPING
-	printf("mapping %x size %x to %x vbase %x\n",
-	    bpa, size, *bshp, spa);
-#endif
+
+	if (flags & BUS_SPACE_MAP_CACHEABLE)
+		pmapflags = PMAP_WT;
+	else
+		pmapflags = PMAP_NOCACHE;
+
 	for (; len > 0; len -= PAGE_SIZE) {
-		pmap_kenter_cache(vaddr, spa, PROT_READ | PROT_WRITE,
-		    (flags & BUS_SPACE_MAP_CACHEABLE) ?
-		    PMAP_CACHE_WT : PMAP_CACHE_CI);
+		pmap_kenter_pa(vaddr, spa | pmapflags, PROT_READ | PROT_WRITE);
 		spa += PAGE_SIZE;
 		vaddr += PAGE_SIZE;
 	}
-	return 0;
+	pmap_update(pmap_kernel());
+
+	return (0);
 }
 
 int
@@ -222,18 +218,18 @@ mapiodev(paddr_t pa, psize_t len)
 			panic("ppc_kvm_stolen, out of space");
 		}
 	} else {
-		va = uvm_km_valloc(kernel_map, size);
+		va = (vaddr_t)km_alloc(size, &kv_any, &kp_none, &kd_nowait);
+		if (va == 0)
+			return (NULL);
 	}
-	if (va == 0)
-		return NULL;
-
 	for (vaddr = va; size > 0; size -= PAGE_SIZE) {
-		pmap_kenter_cache(vaddr, spa,
-		    PROT_READ | PROT_WRITE, PMAP_CACHE_DEFAULT);
+		pmap_kenter_pa(vaddr, spa, PROT_READ | PROT_WRITE);
 		spa += PAGE_SIZE;
 		vaddr += PAGE_SIZE;
 	}
+	pmap_update(pmap_kernel());
+
 	return (void *) (va+off);
 }
 
@@ -243,17 +239,14 @@ unmapiodev(void *kva, psize_t p_size)
 	vaddr_t vaddr;
 	int size;
 
-	size = p_size;
+	size = round_page(p_size);
 	vaddr = trunc_page((vaddr_t)kva);
 
-	uvm_km_free(kernel_map, vaddr, size);
-
-	for (; size > 0; size -= PAGE_SIZE) {
-		pmap_remove(pmap_kernel(), vaddr, vaddr + PAGE_SIZE - 1);
-		vaddr += PAGE_SIZE;
-	}
+	pmap_kremove(vaddr, size);
 	pmap_update(pmap_kernel());
+
+	km_free((void *)vaddr, size, &kv_any, &kp_none);
 }
diff --git a/sys/arch/powerpc/powerpc/pmap.c b/sys/arch/powerpc/powerpc/pmap.c
index 97a8010c7b4..9ed22a7b2a2 100644
--- a/sys/arch/powerpc/powerpc/pmap.c
+++ b/sys/arch/powerpc/powerpc/pmap.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: pmap.c,v 1.138 2015/01/21 19:10:26 mpi Exp $	*/
+/*	$OpenBSD: pmap.c,v 1.139 2015/01/22 17:55:46 mpi Exp $	*/
 
 /*
  * Copyright (c) 2001, 2002, 2007 Dale Rahn.
@@ -153,9 +153,9 @@ void pmap_hash_remove(struct pte_desc *);
 void pte_insert32(struct pte_desc *) __noprof;
 void pte_insert64(struct pte_desc *) __noprof;
 void pmap_fill_pte64(pmap_t, vaddr_t, paddr_t, struct pte_desc *, vm_prot_t,
-	    int, int) __noprof;
+	    int) __noprof;
 void pmap_fill_pte32(pmap_t, vaddr_t, paddr_t, struct pte_desc *, vm_prot_t,
-	    int, int) __noprof;
+	    int) __noprof;
 
 void pmap_syncicache_user_virt(pmap_t pm, vaddr_t va);
 
@@ -535,11 +535,15 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
 	struct pte_desc *pted;
 	struct vm_page *pg;
 	boolean_t nocache = (pa & PMAP_NOCACHE) != 0;
+	boolean_t wt = (pa & PMAP_WT) != 0;
 	int s;
 	int need_sync = 0;
 	int cache;
 	int error;
 
+	KASSERT(!(wt && nocache));
+	pa &= PMAP_PA_MASK;
+
 	/* MP - Acquire lock for this pmap */
 	s = splvm();
@@ -571,19 +575,19 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
 		}
 	}
 
-	pa &= PMAP_PA_MASK;
-
-	/* Calculate PTE */
 	pg = PHYS_TO_VM_PAGE(pa);
-	if (pg != NULL && !nocache)
-		cache = PMAP_CACHE_WB; /* managed memory is cacheable */
+	if (wt)
+		cache = PMAP_CACHE_WT;
+	else if (pg != NULL && !(pg->pg_flags & PG_DEV) && !nocache)
+		cache = PMAP_CACHE_WB;
 	else
 		cache = PMAP_CACHE_CI;
 
+	/* Calculate PTE */
 	if (ppc_proc_is_64b)
-		pmap_fill_pte64(pm, va, pa, pted, prot, flags, cache);
+		pmap_fill_pte64(pm, va, pa, pted, prot, cache);
 	else
-		pmap_fill_pte32(pm, va, pa, pted, prot, flags, cache);
+		pmap_fill_pte32(pm, va, pa, pted, prot, cache);
 
 	if (pg != NULL) {
 		pmap_enter_pv(pted, pg); /* only managed mem */
@@ -760,12 +764,17 @@ pmap_remove_pg(pmap_t pm, vaddr_t va)
  *
  */
 void
-_pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, int flags, int cache)
+pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
 {
 	struct pte_desc *pted;
 	struct vm_page *pg;
-	int s;
+	boolean_t nocache = (pa & PMAP_NOCACHE) != 0;
+	boolean_t wt = (pa & PMAP_WT) != 0;
 	pmap_t pm;
+	int cache, s;
+
+	KASSERT(!(wt && nocache));
+	pa &= PMAP_PA_MASK;
 
 	pm = pmap_kernel();
@@ -777,6 +786,7 @@ _pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, int flags, int cache)
 		pmap_remove_pg(pm, va); /* pted is reused */
 
 	pm->pm_stats.resident_count++;
+
 	if (prot & PROT_WRITE) {
 		pg = PHYS_TO_VM_PAGE(pa);
 		if (pg != NULL)
@@ -789,19 +799,19 @@ _pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, int flags, int cache)
 			    va, pa);
 	}
 
-	if (cache == PMAP_CACHE_DEFAULT) {
-		pg = PHYS_TO_VM_PAGE(pa);
-		if (pg != NULL && (pg->pg_flags & PG_DEV) == 0)
-			cache = PMAP_CACHE_WB;
-		else
-			cache = PMAP_CACHE_CI;
-	}
+	pg = PHYS_TO_VM_PAGE(pa);
+	if (wt)
+		cache = PMAP_CACHE_WT;
+	else if (pg != NULL && !(pg->pg_flags & PG_DEV) && !nocache)
+		cache = PMAP_CACHE_WB;
+	else
+		cache = PMAP_CACHE_CI;
 
 	/* Calculate PTE */
 	if (ppc_proc_is_64b)
-		pmap_fill_pte64(pm, va, pa, pted, prot, flags, cache);
+		pmap_fill_pte64(pm, va, pa, pted, prot, cache);
 	else
-		pmap_fill_pte32(pm, va, pa, pted, prot, flags, cache);
+		pmap_fill_pte32(pm, va, pa, pted, prot, cache);
 
 	/*
 	 * Insert into HTAB
@@ -826,18 +836,6 @@ _pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, int flags, int cache)
 	splx(s);
 }
 
-void
-pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
-{
-	_pmap_kenter_pa(va, pa, prot, 0, PMAP_CACHE_DEFAULT);
-}
-
-void
-pmap_kenter_cache(vaddr_t va, paddr_t pa, vm_prot_t prot, int cacheable)
-{
-	_pmap_kenter_pa(va, pa, prot, 0, cacheable);
-}
-
 /*
  * remove kernel (pmap_kernel()) mappings
  */
@@ -951,7 +949,7 @@ pmap_hash_remove(struct pte_desc *pted)
  */
 void
 pmap_fill_pte64(pmap_t pm, vaddr_t va, paddr_t pa, struct pte_desc *pted,
-	vm_prot_t prot, int flags, int cache)
+	vm_prot_t prot, int cache)
 {
 	sr_t sr;
 	struct pte_64 *pte64;
@@ -991,7 +989,7 @@ pmap_fill_pte64(pmap_t pm, vaddr_t va, paddr_t pa, struct pte_desc *pted,
  */
 void
 pmap_fill_pte32(pmap_t pm, vaddr_t va, paddr_t pa, struct pte_desc *pted,
-	vm_prot_t prot, int flags, int cache)
+	vm_prot_t prot, int cache)
 {
 	sr_t sr;
 	struct pte_32 *pte32;
@@ -2242,12 +2240,12 @@ pte_spill_r(u_int32_t va, u_int32_t msr, u_int32_t dsisr, int exec_fault)
 	aligned_va = trunc_page(va);
 	if (ppc_proc_is_64b) {
 		pmap_fill_pte64(pm, aligned_va, aligned_va,
-		    pted, prot, 0, PMAP_CACHE_WB);
+		    pted, prot, PMAP_CACHE_WB);
 		pte_insert64(pted);
 		return 1;
 	} else {
 		pmap_fill_pte32(pm, aligned_va, aligned_va,
-		    pted, prot, 0, PMAP_CACHE_WB);
+		    pted, prot, PMAP_CACHE_WB);
 		pte_insert32(pted);
 		return 1;
 	}
-- 
2.20.1
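
P.S.: for readers new to the flags-in-pa idiom, here is a minimal
standalone sketch of the decode side, in plain C with simplified types.
decode_cache_policy() and its is_managed_ram argument are invented for
this note and assume 4K pages; the branch structure matches the cache
selection that pmap_enter() and pmap_kenter_pa() share after this diff:

	#include <assert.h>
	#include <stdint.h>

	#define PAGE_MASK	0xfffULL		/* assumes 4K pages */
	#define PMAP_PA_MASK	~((uint64_t)PAGE_MASK)	/* to remove the flags */
	#define PMAP_NOCACHE	0x1			/* map uncached */
	#define PMAP_WT		0x2			/* map write-through */

	enum cache_policy { CACHE_WB, CACHE_CI, CACHE_WT };

	static enum cache_policy
	decode_cache_policy(uint64_t *pa, int is_managed_ram)
	{
		int nocache = (*pa & PMAP_NOCACHE) != 0;
		int wt = (*pa & PMAP_WT) != 0;

		/* Same invariant as the KASSERT added by the diff. */
		assert(!(wt && nocache));
		/* Strip the flags; *pa is a plain physical address again. */
		*pa &= PMAP_PA_MASK;

		if (wt)
			return (CACHE_WT);	/* write-through */
		if (is_managed_ram && !nocache)
			return (CACHE_WB);	/* managed memory is cacheable */
		return (CACHE_CI);		/* devices: cache-inhibited */
	}

Write-back stays the default for managed RAM; device pages or an
explicit PMAP_NOCACHE fall back to cache-inhibited, and PMAP_WT carries
the write-through case that bus_space(9) uses for BUS_SPACE_MAP_CACHEABLE
mappings.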