other archs.
Specify the caching policy by passing PMAP_* flags to pmap_kenter_pa(),
like the majority of our archs do, and kill pmap_kenter_cache().
Spread some pmap_update() along the way.
While here remove the unused flag argument from pmap_fill_pte().
Finally, convert the bus map/unmap functions to use km_alloc/free() instead
of uvm_km_valloc/free().
Inputs from kettenis@ and miod@, ok miod@
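
For illustration only (this snippet is not part of the diff), the new calling
convention amounts to ORing the MD flag into the physical address handed to
pmap_kenter_pa(); roughly, for a device page that wants a write-through
mapping:

	/* before: caching policy passed as a separate argument */
	pmap_kenter_cache(va, pa, PROT_READ | PROT_WRITE, PMAP_CACHE_WT);

	/* after: caching policy encoded in the low bits of the pa */
	pmap_kenter_pa(va, pa | PMAP_WT, PROT_READ | PROT_WRITE);
	pmap_update(pmap_kernel());

va and pa are placeholders here; the real callers are in the bus_space.c
hunks below.
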
-/* $OpenBSD: pmap.h,v 1.51 2015/01/20 17:04:21 mpi Exp $ */
+/* $OpenBSD: pmap.h,v 1.52 2015/01/22 17:55:45 mpi Exp $ */
/* $NetBSD: pmap.h,v 1.1 1996/09/30 16:34:29 ws Exp $ */
/*-
#define VP_IDX2_MASK (VP_IDX2_SIZE-1)
#define VP_IDX2_POS 12
-/* functions used by the bus layer for device accesses */
-void pmap_kenter_cache(vaddr_t va, paddr_t pa, vm_prot_t prot, int cacheable);
-
/* cache flags */
#define PMAP_CACHE_DEFAULT 0 /* WB cache managed mem, devices not */
#define PMAP_CACHE_CI 1 /* cache inhibit */
#define PG_PMAP_EXE PG_PMAP2
/*
- * MD flags to pmap_enter:
+ * MD flags that we use for pmap_enter (in the pa):
*/
-
-/* to get just the pa from params to pmap_enter */
-#define PMAP_PA_MASK ~((paddr_t)PAGE_MASK)
+#define PMAP_PA_MASK ~((paddr_t)PAGE_MASK) /* to remove the flags */
#define PMAP_NOCACHE 0x1 /* map uncached */
+#define PMAP_WT 0x2 /* map write-through */
#endif /* _KERNEL */
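
(Illustration, distilled from the pmap.c hunk further down: the pmap strips
the flags out again before using the address.)

	boolean_t nocache = (pa & PMAP_NOCACHE) != 0;
	boolean_t wt = (pa & PMAP_WT) != 0;

	KASSERT(!(wt && nocache));	/* the two policies are exclusive */
	pa &= PMAP_PA_MASK;		/* plain physical address from here on */
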
-/* $OpenBSD: bus_space.c,v 1.1 2015/01/20 18:34:00 mpi Exp $ */
+/* $OpenBSD: bus_space.c,v 1.2 2015/01/22 17:55:46 mpi Exp $ */
/* $NetBSD: machdep.c,v 1.4 1996/10/16 19:33:11 ws Exp $ */
/*
int bus_mem_add_mapping(bus_addr_t, bus_size_t, int, bus_space_handle_t *);
bus_addr_t bus_space_unmap_p(bus_space_tag_t, bus_space_handle_t, bus_size_t);
-void bus_space_unmap(bus_space_tag_t, bus_space_handle_t, bus_size_t);
/* BUS functions */
bus_size_t off, len;
bus_addr_t bpa;
- /* should this verify that the proper size is freed? */
sva = trunc_page(bsh);
off = bsh - sva;
- len = size+off;
+ len = round_page(size+off);
if (pmap_extract(pmap_kernel(), sva, &bpa) == TRUE) {
- if (extent_free(devio_ex, bpa | (bsh & PAGE_MASK), size, EX_NOWAIT |
- (ppc_malloc_ok ? EX_MALLOCOK : 0)))
+ if (extent_free(devio_ex, bpa | (bsh & PAGE_MASK), size,
+ EX_NOWAIT | (ppc_malloc_ok ? EX_MALLOCOK : 0)))
{
printf("bus_space_map: pa 0x%lx, size 0x%lx\n",
bpa, size);
printf("bus_space_map: can't free region\n");
}
}
+
+ pmap_kremove(sva, len);
+ pmap_update(pmap_kernel());
+
/* do not free memory which was stolen from the vm system */
if (ppc_malloc_ok &&
((sva >= VM_MIN_KERNEL_ADDRESS) && (sva < VM_MAX_KERNEL_ADDRESS)))
- uvm_km_free(kernel_map, sva, len);
- else {
- pmap_remove(pmap_kernel(), sva, sva + len);
- pmap_update(pmap_kernel());
- }
+ km_free((void *)sva, len, &kv_any, &kp_none);
}
paddr_t
{
bus_addr_t vaddr;
bus_addr_t spa, epa;
- bus_size_t off;
- int len;
+ bus_size_t off, len;
+ int pmapflags;
spa = trunc_page(bpa);
epa = bpa + size;
off = bpa - spa;
- len = size+off;
+ len = round_page(size+off);
-#if 0
- if (epa <= spa) {
+#ifdef DIAGNOSTIC
+ if (epa <= spa && epa != 0)
panic("bus_mem_add_mapping: overflow");
- }
#endif
- if (ppc_malloc_ok == 0) {
- bus_size_t alloc_size;
+ if (ppc_malloc_ok == 0) {
/* need to steal vm space before kernel vm is initialized */
- alloc_size = round_page(len);
-
vaddr = VM_MIN_KERNEL_ADDRESS + ppc_kvm_stolen;
- ppc_kvm_stolen += alloc_size;
+ ppc_kvm_stolen += len;
if (ppc_kvm_stolen > PPC_SEGMENT_LENGTH) {
panic("ppc_kvm_stolen, out of space");
}
} else {
- vaddr = uvm_km_valloc(kernel_map, len);
+ vaddr = (vaddr_t)km_alloc(len, &kv_any, &kp_none, &kd_nowait);
if (vaddr == 0)
return (ENOMEM);
}
*bshp = vaddr + off;
-#ifdef DEBUG_BUS_MEM_ADD_MAPPING
- printf("mapping %x size %x to %x vbase %x\n",
- bpa, size, *bshp, spa);
-#endif
+
+ if (flags & BUS_SPACE_MAP_CACHEABLE)
+ pmapflags = PMAP_WT;
+ else
+ pmapflags = PMAP_NOCACHE;
+
for (; len > 0; len -= PAGE_SIZE) {
- pmap_kenter_cache(vaddr, spa, PROT_READ | PROT_WRITE,
- (flags & BUS_SPACE_MAP_CACHEABLE) ?
- PMAP_CACHE_WT : PMAP_CACHE_CI);
+ pmap_kenter_pa(vaddr, spa | pmapflags, PROT_READ | PROT_WRITE);
spa += PAGE_SIZE;
vaddr += PAGE_SIZE;
}
- return 0;
+ pmap_update(pmap_kernel());
+
+ return (0);
}
int
panic("ppc_kvm_stolen, out of space");
}
} else {
- va = uvm_km_valloc(kernel_map, size);
+ va = (vaddr_t)km_alloc(size, &kv_any, &kp_none, &kd_nowait);
+ if (va == 0)
+ return (NULL);
}
- if (va == 0)
- return NULL;
-
for (vaddr = va; size > 0; size -= PAGE_SIZE) {
- pmap_kenter_cache(vaddr, spa,
- PROT_READ | PROT_WRITE, PMAP_CACHE_DEFAULT);
+ pmap_kenter_pa(vaddr, spa, PROT_READ | PROT_WRITE);
spa += PAGE_SIZE;
vaddr += PAGE_SIZE;
}
+ pmap_update(pmap_kernel());
+
return (void *) (va+off);
}
vaddr_t vaddr;
int size;
- size = p_size;
+ size = round_page(p_size);
vaddr = trunc_page((vaddr_t)kva);
- uvm_km_free(kernel_map, vaddr, size);
-
- for (; size > 0; size -= PAGE_SIZE) {
- pmap_remove(pmap_kernel(), vaddr, vaddr + PAGE_SIZE - 1);
- vaddr += PAGE_SIZE;
- }
+ pmap_kremove(vaddr, size);
pmap_update(pmap_kernel());
+
+ km_free((void *)vaddr, size, &kv_any, &kp_none);
}
-/* $OpenBSD: pmap.c,v 1.138 2015/01/21 19:10:26 mpi Exp $ */
+/* $OpenBSD: pmap.c,v 1.139 2015/01/22 17:55:46 mpi Exp $ */
/*
* Copyright (c) 2001, 2002, 2007 Dale Rahn.
void pte_insert32(struct pte_desc *) __noprof;
void pte_insert64(struct pte_desc *) __noprof;
void pmap_fill_pte64(pmap_t, vaddr_t, paddr_t, struct pte_desc *, vm_prot_t,
- int, int) __noprof;
+ int) __noprof;
void pmap_fill_pte32(pmap_t, vaddr_t, paddr_t, struct pte_desc *, vm_prot_t,
- int, int) __noprof;
+ int) __noprof;
void pmap_syncicache_user_virt(pmap_t pm, vaddr_t va);
struct pte_desc *pted;
struct vm_page *pg;
boolean_t nocache = (pa & PMAP_NOCACHE) != 0;
+ boolean_t wt = (pa & PMAP_WT) != 0;
int s;
int need_sync = 0;
int cache;
int error;
+ KASSERT(!(wt && nocache));
+ pa &= PMAP_PA_MASK;
+
/* MP - Acquire lock for this pmap */
s = splvm();
}
}
- pa &= PMAP_PA_MASK;
-
- /* Calculate PTE */
pg = PHYS_TO_VM_PAGE(pa);
- if (pg != NULL && !nocache)
- cache = PMAP_CACHE_WB; /* managed memory is cacheable */
+ if (wt)
+ cache = PMAP_CACHE_WT;
+ else if (pg != NULL && !(pg->pg_flags & PG_DEV) && !nocache)
+ cache = PMAP_CACHE_WB;
else
cache = PMAP_CACHE_CI;
+ /* Calculate PTE */
if (ppc_proc_is_64b)
- pmap_fill_pte64(pm, va, pa, pted, prot, flags, cache);
+ pmap_fill_pte64(pm, va, pa, pted, prot, cache);
else
- pmap_fill_pte32(pm, va, pa, pted, prot, flags, cache);
+ pmap_fill_pte32(pm, va, pa, pted, prot, cache);
if (pg != NULL) {
pmap_enter_pv(pted, pg); /* only managed mem */
*
*/
void
-_pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, int flags, int cache)
+pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
{
struct pte_desc *pted;
struct vm_page *pg;
- int s;
+ boolean_t nocache = (pa & PMAP_NOCACHE) != 0;
+ boolean_t wt = (pa & PMAP_WT) != 0;
pmap_t pm;
+ int cache, s;
+
+ KASSERT(!(wt && nocache));
+ pa &= PMAP_PA_MASK;
pm = pmap_kernel();
pmap_remove_pg(pm, va); /* pted is reused */
pm->pm_stats.resident_count++;
+
if (prot & PROT_WRITE) {
pg = PHYS_TO_VM_PAGE(pa);
if (pg != NULL)
va, pa);
}
- if (cache == PMAP_CACHE_DEFAULT) {
- pg = PHYS_TO_VM_PAGE(pa);
- if (pg != NULL && (pg->pg_flags & PG_DEV) == 0)
- cache = PMAP_CACHE_WB;
- else
- cache = PMAP_CACHE_CI;
- }
+ pg = PHYS_TO_VM_PAGE(pa);
+ if (wt)
+ cache = PMAP_CACHE_WT;
+ else if (pg != NULL && !(pg->pg_flags & PG_DEV) && !nocache)
+ cache = PMAP_CACHE_WB;
+ else
+ cache = PMAP_CACHE_CI;
/* Calculate PTE */
if (ppc_proc_is_64b)
- pmap_fill_pte64(pm, va, pa, pted, prot, flags, cache);
+ pmap_fill_pte64(pm, va, pa, pted, prot, cache);
else
- pmap_fill_pte32(pm, va, pa, pted, prot, flags, cache);
+ pmap_fill_pte32(pm, va, pa, pted, prot, cache);
/*
* Insert into HTAB
splx(s);
}
-void
-pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
-{
- _pmap_kenter_pa(va, pa, prot, 0, PMAP_CACHE_DEFAULT);
-}
-
-void
-pmap_kenter_cache(vaddr_t va, paddr_t pa, vm_prot_t prot, int cacheable)
-{
- _pmap_kenter_pa(va, pa, prot, 0, cacheable);
-}
-
/*
* remove kernel (pmap_kernel()) mappings
*/
*/
void
pmap_fill_pte64(pmap_t pm, vaddr_t va, paddr_t pa, struct pte_desc *pted,
- vm_prot_t prot, int flags, int cache)
+ vm_prot_t prot, int cache)
{
sr_t sr;
struct pte_64 *pte64;
*/
void
pmap_fill_pte32(pmap_t pm, vaddr_t va, paddr_t pa, struct pte_desc *pted,
- vm_prot_t prot, int flags, int cache)
+ vm_prot_t prot, int cache)
{
sr_t sr;
struct pte_32 *pte32;
aligned_va = trunc_page(va);
if (ppc_proc_is_64b) {
pmap_fill_pte64(pm, aligned_va, aligned_va,
- pted, prot, 0, PMAP_CACHE_WB);
+ pted, prot, PMAP_CACHE_WB);
pte_insert64(pted);
return 1;
} else {
pmap_fill_pte32(pm, aligned_va, aligned_va,
- pted, prot, 0, PMAP_CACHE_WB);
+ pted, prot, PMAP_CACHE_WB);
pte_insert32(pted);
return 1;
}