Convert KVA allocation to km_alloc(9).
author     kettenis <kettenis@openbsd.org>
           Fri, 27 May 2022 18:55:30 +0000
committer  kettenis <kettenis@openbsd.org>
           Fri, 27 May 2022 18:55:30 +0000
ok deraadt@, mpi@

sys/arch/sh/sh/vm_machdep.c
sys/arch/sparc64/sparc64/vm_machdep.c
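For context: km_alloc(9) takes a size plus three mode structures: a kmem_va_mode selecting the map to carve virtual space from, a kmem_pa_mode selecting the backing pages, and a kmem_dyn_mode for per-call options. The sketch below is illustrative only (map_user_range is a hypothetical name, not part of the diff): with kp_none the allocation is bare KVA with no pages behind it, which is why both files then wire the user pages in by hand with pmap_kenter_pa().

/*
 * Hypothetical helper condensing the pattern the diff introduces.
 * km_free() later hands the range back once the caller has
 * pmap_kremove()d its mappings, as vunmapbuf() does.
 */
vaddr_t
map_user_range(struct pmap *pm, vaddr_t uva, vsize_t size)
{
	struct kmem_dyn_mode kd_prefer = { .kd_waitok = 1 };
	vaddr_t kva, va;
	paddr_t pa;

	/*
	 * Prefer a kernel VA aligned like the user VA; this keeps
	 * virtually-indexed caches (e.g. on sparc64) alias-free.
	 */
	kd_prefer.kd_prefer = uva;
	kva = (vaddr_t)km_alloc(size, &kv_physwait, &kp_none, &kd_prefer);
	for (va = kva; size > 0; size -= PAGE_SIZE) {
		if (pmap_extract(pm, uva, &pa) == FALSE)
			panic("unmapped user page");
		pmap_kenter_pa(va, pa, PROT_READ | PROT_WRITE);
		uva += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	return (kva);
}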

diff --git a/sys/arch/sh/sh/vm_machdep.c b/sys/arch/sh/sh/vm_machdep.c
index 8ea9cdd..5227ab7 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: vm_machdep.c,v 1.16 2021/05/16 06:20:29 jsg Exp $     */
+/*     $OpenBSD: vm_machdep.c,v 1.17 2022/05/27 18:55:30 kettenis Exp $        */
 /*     $NetBSD: vm_machdep.c,v 1.53 2006/08/31 16:49:21 matt Exp $     */
 
 /*
@@ -243,71 +243,66 @@ cpu_fork(struct proc *p1, struct proc *p2, void *stack, void *tcb,
 #endif
 }
 
+struct kmem_va_mode kv_physwait = {
+       .kv_map = &phys_map,
+       .kv_wait = 1,
+};
+
 /*
  * Map an IO request into kernel virtual address space.
- * All requests are (re)mapped into kernel VA space via the phys_map
- * (a name with only slightly more meaning than "kernel_map")
  */
-
 void
 vmapbuf(struct buf *bp, vsize_t len)
 {
-       vaddr_t faddr, taddr, off;
-       paddr_t fpa;
-       pmap_t kpmap, upmap;
+       struct kmem_dyn_mode kd_prefer = { .kd_waitok = 1 };
+       struct pmap *pm = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
+       vaddr_t kva, uva;
+       vsize_t size, off;
 
+#ifdef DIAGNOSTIC
        if ((bp->b_flags & B_PHYS) == 0)
                panic("vmapbuf");
+#endif
        bp->b_saveaddr = bp->b_data;
-       faddr = trunc_page((vaddr_t)bp->b_data);
-       off = (vaddr_t)bp->b_data - faddr;
-       len = round_page(off + len);
-       taddr = uvm_km_valloc_prefer_wait(phys_map, len, faddr);
-       bp->b_data = (caddr_t)(taddr + off);
-       /*
-        * The region is locked, so we expect that pmap_pte() will return
-        * non-NULL.
-        * XXX: unwise to expect this in a multithreaded environment.
-        * anything can happen to a pmap between the time we lock a
-        * region, release the pmap lock, and then relock it for
-        * the pmap_extract().
-        *
-        * no need to flush TLB since we expect nothing to be mapped
-        * where we just allocated (TLB will be flushed when our
-        * mapping is removed).
-        */
-       upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
-       kpmap = vm_map_pmap(phys_map);
-       while (len) {
-               pmap_extract(upmap, faddr, &fpa);
-               pmap_enter(kpmap, taddr, fpa,
-                   PROT_READ | PROT_WRITE, PMAP_WIRED);
-               faddr += PAGE_SIZE;
-               taddr += PAGE_SIZE;
-               len -= PAGE_SIZE;
+       uva = trunc_page((vaddr_t)bp->b_data);
+       off = (vaddr_t)bp->b_data - uva;
+       size = round_page(off + len);
+
+       kd_prefer.kd_prefer = uva;
+       kva = (vaddr_t)km_alloc(size, &kv_physwait, &kp_none, &kd_prefer);
+       bp->b_data = (caddr_t)(kva + off);
+       while (size > 0) {
+               paddr_t pa;
+
+               if (pmap_extract(pm, uva, &pa) == FALSE)
+                       panic("vmapbuf: null page frame");
+               else
+                       pmap_kenter_pa(kva, pa, PROT_READ | PROT_WRITE);
+               uva += PAGE_SIZE;
+               kva += PAGE_SIZE;
+               size -= PAGE_SIZE;
        }
-       pmap_update(kpmap);
+       pmap_update(pmap_kernel());
 }
 
 /*
- * Free the io map PTEs associated with this IO operation.
- * We also invalidate the TLB entries and restore the original b_addr.
+ * Unmap IO request from the kernel virtual address space.
  */
 void
 vunmapbuf(struct buf *bp, vsize_t len)
 {
        vaddr_t addr, off;
-       pmap_t kpmap;
 
+#ifdef DIAGNOSTIC
        if ((bp->b_flags & B_PHYS) == 0)
                panic("vunmapbuf");
+#endif
        addr = trunc_page((vaddr_t)bp->b_data);
        off = (vaddr_t)bp->b_data - addr;
        len = round_page(off + len);
-       kpmap = vm_map_pmap(phys_map);
-       pmap_remove(kpmap, addr, addr + len);
-       pmap_update(kpmap);
-       uvm_km_free_wakeup(phys_map, addr, len);
+       pmap_kremove(addr, len);
+       pmap_update(pmap_kernel());
+       km_free((void *)addr, len, &kv_physwait, &kp_none);
        bp->b_data = bp->b_saveaddr;
        bp->b_saveaddr = NULL;
 }
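Both functions are reached through the physio(9) raw-I/O path: physio() wraps the user buffer in a struct buf with B_PHYS set, calls vmapbuf() before handing the buf to the driver's strategy routine, and calls vunmapbuf() once the transfer completes. A hypothetical character-device read entry shows the call site (exampleread and examplestrategy are made-up names; physio() and minphys() are the real kernel interfaces):

/*
 * Hypothetical raw read entry point.  The vmapbuf()/vunmapbuf()
 * pair above runs inside physio(), not in the driver itself.
 */
int
exampleread(dev_t dev, struct uio *uio, int ioflag)
{
	return (physio(examplestrategy, dev, B_READ, minphys, uio));
}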
diff --git a/sys/arch/sparc64/sparc64/vm_machdep.c b/sys/arch/sparc64/sparc64/vm_machdep.c
index e6f4f5e..e6aa1bc 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: vm_machdep.c,v 1.39 2017/08/17 20:50:51 tom Exp $     */
+/*     $OpenBSD: vm_machdep.c,v 1.40 2022/05/27 18:55:30 kettenis Exp $        */
 /*     $NetBSD: vm_machdep.c,v 1.38 2001/06/30 00:02:20 eeh Exp $ */
 
 /*
 
 #include <sparc64/sparc64/cache.h>
 
-/*
- * Map a user I/O request into kernel virtual address space.
- * Note: the pages are already locked by uvm_vslock(), so we
- * do not need to pass an access_type to pmap_enter().   
- */
-void
-vmapbuf(struct buf *bp, vsize_t len)
-{
-       struct pmap *upmap, *kpmap;
-       vaddr_t uva;    /* User VA (map from) */
-       vaddr_t kva;    /* Kernel VA (new to) */
-       paddr_t pa;     /* physical address */
-       vsize_t off;
-
-       if ((bp->b_flags & B_PHYS) == 0)
-               panic("vmapbuf");
-
-       /*
-        * XXX:  It might be better to round/trunc to a
-        * segment boundary to avoid VAC problems!
-        */
-       bp->b_saveaddr = bp->b_data;
-       uva = trunc_page((vaddr_t)bp->b_data);
-       off = (vaddr_t)bp->b_data - uva;
-       len = round_page(off + len);
-       kva = uvm_km_valloc_prefer_wait(phys_map, len, uva);
-       bp->b_data = (caddr_t)(kva + off);
-
-       upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
-       kpmap = vm_map_pmap(kernel_map);
-       do {
-               if (pmap_extract(upmap, uva, &pa) == FALSE)
-                       panic("vmapbuf: null page frame");
-               /* Now map the page into kernel space. */
-               pmap_enter(pmap_kernel(), kva,
-                   pa /* | PMAP_NC */,
-                   PROT_READ | PROT_WRITE,
-                   PROT_READ | PROT_WRITE | PMAP_WIRED);
-
-               uva += PAGE_SIZE;
-               kva += PAGE_SIZE;
-               len -= PAGE_SIZE;
-       } while (len);
-       pmap_update(pmap_kernel());
-}
-
-/*
- * Unmap a previously-mapped user I/O request.
- */
-void
-vunmapbuf(struct buf *bp, vsize_t len)
-{
-       vaddr_t kva;
-       vsize_t off;
-
-       if ((bp->b_flags & B_PHYS) == 0)
-               panic("vunmapbuf");
-
-       kva = trunc_page((vaddr_t)bp->b_data);
-       off = (vaddr_t)bp->b_data - kva;
-       len = round_page(off + len);
-
-       pmap_remove(pmap_kernel(), kva, kva + len);
-       pmap_update(pmap_kernel());
-       uvm_km_free_wakeup(phys_map, kva, len);
-       bp->b_data = bp->b_saveaddr;
-       bp->b_saveaddr = NULL;
-}
-
-
 /*
  * The offset of the topmost frame in the kernel stack.
  */
@@ -341,3 +271,68 @@ cpu_exit(struct proc *p)
        pmap_deactivate(p);
        sched_exit(p);
 }
+
+
+struct kmem_va_mode kv_physwait = {
+       .kv_map = &phys_map,
+       .kv_wait = 1,
+};
+
+/*
+ * Map an IO request into kernel virtual address space.
+ */
+void
+vmapbuf(struct buf *bp, vsize_t len)
+{
+       struct kmem_dyn_mode kd_prefer = { .kd_waitok = 1 };
+       struct pmap *pm = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
+       vaddr_t kva, uva;
+       vsize_t size, off;
+
+#ifdef DIAGNOSTIC
+       if ((bp->b_flags & B_PHYS) == 0)
+               panic("vmapbuf");
+#endif
+       bp->b_saveaddr = bp->b_data;
+       uva = trunc_page((vaddr_t)bp->b_data);
+       off = (vaddr_t)bp->b_data - uva;
+       size = round_page(off + len);
+
+       kd_prefer.kd_prefer = uva;
+       kva = (vaddr_t)km_alloc(size, &kv_physwait, &kp_none, &kd_prefer);
+       bp->b_data = (caddr_t)(kva + off);
+       while (size > 0) {
+               paddr_t pa;
+
+               if (pmap_extract(pm, uva, &pa) == FALSE)
+                       panic("vmapbuf: null page frame");
+               else
+                       pmap_kenter_pa(kva, pa, PROT_READ | PROT_WRITE);
+               uva += PAGE_SIZE;
+               kva += PAGE_SIZE;
+               size -= PAGE_SIZE;
+       }
+       pmap_update(pmap_kernel());
+}
+
+/*
+ * Unmap IO request from the kernel virtual address space.
+ */
+void
+vunmapbuf(struct buf *bp, vsize_t len)
+{
+       vaddr_t addr, off;
+
+#ifdef DIAGNOSTIC
+       if ((bp->b_flags & B_PHYS) == 0)
+               panic("vunmapbuf");
+#endif
+       addr = trunc_page((vaddr_t)bp->b_data);
+       off = (vaddr_t)bp->b_data - addr;
+       len = round_page(off + len);
+       pmap_kremove(addr, len);
+       pmap_update(pmap_kernel());
+       km_free((void *)addr, len, &kv_physwait, &kp_none);
+       bp->b_data = bp->b_saveaddr;
+       bp->b_saveaddr = NULL;
+}
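
A note on the pmap(9) side of the conversion: pmap_kenter_pa() enters an unmanaged, wired mapping into the kernel pmap, skipping the per-page bookkeeping pmap_enter() performs, so such mappings must be torn down with pmap_kremove() rather than pmap_remove(). An illustrative pairing (not part of the diff):

	pmap_kenter_pa(kva, pa, PROT_READ | PROT_WRITE);
	pmap_update(pmap_kernel());
	/* ... perform the transfer through kva ... */
	pmap_kremove(kva, PAGE_SIZE);
	pmap_update(pmap_kernel());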