Move pmap_{,k}remove() inside uvm_km_pgremove{,_intrsafe}().
author     mpi <mpi@openbsd.org>
           Sun, 24 Oct 2021 15:23:52 +0000 (15:23 +0000)
committer  mpi <mpi@openbsd.org>
           Sun, 24 Oct 2021 15:23:52 +0000 (15:23 +0000)
Reduce differences with NetBSD; tested by many as part of a larger diff.

ok kettenis@
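
The interface change in a nutshell: uvm_km_pgremove() now takes kernel
virtual addresses and performs both the pmap_remove() and the translation
to aobj offsets internally, matching NetBSD's calling convention. A
minimal before/after sketch of the caller side (illustrative only, not a
drop-in patch; "entry" stands for a kernel-object map entry as in
uvm_unmap_kill_entry()):

	/*
	 * Before: the caller flushed the pmap itself and converted
	 * virtual addresses to offsets relative to vm_map_min(kernel_map).
	 */
	pmap_remove(pmap_kernel(), entry->start, entry->end);
	uvm_km_pgremove(entry->object.uvm_obj,
	    entry->start - vm_map_min(kernel_map),
	    entry->end - vm_map_min(kernel_map));

	/*
	 * After: the caller passes virtual addresses; uvm_km_pgremove()
	 * removes the pmap entries and does the offset translation.
	 */
	uvm_km_pgremove(entry->object.uvm_obj, entry->start, entry->end);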

sys/uvm/uvm_km.c
sys/uvm/uvm_map.c

diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c
index 157a6f8..fc31ae9 100644
--- a/sys/uvm/uvm_km.c
+++ b/sys/uvm/uvm_km.c
@@ -1,4 +1,4 @@
-/*     $OpenBSD: uvm_km.c,v 1.145 2021/06/15 16:38:09 mpi Exp $        */
+/*     $OpenBSD: uvm_km.c,v 1.146 2021/10/24 15:23:52 mpi Exp $        */
 /*     $NetBSD: uvm_km.c,v 1.42 2001/01/14 02:10:01 thorpej Exp $      */
 
 /* 
@@ -239,8 +239,10 @@ uvm_km_suballoc(struct vm_map *map, vaddr_t *min, vaddr_t *max, vsize_t size,
  *    the pages right away.    (this gets called from uvm_unmap_...).
  */
 void
-uvm_km_pgremove(struct uvm_object *uobj, vaddr_t start, vaddr_t end)
+uvm_km_pgremove(struct uvm_object *uobj, vaddr_t startva, vaddr_t endva)
 {
+       const voff_t start = startva - vm_map_min(kernel_map);
+       const voff_t end = endva - vm_map_min(kernel_map);
        struct vm_page *pp;
        voff_t curoff;
        int slot;
@@ -248,6 +250,7 @@ uvm_km_pgremove(struct uvm_object *uobj, vaddr_t start, vaddr_t end)
 
        KASSERT(UVM_OBJ_IS_AOBJ(uobj));
 
+       pmap_remove(pmap_kernel(), startva, endva);
        for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
                pp = uvm_pagelookup(uobj, curoff);
                if (pp && pp->pg_flags & PG_BUSY) {
@@ -301,6 +304,7 @@ uvm_km_pgremove_intrsafe(vaddr_t start, vaddr_t end)
                        panic("uvm_km_pgremove_intrsafe: no page");
                uvm_pagefree(pg);
        }
+       pmap_kremove(start, end - start);
 }
 
 /*
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index e36c761..d153bbf 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/*     $OpenBSD: uvm_map.c,v 1.278 2021/10/05 15:37:21 mpi Exp $       */
+/*     $OpenBSD: uvm_map.c,v 1.279 2021/10/24 15:23:52 mpi Exp $       */
 /*     $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
 
 /*
@@ -2116,8 +2116,8 @@ uvm_unmap_kill_entry(struct vm_map *map, struct vm_map_entry *entry)
                /* Nothing to be done for holes. */
        } else if (map->flags & VM_MAP_INTRSAFE) {
                KASSERT(vm_map_pmap(map) == pmap_kernel());
+
                uvm_km_pgremove_intrsafe(entry->start, entry->end);
-               pmap_kremove(entry->start, entry->end - entry->start);
        } else if (UVM_ET_ISOBJ(entry) &&
            UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
                KASSERT(vm_map_pmap(map) == pmap_kernel());
@@ -2155,10 +2155,8 @@ uvm_unmap_kill_entry(struct vm_map *map, struct vm_map_entry *entry)
                 * from the object.  offsets are always relative
                 * to vm_map_min(kernel_map).
                 */
-               pmap_remove(pmap_kernel(), entry->start, entry->end);
-               uvm_km_pgremove(entry->object.uvm_obj,
-                   entry->start - vm_map_min(kernel_map),
-                   entry->end - vm_map_min(kernel_map));
+               uvm_km_pgremove(entry->object.uvm_obj, entry->start,
+                   entry->end);
 
                /*
                 * null out kernel_object reference, we've just