author     deraadt <deraadt@openbsd.org>
           Fri, 7 Oct 2022 14:59:39 +0000
committer  deraadt <deraadt@openbsd.org>
           Fri, 7 Oct 2022 14:59:39 +0000

Add mimmutable(2) system call which locks the permissions (PROT_*) of
memory mappings so they cannot be changed by a later mmap(), mprotect(),
or munmap(), which will error with EPERM instead.

ok kettenis

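As a point of reference, here is a minimal userland sketch of how the new interface is intended to be used. The example program is not part of this commit; it assumes the libc stub follows the usual -1/errno return convention for the prototype added to sys/sys/mman.h below.

        /*
         * Sketch only: exercises the mimmutable(2) interface added by this
         * commit.  Assumes the usual -1/errno syscall return convention.
         */
        #include <sys/mman.h>

        #include <err.h>
        #include <unistd.h>

        int
        main(void)
        {
                size_t pgsz = (size_t)sysconf(_SC_PAGESIZE);
                char *p;

                /* One page of private anonymous memory, read/write. */
                p = mmap(NULL, pgsz, PROT_READ | PROT_WRITE,
                    MAP_ANON | MAP_PRIVATE, -1, 0);
                if (p == MAP_FAILED)
                        err(1, "mmap");

                /* Freeze the mapping's permissions for the process lifetime. */
                if (mimmutable(p, pgsz) == -1)
                        err(1, "mimmutable");

                /* Later permission or mapping changes are refused. */
                if (mprotect(p, pgsz, PROT_READ) == -1)
                        warn("mprotect");       /* expected: EPERM */
                if (munmap(p, pgsz) == -1)
                        warn("munmap");         /* expected: EPERM */

                return 0;
        }

Once mimmutable() succeeds on a range, the mprotect() and munmap() calls above are expected to fail with EPERM, which is the behaviour the uvm_map.c and uvm_mmap.c changes below implement.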
12 files changed:
sys/arch/mips64/mips64/trap.c
sys/kern/exec_subr.c
sys/kern/kern_exec.c
sys/kern/kern_pledge.c
sys/kern/kern_resource.c
sys/kern/syscalls.master
sys/sys/mman.h
sys/uvm/uvm_extern.h
sys/uvm/uvm_io.c
sys/uvm/uvm_map.c
sys/uvm/uvm_map.h
sys/uvm/uvm_mmap.c

diff --git a/sys/arch/mips64/mips64/trap.c b/sys/arch/mips64/mips64/trap.c
index 8620aaa..f4a0691 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: trap.c,v 1.159 2022/02/28 15:49:57 visa Exp $ */
+/*     $OpenBSD: trap.c,v 1.160 2022/10/07 14:59:39 deraadt Exp $      */
 
 /*
  * Copyright (c) 1988 University of Utah.
@@ -616,7 +616,7 @@ fault_common_no_miss:
                                KERNEL_UNLOCK();
                                (void)uvm_map_protect(map, p->p_md.md_fppgva,
                                    p->p_md.md_fppgva + PAGE_SIZE,
-                                   PROT_NONE, FALSE);
+                                   PROT_NONE, FALSE, FALSE);
                                return;
                        }
                        /* FALLTHROUGH */
@@ -1587,7 +1587,8 @@ fpe_branch_emulate(struct proc *p, struct trapframe *tf, uint32_t insn,
         */
 
        rc = uvm_map_protect(map, p->p_md.md_fppgva,
-           p->p_md.md_fppgva + PAGE_SIZE, PROT_READ | PROT_WRITE, FALSE);
+           p->p_md.md_fppgva + PAGE_SIZE, PROT_READ | PROT_WRITE, FALSE,
+           FALSE);
        if (rc != 0) {
 #ifdef DEBUG
                printf("%s: uvm_map_protect on %p failed: %d\n",
@@ -1626,7 +1627,7 @@ fpe_branch_emulate(struct proc *p, struct trapframe *tf, uint32_t insn,
        }
 
        (void)uvm_map_protect(map, p->p_md.md_fppgva,
-           p->p_md.md_fppgva + PAGE_SIZE, PROT_READ | PROT_EXEC, FALSE);
+           p->p_md.md_fppgva + PAGE_SIZE, PROT_READ | PROT_EXEC, FALSE, FALSE);
        p->p_md.md_fpbranchva = dest;
        p->p_md.md_fpslotva = (vaddr_t)tf->pc + 4;
        p->p_md.md_flags |= MDP_FPUSED;
@@ -1640,7 +1641,7 @@ err:
        KERNEL_UNLOCK();
 err2:
        (void)uvm_map_protect(map, p->p_md.md_fppgva,
-           p->p_md.md_fppgva + PAGE_SIZE, PROT_NONE, FALSE);
+           p->p_md.md_fppgva + PAGE_SIZE, PROT_NONE, FALSE, FALSE);
        return rc;
 }
 #endif
diff --git a/sys/kern/exec_subr.c b/sys/kern/exec_subr.c
index 0a1bad7..8a949be 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: exec_subr.c,v 1.57 2019/11/29 06:34:45 deraadt Exp $  */
+/*     $OpenBSD: exec_subr.c,v 1.58 2022/10/07 14:59:39 deraadt Exp $  */
 /*     $NetBSD: exec_subr.c,v 1.9 1994/12/04 03:10:42 mycroft Exp $    */
 
 /*
@@ -259,7 +259,7 @@ vmcmd_map_readvn(struct proc *p, struct exec_vmcmd *cmd)
                return (uvm_map_protect(&p->p_vmspace->vm_map,
                    trunc_page(cmd->ev_addr),
                    round_page(cmd->ev_addr + cmd->ev_len),
-                   prot, FALSE));
+                   prot, FALSE, TRUE));
        }
        return (0);
 }
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index 54aefc0..761441c 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: kern_exec.c,v 1.231 2022/08/14 01:58:27 jsg Exp $     */
+/*     $OpenBSD: kern_exec.c,v 1.232 2022/10/07 14:59:39 deraadt Exp $ */
 /*     $NetBSD: kern_exec.c,v 1.75 1996/02/09 18:59:28 christos Exp $  */
 
 /*-
@@ -466,13 +466,13 @@ sys_execve(struct proc *p, void *v, register_t *retval)
 #ifdef MACHINE_STACK_GROWS_UP
        pr->ps_strings = (vaddr_t)vm->vm_maxsaddr + sgap;
         if (uvm_map_protect(&vm->vm_map, (vaddr_t)vm->vm_maxsaddr,
-            trunc_page(pr->ps_strings), PROT_NONE, TRUE))
+            trunc_page(pr->ps_strings), PROT_NONE, TRUE, TRUE))
                 goto exec_abort;
 #else
        pr->ps_strings = (vaddr_t)vm->vm_minsaddr - sizeof(arginfo) - sgap;
         if (uvm_map_protect(&vm->vm_map,
             round_page(pr->ps_strings + sizeof(arginfo)),
-            (vaddr_t)vm->vm_minsaddr, PROT_NONE, TRUE))
+            (vaddr_t)vm->vm_minsaddr, PROT_NONE, TRUE, TRUE))
                 goto exec_abort;
 #endif
 
diff --git a/sys/kern/kern_pledge.c b/sys/kern/kern_pledge.c
index 30d6d48..86a7780 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: kern_pledge.c,v 1.295 2022/09/05 16:37:47 mbuhl Exp $ */
+/*     $OpenBSD: kern_pledge.c,v 1.296 2022/10/07 14:59:39 deraadt Exp $       */
 
 /*
  * Copyright (c) 2015 Nicholas Marriott <nicm@openbsd.org>
@@ -150,6 +150,7 @@ const uint64_t pledge_syscalls[SYS_MAXSYSCALL] = {
        [SYS_minherit] = PLEDGE_STDIO,
        [SYS_mmap] = PLEDGE_STDIO,
        [SYS_mprotect] = PLEDGE_STDIO,
+       [SYS_mimmutable] = PLEDGE_STDIO,
        [SYS_mquery] = PLEDGE_STDIO,
        [SYS_munmap] = PLEDGE_STDIO,
        [SYS_msync] = PLEDGE_STDIO,
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
index 0c69d71..38da9b1 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: kern_resource.c,v 1.74 2022/05/28 03:47:43 deraadt Exp $      */
+/*     $OpenBSD: kern_resource.c,v 1.75 2022/10/07 14:59:39 deraadt Exp $      */
 /*     $NetBSD: kern_resource.c,v 1.38 1996/10/23 07:19:38 matthias Exp $      */
 
 /*-
@@ -328,8 +328,8 @@ dosetrlimit(struct proc *p, u_int which, struct rlimit *limp)
                        addr = trunc_page(addr);
                        size = round_page(size);
                        KERNEL_LOCK();
-                       (void) uvm_map_protect(&vm->vm_map,
-                                             addr, addr+size, prot, FALSE);
+                       (void) uvm_map_protect(&vm->vm_map, addr,
+                           addr+size, prot, FALSE, FALSE);
                        KERNEL_UNLOCK();
                }
        }
diff --git a/sys/kern/syscalls.master b/sys/kern/syscalls.master
index 63f3c65..0d382d3 100644
@@ -1,4 +1,4 @@
-;      $OpenBSD: syscalls.master,v 1.232 2022/09/03 21:13:48 mbuhl Exp $
+;      $OpenBSD: syscalls.master,v 1.233 2022/10/07 14:59:39 deraadt Exp $
 ;      $NetBSD: syscalls.master,v 1.32 1996/04/23 10:24:21 mycroft Exp $
 
 ;      @(#)syscalls.master     8.2 (Berkeley) 1/13/94
 156    OBSOL           ogetdirentries
 157    OBSOL           statfs25
 158    OBSOL           fstatfs25
-159    UNIMPL
+159    STD             { int sys_mimmutable(void *addr, size_t len); }
 160    UNIMPL
 161    STD             { int sys_getfh(const char *fname, fhandle_t *fhp); }
 162    OBSOL           ogetdomainname
diff --git a/sys/sys/mman.h b/sys/sys/mman.h
index 47972b5..c36687f 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: mman.h,v 1.34 2019/03/01 01:46:18 cheloha Exp $       */
+/*     $OpenBSD: mman.h,v 1.35 2022/10/07 14:59:39 deraadt Exp $       */
 /*     $NetBSD: mman.h,v 1.11 1995/03/26 20:24:23 jtc Exp $    */
 
 /*-
@@ -154,6 +154,7 @@ int munlockall(void);
 #if __BSD_VISIBLE
 int    madvise(void *, size_t, int);
 int    minherit(void *, size_t, int);
+int    mimmutable(void *, size_t);
 void * mquery(void *, size_t, int, int, int, off_t);
 #endif
 int    posix_madvise(void *, size_t, int);
diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h
index 31b35d8..cfdfe88 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: uvm_extern.h,v 1.163 2022/08/15 03:21:04 jsg Exp $    */
+/*     $OpenBSD: uvm_extern.h,v 1.164 2022/10/07 14:59:39 deraadt Exp $        */
 /*     $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $      */
 
 /*
@@ -399,7 +399,7 @@ int                 uvm_map_pageable_all(vm_map_t, int, vsize_t);
 boolean_t              uvm_map_checkprot(vm_map_t, vaddr_t,
                            vaddr_t, vm_prot_t);
 int                    uvm_map_protect(vm_map_t, vaddr_t, 
-                           vaddr_t, vm_prot_t, boolean_t);
+                           vaddr_t, vm_prot_t, boolean_t, boolean_t);
 struct vmspace         *uvmspace_alloc(vaddr_t, vaddr_t,
                            boolean_t, boolean_t);
 void                   uvmspace_init(struct vmspace *, struct pmap *,
diff --git a/sys/uvm/uvm_io.c b/sys/uvm/uvm_io.c
index b3d4b87..3bd0b4e 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: uvm_io.c,v 1.29 2022/03/12 08:11:07 mpi Exp $ */
+/*     $OpenBSD: uvm_io.c,v 1.30 2022/10/07 14:59:39 deraadt Exp $     */
 /*     $NetBSD: uvm_io.c,v 1.12 2000/06/27 17:29:23 mrg Exp $  */
 
 /*
@@ -127,7 +127,7 @@ uvm_io(vm_map_t map, struct uio *uio, int flags)
                vm_map_lock(kernel_map);
                TAILQ_INIT(&dead_entries);
                uvm_unmap_remove(kernel_map, kva, kva+chunksz,
-                   &dead_entries, FALSE, TRUE);
+                   &dead_entries, FALSE, TRUE, FALSE);
                vm_map_unlock(kernel_map);
                uvm_unmap_detach(&dead_entries, AMAP_REFALL);
 
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index 6338052..4fe00db 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: uvm_map.c,v 1.294 2022/08/15 15:53:45 jsg Exp $       */
+/*     $OpenBSD: uvm_map.c,v 1.295 2022/10/07 14:59:39 deraadt Exp $   */
 /*     $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
 
 /*
@@ -797,7 +797,11 @@ uvm_mapanon(struct vm_map *map, vaddr_t *addr, vsize_t sz,
                                error = EINVAL;
                                goto unlock;
                        }
-                       uvm_unmap_remove(map, *addr, *addr + sz, &dead, FALSE, TRUE);
+                       if (uvm_unmap_remove(map, *addr, *addr + sz, &dead,
+                           FALSE, TRUE, TRUE) != 0) {
+                               error = EPERM;  /* immutable entries found */
+                               goto unlock;
+                       }
                }
                if (!uvm_map_isavail(map, NULL, &first, &last, *addr, sz)) {
                        error = ENOMEM;
@@ -1038,8 +1042,13 @@ uvm_map(struct vm_map *map, vaddr_t *addr, vsize_t sz,
                }
 
                /* Check that the space is available. */
-               if (flags & UVM_FLAG_UNMAP)
-                       uvm_unmap_remove(map, *addr, *addr + sz, &dead, FALSE, TRUE);
+               if (flags & UVM_FLAG_UNMAP) {
+                       if (uvm_unmap_remove(map, *addr, *addr + sz, &dead,
+                           FALSE, TRUE, TRUE) != 0) {
+                               error = EPERM;  /* immutable entries found */
+                               goto unlock;
+                       }
+               }
                if (!uvm_map_isavail(map, NULL, &first, &last, *addr, sz)) {
                        error = ENOMEM;
                        goto unlock;
@@ -1817,7 +1826,7 @@ uvm_unmap(struct vm_map *map, vaddr_t start, vaddr_t end)
            (end & (vaddr_t)PAGE_MASK) == 0);
        TAILQ_INIT(&dead);
        vm_map_lock(map);
-       uvm_unmap_remove(map, start, end, &dead, FALSE, TRUE);
+       uvm_unmap_remove(map, start, end, &dead, FALSE, TRUE, FALSE);
        vm_map_unlock(map);
 
        if (map->flags & VM_MAP_INTRSAFE)
@@ -1959,17 +1968,17 @@ uvm_unmap_kill_entry(struct vm_map *map, struct vm_map_entry *entry)
  * If markfree, entry will be properly marked free, otherwise, no replacement
  * entry will be put in the tree (corrupting the tree).
  */
-void
+int
 uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
     struct uvm_map_deadq *dead, boolean_t remove_holes,
-    boolean_t markfree)
+    boolean_t markfree, boolean_t checkimmutable)
 {
        struct vm_map_entry *prev_hint, *next, *entry;
 
        start = MAX(start, map->min_offset);
        end = MIN(end, map->max_offset);
        if (start >= end)
-               return;
+               return 0;
 
        if ((map->flags & VM_MAP_INTRSAFE) == 0)
                splassert(IPL_NONE);
@@ -1979,6 +1988,19 @@ uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
        /* Find first affected entry. */
        entry = uvm_map_entrybyaddr(&map->addr, start);
        KDASSERT(entry != NULL && entry->start <= start);
+
+       if (checkimmutable) {
+               struct vm_map_entry *entry1 = entry;
+
+               /* Refuse to unmap if any entries are immutable */
+               for (; entry1 != NULL && entry1->start < end; entry1 = next) {
+                       KDASSERT(entry1->start >= start);
+                       next = RBT_NEXT(uvm_map_addr, entry1);
+                       if (entry1->etype & UVM_ET_IMMUTABLE)
+                               return EPERM;
+               }
+       }
+
        if (entry->end <= start && markfree)
                entry = RBT_NEXT(uvm_map_addr, entry);
        else
@@ -2043,6 +2065,7 @@ uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
                        KDASSERT(uvm_map_entrybyaddr(&map->addr, a) == NULL);
        }
 #endif
+       return 0;
 }
 
 /*
@@ -3063,7 +3086,7 @@ uvm_page_printit(struct vm_page *pg, boolean_t full,
  */
 int
 uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
-    vm_prot_t new_prot, boolean_t set_max)
+    vm_prot_t new_prot, boolean_t set_max, boolean_t checkimmutable)
 {
        struct vm_map_entry *first, *iter;
        vm_prot_t old_prot;
@@ -3098,6 +3121,11 @@ uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
                if (iter->start == iter->end || UVM_ET_ISHOLE(iter))
                        continue;
 
+               if (checkimmutable &&
+                   (iter->etype & UVM_ET_IMMUTABLE)) {
+                       error = EPERM;
+                       goto out;
+               }
                old_prot = iter->protection;
                if (old_prot == PROT_NONE && new_prot != old_prot) {
                        dused += uvmspace_dused(
@@ -3356,7 +3384,7 @@ uvmspace_exec(struct proc *p, vaddr_t start, vaddr_t end)
                 * (as in, not replace them with free-memory entries).
                 */
                uvm_unmap_remove(map, map->min_offset, map->max_offset,
-                   &dead_entries, TRUE, FALSE);
+                   &dead_entries, TRUE, FALSE, FALSE);
 
                KDASSERT(RBT_EMPTY(uvm_map_addr, &map->addr));
 
@@ -3529,7 +3557,7 @@ uvm_share(struct vm_map *dstmap, vaddr_t dstaddr, vm_prot_t prot,
        }
 
        ret = EINVAL;
-       uvm_unmap_remove(dstmap, dstaddr, unmap_end, &dead, FALSE, TRUE);
+       uvm_unmap_remove(dstmap, dstaddr, unmap_end, &dead, FALSE, TRUE, FALSE);
 
 exit_unlock:
        vm_map_unlock_read(srcmap);
@@ -4088,7 +4116,7 @@ uvm_map_deallocate(vm_map_t map)
        TAILQ_INIT(&dead);
        uvm_tree_sanity(map, __FILE__, __LINE__);
        uvm_unmap_remove(map, map->min_offset, map->max_offset, &dead,
-           TRUE, FALSE);
+           TRUE, FALSE, FALSE);
        pmap_destroy(map->pmap);
        KASSERT(RBT_EMPTY(uvm_map_addr, &map->addr));
        free(map, M_VMMAP, sizeof *map);
@@ -4183,6 +4211,45 @@ uvm_map_syscall(struct vm_map *map, vaddr_t start, vaddr_t end)
        return (0);
 }
 
+/* 
+ * uvm_map_immutable: block mapping/mprotect for range of addrs in map.
+ *
+ * => map must be unlocked
+ */
+int
+uvm_map_immutable(struct vm_map *map, vaddr_t start, vaddr_t end, int imut, char *name)
+{
+       struct vm_map_entry *entry;
+
+       if (start > end)
+               return EINVAL;
+       start = MAX(start, map->min_offset);
+       end = MIN(end, map->max_offset);
+       if (start >= end)
+               return 0;
+
+       vm_map_lock(map);
+
+       entry = uvm_map_entrybyaddr(&map->addr, start);
+       if (entry->end > start)
+               UVM_MAP_CLIP_START(map, entry, start);
+       else
+               entry = RBT_NEXT(uvm_map_addr, entry);
+
+       while (entry != NULL && entry->start < end) {
+               UVM_MAP_CLIP_END(map, entry, end);
+               if (imut)
+                       entry->etype |= UVM_ET_IMMUTABLE;
+               else
+                       entry->etype &= ~UVM_ET_IMMUTABLE;
+               entry = RBT_NEXT(uvm_map_addr, entry);
+       }
+
+       map->wserial++;
+       vm_map_unlock(map);
+       return (0);
+}
+
 /*
  * uvm_map_advice: set advice code for range of addrs in map.
  *
@@ -4367,7 +4434,7 @@ uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
 fail2_unmap:
        if (error) {
                uvm_unmap_remove(kernel_map, dstaddr, dstaddr + len, &dead,
-                   FALSE, TRUE);
+                   FALSE, TRUE, FALSE);
        }
 
        /* Release maps, release dead entries. */
diff --git a/sys/uvm/uvm_map.h b/sys/uvm/uvm_map.h
index d3482f3..566fd98 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: uvm_map.h,v 1.75 2022/03/12 08:11:07 mpi Exp $        */
+/*     $OpenBSD: uvm_map.h,v 1.76 2022/10/07 14:59:39 deraadt Exp $    */
 /*     $NetBSD: uvm_map.h,v 1.24 2001/02/18 21:19:08 chs Exp $ */
 
 /*
@@ -350,6 +350,7 @@ struct vm_map *     uvm_map_create(pmap_t, vaddr_t, vaddr_t, int);
 vaddr_t                uvm_map_pie(vaddr_t);
 vaddr_t                uvm_map_hint(struct vmspace *, vm_prot_t, vaddr_t, vaddr_t);
 int            uvm_map_syscall(struct vm_map *, vaddr_t, vaddr_t);
+int            uvm_map_immutable(struct vm_map *, vaddr_t, vaddr_t, int, char *);
 int            uvm_map_inherit(struct vm_map *, vaddr_t, vaddr_t, vm_inherit_t);
 int            uvm_map_advice(struct vm_map *, vaddr_t, vaddr_t, int);
 void           uvm_map_init(void);
@@ -365,8 +366,8 @@ int         uvm_map_submap(struct vm_map *, vaddr_t, vaddr_t,
                    struct vm_map *);
 void           uvm_unmap(struct vm_map *, vaddr_t, vaddr_t);
 void           uvm_unmap_detach(struct uvm_map_deadq *, int);
-void           uvm_unmap_remove(struct vm_map*, vaddr_t, vaddr_t,
-                   struct uvm_map_deadq *, boolean_t, boolean_t);
+int            uvm_unmap_remove(struct vm_map*, vaddr_t, vaddr_t,
+                   struct uvm_map_deadq *, boolean_t, boolean_t, boolean_t);
 void           uvm_map_set_uaddr(struct vm_map*, struct uvm_addr_state**,
                    struct uvm_addr_state*);
 int            uvm_map_mquery(struct vm_map*, vaddr_t*, vsize_t, voff_t, int);
diff --git a/sys/uvm/uvm_mmap.c b/sys/uvm/uvm_mmap.c
index 6f17349..f5aba6a 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: uvm_mmap.c,v 1.172 2022/08/01 14:56:59 deraadt Exp $  */
+/*     $OpenBSD: uvm_mmap.c,v 1.173 2022/10/07 14:59:39 deraadt Exp $  */
 /*     $NetBSD: uvm_mmap.c,v 1.49 2001/02/18 21:19:08 chs Exp $        */
 
 /*
@@ -569,7 +569,11 @@ sys_munmap(struct proc *p, void *v, register_t *retval)
        }
 
        TAILQ_INIT(&dead_entries);
-       uvm_unmap_remove(map, addr, addr + size, &dead_entries, FALSE, TRUE);
+       if (uvm_unmap_remove(map, addr, addr + size, &dead_entries,
+           FALSE, TRUE, TRUE) != 0) {
+               vm_map_unlock(map);
+               return EPERM;   /* immutable entries found */
+       }
        vm_map_unlock(map);     /* and unlock */
 
        uvm_unmap_detach(&dead_entries, 0);
@@ -619,7 +623,7 @@ sys_mprotect(struct proc *p, void *v, register_t *retval)
                return EINVAL;          /* disallow wrap-around. */
 
        return (uvm_map_protect(&p->p_vmspace->vm_map, addr, addr+size,
-           prot, FALSE));
+           prot, FALSE, TRUE));
 }
 
 /*
@@ -648,6 +652,32 @@ sys_msyscall(struct proc *p, void *v, register_t *retval)
        return uvm_map_syscall(&p->p_vmspace->vm_map, addr, addr+size);
 }
 
+/*
+ * sys_mimmutable: the mimmutable system call
+ */
+int
+sys_mimmutable(struct proc *p, void *v, register_t *retval)
+{
+       struct sys_mimmutable_args /* {
+               immutablearg(void *) addr;
+               immutablearg(size_t) len;
+       } */ *uap = v;
+       vaddr_t addr;
+       vsize_t size, pageoff;
+
+       addr = (vaddr_t)SCARG(uap, addr);
+       size = (vsize_t)SCARG(uap, len);
+
+       /*
+        * align the address to a page boundary, and adjust the size accordingly
+        */
+       ALIGN_ADDR(addr, size, pageoff);
+       if (addr > SIZE_MAX - size)
+               return EINVAL;          /* disallow wrap-around. */
+
+       return uvm_map_immutable(&p->p_vmspace->vm_map, addr, addr+size, 1, "sys");
+}
+
 /*
  * sys_minherit: the minherit system call
  */
@@ -1228,7 +1258,8 @@ redo:
                        if (kva != 0) {
                                vm_map_lock(kernel_map);
                                uvm_unmap_remove(kernel_map, kva,
-                                   kva+PAGE_SIZE, &dead_entries, FALSE, TRUE);
+                                   kva+PAGE_SIZE, &dead_entries,
+                                   FALSE, TRUE, FALSE);        /* XXX */
                                vm_map_unlock(kernel_map);
                                kva = 0;
                        }
@@ -1255,7 +1286,7 @@ redo:
        if (kva != 0) {
                vm_map_lock(kernel_map);
                uvm_unmap_remove(kernel_map, kva, kva+PAGE_SIZE,
-                   &dead_entries, FALSE, TRUE);
+                   &dead_entries, FALSE, TRUE, FALSE);         /* XXX */
                vm_map_unlock(kernel_map);
        }
        uvm_unmap_detach(&dead_entries, AMAP_REFALL);