-/* $OpenBSD: trap.c,v 1.159 2022/02/28 15:49:57 visa Exp $ */
+/* $OpenBSD: trap.c,v 1.160 2022/10/07 14:59:39 deraadt Exp $ */
/*
* Copyright (c) 1988 University of Utah.
KERNEL_UNLOCK();
(void)uvm_map_protect(map, p->p_md.md_fppgva,
p->p_md.md_fppgva + PAGE_SIZE,
- PROT_NONE, FALSE);
+ PROT_NONE, FALSE, FALSE);
return;
}
/* FALLTHROUGH */
*/
rc = uvm_map_protect(map, p->p_md.md_fppgva,
- p->p_md.md_fppgva + PAGE_SIZE, PROT_READ | PROT_WRITE, FALSE);
+ p->p_md.md_fppgva + PAGE_SIZE, PROT_READ | PROT_WRITE, FALSE,
+ FALSE);
if (rc != 0) {
#ifdef DEBUG
printf("%s: uvm_map_protect on %p failed: %d\n",
}
(void)uvm_map_protect(map, p->p_md.md_fppgva,
- p->p_md.md_fppgva + PAGE_SIZE, PROT_READ | PROT_EXEC, FALSE);
+ p->p_md.md_fppgva + PAGE_SIZE, PROT_READ | PROT_EXEC, FALSE, FALSE);
p->p_md.md_fpbranchva = dest;
p->p_md.md_fpslotva = (vaddr_t)tf->pc + 4;
p->p_md.md_flags |= MDP_FPUSED;
KERNEL_UNLOCK();
err2:
(void)uvm_map_protect(map, p->p_md.md_fppgva,
- p->p_md.md_fppgva + PAGE_SIZE, PROT_NONE, FALSE);
+ p->p_md.md_fppgva + PAGE_SIZE, PROT_NONE, FALSE, FALSE);
return rc;
}
#endif
-/* $OpenBSD: exec_subr.c,v 1.57 2019/11/29 06:34:45 deraadt Exp $ */
+/* $OpenBSD: exec_subr.c,v 1.58 2022/10/07 14:59:39 deraadt Exp $ */
/* $NetBSD: exec_subr.c,v 1.9 1994/12/04 03:10:42 mycroft Exp $ */
/*
return (uvm_map_protect(&p->p_vmspace->vm_map,
trunc_page(cmd->ev_addr),
round_page(cmd->ev_addr + cmd->ev_len),
- prot, FALSE));
+ prot, FALSE, TRUE));
}
return (0);
}
-/* $OpenBSD: kern_exec.c,v 1.231 2022/08/14 01:58:27 jsg Exp $ */
+/* $OpenBSD: kern_exec.c,v 1.232 2022/10/07 14:59:39 deraadt Exp $ */
/* $NetBSD: kern_exec.c,v 1.75 1996/02/09 18:59:28 christos Exp $ */
/*-
#ifdef MACHINE_STACK_GROWS_UP
pr->ps_strings = (vaddr_t)vm->vm_maxsaddr + sgap;
if (uvm_map_protect(&vm->vm_map, (vaddr_t)vm->vm_maxsaddr,
- trunc_page(pr->ps_strings), PROT_NONE, TRUE))
+ trunc_page(pr->ps_strings), PROT_NONE, TRUE, TRUE))
goto exec_abort;
#else
pr->ps_strings = (vaddr_t)vm->vm_minsaddr - sizeof(arginfo) - sgap;
if (uvm_map_protect(&vm->vm_map,
round_page(pr->ps_strings + sizeof(arginfo)),
- (vaddr_t)vm->vm_minsaddr, PROT_NONE, TRUE))
+ (vaddr_t)vm->vm_minsaddr, PROT_NONE, TRUE, TRUE))
goto exec_abort;
#endif
-/* $OpenBSD: kern_pledge.c,v 1.295 2022/09/05 16:37:47 mbuhl Exp $ */
+/* $OpenBSD: kern_pledge.c,v 1.296 2022/10/07 14:59:39 deraadt Exp $ */
/*
* Copyright (c) 2015 Nicholas Marriott <nicm@openbsd.org>
[SYS_minherit] = PLEDGE_STDIO,
[SYS_mmap] = PLEDGE_STDIO,
[SYS_mprotect] = PLEDGE_STDIO,
+ [SYS_mimmutable] = PLEDGE_STDIO,
[SYS_mquery] = PLEDGE_STDIO,
[SYS_munmap] = PLEDGE_STDIO,
[SYS_msync] = PLEDGE_STDIO,
-/* $OpenBSD: kern_resource.c,v 1.74 2022/05/28 03:47:43 deraadt Exp $ */
+/* $OpenBSD: kern_resource.c,v 1.75 2022/10/07 14:59:39 deraadt Exp $ */
/* $NetBSD: kern_resource.c,v 1.38 1996/10/23 07:19:38 matthias Exp $ */
/*-
addr = trunc_page(addr);
size = round_page(size);
KERNEL_LOCK();
- (void) uvm_map_protect(&vm->vm_map,
- addr, addr+size, prot, FALSE);
+ (void) uvm_map_protect(&vm->vm_map, addr,
+ addr+size, prot, FALSE, FALSE);
KERNEL_UNLOCK();
}
}
-; $OpenBSD: syscalls.master,v 1.232 2022/09/03 21:13:48 mbuhl Exp $
+; $OpenBSD: syscalls.master,v 1.233 2022/10/07 14:59:39 deraadt Exp $
; $NetBSD: syscalls.master,v 1.32 1996/04/23 10:24:21 mycroft Exp $
; @(#)syscalls.master 8.2 (Berkeley) 1/13/94
156 OBSOL ogetdirentries
157 OBSOL statfs25
158 OBSOL fstatfs25
-159 UNIMPL
+159 STD { int sys_mimmutable(void *addr, size_t len); }
160 UNIMPL
161 STD { int sys_getfh(const char *fname, fhandle_t *fhp); }
162 OBSOL ogetdomainname
-/* $OpenBSD: mman.h,v 1.34 2019/03/01 01:46:18 cheloha Exp $ */
+/* $OpenBSD: mman.h,v 1.35 2022/10/07 14:59:39 deraadt Exp $ */
/* $NetBSD: mman.h,v 1.11 1995/03/26 20:24:23 jtc Exp $ */
/*-
#if __BSD_VISIBLE
int madvise(void *, size_t, int);
int minherit(void *, size_t, int);
+int mimmutable(void *, size_t);
void * mquery(void *, size_t, int, int, int, off_t);
#endif
int posix_madvise(void *, size_t, int);
-/* $OpenBSD: uvm_extern.h,v 1.163 2022/08/15 03:21:04 jsg Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.164 2022/10/07 14:59:39 deraadt Exp $ */
/* $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $ */
/*
boolean_t uvm_map_checkprot(vm_map_t, vaddr_t,
vaddr_t, vm_prot_t);
int uvm_map_protect(vm_map_t, vaddr_t,
- vaddr_t, vm_prot_t, boolean_t);
+ vaddr_t, vm_prot_t, boolean_t, boolean_t);
struct vmspace *uvmspace_alloc(vaddr_t, vaddr_t,
boolean_t, boolean_t);
void uvmspace_init(struct vmspace *, struct pmap *,
-/* $OpenBSD: uvm_io.c,v 1.29 2022/03/12 08:11:07 mpi Exp $ */
+/* $OpenBSD: uvm_io.c,v 1.30 2022/10/07 14:59:39 deraadt Exp $ */
/* $NetBSD: uvm_io.c,v 1.12 2000/06/27 17:29:23 mrg Exp $ */
/*
vm_map_lock(kernel_map);
TAILQ_INIT(&dead_entries);
uvm_unmap_remove(kernel_map, kva, kva+chunksz,
- &dead_entries, FALSE, TRUE);
+ &dead_entries, FALSE, TRUE, FALSE);
vm_map_unlock(kernel_map);
uvm_unmap_detach(&dead_entries, AMAP_REFALL);
-/* $OpenBSD: uvm_map.c,v 1.294 2022/08/15 15:53:45 jsg Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.295 2022/10/07 14:59:39 deraadt Exp $ */
/* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
/*
error = EINVAL;
goto unlock;
}
- uvm_unmap_remove(map, *addr, *addr + sz, &dead, FALSE, TRUE);
+ if (uvm_unmap_remove(map, *addr, *addr + sz, &dead,
+ FALSE, TRUE, TRUE) != 0) {
+ error = EPERM; /* immutable entries found */
+ goto unlock;
+ }
}
if (!uvm_map_isavail(map, NULL, &first, &last, *addr, sz)) {
error = ENOMEM;
}
/* Check that the space is available. */
- if (flags & UVM_FLAG_UNMAP)
- uvm_unmap_remove(map, *addr, *addr + sz, &dead, FALSE, TRUE);
+ if (flags & UVM_FLAG_UNMAP) {
+ if (uvm_unmap_remove(map, *addr, *addr + sz, &dead,
+ FALSE, TRUE, TRUE) != 0) {
+ error = EPERM; /* immutable entries found */
+ goto unlock;
+ }
+ }
if (!uvm_map_isavail(map, NULL, &first, &last, *addr, sz)) {
error = ENOMEM;
goto unlock;
(end & (vaddr_t)PAGE_MASK) == 0);
TAILQ_INIT(&dead);
vm_map_lock(map);
- uvm_unmap_remove(map, start, end, &dead, FALSE, TRUE);
+ uvm_unmap_remove(map, start, end, &dead, FALSE, TRUE, FALSE);
vm_map_unlock(map);
if (map->flags & VM_MAP_INTRSAFE)
* If markfree, entry will be properly marked free, otherwise, no replacement
* entry will be put in the tree (corrupting the tree).
*/
-void
+int
uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
struct uvm_map_deadq *dead, boolean_t remove_holes,
- boolean_t markfree)
+ boolean_t markfree, boolean_t checkimmutable)
{
struct vm_map_entry *prev_hint, *next, *entry;
start = MAX(start, map->min_offset);
end = MIN(end, map->max_offset);
if (start >= end)
- return;
+ return 0;
if ((map->flags & VM_MAP_INTRSAFE) == 0)
splassert(IPL_NONE);
/* Find first affected entry. */
entry = uvm_map_entrybyaddr(&map->addr, start);
KDASSERT(entry != NULL && entry->start <= start);
+
+ if (checkimmutable) {
+ struct vm_map_entry *entry1 = entry;
+
+ /* Refuse to unmap if any entries are immutable */
+ for (; entry1 != NULL && entry1->start < end; entry1 = next) {
+ KDASSERT(entry1->start >= start);
+ next = RBT_NEXT(uvm_map_addr, entry1);
+ if (entry1->etype & UVM_ET_IMMUTABLE)
+ return EPERM;
+ }
+ }
+
if (entry->end <= start && markfree)
entry = RBT_NEXT(uvm_map_addr, entry);
else
KDASSERT(uvm_map_entrybyaddr(&map->addr, a) == NULL);
}
#endif
+ return 0;
}
/*
*/
int
uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
- vm_prot_t new_prot, boolean_t set_max)
+ vm_prot_t new_prot, boolean_t set_max, boolean_t checkimmutable)
{
struct vm_map_entry *first, *iter;
vm_prot_t old_prot;
if (iter->start == iter->end || UVM_ET_ISHOLE(iter))
continue;
+ if (checkimmutable &&
+ (iter->etype & UVM_ET_IMMUTABLE)) {
+ error = EPERM;
+ goto out;
+ }
old_prot = iter->protection;
if (old_prot == PROT_NONE && new_prot != old_prot) {
dused += uvmspace_dused(
* (as in, not replace them with free-memory entries).
*/
uvm_unmap_remove(map, map->min_offset, map->max_offset,
- &dead_entries, TRUE, FALSE);
+ &dead_entries, TRUE, FALSE, FALSE);
KDASSERT(RBT_EMPTY(uvm_map_addr, &map->addr));
}
ret = EINVAL;
- uvm_unmap_remove(dstmap, dstaddr, unmap_end, &dead, FALSE, TRUE);
+ uvm_unmap_remove(dstmap, dstaddr, unmap_end, &dead, FALSE, TRUE, FALSE);
exit_unlock:
vm_map_unlock_read(srcmap);
TAILQ_INIT(&dead);
uvm_tree_sanity(map, __FILE__, __LINE__);
uvm_unmap_remove(map, map->min_offset, map->max_offset, &dead,
- TRUE, FALSE);
+ TRUE, FALSE, FALSE);
pmap_destroy(map->pmap);
KASSERT(RBT_EMPTY(uvm_map_addr, &map->addr));
free(map, M_VMMAP, sizeof *map);
return (0);
}
+/*
+ * uvm_map_immutable: block mapping/mprotect for range of addrs in map.
+ *
+ * Sets (imut != 0) or clears (imut == 0) UVM_ET_IMMUTABLE on every map
+ * entry overlapping [start, end), clipping entries at the range edges so
+ * the flag applies to exactly the requested span.  Entries marked
+ * immutable are later refused by uvm_map_protect()/uvm_unmap_remove()
+ * when their checkimmutable argument is TRUE.
+ *
+ * => map must be unlocked
+ * => returns EINVAL if start > end, 0 otherwise
+ * => NOTE(review): the "name" parameter is never used in this body —
+ *    presumably reserved for debugging/trace output; confirm intent.
+ */
+int
+uvm_map_immutable(struct vm_map *map, vaddr_t start, vaddr_t end, int imut, char *name)
+{
+	struct vm_map_entry *entry;
+
+	if (start > end)
+		return EINVAL;
+	/* Clamp the range to the map's bounds; an empty range is a no-op. */
+	start = MAX(start, map->min_offset);
+	end = MIN(end, map->max_offset);
+	if (start >= end)
+		return 0;
+
+	vm_map_lock(map);
+
+	/*
+	 * Find the entry containing (or nearest below) start.  If start
+	 * falls inside it, clip so the flag change begins exactly at
+	 * start; otherwise the entry ends at/before start, so step to the
+	 * next one.
+	 * NOTE(review): entry is dereferenced without a NULL check —
+	 * presumably uvm_map_entrybyaddr() cannot return NULL for an
+	 * in-bounds address (cf. the KDASSERT in uvm_unmap_remove());
+	 * confirm.
+	 */
+	entry = uvm_map_entrybyaddr(&map->addr, start);
+	if (entry->end > start)
+		UVM_MAP_CLIP_START(map, entry, start);
+	else
+		entry = RBT_NEXT(uvm_map_addr, entry);
+
+	/* Walk all entries overlapping the range, clipping at end. */
+	while (entry != NULL && entry->start < end) {
+		UVM_MAP_CLIP_END(map, entry, end);
+		if (imut)
+			entry->etype |= UVM_ET_IMMUTABLE;
+		else
+			entry->etype &= ~UVM_ET_IMMUTABLE;
+		entry = RBT_NEXT(uvm_map_addr, entry);
+	}
+
+	/* Bump the write serial so concurrent readers notice the change. */
+	map->wserial++;
+	vm_map_unlock(map);
+	return (0);
+}
+
/*
* uvm_map_advice: set advice code for range of addrs in map.
*
fail2_unmap:
if (error) {
uvm_unmap_remove(kernel_map, dstaddr, dstaddr + len, &dead,
- FALSE, TRUE);
+ FALSE, TRUE, FALSE);
}
/* Release maps, release dead entries. */
-/* $OpenBSD: uvm_map.h,v 1.75 2022/03/12 08:11:07 mpi Exp $ */
+/* $OpenBSD: uvm_map.h,v 1.76 2022/10/07 14:59:39 deraadt Exp $ */
/* $NetBSD: uvm_map.h,v 1.24 2001/02/18 21:19:08 chs Exp $ */
/*
vaddr_t uvm_map_pie(vaddr_t);
vaddr_t uvm_map_hint(struct vmspace *, vm_prot_t, vaddr_t, vaddr_t);
int uvm_map_syscall(struct vm_map *, vaddr_t, vaddr_t);
+int uvm_map_immutable(struct vm_map *, vaddr_t, vaddr_t, int, char *);
int uvm_map_inherit(struct vm_map *, vaddr_t, vaddr_t, vm_inherit_t);
int uvm_map_advice(struct vm_map *, vaddr_t, vaddr_t, int);
void uvm_map_init(void);
struct vm_map *);
void uvm_unmap(struct vm_map *, vaddr_t, vaddr_t);
void uvm_unmap_detach(struct uvm_map_deadq *, int);
-void uvm_unmap_remove(struct vm_map*, vaddr_t, vaddr_t,
- struct uvm_map_deadq *, boolean_t, boolean_t);
+int uvm_unmap_remove(struct vm_map*, vaddr_t, vaddr_t,
+ struct uvm_map_deadq *, boolean_t, boolean_t, boolean_t);
void uvm_map_set_uaddr(struct vm_map*, struct uvm_addr_state**,
struct uvm_addr_state*);
int uvm_map_mquery(struct vm_map*, vaddr_t*, vsize_t, voff_t, int);
-/* $OpenBSD: uvm_mmap.c,v 1.172 2022/08/01 14:56:59 deraadt Exp $ */
+/* $OpenBSD: uvm_mmap.c,v 1.173 2022/10/07 14:59:39 deraadt Exp $ */
/* $NetBSD: uvm_mmap.c,v 1.49 2001/02/18 21:19:08 chs Exp $ */
/*
}
TAILQ_INIT(&dead_entries);
- uvm_unmap_remove(map, addr, addr + size, &dead_entries, FALSE, TRUE);
+ if (uvm_unmap_remove(map, addr, addr + size, &dead_entries,
+ FALSE, TRUE, TRUE) != 0) {
+ vm_map_unlock(map);
+ return EPERM; /* immutable entries found */
+ }
vm_map_unlock(map); /* and unlock */
uvm_unmap_detach(&dead_entries, 0);
return EINVAL; /* disallow wrap-around. */
return (uvm_map_protect(&p->p_vmspace->vm_map, addr, addr+size,
- prot, FALSE));
+ prot, FALSE, TRUE));
}
/*
return uvm_map_syscall(&p->p_vmspace->vm_map, addr, addr+size);
}
+/*
+ * sys_mimmutable: the mimmutable system call
+ *
+ * Marks the page-aligned span covering [addr, addr+len) immutable in the
+ * calling process's address space, forbidding later mapping/protection
+ * changes on it.  Returns EINVAL on address wrap-around, otherwise the
+ * result of uvm_map_immutable().  There is deliberately no way to clear
+ * the flag from userland (imut is hardwired to 1 here).
+ */
+int
+sys_mimmutable(struct proc *p, void *v, register_t *retval)
+{
+	struct sys_mimmutable_args /* {
+		syscallarg(void *) addr;
+		syscallarg(size_t) len;
+	} */ *uap = v;
+	vaddr_t addr;
+	vsize_t size, pageoff;
+
+	addr = (vaddr_t)SCARG(uap, addr);
+	size = (vsize_t)SCARG(uap, len);
+
+	/*
+	 * align the address to a page boundary, and adjust the size accordingly
+	 */
+	ALIGN_ADDR(addr, size, pageoff);
+	if (addr > SIZE_MAX - size)
+		return EINVAL;	/* disallow wrap-around. */
+
+	return uvm_map_immutable(&p->p_vmspace->vm_map, addr, addr+size, 1, "sys");
+}
+
/*
* sys_minherit: the minherit system call
*/
if (kva != 0) {
vm_map_lock(kernel_map);
uvm_unmap_remove(kernel_map, kva,
- kva+PAGE_SIZE, &dead_entries, FALSE, TRUE);
+ kva+PAGE_SIZE, &dead_entries,
+ FALSE, TRUE, FALSE); /* XXX */
vm_map_unlock(kernel_map);
kva = 0;
}
if (kva != 0) {
vm_map_lock(kernel_map);
uvm_unmap_remove(kernel_map, kva, kva+PAGE_SIZE,
- &dead_entries, FALSE, TRUE);
+ &dead_entries, FALSE, TRUE, FALSE); /* XXX */
vm_map_unlock(kernel_map);
}
uvm_unmap_detach(&dead_entries, AMAP_REFALL);