From 2e53ad5e72f9b36133dfc3c0ca2faa805f0dec1f Mon Sep 17 00:00:00 2001
From: deraadt
Date: Sun, 16 Oct 2022 16:16:37 +0000
Subject: [PATCH] Rather than marking MAP_STACK on entries for sigaltstack()
 [2 days ago], go back to the old approach: using a new anon mapping because
 it removes any potential gadgetry pre-placed in the region (by making it
 zero).

But also bring in a few more validation checks beyond contiguous mapping --
it must not be a syscall region, and the protection must be precisely RW.
This does allow sigaltstack() to shoot zero'd MAP_STACK non-immutable
regions into the main stack area (which will soon be immutable).  I am not
sure we can keep or re-apply immutability on the region after we convert it
to stack (perhaps this could be determined during the validation entry
walk?)  Sadly, continued support for sigaltstack() does require picking what
we guess is the best compromise.

ok kettenis
---
 sys/uvm/uvm_extern.h |   3 +-
 sys/uvm/uvm_map.c    | 131 +++++++++++++++++++++++++------------------
 sys/uvm/uvm_map.h    |   5 +-
 3 files changed, 81 insertions(+), 58 deletions(-)

diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h
index cfdfe884a36..ca34d999f69 100644
--- a/sys/uvm/uvm_extern.h
+++ b/sys/uvm/uvm_extern.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: uvm_extern.h,v 1.164 2022/10/07 14:59:39 deraadt Exp $	*/
+/*	$OpenBSD: uvm_extern.h,v 1.165 2022/10/16 16:16:37 deraadt Exp $	*/
 /*	$NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $	*/
 
 /*
@@ -112,6 +112,7 @@ typedef int	vm_prot_t;
 #define UVM_FLAG_WC	0x4000000	/* write combining */
 #define UVM_FLAG_CONCEAL 0x8000000	/* omit from dumps */
 #define UVM_FLAG_SYSCALL 0x10000000	/* system calls allowed */
+#define UVM_FLAG_SIGALTSTACK 0x20000000	/* sigaltstack validation required */
 
 /* macros to extract info */
 #define UVM_PROTECTION(X)	((X) & PROT_MASK)
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index aaa24b5209c..74d5f298ab8 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: uvm_map.c,v 1.297 2022/10/15 05:56:01 deraadt Exp $	*/
+/*	$OpenBSD: uvm_map.c,v 1.298 2022/10/16 16:16:37 deraadt Exp $	*/
 /*	$NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $	*/
 
 /*
@@ -792,8 +792,15 @@ uvm_mapanon(struct vm_map *map, vaddr_t *addr, vsize_t sz,
 
 	/* Check that the space is available. */
 	if (flags & UVM_FLAG_UNMAP) {
+		if ((flags & UVM_FLAG_STACK) &&
+		    !uvm_map_is_stack_remappable(map, *addr, sz,
+		    (flags & UVM_FLAG_SIGALTSTACK))) {
+			error = EINVAL;
+			goto unlock;
+		}
 		if (uvm_unmap_remove(map, *addr, *addr + sz, &dead,
-		    FALSE, TRUE, TRUE) != 0) {
+		    FALSE, TRUE,
+		    (flags & UVM_FLAG_SIGALTSTACK) ? FALSE : TRUE) != 0) {
 			error = EPERM;	/* immutable entries found */
 			goto unlock;
 		}
@@ -1685,40 +1692,28 @@ uvm_map_inentry(struct proc *p, struct p_inentry *ie, vaddr_t addr,
 }
 
 /*
- * If the region already exists as contigious read-write anonymous memory
- * without special attributes, also mark it as stack for use by sigaltstack(2),
- * otherwise fail.
+ * Check whether the given address range can be converted to a MAP_STACK
+ * mapping.
+ *
+ * Must be called with map locked.
  */
-int
-uvm_map_make_stack(struct proc *p, vaddr_t addr, vsize_t sz)
+boolean_t
+uvm_map_is_stack_remappable(struct vm_map *map, vaddr_t addr, vaddr_t sz,
+    int sigaltstack_check)
 {
-	vm_map_t map = &p->p_vmspace->vm_map;
-	struct vm_map_entry *entry, *iter, *prev = NULL;
-	vaddr_t start, end;
-	int error = EINVAL;
-
-	start = round_page(addr);
-	end = trunc_page(addr + sz);
-#ifdef MACHINE_STACK_GROWS_UP
-	if (end == addr + sz)
-		end -= PAGE_SIZE;
-#else
-	if (start == addr)
-		start += PAGE_SIZE;
-#endif
-
-	vm_map_lock(map);
+	vaddr_t end = addr + sz;
+	struct vm_map_entry *first, *iter, *prev = NULL;
 
-	if (!uvm_map_lookup_entry(map, start, &entry)) {
+	if (!uvm_map_lookup_entry(map, addr, &first)) {
 		printf("map stack 0x%lx-0x%lx of map %p failed: no mapping\n",
-		    start, end, map);
-		goto out;
+		    addr, end, map);
+		return FALSE;
 	}
 
 	/*
 	 * Check that the address range exists and is contiguous.
 	 */
-	for (iter = entry; iter != NULL && iter->start < end;
+	for (iter = first; iter != NULL && iter->start < end;
 	    prev = iter, iter = RBT_NEXT(uvm_map_addr, iter)) {
 		/*
 		 * Make sure that we do not have holes in the range.
@@ -1733,44 +1728,70 @@ uvm_map_make_stack(struct proc *p, vaddr_t addr, vsize_t sz)
 
 		if (prev != NULL && prev->end != iter->start) {
 			printf("map stack 0x%lx-0x%lx of map %p failed: "
-			    "hole in range\n", start, end, map);
-			goto out;
+			    "hole in range\n", addr, end, map);
+			return FALSE;
 		}
 		if (iter->start == iter->end || UVM_ET_ISHOLE(iter)) {
 			printf("map stack 0x%lx-0x%lx of map %p failed: "
-			    "hole in range\n", start, end, map);
-			goto out;
-		}
-		if (iter->etype & UVM_ET_SYSCALL) {
-			printf("in syscall range, not allowed\n");
-			goto out;
+			    "hole in range\n", addr, end, map);
+			return FALSE;
 		}
-		if (iter->protection != (PROT_READ | PROT_WRITE)) {
-			printf("prot %x, not allowed\n", iter->protection);
-			goto out;
+		if (sigaltstack_check) {
+			if ((iter->etype & UVM_ET_SYSCALL))
+				return FALSE;
+			if (iter->protection != (PROT_READ | PROT_WRITE))
+				return FALSE;
 		}
 	}
 
+	return TRUE;
+}
+
+/*
+ * Remap the middle-pages of an existing mapping as a stack range.
+ * If there exists a previous contiguous mapping with the given range
+ * [addr, addr + sz), with protection PROT_READ|PROT_WRITE, then the
+ * mapping is dropped, and a new anon mapping is created and marked as
+ * a stack.
+ *
+ * Must be called with map unlocked.
+ */
+int
+uvm_map_remap_as_stack(struct proc *p, vaddr_t addr, vaddr_t sz)
+{
+	vm_map_t map = &p->p_vmspace->vm_map;
+	vaddr_t start, end;
+	int error;
+	int flags = UVM_MAPFLAG(PROT_READ | PROT_WRITE,
+	    PROT_READ | PROT_WRITE | PROT_EXEC,
+	    MAP_INHERIT_COPY, MADV_NORMAL,
+	    UVM_FLAG_STACK | UVM_FLAG_FIXED | UVM_FLAG_UNMAP |
+	    UVM_FLAG_COPYONW | UVM_FLAG_SIGALTSTACK);
+
+	start = round_page(addr);
+	end = trunc_page(addr + sz);
+#ifdef MACHINE_STACK_GROWS_UP
+	if (end == addr + sz)
+		end -= PAGE_SIZE;
+#else
+	if (start == addr)
+		start += PAGE_SIZE;
+#endif
+
+	if (start < map->min_offset || end >= map->max_offset || end < start)
+		return EINVAL;
+
 	/*
-	 * Mark the specified range as stack
+	 * UVM_FLAG_SIGALTSTACK indicates that immutable may be bypassed,
+	 * but the range is checked that it is contiguous, is not a syscall
+	 * mapping, and protection RW.  Then, a new mapping (all zero) is
+	 * placed upon the region, which prevents an attacker from pivoting
+	 * into pre-placed MAP_STACK space.
 	 */
-	if (entry->end > start)
-		UVM_MAP_CLIP_START(map, entry, start);
-	else
-		entry = RBT_NEXT(uvm_map_addr, entry);
-
-	while (entry != NULL && entry->start < end) {
-		UVM_MAP_CLIP_END(map, entry, end);
-		entry->etype |= UVM_ET_STACK;
-		entry = RBT_NEXT(uvm_map_addr, entry);
-	}
-	map->sserial++;
-	error = 0;
-out:
-	vm_map_unlock(map);
+	error = uvm_mapanon(map, &start, end - start, 0, flags);
 	if (error != 0)
-		printf("map stack for pid %s/%d %lx/%lx failed\n",
-		    p->p_p->ps_comm, p->p_p->ps_pid, addr, sz);
+		printf("map stack for pid %d failed\n", p->p_p->ps_pid);
+
 	return error;
 }
diff --git a/sys/uvm/uvm_map.h b/sys/uvm/uvm_map.h
index 4915e416dab..267300d8d99 100644
--- a/sys/uvm/uvm_map.h
+++ b/sys/uvm/uvm_map.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: uvm_map.h,v 1.77 2022/10/15 03:23:50 deraadt Exp $	*/
+/*	$OpenBSD: uvm_map.h,v 1.78 2022/10/16 16:16:37 deraadt Exp $	*/
 /*	$NetBSD: uvm_map.h,v 1.24 2001/02/18 21:19:08 chs Exp $	*/
 
 /*
@@ -355,7 +355,8 @@ int	uvm_map_inherit(struct vm_map *, vaddr_t, vaddr_t, vm_inherit_t);
 int	uvm_map_advice(struct vm_map *, vaddr_t, vaddr_t, int);
 void	uvm_map_init(void);
 boolean_t	uvm_map_lookup_entry(struct vm_map *, vaddr_t, vm_map_entry_t *);
-int	uvm_map_make_stack(struct proc *, vaddr_t, vsize_t);
+boolean_t	uvm_map_is_stack_remappable(struct vm_map *, vaddr_t, vsize_t, int);
+int	uvm_map_remap_as_stack(struct proc *, vaddr_t, vsize_t);
 int	uvm_map_replace(struct vm_map *, vaddr_t, vaddr_t,
 		    vm_map_entry_t, int);
 int	uvm_map_reserve(struct vm_map *, vsize_t, vaddr_t, vsize_t,
-- 
2.20.1
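
For context, here is a minimal userland sketch of the sigaltstack(2) path this
change services; it is my own illustration and not part of the commit. The
process hands the kernel an ordinary anonymous PROT_READ|PROT_WRITE region;
the kernel then walks the range with uvm_map_is_stack_remappable() (checking
it is contiguous, exactly RW, and not a syscall mapping) and replaces it with
a fresh zero-filled MAP_STACK anon mapping via uvm_map_remap_as_stack(). Only
standard mmap/sigaltstack/sigaction interfaces are used below.

#include <sys/mman.h>

#include <err.h>
#include <signal.h>
#include <string.h>
#include <unistd.h>

static void
handler(int sig)
{
	/* Runs on the alternate stack because of SA_ONSTACK below. */
	char msg[] = "signal handled on the alternate stack\n";

	write(STDOUT_FILENO, msg, sizeof(msg) - 1);
	_exit(0);
}

int
main(void)
{
	stack_t ss;
	struct sigaction sa;

	/* Plain RW anonymous memory; the kernel will remap it as MAP_STACK. */
	ss.ss_sp = mmap(NULL, SIGSTKSZ, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	if (ss.ss_sp == MAP_FAILED)
		err(1, "mmap");
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;

	/* Registering the region triggers the kernel-side validation/remap. */
	if (sigaltstack(&ss, NULL) == -1)
		err(1, "sigaltstack");

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = handler;
	sa.sa_flags = SA_ONSTACK;
	sigemptyset(&sa.sa_mask);
	if (sigaction(SIGUSR1, &sa, NULL) == -1)
		err(1, "sigaction");

	raise(SIGUSR1);
	return 1;	/* not reached */
}

A region that is non-contiguous, not exactly PROT_READ|PROT_WRITE, or inside a
syscall mapping now fails the validation walk (uvm_map_is_stack_remappable()
returns FALSE and the remap bails out with EINVAL), instead of having its
existing contents silently marked MAP_STACK.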