-/* $OpenBSD: uvm_map.c,v 1.297 2022/10/15 05:56:01 deraadt Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.298 2022/10/16 16:16:37 deraadt Exp $ */
/* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
/*
/* Check that the space is available. */
if (flags & UVM_FLAG_UNMAP) {
+ if ((flags & UVM_FLAG_STACK) &&
+ !uvm_map_is_stack_remappable(map, *addr, sz,
+ (flags & UVM_FLAG_SIGALTSTACK))) {
+ error = EINVAL;
+ goto unlock;
+ }
if (uvm_unmap_remove(map, *addr, *addr + sz, &dead,
- FALSE, TRUE, TRUE) != 0) {
+ FALSE, TRUE,
+ (flags & UVM_FLAG_SIGALTSTACK) ? FALSE : TRUE) != 0) {
error = EPERM; /* immutable entries found */
goto unlock;
}
}
/*
- * If the region already exists as contigious read-write anonymous memory
- * without special attributes, also mark it as stack for use by sigaltstack(2),
- * otherwise fail.
+ * Check whether the given address range can be converted to a MAP_STACK
+ * mapping.
+ *
+ * Must be called with map locked.
*/
-int
-uvm_map_make_stack(struct proc *p, vaddr_t addr, vsize_t sz)
+boolean_t
+uvm_map_is_stack_remappable(struct vm_map *map, vaddr_t addr, vaddr_t sz,
+ int sigaltstack_check)
{
- vm_map_t map = &p->p_vmspace->vm_map;
- struct vm_map_entry *entry, *iter, *prev = NULL;
- vaddr_t start, end;
- int error = EINVAL;
-
- start = round_page(addr);
- end = trunc_page(addr + sz);
-#ifdef MACHINE_STACK_GROWS_UP
- if (end == addr + sz)
- end -= PAGE_SIZE;
-#else
- if (start == addr)
- start += PAGE_SIZE;
-#endif
-
- vm_map_lock(map);
+ vaddr_t end = addr + sz;
+ struct vm_map_entry *first, *iter, *prev = NULL;
- if (!uvm_map_lookup_entry(map, start, &entry)) {
+ if (!uvm_map_lookup_entry(map, addr, &first)) {
printf("map stack 0x%lx-0x%lx of map %p failed: no mapping\n",
- start, end, map);
- goto out;
+ addr, end, map);
+ return FALSE;
}
/*
* Check that the address range exists and is contiguous.
*/
- for (iter = entry; iter != NULL && iter->start < end;
+ for (iter = first; iter != NULL && iter->start < end;
prev = iter, iter = RBT_NEXT(uvm_map_addr, iter)) {
/*
* Make sure that we do not have holes in the range.
if (prev != NULL && prev->end != iter->start) {
printf("map stack 0x%lx-0x%lx of map %p failed: "
- "hole in range\n", start, end, map);
- goto out;
+ "hole in range\n", addr, end, map);
+ return FALSE;
}
if (iter->start == iter->end || UVM_ET_ISHOLE(iter)) {
printf("map stack 0x%lx-0x%lx of map %p failed: "
- "hole in range\n", start, end, map);
- goto out;
- }
- if (iter->etype & UVM_ET_SYSCALL) {
- printf("in syscall range, not allowed\n");
- goto out;
+ "hole in range\n", addr, end, map);
+ return FALSE;
}
- if (iter->protection != (PROT_READ | PROT_WRITE)) {
- printf("prot %x, not allowed\n", iter->protection);
- goto out;
+ if (sigaltstack_check) {
+ if ((iter->etype & UVM_ET_SYSCALL))
+ return FALSE;
+ if (iter->protection != (PROT_READ | PROT_WRITE))
+ return FALSE;
}
}
+ return TRUE;
+}
+
+/*
+ * Remap the middle-pages of an existing mapping as a stack range.
+ * If there exists a previous contiguous mapping with the given range
+ * [addr, addr + sz), with protection PROT_READ|PROT_WRITE, then the
+ * mapping is dropped, and a new anon mapping is created and marked as
+ * a stack.
+ *
+ * Must be called with map unlocked.
+ */
+int
+uvm_map_remap_as_stack(struct proc *p, vaddr_t addr, vaddr_t sz)
+{
+ vm_map_t map = &p->p_vmspace->vm_map;
+ vaddr_t start, end;
+ int error;
+ int flags = UVM_MAPFLAG(PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_INHERIT_COPY, MADV_NORMAL,
+ UVM_FLAG_STACK | UVM_FLAG_FIXED | UVM_FLAG_UNMAP |
+ UVM_FLAG_COPYONW | UVM_FLAG_SIGALTSTACK);
+
+ start = round_page(addr);
+ end = trunc_page(addr + sz);
+#ifdef MACHINE_STACK_GROWS_UP
+ if (end == addr + sz)
+ end -= PAGE_SIZE;
+#else
+ if (start == addr)
+ start += PAGE_SIZE;
+#endif
+
+ if (start < map->min_offset || end >= map->max_offset || end < start)
+ return EINVAL;
+
/*
- * Mark the specified range as stack
+ * UVM_FLAG_SIGALTSTACK indicates that immutable may be bypassed,
+ * but the range is checked to ensure it is contiguous, is not a
+ * syscall mapping, and has protection RW. Then, a new mapping (all
+ * placed upon the region, which prevents an attacker from pivoting
+ * into pre-placed MAP_STACK space.
*/
- if (entry->end > start)
- UVM_MAP_CLIP_START(map, entry, start);
- else
- entry = RBT_NEXT(uvm_map_addr, entry);
-
- while (entry != NULL && entry->start < end) {
- UVM_MAP_CLIP_END(map, entry, end);
- entry->etype |= UVM_ET_STACK;
- entry = RBT_NEXT(uvm_map_addr, entry);
- }
- map->sserial++;
- error = 0;
-out:
- vm_map_unlock(map);
+ error = uvm_mapanon(map, &start, end - start, 0, flags);
if (error != 0)
- printf("map stack for pid %s/%d %lx/%lx failed\n",
- p->p_p->ps_comm, p->p_p->ps_pid, addr, sz);
+ printf("map stack for pid %d failed\n", p->p_p->ps_pid);
+
return error;
}