-/* $OpenBSD: exec_subr.c,v 1.62 2022/10/21 20:46:40 deraadt Exp $ */
+/* $OpenBSD: exec_subr.c,v 1.63 2022/11/17 18:53:12 deraadt Exp $ */
/* $NetBSD: exec_subr.c,v 1.9 1994/12/04 03:10:42 mycroft Exp $ */
error = (uvm_map_protect(&p->p_vmspace->vm_map,
cmd->ev_addr, round_page(cmd->ev_len),
- prot, FALSE, TRUE));
+ prot, 0, FALSE, TRUE));
}
if (error == 0) {
if (cmd->ev_flags & VMCMD_IMMUTABLE)
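Every caller of uvm_map_protect() gains the new etype argument in this change. As a hedged summary of the call shapes appearing in these hunks (identifiers abbreviated, not a new API), there are exactly two:

	/* non-stack callers (exec vmcmds above, mprotect below): no tag */
	error = uvm_map_protect(map, start, end, prot, 0, FALSE, TRUE);

	/* stack caller (setrlimit, kern_resource.c below): tag the range */
	(void) uvm_map_protect(map, start, end, PROT_READ | PROT_WRITE,
	    UVM_ET_STACK, FALSE, FALSE);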
-/* $OpenBSD: kern_exec.c,v 1.238 2022/10/30 17:43:40 guenther Exp $ */
+/* $OpenBSD: kern_exec.c,v 1.239 2022/11/17 18:53:12 deraadt Exp $ */
/* $NetBSD: kern_exec.c,v 1.75 1996/02/09 18:59:28 christos Exp $ */
#ifdef MACHINE_STACK_GROWS_UP
pr->ps_strings = (vaddr_t)vm->vm_maxsaddr + sgap;
if (uvm_map_protect(&vm->vm_map, (vaddr_t)vm->vm_maxsaddr,
- trunc_page(pr->ps_strings), PROT_NONE, TRUE, FALSE))
+ trunc_page(pr->ps_strings), PROT_NONE, 0, TRUE, FALSE))
goto exec_abort;
#else
pr->ps_strings = (vaddr_t)vm->vm_minsaddr - sizeof(arginfo) - sgap;
if (uvm_map_protect(&vm->vm_map,
round_page(pr->ps_strings + sizeof(arginfo)),
- (vaddr_t)vm->vm_minsaddr, PROT_NONE, TRUE, FALSE))
+ (vaddr_t)vm->vm_minsaddr, PROT_NONE, 0, TRUE, FALSE))
goto exec_abort;
#endif
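Both guard hunks pass 0 for etype: the randomized gap above the argument area is deliberately not stack, and the RW-only filter added in uvm_map.c below would discard the tag on a PROT_NONE change anyway. A hedged sketch of the grows-down layout being enforced (the MACHINE_STACK_GROWS_UP case mirrors it at the other end of the stack):

	/*
	 *   vm_minsaddr ->  +----------------------------+  high addresses
	 *                   | random gap (sgap): NONE    |  <- hunk above,
	 *                   +----------------------------+     etype 0
	 *   ps_strings  ->  | arginfo / argv / envp      |
	 *                   +----------------------------+
	 *                   | usable stack, grows down   |
	 */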
-/* $OpenBSD: kern_resource.c,v 1.75 2022/10/07 14:59:39 deraadt Exp $ */
+/* $OpenBSD: kern_resource.c,v 1.76 2022/11/17 18:53:13 deraadt Exp $ */
/* $NetBSD: kern_resource.c,v 1.38 1996/10/23 07:19:38 matthias Exp $ */
#include <sys/syscallargs.h>
#include <uvm/uvm_extern.h>
+#include <uvm/uvm.h>
/* Resource usage check interval in msec */
#define RUCHECK_INTERVAL 1000
size = round_page(size);
KERNEL_LOCK();
(void) uvm_map_protect(&vm->vm_map, addr,
- addr+size, prot, FALSE, FALSE);
+ addr+size, prot, UVM_ET_STACK, FALSE, FALSE);
KERNEL_UNLOCK();
}
}
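Note that this call passes UVM_ET_STACK unconditionally even though prot may be PROT_NONE here (the surrounding dosetrlimit() logic, elided from this diff, picks PROT_NONE when the stack limit shrinks and PROT_READ|PROT_WRITE when it grows); it relies on the filter added in uvm_map.c below, which only lets the tag through for a mapping changing to exactly RW. A restatement of that filter as a tiny pure function, for clarity only:

	/* mirrors the check added in uvm_map_protect(); not new code */
	static int
	effective_etype(int etype, vm_prot_t new_prot)
	{
		if (etype && new_prot != (PROT_READ | PROT_WRITE))
			return 0;	/* only RW ranges gain UVM_ET_STACK */
		return etype;
	}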
-/* $OpenBSD: uvm_extern.h,v 1.165 2022/10/16 16:16:37 deraadt Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.166 2022/11/17 18:53:05 deraadt Exp $ */
/* $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $ */
boolean_t uvm_map_checkprot(vm_map_t, vaddr_t,
vaddr_t, vm_prot_t);
int uvm_map_protect(vm_map_t, vaddr_t,
- vaddr_t, vm_prot_t, boolean_t, boolean_t);
+ vaddr_t, vm_prot_t, int etype, boolean_t, boolean_t);
struct vmspace *uvmspace_alloc(vaddr_t, vaddr_t,
boolean_t, boolean_t);
void uvmspace_init(struct vmspace *, struct pmap *,
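For reference, a commented restatement of the new prototype; the parameter names match the definition in uvm_map.c below, and the per-parameter notes are inferred from the callers in this diff rather than stated by the header itself:

	int	uvm_map_protect(
		    vm_map_t map,		/* address space to change */
		    vaddr_t start,		/* start of range */
		    vaddr_t end,		/* end of range */
		    vm_prot_t new_prot,		/* protection to apply */
		    int etype,			/* 0, or UVM_ET_STACK to tag
						 * entries becoming RW */
		    boolean_t set_max,		/* also adjust max_protection */
		    boolean_t checkimmutable);	/* fail on immutable entries */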
-/* $OpenBSD: uvm_map.c,v 1.303 2022/11/04 09:36:44 mpi Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.304 2022/11/17 18:53:05 deraadt Exp $ */
/* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
int
uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
- vm_prot_t new_prot, boolean_t set_max, boolean_t checkimmutable)
+ vm_prot_t new_prot, int etype, boolean_t set_max, boolean_t checkimmutable)
{
struct vm_map_entry *first, *iter;
vm_prot_t old_prot;
vsize_t dused;
int error;
+ KASSERT((etype & ~UVM_ET_STACK) == 0); /* only UVM_ET_STACK allowed */
+
if (start > end)
return EINVAL;
start = MAX(start, map->min_offset);
}
}
+ /* only apply UVM_ET_STACK on a mapping changing to RW */
+ if (etype && new_prot != (PROT_READ|PROT_WRITE))
+ etype = 0;
+
/* Fix protections. */
for (iter = first; iter != NULL && iter->start < end;
iter = RBT_NEXT(uvm_map_addr, iter)) {
iter->protection &= new_prot;
} else
iter->protection = new_prot;
+ iter->etype |= etype; /* potentially add UVM_ET_STACK */
/*
* update physical map if necessary. worry about copy-on-write
pmap_update(map->pmap);
out:
+ if (etype & UVM_ET_STACK)
+ map->sserial++;
vm_map_unlock(map);
return error;
}
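The serial bump at the end is what makes the tag observable elsewhere: code that caches a per-process answer about the stack region can key the cache on map->sserial and recheck when it changes (in the tree this pattern backs the syscall-entry stack-pointer check; the struct and function below are an illustrative sketch, not the kernel's actual identifiers):

	struct stack_cache {
		u_long	sc_serial;		/* map->sserial when cached */
		vaddr_t	sc_start, sc_end;	/* cached stack entry bounds */
	};

	static int
	stack_cache_valid(struct vm_map *map, struct stack_cache *sc,
	    vaddr_t sp)
	{
		/* stale the moment a range gains UVM_ET_STACK */
		return sc->sc_serial == map->sserial &&
		    sc->sc_start <= sp && sp < sc->sc_end;
	}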
-/* $OpenBSD: uvm_mmap.c,v 1.174 2022/10/21 19:13:33 deraadt Exp $ */
+/* $OpenBSD: uvm_mmap.c,v 1.175 2022/11/17 18:53:05 deraadt Exp $ */
/* $NetBSD: uvm_mmap.c,v 1.49 2001/02/18 21:19:08 chs Exp $ */
return EINVAL; /* disallow wrap-around. */
return (uvm_map_protect(&p->p_vmspace->vm_map, addr, addr+size,
- prot, FALSE, TRUE));
+ prot, 0, FALSE, TRUE));
}
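From userland, the 0 hardwired into sys_mprotect() means no process can ever attach UVM_ET_STACK to one of its own mappings; mprotect(2) only ever changes protection. A minimal, illustrative program exercising that path:

	#include <sys/mman.h>
	#include <err.h>

	int
	main(void)
	{
		long pg = 4096;		/* illustrative page size */
		char *p = mmap(NULL, pg, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE, -1, 0);

		if (p == MAP_FAILED)
			err(1, "mmap");
		/* reaches uvm_map_protect(..., PROT_READ, 0, FALSE, TRUE) */
		if (mprotect(p, pg, PROT_READ) == -1)
			err(1, "mprotect");
		return 0;
	}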