-/* $OpenBSD: exec_elf.c,v 1.168 2022/08/29 16:53:46 deraadt Exp $ */
+/* $OpenBSD: exec_elf.c,v 1.169 2022/10/21 18:10:56 deraadt Exp $ */
/*
* Copyright (c) 1996 Per Fogelstrom
* initially. The dynamic linker will make these read-only
* and add back X permission after relocation processing.
* Static executables with W|X segments will probably crash.
+ * Apply immutability as much as possible, but not for RELRO
+ * or PT_OPENBSD_MUTABLE sections, or LOADS marked
+ * PF_OPENBSD_MUTABLE, or LOADS which violate W^X. Userland
+ * (meaning crt0 or ld.so) will repair those regions.
*/
*prot |= (ph->p_flags & PF_R) ? PROT_READ : 0;
*prot |= (ph->p_flags & PF_W) ? PROT_WRITE : 0;
if ((ph->p_flags & PF_W) == 0)
*prot |= (ph->p_flags & PF_X) ? PROT_EXEC : 0;
+ if ((ph->p_flags & (PF_X | PF_W)) != (PF_X | PF_W) &&
+ (ph->p_flags & PF_OPENBSD_MUTABLE) == 0)
+ flags |= VMCMD_IMMUTABLE;
msize = ph->p_memsz + diff;
offset = ph->p_offset - bdiff;
ph[i].p_memsz, ph[i].p_vaddr + pos, NULLVP, 0, 0);
break;
+ case PT_GNU_RELRO:
+ case PT_OPENBSD_MUTABLE:
+ NEW_VMCMD(&epp->ep_vmcmds, vmcmd_mutable,
+ ph[i].p_memsz, ph[i].p_vaddr + pos, NULLVP, 0, 0);
+ break;
+
default:
break;
}
ph[i].p_memsz, ph[i].p_vaddr + exe_base, NULLVP, 0, 0);
break;
+ case PT_GNU_RELRO:
+ case PT_OPENBSD_MUTABLE:
+ NEW_VMCMD(&epp->ep_vmcmds, vmcmd_mutable,
+ ph[i].p_memsz, ph[i].p_vaddr + exe_base, NULLVP, 0, 0);
+ break;
+
default:
/*
* Not fatal, we don't need to understand everything
-/* $OpenBSD: exec_subr.c,v 1.58 2022/10/07 14:59:39 deraadt Exp $ */
+/* $OpenBSD: exec_subr.c,v 1.59 2022/10/21 18:10:56 deraadt Exp $ */
/* $NetBSD: exec_subr.c,v 1.9 1994/12/04 03:10:42 mycroft Exp $ */
/*
* call this routine.
*/
struct uvm_object *uobj;
- unsigned int syscalls = 0;
+ unsigned int flags = UVM_FLAG_COPYONW | UVM_FLAG_FIXED;
int error;
/*
* do the map
*/
if ((cmd->ev_flags & VMCMD_SYSCALL) && (cmd->ev_prot & PROT_EXEC))
- syscalls |= UVM_FLAG_SYSCALL;
+ flags |= UVM_FLAG_SYSCALL;
error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
uobj, cmd->ev_offset, 0,
UVM_MAPFLAG(cmd->ev_prot, PROT_MASK, MAP_INHERIT_COPY,
- MADV_NORMAL, UVM_FLAG_COPYONW | UVM_FLAG_FIXED | syscalls));
+ MADV_NORMAL, flags));
/*
* check for error
* error: detach from object
*/
uobj->pgops->pgo_detach(uobj);
+ } else {
+ if (cmd->ev_flags & VMCMD_IMMUTABLE)
+ uvm_map_immutable(&p->p_vmspace->vm_map,
+ cmd->ev_addr, round_page(cmd->ev_len),
+ 1, "pagedvn");
}
return (error);
prot = cmd->ev_prot;
- cmd->ev_addr = trunc_page(cmd->ev_addr); /* required by uvm_map */
+ KASSERT((cmd->ev_addr & PAGE_MASK) == 0);
error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
UVM_MAPFLAG(prot | PROT_WRITE, PROT_MASK, MAP_INHERIT_COPY,
* it mapped read-only, so now we are going to have to call
* uvm_map_protect() to fix up the protection. ICK.
*/
- return (uvm_map_protect(&p->p_vmspace->vm_map,
- trunc_page(cmd->ev_addr),
- round_page(cmd->ev_addr + cmd->ev_len),
+ error = (uvm_map_protect(&p->p_vmspace->vm_map,
+ cmd->ev_addr, round_page(cmd->ev_len),
prot, FALSE, TRUE));
}
- return (0);
+ if (error == 0) {
+ if (cmd->ev_flags & VMCMD_IMMUTABLE) {
+ //printf("imut readvn\n");
+ uvm_map_immutable(&p->p_vmspace->vm_map,
+ cmd->ev_addr, round_page(cmd->ev_len),
+ 1, "readvn");
+ }
+ }
+ return (error);
}
/*
int
vmcmd_map_zero(struct proc *p, struct exec_vmcmd *cmd)
{
+ int error;
+
if (cmd->ev_len == 0)
return (0);
- cmd->ev_addr = trunc_page(cmd->ev_addr); /* required by uvm_map */
- return (uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
+ KASSERT((cmd->ev_addr & PAGE_MASK) == 0);
+ error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
UVM_MAPFLAG(cmd->ev_prot, PROT_MASK, MAP_INHERIT_COPY,
MADV_NORMAL, UVM_FLAG_FIXED|UVM_FLAG_COPYONW |
- (cmd->ev_flags & VMCMD_STACK ? UVM_FLAG_STACK : 0))));
+ (cmd->ev_flags & VMCMD_STACK ? UVM_FLAG_STACK : 0)));
+ if (cmd->ev_flags & VMCMD_IMMUTABLE) {
+ //printf("imut zero\n");
+ uvm_map_immutable(&p->p_vmspace->vm_map,
+ cmd->ev_addr, round_page(cmd->ev_len),
+ 1, "zero");
+ }
+ return error;
+}
+
+/*
+ * vmcmd_mutable():
+ * handle vmcmd which changes an address space region back to mutable
+ */
+
+int
+vmcmd_mutable(struct proc *p, struct exec_vmcmd *cmd)
+{
+ if (cmd->ev_len == 0)
+ return (0);
+
+ /* ev_addr, ev_len may be misaligned, so maximize the region */
+ uvm_map_immutable(&p->p_vmspace->vm_map, trunc_page(cmd->ev_addr),
+ round_page(cmd->ev_addr + cmd->ev_len), 0, "mutable");
+ return 0;
}
/*
-/* $OpenBSD: exec.h,v 1.48 2022/09/01 07:26:56 jsg Exp $ */
+/* $OpenBSD: exec.h,v 1.49 2022/10/21 18:10:52 deraadt Exp $ */
/* $NetBSD: exec.h,v 1.59 1996/02/09 18:25:09 christos Exp $ */
/*-
#define VMCMD_BASE 0x0002 /* marks a base entry */
#define VMCMD_STACK 0x0004 /* create with UVM_FLAG_STACK */
#define VMCMD_SYSCALL 0x0008 /* create with UVM_FLAG_SYSCALL */
+#define VMCMD_IMMUTABLE 0x0010 /* create with UVM_ET_IMMUTABLE */
};
-#define EXEC_DEFAULT_VMCMD_SETSIZE 8 /* # of cmds in set to start */
+#define EXEC_DEFAULT_VMCMD_SETSIZE 12 /* # of cmds in set to start */
/* exec vmspace-creation command set; see below */
struct exec_vmcmd_set {
int vmcmd_map_pagedvn(struct proc *, struct exec_vmcmd *);
int vmcmd_map_readvn(struct proc *, struct exec_vmcmd *);
int vmcmd_map_zero(struct proc *, struct exec_vmcmd *);
+int vmcmd_mutable(struct proc *, struct exec_vmcmd *);
int vmcmd_randomize(struct proc *, struct exec_vmcmd *);
int copyargs(struct exec_package *, struct ps_strings *, void *, void *);
void setregs(struct proc *, struct exec_package *, u_long, register_t *);