automatically mark immutable certain regions in program & ld.so LOADs.
author: deraadt <deraadt@openbsd.org>
Fri, 21 Oct 2022 18:10:52 +0000 (18:10 +0000)
committer: deraadt <deraadt@openbsd.org>
Fri, 21 Oct 2022 18:10:52 +0000 (18:10 +0000)
The large commented block in elf_load_psection explains the situation.
ok kettenis.

sys/kern/exec_elf.c
sys/kern/exec_subr.c
sys/sys/exec.h

index a27dac2..8b5a67b 100644 (file)
@@ -1,4 +1,4 @@
-/*     $OpenBSD: exec_elf.c,v 1.168 2022/08/29 16:53:46 deraadt Exp $  */
+/*     $OpenBSD: exec_elf.c,v 1.169 2022/10/21 18:10:56 deraadt Exp $  */
 
 /*
  * Copyright (c) 1996 Per Fogelstrom
@@ -189,11 +189,18 @@ elf_load_psection(struct exec_vmcmd_set *vcset, struct vnode *vp,
         * initially.  The dynamic linker will make these read-only
         * and add back X permission after relocation processing.
         * Static executables with W|X segments will probably crash.
+        * Apply immutability as much as possible, but not for RELRO
+        * or PT_OPENBSD_MUTABLE sections, or LOADS marked
+        * PF_OPENBSD_MUTABLE, or LOADS which violate W^X. Userland
+        * (meaning crt0 or ld.so) will repair those regions.
         */
        *prot |= (ph->p_flags & PF_R) ? PROT_READ : 0;
        *prot |= (ph->p_flags & PF_W) ? PROT_WRITE : 0;
        if ((ph->p_flags & PF_W) == 0)
                *prot |= (ph->p_flags & PF_X) ? PROT_EXEC : 0;
+       if ((ph->p_flags & (PF_X | PF_W)) != (PF_X | PF_W) &&
+           (ph->p_flags & PF_OPENBSD_MUTABLE) == 0)
+               flags |= VMCMD_IMMUTABLE;
 
        msize = ph->p_memsz + diff;
        offset = ph->p_offset - bdiff;
@@ -432,6 +439,12 @@ elf_load_file(struct proc *p, char *path, struct exec_package *epp,
                            ph[i].p_memsz, ph[i].p_vaddr + pos, NULLVP, 0, 0);
                        break;
 
+               case PT_GNU_RELRO:
+               case PT_OPENBSD_MUTABLE:
+                       NEW_VMCMD(&epp->ep_vmcmds, vmcmd_mutable,
+                           ph[i].p_memsz, ph[i].p_vaddr + pos, NULLVP, 0, 0);
+                       break;
+
                default:
                        break;
                }
@@ -655,6 +668,12 @@ exec_elf_makecmds(struct proc *p, struct exec_package *epp)
                            ph[i].p_memsz, ph[i].p_vaddr + exe_base, NULLVP, 0, 0);
                        break;
 
+               case PT_GNU_RELRO:
+               case PT_OPENBSD_MUTABLE:
+                       NEW_VMCMD(&epp->ep_vmcmds, vmcmd_mutable,
+                           ph[i].p_memsz, ph[i].p_vaddr + exe_base, NULLVP, 0, 0);
+                       break;
+
                default:
                        /*
                         * Not fatal, we don't need to understand everything
index 8a949be..dc4053a 100644 (file)
@@ -1,4 +1,4 @@
-/*     $OpenBSD: exec_subr.c,v 1.58 2022/10/07 14:59:39 deraadt Exp $  */
+/*     $OpenBSD: exec_subr.c,v 1.59 2022/10/21 18:10:56 deraadt Exp $  */
 /*     $NetBSD: exec_subr.c,v 1.9 1994/12/04 03:10:42 mycroft Exp $    */
 
 /*
@@ -167,7 +167,7 @@ vmcmd_map_pagedvn(struct proc *p, struct exec_vmcmd *cmd)
         * call this routine.
         */
        struct uvm_object *uobj;
-       unsigned int syscalls = 0;
+       unsigned int flags = UVM_FLAG_COPYONW | UVM_FLAG_FIXED;
        int error;
 
        /*
@@ -195,12 +195,12 @@ vmcmd_map_pagedvn(struct proc *p, struct exec_vmcmd *cmd)
         * do the map
         */
        if ((cmd->ev_flags & VMCMD_SYSCALL) && (cmd->ev_prot & PROT_EXEC))
-               syscalls |= UVM_FLAG_SYSCALL;
+               flags |= UVM_FLAG_SYSCALL;
 
        error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
            uobj, cmd->ev_offset, 0,
            UVM_MAPFLAG(cmd->ev_prot, PROT_MASK, MAP_INHERIT_COPY,
-           MADV_NORMAL, UVM_FLAG_COPYONW | UVM_FLAG_FIXED | syscalls));
+           MADV_NORMAL, flags));
 
        /*
         * check for error
@@ -211,6 +211,11 @@ vmcmd_map_pagedvn(struct proc *p, struct exec_vmcmd *cmd)
                 * error: detach from object
                 */
                uobj->pgops->pgo_detach(uobj);
+       } else {
+               if (cmd->ev_flags & VMCMD_IMMUTABLE)
+                       uvm_map_immutable(&p->p_vmspace->vm_map,
+                           cmd->ev_addr, round_page(cmd->ev_len),
+                           1, "pagedvn");
        }
 
        return (error);
@@ -234,7 +239,7 @@ vmcmd_map_readvn(struct proc *p, struct exec_vmcmd *cmd)
 
        prot = cmd->ev_prot;
 
-       cmd->ev_addr = trunc_page(cmd->ev_addr); /* required by uvm_map */
+       KASSERT((cmd->ev_addr & PAGE_MASK) == 0);
        error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
            round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
            UVM_MAPFLAG(prot | PROT_WRITE, PROT_MASK, MAP_INHERIT_COPY,
@@ -256,12 +261,19 @@ vmcmd_map_readvn(struct proc *p, struct exec_vmcmd *cmd)
                 * it mapped read-only, so now we are going to have to call
                 * uvm_map_protect() to fix up the protection.  ICK.
                 */
-               return (uvm_map_protect(&p->p_vmspace->vm_map,
-                   trunc_page(cmd->ev_addr),
-                   round_page(cmd->ev_addr + cmd->ev_len),
+               error = (uvm_map_protect(&p->p_vmspace->vm_map,
+                   cmd->ev_addr, round_page(cmd->ev_len),
                    prot, FALSE, TRUE));
        }
-       return (0);
+       if (error == 0) {
+               if (cmd->ev_flags & VMCMD_IMMUTABLE) {
+                       //printf("imut readvn\n");
+                       uvm_map_immutable(&p->p_vmspace->vm_map,
+                           cmd->ev_addr, round_page(cmd->ev_len),
+                           1, "readvn");
+               }
+       }
+       return (error);
 }
 
 /*
@@ -272,15 +284,41 @@ vmcmd_map_readvn(struct proc *p, struct exec_vmcmd *cmd)
 int
 vmcmd_map_zero(struct proc *p, struct exec_vmcmd *cmd)
 {
+       int error;
+
        if (cmd->ev_len == 0)
                return (0);
        
-       cmd->ev_addr = trunc_page(cmd->ev_addr); /* required by uvm_map */
-       return (uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
+       KASSERT((cmd->ev_addr & PAGE_MASK) == 0);
+       error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
            round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
            UVM_MAPFLAG(cmd->ev_prot, PROT_MASK, MAP_INHERIT_COPY,
            MADV_NORMAL, UVM_FLAG_FIXED|UVM_FLAG_COPYONW |
-           (cmd->ev_flags & VMCMD_STACK ? UVM_FLAG_STACK : 0))));
+           (cmd->ev_flags & VMCMD_STACK ? UVM_FLAG_STACK : 0)));
+       if (cmd->ev_flags & VMCMD_IMMUTABLE) {
+               //printf("imut zero\n");
+               uvm_map_immutable(&p->p_vmspace->vm_map,
+                   cmd->ev_addr, round_page(cmd->ev_len),
+                   1, "zero");
+       }
+       return error;
+}
+
+/*
+ * vmcmd_mutable():
+ *     handle vmcmd which changes an address space region back to mutable
+ */
+
+int
+vmcmd_mutable(struct proc *p, struct exec_vmcmd *cmd)
+{
+       if (cmd->ev_len == 0)
+               return (0);
+       
+       /* ev_addr, ev_len may be misaligned, so maximize the region */
+       uvm_map_immutable(&p->p_vmspace->vm_map, trunc_page(cmd->ev_addr),
+           round_page(cmd->ev_addr + cmd->ev_len), 0, "mutable");
+       return 0;
 }
 
 /*
index e3e6071..2e410c0 100644 (file)
@@ -1,4 +1,4 @@
-/*     $OpenBSD: exec.h,v 1.48 2022/09/01 07:26:56 jsg Exp $   */
+/*     $OpenBSD: exec.h,v 1.49 2022/10/21 18:10:52 deraadt Exp $       */
 /*     $NetBSD: exec.h,v 1.59 1996/02/09 18:25:09 christos Exp $       */
 
 /*-
@@ -93,9 +93,10 @@ struct exec_vmcmd {
 #define VMCMD_BASE      0x0002  /* marks a base entry */
 #define VMCMD_STACK     0x0004  /* create with UVM_FLAG_STACK */
 #define VMCMD_SYSCALL   0x0008  /* create with UVM_FLAG_SYSCALL */
+#define VMCMD_IMMUTABLE        0x0010  /* create with UVM_ET_IMMUTABLE */
 };
 
-#define        EXEC_DEFAULT_VMCMD_SETSIZE            /* # of cmds in set to start */
+#define        EXEC_DEFAULT_VMCMD_SETSIZE      12      /* # of cmds in set to start */
 
 /* exec vmspace-creation command set; see below */
 struct exec_vmcmd_set {
@@ -147,6 +148,7 @@ void        kill_vmcmds(struct exec_vmcmd_set *evsp);
 int    vmcmd_map_pagedvn(struct proc *, struct exec_vmcmd *);
 int    vmcmd_map_readvn(struct proc *, struct exec_vmcmd *);
 int    vmcmd_map_zero(struct proc *, struct exec_vmcmd *);
+int    vmcmd_mutable(struct proc *, struct exec_vmcmd *);
 int    vmcmd_randomize(struct proc *, struct exec_vmcmd *);
 int    copyargs(struct exec_package *, struct ps_strings *, void *, void *);
 void   setregs(struct proc *, struct exec_package *, u_long, register_t *);