-/* $OpenBSD: cpu.c,v 1.163 2022/11/29 21:41:39 guenther Exp $ */
+/* $OpenBSD: cpu.c,v 1.164 2023/01/20 16:01:04 deraadt Exp $ */
/* $NetBSD: cpu.c,v 1.1 2003/04/26 18:39:26 fvdl Exp $ */
/*-
cr4 |= CR4_UMIP;
if ((cpu_ecxfeature & CPUIDECX_XSAVE) && cpuid_level >= 0xd)
cr4 |= CR4_OSXSAVE;
+ if (pg_xo)
+ cr4 |= CR4_PKE;
if (pmap_use_pcid)
cr4 |= CR4_PCIDE;
lcr4(cr4);
-/* $OpenBSD: locore.S,v 1.131 2022/12/01 00:26:15 guenther Exp $ */
+/* $OpenBSD: locore.S,v 1.132 2023/01/20 16:01:04 deraadt Exp $ */
/* $NetBSD: locore.S,v 1.13 2004/03/25 18:33:17 drochner Exp $ */
/*
jz .Lsyscall_restore_fsbase
.Lsyscall_restore_registers:
+ call pku_xonly
RET_STACK_REFILL_WITH_RCX
movq TF_R8(%rsp),%r8
jz .Lintr_restore_fsbase
.Lintr_restore_registers:
+ call pku_xonly
RET_STACK_REFILL_WITH_RCX
movq TF_R8(%rsp),%r8
testq $PSL_I,%rdx
jnz .Lintr_exit_not_blocked
#endif /* DIAGNOSTIC */
+ call pku_xonly /* XXX guenther disapproves, but foo3 locks */
movq TF_RDI(%rsp),%rdi
movq TF_RSI(%rsp),%rsi
movq TF_R8(%rsp),%r8
lfence
END(pagezero)
+/* void pku_xonly(void) */
+/* Reset PKRU to the execute-only policy before returning to userland. */
+ENTRY(pku_xonly)
+	movq	pg_xo,%rax	/* have PKU support? */
+	cmpq	$0,%rax
+	je	1f		/* pg_xo == 0: PKU unused, nothing to do */
+	movl	$0,%ecx		/* force PKRU for xonly restriction */
+	movl	$0,%edx		/* wrpkru requires %ecx == %edx == 0 */
+	movl	$PGK_VALUE,%eax	/* key0 normal, key1 is exec without read */
+	wrpkru
+1:	ret
+	lfence			/* speculation barrier after ret */
+END(pku_xonly)
+
/* int rdmsr_safe(u_int msr, uint64_t *data) */
ENTRY(rdmsr_safe)
RETGUARD_SETUP(rdmsr_safe, r10)
-/* $OpenBSD: pmap.c,v 1.157 2023/01/19 20:17:11 kettenis Exp $ */
+/* $OpenBSD: pmap.c,v 1.158 2023/01/20 16:01:04 deraadt Exp $ */
/* $NetBSD: pmap.c,v 1.3 2003/05/08 18:13:13 thorpej Exp $ */
/*
pt_entry_t pg_nx = 0;
pt_entry_t pg_g_kern = 0;
+/* pg_xo: XO PTE bits, set to PKU key1 (if cpu supports PKU) */
+pt_entry_t pg_xo;
+
/*
* pmap_pg_wc: if our processor supports PAT then we set this
* to be the pte bits for Write Combining. Else we fall back to
virtual_avail = kva_start; /* first free KVA */
+ /*
+ * If PKU is available, initialize PROT_EXEC entry correctly,
+ * and enable the feature before it gets used
+ * XXX Some Hypervisors forget to save/restore PKU
+ */
+ if (cpuid_level >= 0x7) {
+ uint32_t ecx, dummy;
+ CPUID_LEAF(0x7, 0, dummy, dummy, ecx, dummy);
+ if ((ecx & SEFF0ECX_PKU) &&
+ (cpu_ecxfeature & CPUIDECX_HV) == 0) {
+ lcr4(rcr4() | CR4_PKE);
+ pg_xo = PG_XO;
+ }
+ }
+
/*
* set up protection_codes: we need to be able to convert from
* a MI protection code (some combo of VM_PROT...) to something
*/
protection_codes[PROT_NONE] = pg_nx; /* --- */
- protection_codes[PROT_EXEC] = PG_RO; /* --x */
+ protection_codes[PROT_EXEC] = pg_xo; ; /* --x */
protection_codes[PROT_READ] = PG_RO | pg_nx; /* -r- */
protection_codes[PROT_READ | PROT_EXEC] = PG_RO; /* -rx */
protection_codes[PROT_WRITE] = PG_RW | pg_nx; /* w-- */
sva &= PG_FRAME;
eva &= PG_FRAME;
+ if (!(prot & PROT_READ))
+ set |= pg_xo;
if (!(prot & PROT_WRITE))
clear = PG_RW;
if (!(prot & PROT_EXEC))
-/* $OpenBSD: trap.c,v 1.95 2023/01/17 08:03:51 kettenis Exp $ */
+/* $OpenBSD: trap.c,v 1.96 2023/01/20 16:01:04 deraadt Exp $ */
/* $NetBSD: trap.c,v 1.2 2003/05/04 23:51:56 fvdl Exp $ */
/*-
static inline void frame_dump(struct trapframe *_tf, struct proc *_p,
const char *_sig, uint64_t _cr2);
static inline void verify_smap(const char *_func);
+static inline int verify_pkru(struct proc *);
static inline void debug_trap(struct trapframe *_frame, struct proc *_p,
long _type);
}
}
+/* If we find out userland changed the pkru register, punish the process */
+static inline int
+verify_pkru(struct proc *p)
+{
+	/* PKU not in use (pg_xo == 0), or PKRU still holds the kernel's
+	 * xonly policy value: nothing to do. */
+	if (pg_xo == 0 || rdpkru(0) == PGK_VALUE)
+		return 0;
+	KERNEL_LOCK();		/* hold kernel lock across sigabort() */
+	sigabort(p);
+	KERNEL_UNLOCK();
+	return 1;		/* caller must skip normal trap handling */
+}
/*
* usertrap(frame): handler for exceptions, faults, and traps from userspace
p->p_md.md_regs = frame;
refreshcreds(p);
+ if (verify_pkru(p))
+ goto out;
+
switch (type) {
case T_TSSFLT:
sig = SIGBUS;
uvmexp.syscalls++;
p = curproc;
+ if (verify_pkru(p)) {
+ userret(p);
+ return;
+ }
+
code = frame->tf_rax;
argp = &args[0];
argoff = 0;
-/* $OpenBSD: vector.S,v 1.87 2022/12/01 00:26:15 guenther Exp $ */
+/* $OpenBSD: vector.S,v 1.88 2023/01/20 16:01:04 deraadt Exp $ */
/* $NetBSD: vector.S,v 1.5 2004/06/28 09:13:11 fvdl Exp $ */
/*
movq %r12,%rax
movq %r13,%rdx
wrmsr
+ call pku_xonly
popq %rdi
popq %rsi
popq %rdx
-/* $OpenBSD: cpufunc.h,v 1.37 2022/09/22 04:57:08 robert Exp $ */
+/* $OpenBSD: cpufunc.h,v 1.38 2023/01/20 16:01:04 deraadt Exp $ */
/* $NetBSD: cpufunc.h,v 1.3 2003/05/08 10:27:43 fvdl Exp $ */
/*-
return (((uint64_t)hi << 32) | (uint64_t) lo);
}
+/*
+ * rdpkru(ecx): read the PKRU register.  ecx must be 0 per the ISA.
+ * Returns the unsigned 32-bit protection-key rights value; the %edx
+ * output is discarded (the instruction zeroes it).
+ */
+static __inline uint32_t
+rdpkru(u_int ecx)
+{
+	uint32_t edx, pkru;
+	asm volatile("rdpkru " : "=a" (pkru), "=d" (edx) : "c" (ecx));
+	return pkru;
+}
+
static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
-/* $OpenBSD: pte.h,v 1.16 2023/01/16 00:04:47 deraadt Exp $ */
+/* $OpenBSD: pte.h,v 1.17 2023/01/20 16:01:04 deraadt Exp $ */
/* $NetBSD: pte.h,v 1.1 2003/04/26 18:39:47 fvdl Exp $ */
/*
#define PGEX_PK 0x20 /* protection-key violation */
#ifdef _KERNEL
+extern pt_entry_t pg_xo; /* XO pte bits using PKU key1 */
extern pt_entry_t pg_nx; /* NX pte bit */
extern pt_entry_t pg_g_kern; /* PG_G if glbl mappings can be used in kern */
#endif /* _KERNEL */