From: gkoehler
Date: Tue, 31 Jan 2023 01:27:58 +0000 (+0000)
Subject: Execute-only for macppc G5
X-Git-Url: http://artulab.com/gitweb/?a=commitdiff_plain;h=9bd4dd0f969eee33c21106fbb4e2e73cb9ab1369;p=openbsd

Execute-only for macppc G5

The G5 PowerPC 970 has a Data Address Compare mechanism that can trap
loads and stores to pages with PTE_AC_64, while allowing instruction
fetches. Use this for execute-only mappings, like we do on powerpc64.

Add a check to pte_spill_v for execute-only mappings. Without this,
we would forever retry reading an execute-only page.

In altivec_assist, copyin would fail to read the instruction from an
execute-only page. Add copyinsn to bypass x-only, like sparc64.

with help from abieber@ deraadt@ kettenis@
ok deraadt@
---

diff --git a/sys/arch/powerpc/include/pmap.h b/sys/arch/powerpc/include/pmap.h
index 50a54c74db8..22a2de62b10 100644
--- a/sys/arch/powerpc/include/pmap.h
+++ b/sys/arch/powerpc/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.61 2023/01/21 19:39:28 miod Exp $ */
+/* $OpenBSD: pmap.h,v 1.62 2023/01/31 01:27:58 gkoehler Exp $ */
 /* $NetBSD: pmap.h,v 1.1 1996/09/30 16:34:29 ws Exp $ */
 
 /*-
@@ -141,6 +141,10 @@ int pmap_test_attrs(struct vm_page *, unsigned int);
 void pmap_pinit(struct pmap *);
 void pmap_release(struct pmap *);
 
+#ifdef ALTIVEC
+int pmap_copyinsn(pmap_t, vaddr_t, uint32_t *);
+#endif
+
 void pmap_real_memory(vaddr_t *start, vsize_t *size);
 
 int pte_spill_v(struct pmap *pm, u_int32_t va, u_int32_t dsisr, int exec_fault);
diff --git a/sys/arch/powerpc/include/pte.h b/sys/arch/powerpc/include/pte.h
index 62ded11f9d1..bbe87ebc21d 100644
--- a/sys/arch/powerpc/include/pte.h
+++ b/sys/arch/powerpc/include/pte.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pte.h,v 1.10 2015/01/20 18:14:39 mpi Exp $ */
+/* $OpenBSD: pte.h,v 1.11 2023/01/31 01:27:58 gkoehler Exp $ */
 /* $NetBSD: pte.h,v 1.1 1996/09/30 16:34:32 ws Exp $ */
 
 /*-
@@ -82,6 +82,7 @@ struct pte_64 {
 #define PTE_HID_64	0x0000000000000002ULL
 /* Low word: */
 #define PTE_RPGN_64	0x3ffffffffffff000ULL
+#define PTE_AC_64	0x0000000000000200ULL
 #define PTE_REF_64	0x0000000000000100ULL
 #define PTE_CHG_64	0x0000000000000080ULL
 #define PTE_WIMG_64	0x0000000000000078ULL
diff --git a/sys/arch/powerpc/powerpc/pmap.c b/sys/arch/powerpc/powerpc/pmap.c
index 66e5dc29b62..d1b7af7b329 100644
--- a/sys/arch/powerpc/powerpc/pmap.c
+++ b/sys/arch/powerpc/powerpc/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.178 2023/01/10 21:27:12 gkoehler Exp $ */
+/* $OpenBSD: pmap.c,v 1.179 2023/01/31 01:27:58 gkoehler Exp $ */
 
 /*
  * Copyright (c) 2015 Martin Pieuchot
@@ -900,6 +900,9 @@ pmap_fill_pte64(pmap_t pm, vaddr_t va, paddr_t pa, struct pte_desc *pted,
 	else
 		pte64->pte_lo |= (PTE_M_64 | PTE_I_64 | PTE_G_64);
 
+	if ((prot & (PROT_READ | PROT_WRITE)) == 0)
+		pte64->pte_lo |= PTE_AC_64;
+
 	if (prot & PROT_WRITE)
 		pte64->pte_lo |= PTE_RW_64;
 	else
@@ -1604,6 +1607,13 @@ pmap_enable_mmu(void)
 	uint32_t scratch, sdr1;
 	int i;
 
+	/*
+	 * For the PowerPC 970, ACCR = 3 inhibits loads and stores to
+	 * pages with PTE_AC_64. This is for execute-only mappings.
+	 */
+	if (ppc_proc_is_64b)
+		asm volatile ("mtspr 29, %0" :: "r" (3));
+
 	if (!ppc_nobat) {
 		extern caddr_t etext;
 
@@ -1685,6 +1695,39 @@ pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pa)
 	return TRUE;
 }
 
+#ifdef ALTIVEC
+/*
+ * Read an instruction from a given virtual memory address.
+ * Execute-only protection is bypassed.
+ */
+int
+pmap_copyinsn(pmap_t pm, vaddr_t va, uint32_t *insn)
+{
+	struct pte_desc *pted;
+	paddr_t pa;
+
+	/* Assume pm != pmap_kernel(). */
+	if (ppc_proc_is_64b) {
+		/* inline pmap_extract */
+		PMAP_VP_LOCK(pm);
+		pted = pmap_vp_lookup(pm, va);
+		if (pted == NULL || !PTED_VALID(pted)) {
+			PMAP_VP_UNLOCK(pm);
+			return EFAULT;
+		}
+		pa = (pted->p.pted_pte64.pte_lo & PTE_RPGN_64) |
+		    (va & ~PTE_RPGN_64);
+		PMAP_VP_UNLOCK(pm);
+
+		if (pa > physmaxaddr - sizeof(*insn))
+			return EFAULT;
+		*insn = *(uint32_t *)pa;
+		return 0;
+	} else
+		return copyin32((void *)va, insn);
+}
+#endif
+
 u_int32_t
 pmap_setusr(pmap_t pm, vaddr_t va)
 {
@@ -1965,6 +2008,9 @@ pmap_pted_ro64(struct pte_desc *pted, vm_prot_t prot)
 	if ((prot & PROT_EXEC) == 0)
 		pted->p.pted_pte64.pte_lo |= PTE_N_64;
 
+	if ((prot & (PROT_READ | PROT_WRITE)) == 0)
+		pted->p.pted_pte64.pte_lo |= PTE_AC_64;
+
 	PMAP_HASH_LOCK(s);
 	if ((pte = pmap_ptedinhash(pted)) != NULL) {
 		struct pte_64 *ptp64 = pte;
@@ -1977,8 +2023,7 @@
 		}
 
 		/* Add a Page Table Entry, section 7.6.3.1. */
-		ptp64->pte_lo &= ~(PTE_CHG_64|PTE_PP_64);
-		ptp64->pte_lo |= PTE_RO_64;
+		ptp64->pte_lo = pted->p.pted_pte64.pte_lo;
 		eieio();	/* Order 1st PTE update before 2nd. */
 		ptp64->pte_hi |= PTE_VALID_64;
 		sync();		/* Ensure updates completed. */
@@ -2243,6 +2288,13 @@ pte_spill_v(pmap_t pm, u_int32_t va, u_int32_t dsisr, int exec_fault)
 	struct pte_desc *pted;
 	int inserted = 0;
 
+	/*
+	 * DSISR_DABR is set if the PowerPC 970 attempted to read or
+	 * write an execute-only page.
+	 */
+	if (dsisr & DSISR_DABR)
+		return 0;
+
 	/*
 	 * If the current mapping is RO and the access was a write
 	 * we return 0
diff --git a/sys/arch/powerpc/powerpc/trap.c b/sys/arch/powerpc/powerpc/trap.c
index f79a2c03be5..5002becf570 100644
--- a/sys/arch/powerpc/powerpc/trap.c
+++ b/sys/arch/powerpc/powerpc/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.129 2023/01/16 05:32:05 deraadt Exp $ */
+/* $OpenBSD: trap.c,v 1.130 2023/01/31 01:27:58 gkoehler Exp $ */
 /* $NetBSD: trap.c,v 1.3 1996/10/13 03:31:37 christos Exp $ */
 
 /*
@@ -67,7 +67,7 @@ void trap(struct trapframe *frame);
 #define	MOREARGS(sp)	((caddr_t)((int)(sp) + 8)) /* more args go here */
 
 #ifdef ALTIVEC
-static int altivec_assist(void *);
+static int altivec_assist(struct proc *p, vaddr_t);
 
 /*
  * Save state of the vector processor, This is done lazily in the hope
@@ -518,7 +518,7 @@ brain_damage:
 	case EXC_VECAST_G4|EXC_USER:
 	case EXC_VECAST_G5|EXC_USER:
 #ifdef ALTIVEC
-		if (altivec_assist((void *)frame->srr0) == 0) {
+		if (altivec_assist(p, (vaddr_t)frame->srr0) == 0) {
 			frame->srr0 += 4;
 			break;
 		}
@@ -650,8 +650,26 @@ fix_unaligned(struct proc *p, struct trapframe *frame)
 }
 
 #ifdef ALTIVEC
+static inline int
+copyinsn(struct proc *p, vaddr_t uva, int *insn)
+{
+	struct vm_map *map = &p->p_vmspace->vm_map;
+	int error = 0;
+
+	if (__predict_false((uva & 3) != 0))
+		return EFAULT;
+
+	do {
+		if (pmap_copyinsn(map->pmap, uva, (uint32_t *)insn) == 0)
+			break;
+		error = uvm_fault(map, trunc_page(uva), 0, PROT_EXEC);
+	} while (error == 0);
+
+	return error;
+}
+
 static int
-altivec_assist(void *user_pc)
+altivec_assist(struct proc *p, vaddr_t user_pc)
 {
 	/* These labels are in vecast.S */
 	void vecast_asm(uint32_t, void *);
@@ -667,10 +685,12 @@
 	void vecast_vctsxs(void);
 
 	uint32_t insn, op, va, vc, lo;
+	int error;
 	void (*lab)(void);
 
-	if (copyin(user_pc, &insn, sizeof(insn)) != 0)
-		return -1;
+	error = copyinsn(p, user_pc, &insn);
+	if (error)
+		return error;
 	op = (insn & 0xfc000000) >> 26;	/* primary opcode */
 	va = (insn & 0x001f0000) >> 16;	/* vector A */
 	vc = (insn & 0x000007c0) >> 6;	/* vector C or extended opcode */
@@ -678,7 +698,7 @@ altivec_assist(void *user_pc) /* Stop if this isn't an altivec instruction. */ if (op != 4) - return -1; + return EINVAL; /* Decide which instruction to emulate. */ lab = NULL; @@ -727,6 +747,6 @@ altivec_assist(void *user_pc) vecast_asm(insn, lab); /* Emulate it. */ return 0; } else - return -1; + return EINVAL; } #endif
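
The execute-only semantics above can be sanity-checked from userland
with a small test like the following sketch. It is hypothetical, not
part of this commit, and assumes OpenBSD/macppc on a G5; the li/blr
encodings and the dcbst/icbi flush are the usual 32-bit PowerPC idioms
for planting code in a fresh page.

/* xonly.c - hypothetical demo: map a page x-only, then call and read it */
#include <sys/mman.h>
#include <err.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Push freshly written instructions from the dcache to the icache. */
static void
sync_icache(uint32_t *p)
{
	__asm volatile ("dcbst 0,%0; sync; icbi 0,%0; sync; isync"
	    :: "r" (p) : "memory");
}

int
main(void)
{
	long pgsz = sysconf(_SC_PAGESIZE);
	uint32_t *page;
	int (*fn)(void);

	page = mmap(NULL, pgsz, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (page == MAP_FAILED)
		err(1, "mmap");
	page[0] = 0x3860002a;	/* li r3,42 */
	page[1] = 0x4e800020;	/* blr */
	sync_icache(page);

	/* Drop read/write, keep execute: the G5 pmap sets PTE_AC_64. */
	if (mprotect(page, pgsz, PROT_EXEC) == -1)
		err(1, "mprotect");

	fn = (int (*)(void))page;
	printf("fn() = %d\n", fn());	/* fetch allowed: prints 42 */
	printf("*page = %x\n", *page);	/* load trapped by DAC: SIGSEGV */
	return 0;
}

The call works because instruction fetch ignores the Data Address
Compare; the final load takes a DSI with DSISR_DABR set, pte_spill_v
now returns 0 instead of spilling, and uvm_fault turns the read of a
PROT_EXEC-only mapping into SIGSEGV rather than retrying forever.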
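The copyinsn path can be exercised the same way. This second sketch is
also hypothetical and rests on more assumptions: a G5, a compiler that
accepts -maltivec, and that vrefp on a denormal operand takes the
AltiVec assist trap once VSCR[NJ] is cleared (the vecast_vrefp label
in vecast.S exists for that case). Because the faulting instruction
lives in an execute-only page, a plain copyin of it would fail with
EFAULT; altivec_assist now recovers the instruction word through
copyinsn, which reads via pmap_copyinsn and the physical address.

/* vecast-demo.c - hypothetical: assist-prone insn in an x-only page */
#include <sys/mman.h>
#include <altivec.h>
#include <err.h>
#include <stdint.h>
#include <unistd.h>

static void
sync_icache(uint32_t *p)
{
	__asm volatile ("dcbst 0,%0; sync; icbi 0,%0; sync; isync"
	    :: "r" (p) : "memory");
}

int
main(void)
{
	static vector float denorm = { 1.0e-40f, 0, 0, 0 };
	long pgsz = sysconf(_SC_PAGESIZE);
	uint32_t *page;
	void (*fn)(const vector float *);

	page = mmap(NULL, pgsz, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (page == MAP_FAILED)
		err(1, "mmap");
	page[0] = 0x7c0018ce;	/* lvx v0,0,r3 */
	page[1] = 0x1000010a;	/* vrefp v0,v0 */
	page[2] = 0x4e800020;	/* blr */
	sync_icache(page);

	if (mprotect(page, pgsz, PROT_EXEC) == -1)
		err(1, "mprotect");

	/* NJ = 0: denormals must be handled, possibly via the assist trap. */
	vec_mtvscr((vector unsigned int){ 0, 0, 0, 0 });

	/*
	 * vrefp on the denormal may trap to altivec_assist, which then
	 * needs copyinsn/pmap_copyinsn to read the x-only instruction.
	 */
	fn = (void (*)(const vector float *))page;
	fn(&denorm);
	return 0;
}

If the assist fires, emulation succeeds and fn simply returns; without
pmap_copyinsn, altivec_assist could not read the instruction back from
the execute-only page and the process would die on an instruction the
kernel is perfectly able to emulate.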