From 38cdd850cf57ee44b29c8547000175c2383f89d5 Mon Sep 17 00:00:00 2001 From: kettenis Date: Tue, 24 Jan 2023 16:51:05 +0000 Subject: [PATCH] Make sure pmap_page_protect() does the right thing for execute-only mappings and enforce this with a KASSERT like we do on amd64. Bring the pmap_protect() inline in line with the amd64 version. ok miod@, deraadt@ --- sys/arch/hppa/hppa/pmap.c | 39 +++++++++++++++++++++++++++++++++++- sys/arch/hppa/include/pmap.h | 24 +++++++++++----------- 2 files changed, 50 insertions(+), 13 deletions(-) diff --git a/sys/arch/hppa/hppa/pmap.c b/sys/arch/hppa/hppa/pmap.c index 7301e83a273..fcb711c2894 100644 --- a/sys/arch/hppa/hppa/pmap.c +++ b/sys/arch/hppa/hppa/pmap.c @@ -1,4 +1,4 @@ -/* $OpenBSD: pmap.c,v 1.180 2023/01/07 10:09:34 kettenis Exp $ */ +/* $OpenBSD: pmap.c,v 1.181 2023/01/24 16:51:05 kettenis Exp $ */ /* * Copyright (c) 1998-2004 Michael Shalayeff @@ -894,6 +894,43 @@ pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva) pmap_unlock(pmap); } +void +pmap_page_write_protect(struct vm_page *pg) +{ + struct pv_entry *pve; + int attrs; + + DPRINTF(PDB_FOLLOW|PDB_BITS, ("pmap_page_write_protect(%p)\n", pg)); + + attrs = 0; + mtx_enter(&pg->mdpage.pvh_mtx); + for (pve = pg->mdpage.pvh_list; pve; pve = pve->pv_next) { + struct pmap *pmap = pve->pv_pmap; + vaddr_t va = pve->pv_va; + volatile pt_entry_t *pde; + pt_entry_t opte, pte; + + if ((pde = pmap_pde_get(pmap->pm_pdir, va))) { + opte = pte = pmap_pte_get(pde, va); + if (pte & TLB_GATEWAY) + continue; + pte &= ~TLB_WRITE; + attrs |= pmap_pvh_attrs(pte); + + if (opte != pte) { + pmap_pte_flush(pmap, va, opte); + pmap_pte_set(pde, va, pte); + } + } + } + mtx_leave(&pg->mdpage.pvh_mtx); + if (attrs != (PG_PMAP_REF | PG_PMAP_MOD)) + atomic_clearbits_int(&pg->pg_flags, + attrs ^(PG_PMAP_REF | PG_PMAP_MOD)); + if (attrs != 0) + atomic_setbits_int(&pg->pg_flags, attrs); +} + void pmap_write_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot) { diff --git 
a/sys/arch/hppa/include/pmap.h b/sys/arch/hppa/include/pmap.h index cfcea8f9aad..be38973a76c 100644 --- a/sys/arch/hppa/include/pmap.h +++ b/sys/arch/hppa/include/pmap.h @@ -1,4 +1,4 @@ -/* $OpenBSD: pmap.h,v 1.53 2023/01/01 19:49:17 miod Exp $ */ +/* $OpenBSD: pmap.h,v 1.54 2023/01/24 16:51:06 kettenis Exp $ */ /* * Copyright (c) 2002-2004 Michael Shalayeff @@ -33,6 +33,7 @@ #include <machine/pte.h> #ifdef _KERNEL +#include <sys/systm.h> #include <uvm/uvm_object.h> struct pmap { @@ -117,6 +118,7 @@ struct vm_page *pmap_unmap_direct(vaddr_t); void pmap_bootstrap(vaddr_t); boolean_t pmap_changebit(struct vm_page *, pt_entry_t, pt_entry_t); boolean_t pmap_testbit(struct vm_page *, int); +void pmap_page_write_protect(struct vm_page *); void pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t); void pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva); void pmap_page_remove(struct vm_page *pg); @@ -131,23 +133,21 @@ pmap_prot(struct pmap *pmap, int prot) static __inline void pmap_page_protect(struct vm_page *pg, vm_prot_t prot) { - if ((prot & PROT_WRITE) == 0) { - if (prot & (PROT_READ | PROT_EXEC)) - pmap_changebit(pg, 0, PTE_PROT(TLB_WRITE)); - else - pmap_page_remove(pg); + if (prot == PROT_READ) { + pmap_page_write_protect(pg); + } else { + KASSERT(prot == PROT_NONE); + pmap_page_remove(pg); } } static __inline void pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot) { - if ((prot & PROT_WRITE) == 0) { - if (prot & (PROT_READ | PROT_EXEC)) - pmap_write_protect(pmap, sva, eva, prot); - else - pmap_remove(pmap, sva, eva); - } + if (prot != PROT_NONE) + pmap_write_protect(pmap, sva, eva, prot); + else + pmap_remove(pmap, sva, eva); } #endif /* _KERNEL */ -- 2.20.1