Add MIPS64r2 TLB read inhibit support
author visa <visa@openbsd.org>
Wed, 11 Jan 2023 03:17:56 +0000 (03:17 +0000)
committer visa <visa@openbsd.org>
Wed, 11 Jan 2023 03:17:56 +0000 (03:17 +0000)
OK deraadt@ miod@

sys/arch/mips64/include/pte.h
sys/arch/mips64/mips64/db_machdep.c
sys/arch/mips64/mips64/pmap.c

diff --git a/sys/arch/mips64/include/pte.h b/sys/arch/mips64/include/pte.h
index d12cb2c..3bf02d2 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: pte.h,v 1.23 2021/05/01 16:11:11 visa Exp $   */
+/*     $OpenBSD: pte.h,v 1.24 2023/01/11 03:17:56 visa Exp $   */
 
 /*
  * Copyright (c) 1988 University of Utah.
@@ -83,8 +83,7 @@ typedef u_int32_t pt_entry_t;
 #define        PTE_CLEAR_SWBITS(reg)                                           \
        .set    push;                                                   \
        .set    mips64r2;                                               \
-       /* Clear SW bits around PG_XI. */                               \
-       dins    reg, zero, (PTE_BITS - 1), 1;                           \
+       /* Clear SW bits between PG_XI and PG_FRAMEBITS. */             \
        dins    reg, zero, PG_FRAMEBITS, (PTE_BITS - 2 - PG_FRAMEBITS); \
        .set    pop
 #else
@@ -108,21 +107,22 @@ typedef u_int32_t pt_entry_t;
 /* entrylo values */
 
 #ifdef MIPS_PTE64
-#define        PG_FRAMEBITS    61
+#define        PG_FRAMEBITS    60
 #else
-#define        PG_FRAMEBITS    29
+#define        PG_FRAMEBITS    28
 #endif
 #define        PG_FRAME        ((1ULL << PG_FRAMEBITS) - (1ULL << PG_SHIFT))
 #define        PG_SHIFT        6
 
 /* software pte bits - not put in entrylo */
-#define        PG_WIRED        (1ULL << (PG_FRAMEBITS + 2))
-                       /* 1ULL << (PG_FRAMEBITS + 1) is PG_XI. */
+#define        PG_WIRED        (1ULL << (PG_FRAMEBITS + 1))
 #define        PG_RO           (1ULL << (PG_FRAMEBITS + 0))
 
 #ifdef CPU_MIPS64R2
-#define        PG_XI           (1ULL << (PTE_BITS - 2))
+#define        PG_RI           (1ULL << (PG_FRAMEBITS + 3))
+#define        PG_XI           (1ULL << (PG_FRAMEBITS + 2))
 #else
+#define        PG_RI           0x00000000
 #define        PG_XI           0x00000000
 #endif
 
@@ -139,11 +139,7 @@ typedef u_int32_t pt_entry_t;
 #define        PG_CACHED       (CCA_CACHED << PG_CCA_SHIFT)
 #define        PG_CACHEMODE    (7 << PG_CCA_SHIFT)
 
-#define        PG_ATTR         (PG_CACHEMODE | PG_M | PG_V | PG_G)
-#define        PG_ROPAGE       (PG_V | PG_RO | PG_CACHED) /* Write protected */
-#define        PG_RWPAGE       (PG_V | PG_M | PG_CACHED)  /* Not w-prot not clean */
-#define        PG_CWPAGE       (PG_V | PG_CACHED)         /* Not w-prot but clean */
-#define        PG_IOPAGE       (PG_G | PG_V | PG_M | PG_UNCACHED)
+#define        PG_PROTMASK     (PG_M | PG_RO | PG_RI | PG_XI)
 
 #define        pfn_to_pad(pa)  ((((paddr_t)pa) & PG_FRAME) << PG_SHIFT)
 #define        vad_to_pfn(va)  (((va) >> PG_SHIFT) & PG_FRAME)
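For orientation: the pte.h hunks above move the two software bits (PG_WIRED, PG_RO) into a contiguous field directly below the MIPS64r2 hardware inhibit bits, which now occupy the top of the entry with RI above XI. As a result, a single dins in PTE_CLEAR_SWBITS strips exactly the software bits before the value is written to EntryLo. Below is a minimal userland sketch of the resulting layout for the 32-bit PTE case; the values mirror the hunks above, and with MIPS_PTE64 the same shape holds with PG_FRAMEBITS = 60 and RI/XI at bits 63/62. This is an illustration, not a drop-in for the header.

/* Sketch of the post-change PTE layout (!MIPS_PTE64 case). */
#include <stdio.h>
#include <stdint.h>

#define PTE_BITS	32
#define PG_SHIFT	6
#define PG_FRAMEBITS	28			/* was 29 before this change */

/* software bits, stripped before the value reaches EntryLo */
#define PG_WIRED	(1ULL << (PG_FRAMEBITS + 1))	/* bit 29 */
#define PG_RO		(1ULL << (PG_FRAMEBITS + 0))	/* bit 28 */

/* MIPS64r2 hardware inhibit bits, kept for EntryLo */
#define PG_RI		(1ULL << (PG_FRAMEBITS + 3))	/* bit 31 */
#define PG_XI		(1ULL << (PG_FRAMEBITS + 2))	/* bit 30 */

int
main(void)
{
	uint64_t pte = PG_RI | PG_XI | PG_WIRED | PG_RO |
	    (0x1234ULL << PG_SHIFT);
	/*
	 * What the single dins in PTE_CLEAR_SWBITS does: zero a field of
	 * (PTE_BITS - 2 - PG_FRAMEBITS) = 2 bits starting at PG_FRAMEBITS,
	 * i.e. exactly PG_RO and PG_WIRED, leaving RI and XI intact.
	 */
	uint64_t swmask = ((1ULL << (PTE_BITS - 2 - PG_FRAMEBITS)) - 1)
	    << PG_FRAMEBITS;

	printf("pte     %08llx\n", (unsigned long long)pte);
	printf("entrylo %08llx\n", (unsigned long long)(pte & ~swmask));
	return 0;
}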
diff --git a/sys/arch/mips64/mips64/db_machdep.c b/sys/arch/mips64/mips64/db_machdep.c
index 687f3a1..c2d8f03 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: db_machdep.c,v 1.59 2022/04/14 19:47:11 naddy Exp $ */
+/*     $OpenBSD: db_machdep.c,v 1.60 2023/01/11 03:17:56 visa Exp $ */
 
 /*
  * Copyright (c) 1998-2003 Opsycon AB (www.opsycon.se)
@@ -398,6 +398,7 @@ db_print_tlb(uint tlbno, uint64_t tlblo)
        if (tlblo & PG_V) {
                db_printf("%016lx ", pa);
 #ifdef CPU_MIPS64R2
+               db_printf("%c", tlblo & PG_RI ? 'R' : ' ');
                db_printf("%c", tlblo & PG_XI ? 'X' : ' ');
 #endif
                db_printf("%c", tlblo & PG_M ? 'M' : ' ');
diff --git a/sys/arch/mips64/mips64/pmap.c b/sys/arch/mips64/mips64/pmap.c
index 99f45a2..69ae0df 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: pmap.c,v 1.122 2023/01/01 19:49:17 miod Exp $ */
+/*     $OpenBSD: pmap.c,v 1.123 2023/01/11 03:17:56 visa Exp $ */
 
 /*
  * Copyright (c) 2001-2004 Opsycon AB  (www.opsycon.se / www.opsycon.com)
@@ -165,6 +165,8 @@ pt_entry_t  *Sysmap;                /* kernel pte table */
 u_int          Sysmapsize;             /* number of pte's in Sysmap */
 const vaddr_t  Sysmapbase = VM_MIN_KERNEL_ADDRESS;     /* for libkvm */
 
+pt_entry_t     protection_codes[8];
+pt_entry_t     pg_ri;
 pt_entry_t     pg_xi;
 
 void
@@ -410,9 +412,20 @@ pmap_bootstrap(void)
        }
 
 #if defined(CPU_MIPS64R2) && !defined(CPU_LOONGSON2)
+       if (cp0_get_pagegrain() & PGRAIN_RIE)
+               pg_ri = PG_RI;
        if (cp0_get_pagegrain() & PGRAIN_XIE)
                pg_xi = PG_XI;
 #endif
+
+       for (i = 0; i < 8; i++) {
+               if ((i & PROT_READ) == 0)
+                       protection_codes[i] |= pg_ri;
+               if ((i & PROT_WRITE) == 0)
+                       protection_codes[i] |= PG_RO;
+               if ((i & PROT_EXEC) == 0)
+                       protection_codes[i] |= pg_xi;
+       }
 }
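The new protection_codes[] table is indexed by the three-bit PROT_READ/PROT_WRITE/PROT_EXEC combination and precomputes the inhibit bits to OR into a PTE. On CPUs whose PageGrain register lacks RIE or XIE, pg_ri and pg_xi stay zero and the table degrades to PG_RO alone. A runnable sketch of the construction follows; PROT_* values are the standard <sys/mman.h> ones, and pg_ri/pg_xi are assumed enabled here, as if PageGrain had both RIE and XIE set.

#include <stdio.h>
#include <stdint.h>

#define PROT_READ	0x01
#define PROT_WRITE	0x02
#define PROT_EXEC	0x04

#define PG_FRAMEBITS	28			/* 32-bit PTE case */
#define PG_RO		(1ULL << (PG_FRAMEBITS + 0))
#define PG_RI		(1ULL << (PG_FRAMEBITS + 3))
#define PG_XI		(1ULL << (PG_FRAMEBITS + 2))

int
main(void)
{
	uint64_t protection_codes[8] = { 0 };
	uint64_t pg_ri = PG_RI, pg_xi = PG_XI;
	int i;

	for (i = 0; i < 8; i++) {
		if ((i & PROT_READ) == 0)
			protection_codes[i] |= pg_ri;	/* read inhibit */
		if ((i & PROT_WRITE) == 0)
			protection_codes[i] |= PG_RO;	/* software r/o */
		if ((i & PROT_EXEC) == 0)
			protection_codes[i] |= pg_xi;	/* execute inhibit */
	}

	/* execute-only: RI and RO set, XI clear */
	printf("PROT_EXEC -> %#llx\n",
	    (unsigned long long)protection_codes[PROT_EXEC]);
	return 0;
}

protection_codes[PROT_EXEC] coming out as PG_RI | PG_RO is what backs the PROT_EXEC case added to pmap_page_protect() further down: such pages become unreadable and unwritable yet remain executable.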
 
 /*
@@ -786,9 +799,7 @@ pmap_page_wrprotect(struct vm_page *pg, vm_prot_t prot)
        pv_entry_t pv;
        int needisync = 0;
 
-       p = PG_RO;
-       if (!(prot & PROT_EXEC))
-               p |= pg_xi;
+       p = protection_codes[prot];
 
        mtx_enter(&pg->mdpage.pv_mtx);
        for (pv = pg_to_pvh(pg); pv != NULL; pv = pv->pv_next) {
@@ -806,7 +817,7 @@ pmap_page_wrprotect(struct vm_page *pg, vm_prot_t prot)
                            (entry & PG_CACHEMODE) == PG_CACHED)
                                Mips_HitSyncDCachePage(ci, pv->pv_va,
                                    pfn_to_pad(entry));
-                       entry = (entry & ~(PG_M | PG_XI)) | p;
+                       entry = (entry & ~PG_PROTMASK) | p;
                        *pte = entry;
                        pmap_update_kernel_page(pv->pv_va, entry);
                        pmap_shootdown_range(pmap_kernel(), pv->pv_va,
@@ -824,7 +835,7 @@ pmap_page_wrprotect(struct vm_page *pg, vm_prot_t prot)
                                    pfn_to_pad(entry));
                        if (pg_xi != 0 && (entry & pg_xi) == 0)
                                needisync = 1;
-                       entry = (entry & ~(PG_M | PG_XI)) | p;
+                       entry = (entry & ~PG_PROTMASK) | p;
                        *pte = entry;
                        pmap_update_user_page(pv->pv_pmap, pv->pv_va, entry);
                        if (needisync)
@@ -903,6 +914,7 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
                break;
 
        /* copy_on_write */
+       case PROT_EXEC:
        case PROT_READ:
        case PROT_READ | PROT_EXEC:
                pmap_page_wrprotect(pg, prot);
@@ -936,9 +948,9 @@ pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
                return;
        }
 
-       p = (prot & PROT_WRITE) ? PG_M : PG_RO;
-       if (!(prot & PROT_EXEC))
-               p |= pg_xi;
+       p = protection_codes[prot];
+       if (prot & PROT_WRITE)
+               p |= PG_M;
 
        pmap_lock(pmap);
 
@@ -966,7 +978,7 @@ pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
                                if ((entry & PG_CACHEMODE) == PG_CACHED)
                                        Mips_HitSyncDCachePage(ci, va,
                                            pfn_to_pad(entry));
-                       entry = (entry & ~(PG_M | PG_RO | PG_XI)) | p;
+                       entry = (entry & ~PG_PROTMASK) | p;
                        *pte = entry;
                        /*
                         * Update the TLB if the given address is in the cache.
@@ -1013,7 +1025,7 @@ pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
                                }
                                if (pg_xi != 0 && (entry & pg_xi) == 0)
                                        needisync = 1;
-                               entry = (entry & ~(PG_M | PG_RO | PG_XI)) | p;
+                               entry = (entry & ~PG_PROTMASK) | p;
                                *pte = entry;
                                pmap_update_user_page(pmap, va, entry);
                                if (needisync)
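With PG_PROTMASK covering all four protection-related bits, every downgrade path above becomes the same clear-then-set step, where the call sites previously listed slightly different bit sets (pmap_page_wrprotect() omitted PG_RO from its mask, and none cleared PG_RI). A small self-contained sketch of the pattern, reusing the bit positions from the first sketch; PG_V/PG_M use the usual EntryLo V/D positions.

#include <stdio.h>
#include <stdint.h>

#define PG_V		0x02ULL
#define PG_M		0x04ULL			/* EntryLo D: dirty/writable */
#define PG_FRAMEBITS	28
#define PG_RO		(1ULL << (PG_FRAMEBITS + 0))
#define PG_RI		(1ULL << (PG_FRAMEBITS + 3))
#define PG_XI		(1ULL << (PG_FRAMEBITS + 2))
#define PG_PROTMASK	(PG_M | PG_RO | PG_RI | PG_XI)

int
main(void)
{
	uint64_t entry = PG_V | PG_M | (0x1234ULL << 6);	/* dirty, r/w/x */
	uint64_t p = PG_RO | PG_XI;	/* protection_codes[PROT_READ] */

	/* one mask clears M, RO, RI and XI in a single step */
	entry = (entry & ~PG_PROTMASK) | p;
	printf("entry %#llx\n", (unsigned long long)entry);
	return 0;
}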
@@ -1113,6 +1125,8 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
                        atomic_inc_long(&pmap->pm_stats.wired_count);
        }
 
+       npte = protection_codes[prot] | PG_V;
+
        if (pg != NULL) {
                mtx_enter(&pg->mdpage.pv_mtx);
 
@@ -1123,27 +1137,22 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
                else if (flags & PROT_MASK)
                        atomic_setbits_int(&pg->pg_flags, PGF_ATTR_REF);
 
-               if (!(prot & PROT_WRITE)) {
-                       npte = PG_ROPAGE;
-               } else {
+               if (prot & PROT_WRITE) {
                        if (pmap == pmap_kernel()) {
                                /*
                                 * Don't bother to trap on kernel writes,
                                 * just record page as dirty.
                                 */
-                               npte = PG_RWPAGE;
-                       } else {
-                               if (pg->pg_flags & PGF_ATTR_MOD) {
-                                       npte = PG_RWPAGE;
-                               } else {
-                                       npte = PG_CWPAGE;
-                               }
+                               npte |= PG_M;
+                       } else if (pg->pg_flags & PGF_ATTR_MOD) {
+                               npte |= PG_M;
                        }
                }
-               if (flags & PMAP_NOCACHE) {
-                       npte &= ~PG_CACHED;
+
+               if (flags & PMAP_NOCACHE)
                        npte |= PG_UNCACHED;
-               }
+               else
+                       npte |= PG_CACHED;
 
                stat_count(enter_stats.managed);
        } else {
@@ -1152,16 +1161,11 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
                 * then it must be device memory which may be volatile.
                 */
                stat_count(enter_stats.unmanaged);
-               if (prot & PROT_WRITE) {
-                       npte = PG_IOPAGE & ~PG_G;
-               } else {
-                       npte = (PG_IOPAGE | PG_RO) & ~(PG_G | PG_M);
-               }
+               npte |= PG_UNCACHED;
+               if (prot & PROT_WRITE)
+                       npte |= PG_M;
        }
 
-       if (!(prot & PROT_EXEC))
-               npte |= pg_xi;
-
        if (pmap == pmap_kernel()) {
                /*
                 * Enter kernel space mapping.
@@ -1268,13 +1272,10 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
                panic("pmap_kenter_pa: kva %p", (void *)va);
 #endif
 
-       npte = vad_to_pfn(pa) | PG_G | PG_WIRED;
+       npte = vad_to_pfn(pa) | protection_codes[prot] |
+           PG_V | PG_G | PG_CACHED | PG_WIRED;
        if (prot & PROT_WRITE)
-               npte |= PG_RWPAGE;
-       else
-               npte |= PG_ROPAGE;
-       if (!(prot & PROT_EXEC))
-               npte |= pg_xi;
+               npte |= PG_M;
        pte = kvtopte(va);
        if ((*pte & PG_V) == 0) {
                atomic_inc_long(&pmap_kernel()->pm_stats.resident_count);
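Taken together, pmap_enter() and pmap_kenter_pa() now start every new PTE from protection_codes[prot] | PG_V and add PG_M and a cache mode separately, instead of picking from the removed PG_ROPAGE/PG_RWPAGE/PG_CWPAGE/PG_IOPAGE composites. A compilable sketch of the managed/unmanaged composition; the EntryLo bit positions and CCA encodings (2 uncached, 3 cached) are representative values, not taken from the header, and protection_codes[] would be filled as in the bootstrap sketch.

#include <stdint.h>

#define PROT_WRITE	0x02
#define PG_V		0x02ULL
#define PG_M		0x04ULL
#define PG_CCA_SHIFT	3
#define PG_UNCACHED	(2ULL << PG_CCA_SHIFT)
#define PG_CACHED	(3ULL << PG_CCA_SHIFT)

static uint64_t protection_codes[8];	/* built at bootstrap */

uint64_t
compose_npte(int prot, int managed, int kernel, int dirty, int nocache)
{
	/* protection bits (RI/RO/XI) and validity come first, always */
	uint64_t npte = protection_codes[prot & 7] | PG_V;

	if (managed) {
		/* pre-set D only when a mod fault would be pointless */
		if ((prot & PROT_WRITE) && (kernel || dirty))
			npte |= PG_M;
		npte |= nocache ? PG_UNCACHED : PG_CACHED;
	} else {
		/* unmanaged mappings are device memory: never cached */
		npte |= PG_UNCACHED;
		if (prot & PROT_WRITE)
			npte |= PG_M;
	}
	return npte;
}

pmap_kenter_pa() follows the same recipe, plus PG_G and PG_WIRED for the global wired kernel mapping.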