-/* $OpenBSD: pte.h,v 1.23 2021/05/01 16:11:11 visa Exp $ */
+/* $OpenBSD: pte.h,v 1.24 2023/01/11 03:17:56 visa Exp $ */
/*
* Copyright (c) 1988 University of Utah.
#define PTE_CLEAR_SWBITS(reg) \
.set push; \
.set mips64r2; \
- /* Clear SW bits around PG_XI. */ \
- dins reg, zero, (PTE_BITS - 1), 1; \
+ /* Clear SW bits between PG_XI and PG_FRAMEBITS. */ \
dins reg, zero, PG_FRAMEBITS, (PTE_BITS - 2 - PG_FRAMEBITS); \
.set pop
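A quick sketch of the field width in the remaining dins, assuming the MIPS_PTE64 case (where PTE_BITS is 64); this is derived from the new constants and is not part of the change itself:
/*
 * With MIPS_PTE64: PG_FRAMEBITS == 60, so the dins clears
 * (PTE_BITS - 2 - PG_FRAMEBITS) == 2 bits starting at bit 60,
 * i.e. exactly PG_RO (60) and PG_WIRED (61).  Bits 62 and 63 are
 * left untouched; those are PG_XI and PG_RI, which line up with the
 * hardware execute- and read-inhibit positions of EntryLo on R2 CPUs.
 */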
#else
/* entrylo values */
#ifdef MIPS_PTE64
-#define PG_FRAMEBITS 61
+#define PG_FRAMEBITS 60
#else
-#define PG_FRAMEBITS 29
+#define PG_FRAMEBITS 28
#endif
#define PG_FRAME ((1ULL << PG_FRAMEBITS) - (1ULL << PG_SHIFT))
#define PG_SHIFT 6
/* software pte bits - not put in entrylo */
-#define PG_WIRED (1ULL << (PG_FRAMEBITS + 2))
- /* 1ULL << (PG_FRAMEBITS + 1) is PG_XI. */
+#define PG_WIRED (1ULL << (PG_FRAMEBITS + 1))
#define PG_RO (1ULL << (PG_FRAMEBITS + 0))
#ifdef CPU_MIPS64R2
-#define PG_XI (1ULL << (PTE_BITS - 2))
+#define PG_RI (1ULL << (PG_FRAMEBITS + 3))
+#define PG_XI (1ULL << (PG_FRAMEBITS + 2))
#else
+#define PG_RI 0x00000000
#define PG_XI 0x00000000
#endif
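For orientation, the PTE layout implied by the definitions above in the MIPS_PTE64 case (the low bits are the usual EntryLo fields and are not touched by this diff):
/*
 *   63     PG_RI     (PG_FRAMEBITS + 3)   hardware read-inhibit (R2)
 *   62     PG_XI     (PG_FRAMEBITS + 2)   hardware execute-inhibit (R2)
 *   61     PG_WIRED  (PG_FRAMEBITS + 1)   software only
 *   60     PG_RO     (PG_FRAMEBITS + 0)   software only
 *   59..6  PG_FRAME  physical frame number
 *   5..0   PG_CACHEMODE (CCA), PG_M, PG_V, PG_G
 */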
#define PG_CACHED (CCA_CACHED << PG_CCA_SHIFT)
#define PG_CACHEMODE (7 << PG_CCA_SHIFT)
-#define PG_ATTR (PG_CACHEMODE | PG_M | PG_V | PG_G)
-#define PG_ROPAGE (PG_V | PG_RO | PG_CACHED) /* Write protected */
-#define PG_RWPAGE (PG_V | PG_M | PG_CACHED) /* Not w-prot not clean */
-#define PG_CWPAGE (PG_V | PG_CACHED) /* Not w-prot but clean */
-#define PG_IOPAGE (PG_G | PG_V | PG_M | PG_UNCACHED)
+#define PG_PROTMASK (PG_M | PG_RO | PG_RI | PG_XI)
#define pfn_to_pad(pa) ((((paddr_t)pa) & PG_FRAME) << PG_SHIFT)
#define vad_to_pfn(va) (((va) >> PG_SHIFT) & PG_FRAME)
-/* $OpenBSD: pmap.c,v 1.122 2023/01/01 19:49:17 miod Exp $ */
+/* $OpenBSD: pmap.c,v 1.123 2023/01/11 03:17:56 visa Exp $ */
/*
* Copyright (c) 2001-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
u_int Sysmapsize; /* number of pte's in Sysmap */
const vaddr_t Sysmapbase = VM_MIN_KERNEL_ADDRESS; /* for libkvm */
+pt_entry_t protection_codes[8];
+pt_entry_t pg_ri;
pt_entry_t pg_xi;
void
}
#if defined(CPU_MIPS64R2) && !defined(CPU_LOONGSON2)
+ if (cp0_get_pagegrain() & PGRAIN_RIE)
+ pg_ri = PG_RI;
if (cp0_get_pagegrain() & PGRAIN_XIE)
pg_xi = PG_XI;
#endif
+
+ for (i = 0; i < 8; i++) {
+ if ((i & PROT_READ) == 0)
+ protection_codes[i] |= pg_ri;
+ if ((i & PROT_WRITE) == 0)
+ protection_codes[i] |= PG_RO;
+ if ((i & PROT_EXEC) == 0)
+ protection_codes[i] |= pg_xi;
+ }
}
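Spelled out, the table the loop builds looks like the following. This is an illustrative cross-check, not part of the committed code; protection_codes is indexed by the PROT_READ/PROT_WRITE/PROT_EXEC bit combination:
/* Illustrative only: per-protection PTE bits after initialization. */
KASSERT(protection_codes[PROT_NONE] == (pg_ri | PG_RO | pg_xi));
KASSERT(protection_codes[PROT_READ] == (PG_RO | pg_xi));
KASSERT(protection_codes[PROT_READ | PROT_WRITE] == pg_xi);
KASSERT(protection_codes[PROT_READ | PROT_WRITE | PROT_EXEC] == 0);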
/*
pv_entry_t pv;
int needisync = 0;
- p = PG_RO;
- if (!(prot & PROT_EXEC))
- p |= pg_xi;
+ p = protection_codes[prot];
mtx_enter(&pg->mdpage.pv_mtx);
for (pv = pg_to_pvh(pg); pv != NULL; pv = pv->pv_next) {
(entry & PG_CACHEMODE) == PG_CACHED)
Mips_HitSyncDCachePage(ci, pv->pv_va,
pfn_to_pad(entry));
- entry = (entry & ~(PG_M | PG_XI)) | p;
+ entry = (entry & ~PG_PROTMASK) | p;
*pte = entry;
pmap_update_kernel_page(pv->pv_va, entry);
pmap_shootdown_range(pmap_kernel(), pv->pv_va,
pfn_to_pad(entry));
if (pg_xi != 0 && (entry & pg_xi) == 0)
needisync = 1;
- entry = (entry & ~(PG_M | PG_XI)) | p;
+ entry = (entry & ~PG_PROTMASK) | p;
*pte = entry;
pmap_update_user_page(pv->pv_pmap, pv->pv_va, entry);
if (needisync)
break;
/* copy_on_write */
+ case PROT_EXEC:
case PROT_READ:
case PROT_READ | PROT_EXEC:
pmap_page_wrprotect(pg, prot);
return;
}
- p = (prot & PROT_WRITE) ? PG_M : PG_RO;
- if (!(prot & PROT_EXEC))
- p |= pg_xi;
+ p = protection_codes[prot];
+ if (prot & PROT_WRITE)
+ p |= PG_M;
pmap_lock(pmap);
if ((entry & PG_CACHEMODE) == PG_CACHED)
Mips_HitSyncDCachePage(ci, va,
pfn_to_pad(entry));
- entry = (entry & ~(PG_M | PG_RO | PG_XI)) | p;
+ entry = (entry & ~PG_PROTMASK) | p;
*pte = entry;
/*
* Update the TLB if the given address is in the cache.
}
if (pg_xi != 0 && (entry & pg_xi) == 0)
needisync = 1;
- entry = (entry & ~(PG_M | PG_RO | PG_XI)) | p;
+ entry = (entry & ~PG_PROTMASK) | p;
*pte = entry;
pmap_update_user_page(pmap, va, entry);
if (needisync)
atomic_inc_long(&pmap->pm_stats.wired_count);
}
+ npte = protection_codes[prot] | PG_V;
+
if (pg != NULL) {
mtx_enter(&pg->mdpage.pv_mtx);
else if (flags & PROT_MASK)
atomic_setbits_int(&pg->pg_flags, PGF_ATTR_REF);
- if (!(prot & PROT_WRITE)) {
- npte = PG_ROPAGE;
- } else {
+ if (prot & PROT_WRITE) {
if (pmap == pmap_kernel()) {
/*
* Don't bother to trap on kernel writes,
* just record page as dirty.
*/
- npte = PG_RWPAGE;
- } else {
- if (pg->pg_flags & PGF_ATTR_MOD) {
- npte = PG_RWPAGE;
- } else {
- npte = PG_CWPAGE;
- }
+ npte |= PG_M;
+ } else if (pg->pg_flags & PGF_ATTR_MOD) {
+ npte |= PG_M;
}
}
- if (flags & PMAP_NOCACHE) {
- npte &= ~PG_CACHED;
+
+ if (flags & PMAP_NOCACHE)
npte |= PG_UNCACHED;
- }
+ else
+ npte |= PG_CACHED;
stat_count(enter_stats.managed);
} else {
* then it must be device memory which may be volatile.
*/
stat_count(enter_stats.unmanaged);
- if (prot & PROT_WRITE) {
- npte = PG_IOPAGE & ~PG_G;
- } else {
- npte = (PG_IOPAGE | PG_RO) & ~(PG_G | PG_M);
- }
+ npte |= PG_UNCACHED;
+ if (prot & PROT_WRITE)
+ npte |= PG_M;
}
- if (!(prot & PROT_EXEC))
- npte |= pg_xi;
-
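As a worked example of the new construction in pmap_enter(): a managed, cacheable page entered PROT_READ | PROT_WRITE by a user pmap, with PGF_ATTR_MOD still clear, ends up with the same bits the old PG_CWPAGE path produced:
/*
 *   npte = protection_codes[PROT_READ | PROT_WRITE] | PG_V | PG_CACHED
 *        = pg_xi | PG_V | PG_CACHED
 *
 * PG_M stays clear, so the first write still faults and lets the pmap
 * mark the page modified, as before.
 */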
if (pmap == pmap_kernel()) {
/*
* Enter kernel space mapping.
panic("pmap_kenter_pa: kva %p", (void *)va);
#endif
- npte = vad_to_pfn(pa) | PG_G | PG_WIRED;
+ npte = vad_to_pfn(pa) | protection_codes[prot] |
+ PG_V | PG_G | PG_CACHED | PG_WIRED;
if (prot & PROT_WRITE)
- npte |= PG_RWPAGE;
- else
- npte |= PG_ROPAGE;
- if (!(prot & PROT_EXEC))
- npte |= pg_xi;
+ npte |= PG_M;
pte = kvtopte(va);
if ((*pte & PG_V) == 0) {
atomic_inc_long(&pmap_kernel()->pm_stats.resident_count);
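Likewise for pmap_kenter_pa(): a read-only kernel wired mapping works out to the same bits the old PG_ROPAGE path produced (sketch, assuming an R2 CPU with XI enabled):
/*
 *   npte = vad_to_pfn(pa) | protection_codes[PROT_READ]
 *        | PG_V | PG_G | PG_CACHED | PG_WIRED
 *        = vad_to_pfn(pa) | PG_RO | pg_xi
 *        | PG_V | PG_G | PG_CACHED | PG_WIRED
 *
 * i.e. the old PG_ROPAGE | PG_G | PG_WIRED, plus pg_xi, which the old
 * code also set for non-executable mappings.
 */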