-/* $OpenBSD: pmap.h,v 1.10 2023/12/13 18:26:41 jca Exp $ */
+/* $OpenBSD: pmap.h,v 1.11 2024/01/23 19:51:10 kettenis Exp $ */
/*
* Copyright (c) 2019-2020 Brian Bamsch <bbamsch@google.com>
/* cache flags */
// XXX These are duplicated from arm64 and may need some reworking
#define PMAP_CACHE_CI (PMAP_MD0) /* cache inhibit */
-#define PMAP_CACHE_WT (PMAP_MD1) /* writethru */
-#define PMAP_CACHE_WB (PMAP_MD1|PMAP_MD0) /* writeback */
+#define PMAP_CACHE_WB (PMAP_MD1) /* writeback */
#define PMAP_CACHE_DEV (PMAP_MD2) /* device mapping */
#define PMAP_CACHE_BITS (PMAP_MD0|PMAP_MD1|PMAP_MD2)
-/* $OpenBSD: pte.h,v 1.2 2021/05/12 01:20:52 jsg Exp $ */
+/* $OpenBSD: pte.h,v 1.3 2024/01/23 19:51:10 kettenis Exp $ */
/*
* Copyright (c) 2019 Brian Bamsch <bbamsch@google.com>
#define PTE_RWX (PTE_R | PTE_W | PTE_X)
#define PTE_RX (PTE_R | PTE_X)
#define PTE_KERN (PTE_V | PTE_R | PTE_W | PTE_A | PTE_D)
-#define PTE_PROMOTE (PTE_V | PTE_RWX | PTE_D | PTE_A | PTE_G | PTE_U | \
- PTE_SW_MANAGED | PTE_SW_WIRED)
+
+/* T-Head extended page attributes */
+#define PTE_THEAD_SO (1ULL << 63)
+#define PTE_THEAD_C (1ULL << 62)
+#define PTE_THEAD_B (1ULL << 61)
+#define PTE_THEAD_SH (1ULL << 60)
/* Level 0 table, 512GiB per entry */
#define L0_SHIFT 39
-/* $OpenBSD: cpu.c,v 1.16 2023/10/24 13:20:10 claudio Exp $ */
+/* $OpenBSD: cpu.c,v 1.17 2024/01/23 19:51:10 kettenis Exp $ */
/*
* Copyright (c) 2016 Dale Rahn <drahn@dalerahn.com>
#include <dev/ofw/fdt.h>
/* CPU Identification */
-
#define CPU_VENDOR_SIFIVE 0x489
+#define CPU_VENDOR_THEAD 0x5b7
+/* SiFive */
#define CPU_ARCH_U5 0x0000000000000001
#define CPU_ARCH_U7 0x8000000000000007
struct arch *archlist;
} cpu_vendors[] = {
{ CPU_VENDOR_SIFIVE, "SiFive", cpu_arch_sifive },
+ { CPU_VENDOR_THEAD, "T-Head", cpu_arch_none },
{ 0, NULL }
};
-/* $OpenBSD: machdep.c,v 1.33 2023/12/04 15:00:09 claudio Exp $ */
+/* $OpenBSD: machdep.c,v 1.34 2024/01/23 19:51:10 kettenis Exp $ */
/*
* Copyright (c) 2014 Patrick Wildt <patrick@blueri.se>
*bshp = (bus_space_handle_t)(va + (bpa - startpa));
for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE)
- pmap_kenter_pa(va, pa, PROT_READ | PROT_WRITE);
+ pmap_kenter_cache(va, pa, PROT_READ | PROT_WRITE,
+ PMAP_CACHE_DEV);
virtual_avail = va;
-/* $OpenBSD: pmap.c,v 1.37 2023/12/13 18:26:41 jca Exp $ */
+/* $OpenBSD: pmap.c,v 1.38 2024/01/23 19:51:10 kettenis Exp $ */
/*
* Copyright (c) 2019-2020 Brian Bamsch <bbamsch@google.com>
vaddr_t copy_src_page;
vaddr_t copy_dst_page;
+#define CPU_VENDOR_THEAD 0x5b7
+
struct pool pmap_pmap_pool;
struct pool pmap_pted_pool;
struct pool pmap_vp_pool;
[PROT_EXEC|PROT_WRITE|PROT_READ] = PTE_A|PTE_X|PTE_R|PTE_D|PTE_W,
};
+/* PBMT encodings for the Svpbmt modes. */
+uint64_t pmap_pma;
+uint64_t pmap_nc;
+uint64_t pmap_io;
+
/*
* This is used for pmap_kernel() mappings, they are not to be removed
* from the vp table because they were statically initialized at the
switch (cache) {
case PMAP_CACHE_WB:
break;
- case PMAP_CACHE_WT:
- break;
case PMAP_CACHE_CI:
if (pa >= pmap_cached_start && pa <= pmap_cached_end)
pa += (pmap_uncached_start - pmap_cached_start);
case PMAP_CACHE_DEV:
break;
default:
- panic("pmap_fill_pte:invalid cache mode");
+ panic("%s: invalid cache mode", __func__);
}
pted->pted_va |= cache;
/* gigapages */
pn = (pa / PAGE_SIZE);
- entry = PTE_KERN;
+ entry = PTE_KERN | pmap_pma;
entry |= (pn << PTE_PPN0_S);
- atomic_store_64(&l1[l1_slot], entry);
+ l1[l1_slot] = entry;
}
sfence_vma();
vaddr_t vstart;
int i, j, k;
int lb_idx2, ub_idx2;
+ uint64_t marchid, mimpid;
+ uint32_t mvendorid;
void *node;
+ mvendorid = sbi_get_mvendorid();
+ marchid = sbi_get_marchid();
+ mimpid = sbi_get_mimpid();
+
+ /*
+ * The T-Head cores implement a page attributes extension that
+ * violates the RISC-V privileged architecture specification.
+ * Work around this as best as we can by adding the
+ * appropriate page attributes in a way that is mostly
+ * compatible with the Svpbmt extension.
+ */
+ if (mvendorid == CPU_VENDOR_THEAD && marchid == 0 && mimpid == 0) {
+ pmap_pma = PTE_THEAD_C | PTE_THEAD_B | PTE_THEAD_SH;
+ pmap_nc = PTE_THEAD_B | PTE_THEAD_SH;
+ pmap_io = PTE_THEAD_SO | PTE_THEAD_SH;
+ }
+
node = fdt_find_node("/");
if (fdt_is_compatible(node, "starfive,jh7100")) {
pmap_cached_start = 0x0080000000ULL;
void
pmap_pte_update(struct pte_desc *pted, uint64_t *pl3)
{
- pt_entry_t pte, access_bits;
+ uint64_t pte, access_bits;
pmap_t pm = pted->pted_pmap;
+ uint64_t attr = 0;
+
+ switch (pted->pted_va & PMAP_CACHE_BITS) {
+ case PMAP_CACHE_WB:
+ attr |= pmap_pma;
+ break;
+ case PMAP_CACHE_CI:
+ attr |= pmap_nc;
+ break;
+ case PMAP_CACHE_DEV:
+ attr |= pmap_io;
+ break;
+ default:
+ panic("%s: invalid cache mode", __func__);
+ }
if (pm->pm_privileged)
access_bits = ap_bits_kern[pted->pted_pte & PROT_MASK];
else
access_bits = ap_bits_user[pted->pted_pte & PROT_MASK];
- pte = VP_Lx(pted->pted_pte) | access_bits | PTE_V;
+ pte = VP_Lx(pted->pted_pte) | attr | access_bits | PTE_V;
*pl3 = access_bits ? pte : 0;
}