-/* $OpenBSD: bootconfig.h,v 1.4 2021/07/02 10:42:22 kettenis Exp $ */
+/* $OpenBSD: bootconfig.h,v 1.5 2024/04/06 18:33:54 kettenis Exp $ */
/*-
* Copyright (c) 2015-2017 Ruslan Bukin <br@bsdpad.com>
vaddr_t kern_l1pt; /* L1 page table for the kernel */
paddr_t kern_phys;
vaddr_t kern_stack;
- vaddr_t dtbp_virt; /* Device tree blob virtual addr */
paddr_t dtbp_phys; /* Device tree blob physical addr */
};
-/* $OpenBSD: pmap.h,v 1.11 2024/01/23 19:51:10 kettenis Exp $ */
+/* $OpenBSD: pmap.h,v 1.12 2024/04/06 18:33:54 kettenis Exp $ */
/*
* Copyright (c) 2019-2020 Brian Bamsch <bbamsch@google.com>
vaddr_t pmap_bootstrap(long kvo, paddr_t lpt1,
vaddr_t kernelstart, vaddr_t kernelend,
- paddr_t memstart, paddr_t memend,
- paddr_t ramstart, paddr_t ramend);
+ paddr_t memstart, paddr_t memend);
void pmap_kenter_cache(vaddr_t va, paddr_t pa, vm_prot_t prot, int cacheable);
void pmap_page_ro(pmap_t pm, vaddr_t va, vm_prot_t prot);
-# $OpenBSD: genassym.cf,v 1.7 2023/11/24 16:41:12 miod Exp $
+# $OpenBSD: genassym.cf,v 1.8 2024/04/06 18:33:54 kettenis Exp $
#
# Copyright (c) 2020 Brian Bamsch <bbamsch@google.com>
# All rights reserved.
member RISCV_BOOTPARAMS_KERN_L1PT kern_l1pt
member RISCV_BOOTPARAMS_KERN_PHYS kern_phys
member RISCV_BOOTPARAMS_KERN_STACK kern_stack
-member RISCV_BOOTPARAMS_DTBP_VIRT dtbp_virt
member RISCV_BOOTPARAMS_DTBP_PHYS dtbp_phys
-/* $OpenBSD: locore.S,v 1.18 2024/03/25 23:10:03 kettenis Exp $ */
+/* $OpenBSD: locore.S,v 1.19 2024/04/06 18:33:54 kettenis Exp $ */
/*-
* Copyright (c) 2015-2018 Ruslan Bukin <br@bsdpad.com>
#include <machine/riscvreg.h>
#include <machine/pte.h>
-#define VM_EARLY_DTB_ADDRESS (VM_MAX_KERNEL_ADDRESS - (2 * L2_SIZE))
-
.globl kernbase
.set kernbase, KERNBASE
addi t4, t4, 1
bltu t4, t3, 1b
- /* Create an L1 page for early devmap */
- lla s1, pagetable_l1
- lla s2, pagetable_l2_devmap /* Link to next level PN */
- srli s2, s2, PAGE_SHIFT
-
- li a5, (VM_MAX_KERNEL_ADDRESS - L2_SIZE)
- srli a5, a5, L1_SHIFT /* >> L1_SHIFT */
- andi a5, a5, 0x1ff /* & 0x1ff */
- li t4, PTE_V
- slli t5, s2, PTE_PPN0_S /* (s2 << PTE_PPN0_S) */
- or t6, t4, t5
-
- /* Store single level1 PTE entry to position */
- li a6, PTE_SIZE
- mulw a5, a5, a6
- add t0, s1, a5
- sd t6, (t0)
-
- /* Create an L2 page superpage for DTB */
- lla s1, pagetable_l2_devmap
- mv s2, a1
- srli s2, s2, PAGE_SHIFT
- /* Mask off any bits that aren't aligned */
- andi s2, s2, ~((1 << (PTE_PPN1_S - PTE_PPN0_S)) - 1)
-
- li t0, (PTE_KERN)
- slli t2, s2, PTE_PPN0_S /* << PTE_PPN0_S */
- or t0, t0, t2
-
- /* Store PTE entry to position */
- li a6, PTE_SIZE
- li a5, 510
- mulw a5, a5, a6
- add t1, s1, a5
- sd t0, (t1)
-
/* Page tables END */
/* Setup supervisor trap vector */
la t0, pagetable_l1
sd t0, RISCV_BOOTPARAMS_KERN_L1PT(sp)
sd s9, RISCV_BOOTPARAMS_KERN_PHYS(sp)
-
la t0, initstack
sd t0, RISCV_BOOTPARAMS_KERN_STACK(sp)
-
- li t0, (VM_EARLY_DTB_ADDRESS)
- /* Add offset of DTB within superpage */
- li t1, (L2_OFFSET)
- and t1, a1, t1
- add t0, t0, t1
- sd t0, RISCV_BOOTPARAMS_DTBP_VIRT(sp)
sd a1, RISCV_BOOTPARAMS_DTBP_PHYS(sp)
/* Set esym to virtual address of symbol table end */
.space PAGE_SIZE
pagetable_l2:
.space PAGE_SIZE
-pagetable_l2_devmap:
- .space PAGE_SIZE
.align 3
virt_map:
-/* $OpenBSD: machdep.c,v 1.37 2024/03/26 22:46:48 kettenis Exp $ */
+/* $OpenBSD: machdep.c,v 1.38 2024/04/06 18:33:54 kettenis Exp $ */
/*
* Copyright (c) 2014 Patrick Wildt <patrick@blueri.se>
initriscv(struct riscv_bootparams *rbp)
{
paddr_t memstart, memend;
- paddr_t ramstart, ramend;
- paddr_t start, end;
- vaddr_t vstart;
- void *config = (void *)rbp->dtbp_virt;
+ paddr_t startpa, endpa, pa;
+ vaddr_t vstart, va;
+ struct fdt_head *fh;
+ void *config = (void *)rbp->dtbp_phys;
void *fdt = NULL;
- paddr_t fdt_start = (paddr_t)rbp->dtbp_phys;
- size_t fdt_size;
struct fdt_reg reg;
- const char *s;
void *node;
EFI_PHYSICAL_ADDRESS system_table = 0;
int (*map_func_save)(bus_space_tag_t, bus_addr_t, bus_size_t, int,
/* Set the per-CPU pointer. */
__asm volatile("mv tp, %0" :: "r"(&cpu_info_primary));
- if (!fdt_init(config) || fdt_get_size(config) == 0)
- panic("initriscv: no FDT");
- fdt_size = fdt_get_size(config);
+ sbi_init();
+
+ /* The bootloader has loaded us into a 64MB block. */
+ memstart = rbp->kern_phys;
+ memend = memstart + 64 * 1024 * 1024;
+
+ /* Bootstrap enough of pmap to enter the kernel proper. */
+ vstart = pmap_bootstrap(rbp->kern_phys - KERNBASE, rbp->kern_l1pt,
+ KERNBASE, esym, memstart, memend);
+
+ /* Map the FDT header to determine its size. */
+ va = vstart;
+ startpa = trunc_page((paddr_t)config);
+ endpa = round_page((paddr_t)config + sizeof(struct fdt_head));
+ for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE)
+ pmap_kenter_cache(va, pa, PROT_READ | PROT_WRITE, PMAP_CACHE_WB);
+ fh = (void *)(vstart + ((paddr_t)config - startpa));
+ if (betoh32(fh->fh_magic) != FDT_MAGIC || betoh32(fh->fh_size) == 0)
+ panic("%s: no FDT", __func__);
+
+ /* Map the remainder of the FDT. */
+ endpa = round_page((paddr_t)config + betoh32(fh->fh_size));
+ for (; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE)
+ pmap_kenter_cache(va, pa, PROT_READ | PROT_WRITE, PMAP_CACHE_WB);
+ config = (void *)(vstart + ((paddr_t)config - startpa));
+ vstart = va;
+
+ if (!fdt_init(config))
+ panic("%s: corrupt FDT", __func__);
node = fdt_find_node("/cpus");
if (node != NULL) {
}
}
- sbi_init();
-
process_kernel_args();
- /*
- * Determine physical RAM address range from the /memory nodes
- * in the FDT. There can be multiple nodes and each node can
- * contain multiple ranges.
- */
- node = fdt_find_node("/memory");
- if (node == NULL)
- panic("%s: no memory specified", __func__);
- ramstart = (paddr_t)-1, ramend = 0;
- while (node) {
- s = fdt_node_name(node);
- if (strncmp(s, "memory", 6) == 0 &&
- (s[6] == '\0' || s[6] == '@')) {
- for (i = 0; i < VM_PHYSSEG_MAX; i++) {
- if (fdt_get_reg(node, i, &reg))
- break;
- if (reg.size == 0)
- continue;
-
- start = reg.addr;
- end = reg.addr + reg.size;
-
- if (start < ramstart)
- ramstart = start;
- if (end > ramend)
- ramend = end;
-
- physmem += atop(reg.size);
- }
- }
-
- node = fdt_next_node(node);
- }
-
- /* The bootloader has loaded us into a 64MB block. */
- memstart = rbp->kern_phys;
- memend = memstart + 64 * 1024 * 1024;
-
- /* Bootstrap enough of pmap to enter the kernel proper. */
- vstart = pmap_bootstrap(rbp->kern_phys - KERNBASE, rbp->kern_l1pt,
- KERNBASE, esym, memstart, memend, ramstart, ramend);
-
proc0paddr = (struct user *)rbp->kern_stack;
msgbufaddr = (caddr_t)vstart;
vstart += MAXCPUS * PAGE_SIZE;
/* Relocate the FDT to safe memory. */
- if (fdt_size != 0) {
- uint32_t csize, size = round_page(fdt_size);
+ if (fdt_get_size(config) != 0) {
+ uint32_t csize, size = round_page(fdt_get_size(config));
paddr_t pa;
vaddr_t va;
pa = pmap_steal_avail(size, PAGE_SIZE, NULL);
- memcpy((void *)PHYS_TO_DMAP(pa),
- (void *)PHYS_TO_DMAP(fdt_start), size);
+ memcpy((void *)PHYS_TO_DMAP(pa), config, size);
for (va = vstart, csize = size; csize > 0;
csize -= PAGE_SIZE, va += PAGE_SIZE, pa += PAGE_SIZE)
- pmap_kenter_pa(va, pa, PROT_READ);
+ pmap_kenter_cache(va, pa, PROT_READ, PMAP_CACHE_WB);
fdt = (void *)vstart;
vstart += size;
/* Relocate the EFI memory map too. */
if (mmap_start != 0) {
uint32_t csize, size = round_page(mmap_size);
- paddr_t pa;
+ paddr_t pa, startpa, endpa;
vaddr_t va;
+ startpa = trunc_page(mmap_start);
+ endpa = round_page(mmap_start + mmap_size);
+ for (pa = startpa, va = vstart; pa < endpa;
+ pa += PAGE_SIZE, va += PAGE_SIZE)
+ pmap_kenter_cache(va, pa, PROT_READ, PMAP_CACHE_WB);
pa = pmap_steal_avail(size, PAGE_SIZE, NULL);
memcpy((void *)PHYS_TO_DMAP(pa),
- (void *)PHYS_TO_DMAP(mmap_start), size);
+ (caddr_t)vstart + (mmap_start - startpa), mmap_size);
+ pmap_kremove(vstart, endpa - startpa);
+
for (va = vstart, csize = size; csize > 0;
csize -= PAGE_SIZE, va += PAGE_SIZE, pa += PAGE_SIZE)
- pmap_kenter_pa(va, pa, PROT_READ | PROT_WRITE);
+ pmap_kenter_cache(va, pa, PROT_READ | PROT_WRITE, PMAP_CACHE_WB);
mmap = (void *)vstart;
vstart += size;
atop(start), atop(end), 0);
}
+ /*
+ * Determine physical RAM size from the /memory nodes in the
+ * FDT. There can be multiple nodes and each node can contain
+ * multiple ranges.
+ */
+ node = fdt_find_node("/memory");
+ if (node == NULL)
+ panic("%s: no memory specified", __func__);
+ while (node) {
+ const char *s = fdt_node_name(node);
+ if (strncmp(s, "memory", 6) == 0 &&
+ (s[6] == '\0' || s[6] == '@')) {
+ for (i = 0; i < VM_PHYSSEG_MAX; i++) {
+ if (fdt_get_reg(node, i, &reg))
+ break;
+ if (reg.size == 0)
+ continue;
+ physmem += atop(reg.size);
+ }
+ }
+
+ node = fdt_next_node(node);
+ }
+
kmeminit_nkmempages();
/*
-/* $OpenBSD: pmap.c,v 1.39 2024/03/25 23:10:03 kettenis Exp $ */
+/* $OpenBSD: pmap.c,v 1.40 2024/04/06 18:33:54 kettenis Exp $ */
/*
* Copyright (c) 2019-2020 Brian Bamsch <bbamsch@google.com>
/* Fill kernel PTEs. */
kvp1 = pmap_kernel()->pm_vp.l1;
memcpy(&vp1->l1[L1_KERN_BASE], &kvp1->l1[L1_KERN_BASE],
- L1_KERN_ENTRIES * sizeof(pt_entry_t));
+ L1_KERN_ENTRIES * sizeof(pt_entry_t));
memcpy(&vp1->vp[L1_KERN_BASE], &kvp1->vp[L1_KERN_BASE],
- L1_KERN_ENTRIES * sizeof(struct pmapvp2 *));
-
- /* Fill DMAP PTEs. */
- memcpy(&vp1->l1[L1_DMAP_BASE], &kvp1->l1[L1_DMAP_BASE],
- L1_DMAP_ENTRIES * sizeof(pt_entry_t));
- memcpy(&vp1->vp[L1_DMAP_BASE], &kvp1->vp[L1_DMAP_BASE],
- L1_DMAP_ENTRIES * sizeof(struct pmapvp2 *));
+ L1_KERN_ENTRIES * sizeof(struct pmapvp2 *));
pmap_extract(pmap_kernel(), l1va, (paddr_t *)&l1pa);
pm->pm_satp |= SATP_FORMAT_PPN(PPN(l1pa));
vaddr_t
pmap_bootstrap(long kvo, vaddr_t l1pt, vaddr_t kernelstart, vaddr_t kernelend,
- paddr_t memstart, paddr_t memend, paddr_t ramstart, paddr_t ramend)
+ paddr_t memstart, paddr_t memend)
{
void *va;
paddr_t pa, pt1pa;
int lb_idx2, ub_idx2;
uint64_t marchid, mimpid;
uint32_t mvendorid;
- void *node;
mvendorid = sbi_get_mvendorid();
marchid = sbi_get_marchid();
pmap_io = PTE_THEAD_SO | PTE_THEAD_SH;
}
- node = fdt_find_node("/");
- if (fdt_is_compatible(node, "starfive,jh7100")) {
- pmap_cached_start = 0x0080000000ULL;
- pmap_cached_end = 0x087fffffffULL;
- pmap_uncached_start = 0x1000000000ULL;
- pmap_uncached_end = 0x17ffffffffULL;
- }
-
pmap_setup_avail(memstart, memend, kvo);
pmap_remove_avail(kernelstart + kvo, kernelend + kvo);
* via physical pointers
*/
- // Map the entire Physical Address Space to Direct Mapped Region
- pmap_bootstrap_dmap(l1pt, ramstart, ramend);
+ /* Map the initial 64MB block to the Direct Mapped Region. */
+ pmap_bootstrap_dmap(l1pt, memstart, memend);
pt1pa = pmap_steal_avail(2 * sizeof(struct pmapvp1), Lx_TABLE_ALIGN,
&va);
mappings_allocated++;
pa = pmap_steal_avail(sizeof(struct pmapvp2), Lx_TABLE_ALIGN,
&va);
- vp2 = (struct pmapvp2 *) PHYS_TO_DMAP(pa);
+ vp2 = (struct pmapvp2 *)PHYS_TO_DMAP(pa);
vp1->vp[i] = va;
vp1->l1[i] = VP_Lx(pa);
mappings_allocated++;
pa = pmap_steal_avail(sizeof(struct pmapvp3),
Lx_TABLE_ALIGN, &va);
- vp3 = (struct pmapvp3 *) PHYS_TO_DMAP(pa);
+ vp3 = (struct pmapvp3 *)PHYS_TO_DMAP(pa);
vp2->vp[j] = va;
vp2->l2[j] = VP_Lx(pa);
}
for (i = VP_IDX1(VM_MIN_KERNEL_ADDRESS);
i <= VP_IDX1(pmap_maxkvaddr - 1);
i++) {
- vp2 = (void *) PHYS_TO_DMAP((long)vp1->vp[i] + kvo);
+ vp2 = (void *)PHYS_TO_DMAP((long)vp1->vp[i] + kvo);
if (i == VP_IDX1(VM_MIN_KERNEL_ADDRESS)) {
lb_idx2 = VP_IDX2(VM_MIN_KERNEL_ADDRESS);
ub_idx2 = VP_IDX2_CNT - 1;
}
for (j = lb_idx2; j <= ub_idx2; j++) {
- vp3 = (void *) PHYS_TO_DMAP((long)vp2->vp[j] + kvo);
+ vp3 = (void *)PHYS_TO_DMAP((long)vp2->vp[j] + kvo);
for (k = 0; k <= VP_IDX3_CNT - 1; k++) {
pted_allocated++;
}
}
- /* now that we have mapping-space for everything, lets map it */
- /* all of these mappings are ram -> kernel va */
-
-#if 0 // XXX This block does not appear to do anything useful?
- /*
- * enable mappings for existing 'allocated' mapping in the bootstrap
- * page tables
- */
- extern pt_entry_t *pagetable_l2;
- extern char _end[];
- vp2 = (void *) PHYS_TO_DMAP((long)&pagetable_l2 + kvo);
- struct mem_region *mp;
- ssize_t size;
- for (mp = pmap_allocated; mp->size != 0; mp++) {
- /* bounds may be kinda messed up */
- for (pa = mp->start, size = mp->size & ~(PAGE_SIZE-1);
- size > 0;
- pa+= L2_SIZE, size -= L2_SIZE)
- {
- paddr_t mappa = pa & ~(L2_SIZE-1);
- vaddr_t mapva = mappa - kvo;
- int prot = PROT_READ | PROT_WRITE;
-
- if (mapva < (vaddr_t)_end)
- continue;
-
- if (mapva >= (vaddr_t)__text_start &&
- mapva < (vaddr_t)_etext)
- prot = PROT_READ | PROT_EXEC;
- else if (mapva >= (vaddr_t)__rodata_start &&
- mapva < (vaddr_t)_erodata)
- prot = PROT_READ;
-
- // XXX What does ATTR_nG in arm64 mean?
- vp2->l2[VP_IDX2(mapva)] = VP_Lx(mappa) |
- ap_bits_kern[prot];
- }
- }
-#endif
-
pmap_avail_fixup();
/*
*/
vstart = pmap_map_stolen(kernelstart);
- // Include the Direct Map in Kernel PMAP
- // as gigapages, only populated the pmapvp1->l1 field,
- // pmap->va field is not used
- pmap_bootstrap_dmap((vaddr_t) pmap_kernel()->pm_vp.l1, ramstart, ramend);
+ /*
+ * Temporarily add the Direct Map Area into the kernel pmap
+ * such that we can continue to access stolen memory by
+ * physical address.
+ */
+ pmap_bootstrap_dmap((vaddr_t)pmap_kernel()->pm_vp.l1, memstart, memend);
- //switching to new page table
+ /* Switch to the new page tables. */
uint64_t satp = pmap_kernel()->pm_satp;
__asm volatile("csrw satp, %0" :: "r" (satp) : "memory");
sfence_vma();
- printf("all mapped\n");
-
curcpu()->ci_curpm = pmap_kernel();
vmmap = vstart;
void
pmap_init(void)
{
+ struct pmapvp1 *kvp1;
+ void *node;
+
+ node = fdt_find_node("/");
+ if (fdt_is_compatible(node, "starfive,jh7100")) {
+ pmap_cached_start = 0x0080000000ULL;
+ pmap_cached_end = 0x087fffffffULL;
+ pmap_uncached_start = 0x1000000000ULL;
+ pmap_uncached_end = 0x17ffffffffULL;
+ }
+
+ /* Clear DMAP PTEs. */
+ kvp1 = pmap_kernel()->pm_vp.l1;
+ memset(&kvp1->l1[L1_DMAP_BASE], 0,
+ L1_DMAP_ENTRIES * sizeof(pt_entry_t));
+ memset(&kvp1->vp[L1_DMAP_BASE], 0,
+ L1_DMAP_ENTRIES * sizeof(struct pmapvp2 *));
+ sfence_vma();
+
pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, IPL_NONE, 0,
"pmap", NULL);
pool_setlowat(&pmap_pmap_pool, 2);