From: kettenis Date: Mon, 3 May 2021 11:35:29 +0000 (+0000) Subject: Use the EFI memory map (if available) to determine available physical X-Git-Url: http://artulab.com/gitweb/?a=commitdiff_plain;h=06b6ba58f7173c22307394c527598dc5c061897d;p=openbsd Use the EFI memory map (if available) to determine available physical memory. This brings the code closer to arm64 but some key differences remain. The most notable difference is that the riscv64 currently uses its own private direct map of physical memory. Therefore it needs to know the RAM address range which we derive from the /memory node in the FDT. The code also needs to work around some bugs/flaws in the firmware: * Newer OpenSBI versions no longer add a "no-map" property to the reserved memory block that covers the memory used by OpenSBI itself. This makes it appear as EfiBootServicesData in the EFI memory map, which means it is available for general use. * The OpenSBI shipped with the beaglev prototype boards doesn't reserve the memory used by OpenSBI at all. The workaround for the first issue is to remove all reserved memory blocks specified in the FDT. In its current implementation this may remove too much memory on certain boards. The workaround for the second issue is to remove 2MB before the memory where the kernel lives. This workaround is fragile since it relies on a specific memory layout. Hopefully the beaglev firmware gets fixed and we can remove this hack. 
ok jsg@ --- diff --git a/sys/arch/riscv64/include/pmap.h b/sys/arch/riscv64/include/pmap.h index 5a7f106fbd7..6208f387203 100644 --- a/sys/arch/riscv64/include/pmap.h +++ b/sys/arch/riscv64/include/pmap.h @@ -93,8 +93,8 @@ extern struct pmap kernel_pmap_; vaddr_t pmap_bootstrap(long kvo, paddr_t lpt1, vaddr_t kernelstart, vaddr_t kernelend, - paddr_t fdt_start, paddr_t fdt_end, - paddr_t ram_start, paddr_t ram_end); + paddr_t memstart, paddr_t memend, + paddr_t ramstart, paddr_t ramend); void pmap_kenter_cache(vaddr_t va, paddr_t pa, vm_prot_t prot, int cacheable); void pmap_page_ro(pmap_t pm, vaddr_t va, vm_prot_t prot); diff --git a/sys/arch/riscv64/riscv64/machdep.c b/sys/arch/riscv64/riscv64/machdep.c index 24f3e030a19..f33a3526948 100644 --- a/sys/arch/riscv64/riscv64/machdep.c +++ b/sys/arch/riscv64/riscv64/machdep.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2014 Patrick Wildt + * Copyright (c) 2021 Mark Kettenis * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -58,8 +59,9 @@ extern vaddr_t virtual_avail; extern uint64_t esym; -char *boot_args = NULL; +extern char _start[]; +char *boot_args = NULL; uint8_t *bootmac = NULL; int stdout_node; @@ -81,7 +83,10 @@ paddr_t msgbufphys; struct user *proc0paddr; struct uvm_constraint_range dma_constraint = { 0x0, (paddr_t)-1 }; -struct uvm_constraint_range *uvm_md_constraints[] = { NULL }; +struct uvm_constraint_range *uvm_md_constraints[] = { + &dma_constraint, + NULL, +}; /* the following is used externally (sysctl_hw) */ char machine[] = MACHINE; /* from */ @@ -92,6 +97,12 @@ uint32_t boot_hart; /* The hart we booted on. 
*/ struct cpu_info cpu_info_primary; struct cpu_info *cpu_info[MAXCPUS] = { &cpu_info_primary }; +struct fdt_reg memreg[VM_PHYSSEG_MAX]; +int nmemreg; + +void memreg_add(const struct fdt_reg *); +void memreg_remove(const struct fdt_reg *); + static int atoi(const char *s) { @@ -508,21 +519,31 @@ uint32_t mmap_size; uint32_t mmap_desc_size; uint32_t mmap_desc_ver; +EFI_MEMORY_DESCRIPTOR *mmap; + void collect_kernel_args(const char *); void process_kernel_args(void); +int pmap_bootstrap_bs_map(bus_space_tag_t, bus_addr_t, + bus_size_t, int, bus_space_handle_t *); + void initriscv(struct riscv_bootparams *rbp) { - vaddr_t vstart, vend; - long kvo = rbp->kern_delta; //should be PA - VA + long kernbase = (long)_start & ~PAGE_MASK; + long kvo = rbp->kern_delta; paddr_t memstart, memend; - - void *config = (void *) rbp->dtbp_virt; + vaddr_t vstart; + void *config = (void *)rbp->dtbp_virt; void *fdt = NULL; - + paddr_t fdt_start = (paddr_t)rbp->dtbp_phys; + size_t fdt_size; + EFI_PHYSICAL_ADDRESS system_table = 0; int (*map_func_save)(bus_space_tag_t, bus_addr_t, bus_size_t, int, bus_space_handle_t *); + paddr_t ramstart, ramend; + paddr_t start, end; + int i; /* Set the per-CPU pointer. 
*/ __asm volatile("mv tp, %0" :: "r"(&cpu_info_primary)); @@ -535,10 +556,8 @@ initriscv(struct riscv_bootparams *rbp) // Initialize the Flattened Device Tree if (!fdt_init(config) || fdt_get_size(config) == 0) panic("initriscv: no FDT"); + fdt_size = fdt_get_size(config); - size_t fdt_size = fdt_get_size(config); - paddr_t fdt_start = (paddr_t) rbp->dtbp_phys; - paddr_t fdt_end = fdt_start + fdt_size; struct fdt_reg reg; void *node; @@ -582,7 +601,6 @@ initriscv(struct riscv_bootparams *rbp) if (len > 0) explicit_bzero(prop, len); -#if 0 //CMPE: yet not using these properties len = fdt_node_property(node, "openbsd,uefi-mmap-start", &prop); if (len == sizeof(mmap_start)) mmap_start = bemtoh64((uint64_t *)prop); @@ -599,7 +617,12 @@ initriscv(struct riscv_bootparams *rbp) len = fdt_node_property(node, "openbsd,uefi-system-table", &prop); if (len == sizeof(system_table)) system_table = bemtoh64((uint64_t *)prop); -#endif + + len = fdt_node_property(node, "openbsd,dma-constraint", &prop); + if (len == sizeof(dma_constraint)) { + dma_constraint.ucr_low = bemtoh64((uint64_t *)prop); + dma_constraint.ucr_high = bemtoh64((uint64_t *)prop + 1); + } } sbi_init(); @@ -607,27 +630,13 @@ initriscv(struct riscv_bootparams *rbp) process_kernel_args(); - void _start(void); - long kernbase = (long)&_start & ~(PAGE_SIZE-1); // page aligned - -#if 0 // Below we set memstart / memend based on entire physical address - // range based on information sourced from FDT. - /* The bootloader has loaded us into a 64MB block. */ - memstart = KERNBASE + kvo; //va + (pa - va) ==> pa - memend = memstart + 64 * 1024 * 1024; //XXX CMPE: size also 64M?? -#endif - + /* + * Determine physical RAM address range from FDT. 
+ */ node = fdt_find_node("/memory"); if (node == NULL) panic("%s: no memory specified", __func__); - - paddr_t start, end; - int i; - - // Assume that the kernel was loaded at valid physical memory location - // Scan the FDT to identify the full physical address range for machine - // XXX Save physical memory segments to later allocate to UVM? - memstart = memend = kernbase + kvo; + ramstart = (paddr_t)-1, ramend = 0; for (i = 0; i < VM_PHYSSEG_MAX; i++) { if (fdt_get_reg(node, i, ®)) break; @@ -637,27 +646,29 @@ initriscv(struct riscv_bootparams *rbp) start = reg.addr; end = reg.addr + reg.size; - if (start < memstart) - memstart = start; - if (end > memend) - memend = end; + if (start < ramstart) + ramstart = start; + if (end > ramend) + ramend = end; + + physmem += atop(ramend - ramstart); } - // XXX At this point, OpenBSD/arm64 would have set memstart / memend - // to the range mapped by the bootloader (KERNBASE - KERNBASE + 64MiB). - // Instead, we have mapped memstart / memend to the full physical - // address range. What implications might this have? + /* The bootloader has loaded us into a 64MB block. */ + memstart = KERNBASE + kvo; + memend = memstart + 64 * 1024 * 1024; + + /* XXX */ + kernbase = KERNBASE; /* Bootstrap enough of pmap to enter the kernel proper. */ vstart = pmap_bootstrap(kvo, rbp->kern_l1pt, - kernbase, esym, fdt_start, fdt_end, memstart, memend); + kernbase, esym, memstart, memend, ramstart, ramend); proc0paddr = (struct user *)rbp->kern_stack; msgbufaddr = (caddr_t)vstart; msgbufphys = pmap_steal_avail(round_page(MSGBUFSIZE), PAGE_SIZE, NULL); - // XXX should map this msgbuffphys to kernel pmap?? 
- vstart += round_page(MSGBUFSIZE); zero_page = vstart; @@ -674,44 +685,45 @@ initriscv(struct riscv_bootparams *rbp) vaddr_t va; pa = pmap_steal_avail(size, PAGE_SIZE, NULL); - memcpy((void *) PHYS_TO_DMAP(pa), - (void *) PHYS_TO_DMAP(fdt_start), size); + memcpy((void *)PHYS_TO_DMAP(pa), + (void *)PHYS_TO_DMAP(fdt_start), size); for (va = vstart, csize = size; csize > 0; csize -= PAGE_SIZE, va += PAGE_SIZE, pa += PAGE_SIZE) - pmap_kenter_cache(va, pa, PROT_READ, PMAP_CACHE_WB); + pmap_kenter_pa(va, pa, PROT_READ); fdt = (void *)vstart; vstart += size; } - /* - * Managed KVM space is what we have claimed up to end of - * mapped kernel buffers. - */ - { - // export back to pmap - extern vaddr_t virtual_avail, virtual_end; - virtual_avail = vstart; - vend = VM_MAX_KERNEL_ADDRESS; // XXX - virtual_end = vend; + /* Relocate the EFI memory map too. */ + if (mmap_start != 0) { + uint32_t csize, size = round_page(mmap_size); + paddr_t pa; + vaddr_t va; + + pa = pmap_steal_avail(size, PAGE_SIZE, NULL); + memcpy((void *)PHYS_TO_DMAP(pa), + (void *)PHYS_TO_DMAP(mmap_start), size); + for (va = vstart, csize = size; csize > 0; + csize -= PAGE_SIZE, va += PAGE_SIZE, pa += PAGE_SIZE) + pmap_kenter_pa(va, pa, PROT_READ | PROT_WRITE); + + mmap = (void *)vstart; + vstart += size; } + /* No more KVA stealing after this point. */ + virtual_avail = vstart; + /* Now we can reinit the FDT, using the virtual address. */ if (fdt) fdt_init(fdt); - int pmap_bootstrap_bs_map(bus_space_tag_t t, bus_addr_t bpa, - bus_size_t size, int flags, bus_space_handle_t *bshp); - map_func_save = riscv64_bs_tag._space_map; riscv64_bs_tag._space_map = pmap_bootstrap_bs_map; consinit(); -#ifdef DEBUG_AUTOCONF - fdt_print_tree(); -#endif - riscv64_bs_tag._space_map = map_func_save; pmap_avail_fixup(); @@ -722,18 +734,16 @@ initriscv(struct riscv_bootparams *rbp) /* Make what's left of the initial 64MB block available to UVM. */ pmap_physload_avail(); -#if 0 /* Make all other physical memory available to UVM. 
*/ if (mmap && mmap_desc_ver == EFI_MEMORY_DESCRIPTOR_VERSION) { EFI_MEMORY_DESCRIPTOR *desc = mmap; - int i; /* * Load all memory marked as EfiConventionalMemory, * EfiBootServicesCode or EfiBootServicesData. * Don't bother with blocks smaller than 64KB. The * initial 64MB memory block should be marked as - * EfiLoaderData so it won't be added again here. + * EfiLoaderData so it won't be added here. */ for (i = 0; i < mmap_size / mmap_desc_size; i++) { printf("type 0x%x pa 0x%llx va 0x%llx pages 0x%llx attr 0x%llx\n", @@ -744,58 +754,52 @@ initriscv(struct riscv_bootparams *rbp) desc->Type == EfiBootServicesCode || desc->Type == EfiBootServicesData) && desc->NumberOfPages >= 16) { - uvm_page_physload(atop(desc->PhysicalStart), - atop(desc->PhysicalStart) + - desc->NumberOfPages, - atop(desc->PhysicalStart), - atop(desc->PhysicalStart) + - desc->NumberOfPages, 0); - physmem += desc->NumberOfPages; + reg.addr = desc->PhysicalStart; + reg.size = ptoa(desc->NumberOfPages); + memreg_add(®); } desc = NextMemoryDescriptor(desc, mmap_desc_size); } } else { - paddr_t start, end; - int i; - node = fdt_find_node("/memory"); if (node == NULL) panic("%s: no memory specified", __func__); - for (i = 0; i < VM_PHYSSEG_MAX; i++) { + for (i = 0; nmemreg < nitems(memreg); i++) { if (fdt_get_reg(node, i, ®)) break; if (reg.size == 0) continue; + memreg_add(®); + } + } - start = reg.addr; - end = MIN(reg.addr + reg.size, (paddr_t)-PAGE_SIZE); - - /* - * The initial 64MB block is not excluded, so we need - * to make sure we don't add it here. 
- */ - if (start < memend && end > memstart) { - if (start < memstart) { - uvm_page_physload(atop(start), - atop(memstart), atop(start), - atop(memstart), 0); - physmem += atop(memstart - start); - } - if (end > memend) { - uvm_page_physload(atop(memend), - atop(end), atop(memend), - atop(end), 0); - physmem += atop(end - memend); - } - } else { - uvm_page_physload(atop(start), atop(end), - atop(start), atop(end), 0); - physmem += atop(end - start); - } + /* Remove reserved memory. */ + node = fdt_find_node("/reserved-memory"); + if (node) { + for (node = fdt_child_node(node); node; + node = fdt_next_node(node)) { + if (fdt_get_reg(node, 0, ®)) + continue; + if (reg.size == 0) + continue; + memreg_remove(®); } } -#endif + + /* Remove the initial 64MB block. */ + reg.addr = memstart; + reg.size = memend - memstart; + memreg_remove(®); + + for (i = 0; i < nmemreg; i++) { + paddr_t start = memreg[i].addr; + paddr_t end = start + memreg[i].size; + + uvm_page_physload(atop(start), atop(end), + atop(start), atop(end), 0); + } + /* * Make sure that we have enough KVA to initialize UVM. 
In * particular, we need enough KVA to be able to allocate the @@ -894,10 +898,61 @@ pmap_bootstrap_bs_map(bus_space_tag_t t, bus_addr_t bpa, bus_size_t size, *bshp = (bus_space_handle_t)(va + (bpa - startpa)); for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) - pmap_kenter_cache(va, pa, PROT_READ | PROT_WRITE, - PMAP_CACHE_DEV); + pmap_kenter_pa(va, pa, PROT_READ | PROT_WRITE); virtual_avail = va; return 0; } + +void +memreg_add(const struct fdt_reg *reg) +{ + if (nmemreg >= nitems(memreg)) + return; + + memreg[nmemreg++] = *reg; +} + +void +memreg_remove(const struct fdt_reg *reg) +{ + uint64_t start = reg->addr; + uint64_t end = reg->addr + reg->size; + int i, j; + + for (i = 0; i < nmemreg; i++) { + uint64_t memstart = memreg[i].addr; + uint64_t memend = memreg[i].addr + memreg[i].size; + + if (end <= memstart) + continue; + if (start >= memend) + continue; + + if (start <= memstart) + memstart = MIN(end, memend); + if (end >= memend) + memend = MAX(start, memstart); + + if (start > memstart && end < memend) { + if (nmemreg < nitems(memreg)) { + memreg[nmemreg].addr = end; + memreg[nmemreg].size = memend - end; + nmemreg++; + } + memend = start; + } + memreg[i].addr = memstart; + memreg[i].size = memend - memstart; + } + + /* Remove empty slots. 
*/ + for (i = nmemreg - 1; i >= 0; i--) { + if (memreg[i].size == 0) { + for (j = i; (j + 1) < nmemreg; j++) + memreg[j] = memreg[j + 1]; + nmemreg--; + } + } +} diff --git a/sys/arch/riscv64/riscv64/pmap.c b/sys/arch/riscv64/riscv64/pmap.c index 1b1ed7b41a2..2d462025f78 100644 --- a/sys/arch/riscv64/riscv64/pmap.c +++ b/sys/arch/riscv64/riscv64/pmap.c @@ -936,7 +936,7 @@ pmap_vp_destroy(pmap_t pm) return; } -vaddr_t virtual_avail, virtual_end; +vaddr_t virtual_avail; int pmap_virtual_space_called; static inline pt_entry_t @@ -1112,7 +1112,7 @@ fail: return pmap_maxkvaddr; } -void pmap_setup_avail(uint64_t ram_start, uint64_t ram_end, uint64_t kvo); +void pmap_setup_avail(uint64_t memstart, uint64_t memend, uint64_t kvo); /* * Initialize pmap setup. @@ -1131,7 +1131,7 @@ paddr_t dmap_phys_base; paddr_t dmap_phys_max; vaddr_t dmap_virt_max; -static void +void pmap_bootstrap_dmap(vaddr_t kern_l1, paddr_t min_pa, paddr_t max_pa) { vaddr_t va; @@ -1166,7 +1166,7 @@ pmap_bootstrap_dmap(vaddr_t kern_l1, paddr_t min_pa, paddr_t max_pa) vaddr_t pmap_bootstrap(long kvo, vaddr_t l1pt, vaddr_t kernelstart, vaddr_t kernelend, - paddr_t fdt_start, paddr_t fdt_end, paddr_t ram_start, paddr_t ram_end) + paddr_t memstart, paddr_t memend, paddr_t ramstart, paddr_t ramend) { void *va; paddr_t pa, pt1pa; @@ -1178,23 +1178,8 @@ pmap_bootstrap(long kvo, vaddr_t l1pt, vaddr_t kernelstart, vaddr_t kernelend, int i, j, k; int lb_idx2, ub_idx2; - pmap_setup_avail(ram_start, ram_end, kvo); - - /* - * in theory we could start with just the memory in the - * kernel, however this could 'allocate' the bootloader and - * bootstrap vm table, which we may need to preserve until - * later. 
- */ - printf("removing %lx-%lx\n", ram_start, kernelstart+kvo); - pmap_remove_avail(ram_start, kernelstart+kvo); - - printf("removing %lx-%lx\n", kernelstart+kvo, kernelend+kvo); - pmap_remove_avail(kernelstart+kvo, kernelend+kvo); - - // Remove the FDT physical address range as well - printf("removing %lx-%lx\n", fdt_start+kvo, fdt_end+kvo); - pmap_remove_avail(fdt_start, fdt_end); + pmap_setup_avail(memstart, memend, kvo); + pmap_remove_avail(kernelstart + kvo, kernelend + kvo); /* * KERNEL IS ASSUMED TO BE 39 bits (or less), start from L1, @@ -1204,7 +1189,7 @@ pmap_bootstrap(long kvo, vaddr_t l1pt, vaddr_t kernelstart, vaddr_t kernelend, */ // Map the entire Physical Address Space to Direct Mapped Region - pmap_bootstrap_dmap(l1pt, ram_start, ram_end); + pmap_bootstrap_dmap(l1pt, ramstart, ramend); pt1pa = pmap_steal_avail(2 * sizeof(struct pmapvp1), Lx_TABLE_ALIGN, &va); @@ -1333,7 +1318,7 @@ pmap_bootstrap(long kvo, vaddr_t l1pt, vaddr_t kernelstart, vaddr_t kernelend, // Include the Direct Map in Kernel PMAP // as gigapages, only populated the pmapvp1->l1 field, // pmap->va field is not used - pmap_bootstrap_dmap((vaddr_t) pmap_kernel()->pm_vp.l1, ram_start, ram_end); + pmap_bootstrap_dmap((vaddr_t) pmap_kernel()->pm_vp.l1, ramstart, ramend); //switching to new page table uint64_t satp = pmap_kernel()->pm_satp; @@ -1964,14 +1949,14 @@ void pmap_virtual_space(vaddr_t *start, vaddr_t *end) { *start = virtual_avail; - *end = virtual_end; + *end = VM_MAX_KERNEL_ADDRESS; /* Prevent further KVA stealing. 
*/ pmap_virtual_space_called = 1; } void -pmap_setup_avail(uint64_t ram_start, uint64_t ram_end, uint64_t kvo) +pmap_setup_avail(uint64_t memstart, uint64_t memend, uint64_t kvo) { /* This makes several assumptions * 1) kernel will be located 'low' in memory @@ -1986,12 +1971,8 @@ pmap_setup_avail(uint64_t ram_start, uint64_t ram_end, uint64_t kvo) */ pmap_avail_kvo = kvo; - pmap_avail[0].start = ram_start; - pmap_avail[0].size = ram_end-ram_start; - - /* XXX - multiple sections */ - physmem = atop(pmap_avail[0].size); - + pmap_avail[0].start = memstart; + pmap_avail[0].size = memend - memstart; pmap_cnt_avail = 1; pmap_avail_fixup();