/*
* Copyright (c) 2014 Patrick Wildt <patrick@blueri.se>
+ * Copyright (c) 2021 Mark Kettenis <kettenis@openbsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
extern vaddr_t virtual_avail;
extern uint64_t esym;
-char *boot_args = NULL;
+extern char _start[];
+char *boot_args = NULL;
uint8_t *bootmac = NULL;
int stdout_node;
struct user *proc0paddr;
struct uvm_constraint_range dma_constraint = { 0x0, (paddr_t)-1 };
-struct uvm_constraint_range *uvm_md_constraints[] = { NULL };
+struct uvm_constraint_range *uvm_md_constraints[] = {
+ &dma_constraint,
+ NULL,
+};
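
Note: the "openbsd,dma-constraint" property consumed in initriscv() below is
a pair of big-endian 64-bit words (low and high bounds). A minimal standalone
sketch of that decode; struct range, be64() and decode_constraint() are
hypothetical stand-ins for uvm_constraint_range and the kernel's bemtoh64():

	#include <stdint.h>

	struct range { uint64_t low, high; };

	/* Assemble a big-endian 64-bit word byte by byte. */
	static uint64_t
	be64(const uint8_t *p)
	{
		uint64_t v = 0;
		int i;

		for (i = 0; i < 8; i++)
			v = (v << 8) | p[i];
		return v;
	}

	static void
	decode_constraint(const uint8_t *prop, struct range *r)
	{
		r->low = be64(prop);		/* ucr_low */
		r->high = be64(prop + 8);	/* ucr_high */
	}
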
/* the following is used externally (sysctl_hw) */
char machine[] = MACHINE; /* from <machine/param.h> */
struct cpu_info cpu_info_primary;
struct cpu_info *cpu_info[MAXCPUS] = { &cpu_info_primary };
+struct fdt_reg memreg[VM_PHYSSEG_MAX];
+int nmemreg;
+
+void memreg_add(const struct fdt_reg *);
+void memreg_remove(const struct fdt_reg *);
+
static int
atoi(const char *s)
{
uint32_t mmap_desc_size;
uint32_t mmap_desc_ver;
+EFI_MEMORY_DESCRIPTOR *mmap;
+
void collect_kernel_args(const char *);
void process_kernel_args(void);
+int pmap_bootstrap_bs_map(bus_space_tag_t, bus_addr_t,
+ bus_size_t, int, bus_space_handle_t *);
+
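
Note: the EFI memory map is an array whose element stride is the
firmware-reported mmap_desc_size, not sizeof(EFI_MEMORY_DESCRIPTOR); the
NextMemoryDescriptor() step in the loop further down relies on this. A
standalone sketch of the walk; DESC and walk() are stand-ins, assuming the
standard descriptor layout:

	#include <stddef.h>
	#include <stdint.h>

	typedef struct {
		uint32_t Type;
		uint64_t PhysicalStart;
		uint64_t VirtualStart;
		uint64_t NumberOfPages;
		uint64_t Attribute;
	} DESC;

	static void
	walk(DESC *map, size_t map_size, size_t desc_size)
	{
		DESC *desc = map;
		size_t i;

		/* Step by desc_size; firmware may append extra fields. */
		for (i = 0; i < map_size / desc_size; i++) {
			/* inspect desc->Type, desc->PhysicalStart, ... */
			desc = (DESC *)((uint8_t *)desc + desc_size);
		}
	}
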
void
initriscv(struct riscv_bootparams *rbp)
{
- vaddr_t vstart, vend;
- long kvo = rbp->kern_delta; //should be PA - VA
+ long kernbase = (long)_start & ~PAGE_MASK;
+ long kvo = rbp->kern_delta;
paddr_t memstart, memend;
-
- void *config = (void *) rbp->dtbp_virt;
+ vaddr_t vstart;
+ void *config = (void *)rbp->dtbp_virt;
void *fdt = NULL;
-
+ paddr_t fdt_start = (paddr_t)rbp->dtbp_phys;
+ size_t fdt_size;
+ EFI_PHYSICAL_ADDRESS system_table = 0;
int (*map_func_save)(bus_space_tag_t, bus_addr_t, bus_size_t, int,
bus_space_handle_t *);
+ paddr_t ramstart, ramend;
+ paddr_t start, end;
+ int i;
/* Set the per-CPU pointer. */
__asm volatile("mv tp, %0" :: "r"(&cpu_info_primary));
// Initialize the Flattened Device Tree
if (!fdt_init(config) || fdt_get_size(config) == 0)
panic("initriscv: no FDT");
+ fdt_size = fdt_get_size(config);
- size_t fdt_size = fdt_get_size(config);
- paddr_t fdt_start = (paddr_t) rbp->dtbp_phys;
- paddr_t fdt_end = fdt_start + fdt_size;
struct fdt_reg reg;
void *node;
if (len > 0)
explicit_bzero(prop, len);
-#if 0 //CMPE: yet not using these properties
len = fdt_node_property(node, "openbsd,uefi-mmap-start", &prop);
if (len == sizeof(mmap_start))
mmap_start = bemtoh64((uint64_t *)prop);
len = fdt_node_property(node, "openbsd,uefi-system-table", &prop);
if (len == sizeof(system_table))
system_table = bemtoh64((uint64_t *)prop);
-#endif
+
+ len = fdt_node_property(node, "openbsd,dma-constraint", &prop);
+ if (len == sizeof(dma_constraint)) {
+ dma_constraint.ucr_low = bemtoh64((uint64_t *)prop);
+ dma_constraint.ucr_high = bemtoh64((uint64_t *)prop + 1);
+ }
}
sbi_init();
process_kernel_args();
- void _start(void);
- long kernbase = (long)&_start & ~(PAGE_SIZE-1); // page aligned
-
-#if 0 // Below we set memstart / memend based on entire physical address
- // range based on information sourced from FDT.
- /* The bootloader has loaded us into a 64MB block. */
- memstart = KERNBASE + kvo; //va + (pa - va) ==> pa
- memend = memstart + 64 * 1024 * 1024; //XXX CMPE: size also 64M??
-#endif
-
+ /*
+ * Determine physical RAM address range from FDT.
+ */
node = fdt_find_node("/memory");
if (node == NULL)
panic("%s: no memory specified", __func__);
-
- paddr_t start, end;
- int i;
-
- // Assume that the kernel was loaded at valid physical memory location
- // Scan the FDT to identify the full physical address range for machine
- // XXX Save physical memory segments to later allocate to UVM?
- memstart = memend = kernbase + kvo;
+ ramstart = (paddr_t)-1, ramend = 0;
for (i = 0; i < VM_PHYSSEG_MAX; i++) {
if (fdt_get_reg(node, i, &reg))
break;
+ if (reg.size == 0)
+ continue;
start = reg.addr;
end = reg.addr + reg.size;
- if (start < memstart)
- memstart = start;
- if (end > memend)
- memend = end;
+ if (start < ramstart)
+ ramstart = start;
+ if (end > ramend)
+ ramend = end;
+
+ physmem += atop(end - start);
}
- // XXX At this point, OpenBSD/arm64 would have set memstart / memend
- // to the range mapped by the bootloader (KERNBASE - KERNBASE + 64MiB).
- // Instead, we have mapped memstart / memend to the full physical
- // address range. What implications might this have?
+ /* The bootloader has loaded us into a 64MB block. */
+ memstart = KERNBASE + kvo;
+ memend = memstart + 64 * 1024 * 1024;
+
+ /* XXX */
+ kernbase = KERNBASE;
/* Bootstrap enough of pmap to enter the kernel proper. */
vstart = pmap_bootstrap(kvo, rbp->kern_l1pt,
- kernbase, esym, fdt_start, fdt_end, memstart, memend);
+ kernbase, esym, memstart, memend, ramstart, ramend);
proc0paddr = (struct user *)rbp->kern_stack;
msgbufaddr = (caddr_t)vstart;
msgbufphys = pmap_steal_avail(round_page(MSGBUFSIZE), PAGE_SIZE, NULL);
- // XXX should map this msgbuffphys to kernel pmap??
-
vstart += round_page(MSGBUFSIZE);
zero_page = vstart;
vaddr_t va;
pa = pmap_steal_avail(size, PAGE_SIZE, NULL);
- memcpy((void *) PHYS_TO_DMAP(pa),
- (void *) PHYS_TO_DMAP(fdt_start), size);
+ memcpy((void *)PHYS_TO_DMAP(pa),
+ (void *)PHYS_TO_DMAP(fdt_start), size);
for (va = vstart, csize = size; csize > 0;
csize -= PAGE_SIZE, va += PAGE_SIZE, pa += PAGE_SIZE)
- pmap_kenter_cache(va, pa, PROT_READ, PMAP_CACHE_WB);
+ pmap_kenter_pa(va, pa, PROT_READ);
fdt = (void *)vstart;
vstart += size;
}
- /*
- * Managed KVM space is what we have claimed up to end of
- * mapped kernel buffers.
- */
- {
- // export back to pmap
- extern vaddr_t virtual_avail, virtual_end;
- virtual_avail = vstart;
- vend = VM_MAX_KERNEL_ADDRESS; // XXX
- virtual_end = vend;
+ /* Relocate the EFI memory map too. */
+ if (mmap_start != 0) {
+ uint32_t csize, size = round_page(mmap_size);
+ paddr_t pa;
+ vaddr_t va;
+
+ pa = pmap_steal_avail(size, PAGE_SIZE, NULL);
+ memcpy((void *)PHYS_TO_DMAP(pa),
+ (void *)PHYS_TO_DMAP(mmap_start), size);
+ for (va = vstart, csize = size; csize > 0;
+ csize -= PAGE_SIZE, va += PAGE_SIZE, pa += PAGE_SIZE)
+ pmap_kenter_pa(va, pa, PROT_READ | PROT_WRITE);
+
+ mmap = (void *)vstart;
+ vstart += size;
}
+ /* No more KVA stealing after this point. */
+ virtual_avail = vstart;
+
/* Now we can reinit the FDT, using the virtual address. */
if (fdt)
fdt_init(fdt);
- int pmap_bootstrap_bs_map(bus_space_tag_t t, bus_addr_t bpa,
- bus_size_t size, int flags, bus_space_handle_t *bshp);
-
map_func_save = riscv64_bs_tag._space_map;
riscv64_bs_tag._space_map = pmap_bootstrap_bs_map;
consinit();
-#ifdef DEBUG_AUTOCONF
- fdt_print_tree();
-#endif
-
riscv64_bs_tag._space_map = map_func_save;
pmap_avail_fixup();
/* Make what's left of the initial 64MB block available to UVM. */
pmap_physload_avail();
-#if 0
/* Make all other physical memory available to UVM. */
if (mmap && mmap_desc_ver == EFI_MEMORY_DESCRIPTOR_VERSION) {
EFI_MEMORY_DESCRIPTOR *desc = mmap;
- int i;
/*
* Load all memory marked as EfiConventionalMemory,
* EfiBootServicesCode or EfiBootServicesData.
* Don't bother with blocks smaller than 64KB. The
* initial 64MB memory block should be marked as
- * EfiLoaderData so it won't be added again here.
+ * EfiLoaderData so it won't be added here.
*/
for (i = 0; i < mmap_size / mmap_desc_size; i++) {
printf("type 0x%x pa 0x%llx va 0x%llx pages 0x%llx attr 0x%llx\n",
desc->Type == EfiBootServicesCode ||
desc->Type == EfiBootServicesData) &&
desc->NumberOfPages >= 16) {
- uvm_page_physload(atop(desc->PhysicalStart),
- atop(desc->PhysicalStart) +
- desc->NumberOfPages,
- atop(desc->PhysicalStart),
- atop(desc->PhysicalStart) +
- desc->NumberOfPages, 0);
- physmem += desc->NumberOfPages;
+ reg.addr = desc->PhysicalStart;
+ reg.size = ptoa(desc->NumberOfPages);
+ memreg_add(&reg);
}
desc = NextMemoryDescriptor(desc, mmap_desc_size);
}
} else {
- paddr_t start, end;
- int i;
-
node = fdt_find_node("/memory");
if (node == NULL)
panic("%s: no memory specified", __func__);
- for (i = 0; i < VM_PHYSSEG_MAX; i++) {
+ for (i = 0; nmemreg < nitems(memreg); i++) {
if (fdt_get_reg(node, i, &reg))
break;
if (reg.size == 0)
continue;
+ memreg_add(&reg);
+ }
+ }
- start = reg.addr;
- end = MIN(reg.addr + reg.size, (paddr_t)-PAGE_SIZE);
-
- /*
- * The initial 64MB block is not excluded, so we need
- * to make sure we don't add it here.
- */
- if (start < memend && end > memstart) {
- if (start < memstart) {
- uvm_page_physload(atop(start),
- atop(memstart), atop(start),
- atop(memstart), 0);
- physmem += atop(memstart - start);
- }
- if (end > memend) {
- uvm_page_physload(atop(memend),
- atop(end), atop(memend),
- atop(end), 0);
- physmem += atop(end - memend);
- }
- } else {
- uvm_page_physload(atop(start), atop(end),
- atop(start), atop(end), 0);
- physmem += atop(end - start);
- }
+ /* Remove reserved memory. */
+ node = fdt_find_node("/reserved-memory");
+ if (node) {
+ for (node = fdt_child_node(node); node;
+ node = fdt_next_node(node)) {
+ if (fdt_get_reg(node, 0, &reg))
+ continue;
+ if (reg.size == 0)
+ continue;
+ memreg_remove(&reg);
}
}
-#endif
+
+ /* Remove the initial 64MB block. */
+ reg.addr = memstart;
+ reg.size = memend - memstart;
+ memreg_remove(&reg);
+
+ for (i = 0; i < nmemreg; i++) {
+ paddr_t start = memreg[i].addr;
+ paddr_t end = start + memreg[i].size;
+
+ uvm_page_physload(atop(start), atop(end),
+ atop(start), atop(end), 0);
+ }
+
/*
* Make sure that we have enough KVA to initialize UVM. In
* particular, we need enough KVA to be able to allocate the
*bshp = (bus_space_handle_t)(va + (bpa - startpa));
for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE)
- pmap_kenter_cache(va, pa, PROT_READ | PROT_WRITE,
- PMAP_CACHE_DEV);
+ pmap_kenter_pa(va, pa, PROT_READ | PROT_WRITE);
virtual_avail = va;
return 0;
}
+
+void
+memreg_add(const struct fdt_reg *reg)
+{
+ if (nmemreg >= nitems(memreg))
+ return;
+
+ memreg[nmemreg++] = *reg;
+}
+
+void
+memreg_remove(const struct fdt_reg *reg)
+{
+ uint64_t start = reg->addr;
+ uint64_t end = reg->addr + reg->size;
+ int i, j;
+
+ for (i = 0; i < nmemreg; i++) {
+ uint64_t memstart = memreg[i].addr;
+ uint64_t memend = memreg[i].addr + memreg[i].size;
+
+ if (end <= memstart)
+ continue;
+ if (start >= memend)
+ continue;
+
+ if (start <= memstart)
+ memstart = MIN(end, memend);
+ if (end >= memend)
+ memend = MAX(start, memstart);
+
+ if (start > memstart && end < memend) {
+ if (nmemreg < nitems(memreg)) {
+ memreg[nmemreg].addr = end;
+ memreg[nmemreg].size = memend - end;
+ nmemreg++;
+ }
+ memend = start;
+ }
+ memreg[i].addr = memstart;
+ memreg[i].size = memend - memstart;
+ }
+
+ /* Remove empty slots. */
+ for (i = nmemreg - 1; i >= 0; i--) {
+ if (memreg[i].size == 0) {
+ for (j = i; (j + 1) < nmemreg; j++)
+ memreg[j] = memreg[j + 1];
+ nmemreg--;
+ }
+ }
+}
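
A worked example of the interval arithmetic above: with a single 1GB bank and
the kernel's 64MB block carved out of its interior, memreg_remove() splits
the bank in two (addresses hypothetical):

	struct fdt_reg bank = { .addr = 0x80000000, .size = 0x40000000 };
	struct fdt_reg kern = { .addr = 0x80200000, .size = 0x04000000 };

	memreg_add(&bank);
	memreg_remove(&kern);
	/*
	 * memreg[0] = { 0x80000000, 0x00200000 }  (below the kernel block)
	 * memreg[1] = { 0x84200000, 0x3be00000 }  (above the kernel block)
	 */
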
return;
}
-vaddr_t virtual_avail, virtual_end;
+vaddr_t virtual_avail;
int pmap_virtual_space_called;
static inline pt_entry_t
return pmap_maxkvaddr;
}
-void pmap_setup_avail(uint64_t ram_start, uint64_t ram_end, uint64_t kvo);
+void pmap_setup_avail(uint64_t memstart, uint64_t memend, uint64_t kvo);
/*
* Initialize pmap setup.
paddr_t dmap_phys_max;
vaddr_t dmap_virt_max;
-static void
+void
pmap_bootstrap_dmap(vaddr_t kern_l1, paddr_t min_pa, paddr_t max_pa)
{
vaddr_t va;
vaddr_t
pmap_bootstrap(long kvo, vaddr_t l1pt, vaddr_t kernelstart, vaddr_t kernelend,
- paddr_t fdt_start, paddr_t fdt_end, paddr_t ram_start, paddr_t ram_end)
+ paddr_t memstart, paddr_t memend, paddr_t ramstart, paddr_t ramend)
{
void *va;
paddr_t pa, pt1pa;
int i, j, k;
int lb_idx2, ub_idx2;
- pmap_setup_avail(ram_start, ram_end, kvo);
-
- /*
- * in theory we could start with just the memory in the
- * kernel, however this could 'allocate' the bootloader and
- * bootstrap vm table, which we may need to preserve until
- * later.
- */
- printf("removing %lx-%lx\n", ram_start, kernelstart+kvo);
- pmap_remove_avail(ram_start, kernelstart+kvo);
-
- printf("removing %lx-%lx\n", kernelstart+kvo, kernelend+kvo);
- pmap_remove_avail(kernelstart+kvo, kernelend+kvo);
-
- // Remove the FDT physical address range as well
- printf("removing %lx-%lx\n", fdt_start+kvo, fdt_end+kvo);
- pmap_remove_avail(fdt_start, fdt_end);
+ pmap_setup_avail(memstart, memend, kvo);
+ pmap_remove_avail(kernelstart + kvo, kernelend + kvo);
/*
* KERNEL IS ASSUMED TO BE 39 bits (or less), start from L1,
*/
// Map the entire Physical Address Space to Direct Mapped Region
- pmap_bootstrap_dmap(l1pt, ram_start, ram_end);
+ pmap_bootstrap_dmap(l1pt, ramstart, ramend);
pt1pa = pmap_steal_avail(2 * sizeof(struct pmapvp1), Lx_TABLE_ALIGN,
&va);
// Include the Direct Map in Kernel PMAP
// as gigapages, only populated the pmapvp1->l1 field,
// pmap->va field is not used
- pmap_bootstrap_dmap((vaddr_t) pmap_kernel()->pm_vp.l1, ram_start, ram_end);
+ pmap_bootstrap_dmap((vaddr_t) pmap_kernel()->pm_vp.l1, ramstart, ramend);
//switching to new page table
uint64_t satp = pmap_kernel()->pm_satp;
pmap_virtual_space(vaddr_t *start, vaddr_t *end)
{
*start = virtual_avail;
- *end = virtual_end;
+ *end = VM_MAX_KERNEL_ADDRESS;
/* Prevent further KVA stealing. */
pmap_virtual_space_called = 1;
}
void
-pmap_setup_avail(uint64_t ram_start, uint64_t ram_end, uint64_t kvo)
+pmap_setup_avail(uint64_t memstart, uint64_t memend, uint64_t kvo)
{
/* This makes several assumptions
* 1) kernel will be located 'low' in memory
*/
pmap_avail_kvo = kvo;
- pmap_avail[0].start = ram_start;
- pmap_avail[0].size = ram_end-ram_start;
-
- /* XXX - multiple sections */
- physmem = atop(pmap_avail[0].size);
-
+ pmap_avail[0].start = memstart;
+ pmap_avail[0].size = memend - memstart;
pmap_cnt_avail = 1;
pmap_avail_fixup();