From a6c8f9465544d24334598cf827c022c26c07ce42 Mon Sep 17 00:00:00 2001
From: kettenis
Date: Fri, 9 Dec 2022 22:31:31 +0000
Subject: [PATCH] Simplify early kernel bootstrap a bit more.

Just map the entire 64MB memory block that the bootloader allocates
for us (minus the first 2MB).  This means we can get rid of a bunch
of code in pmap_bootstrap().  It also makes sure that we don't
accidentally enter mappings that cover secure memory just beyond
where the kernel was loaded.

ok patrick@
---
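A note for review (not part of the commit message): each L2 block
entry maps 2MB, the bootloader's allocation is 64MB, and the kernel
sits 2MB into it, so the TTBR1 L2 table now gets a fixed
(64MB - 2MB) / 2MB = 31 entries.  The small userland C sketch below
(all names and numbers are illustrative, nothing here is kernel code)
contrasts this with the old size-based computation, which could spill
past the block into secure memory:

	#include <stdio.h>
	#include <stdint.h>

	#define L2_SHIFT	21			/* one L2 block maps 2MB */
	#define MB(x)		((uint64_t)(x) << 20)

	int
	main(void)
	{
		uint64_t kern_start = MB(2);	/* kernel loaded 2MB into the block */
		uint64_t block_end = MB(64);	/* end of the bootloader's 64MB block */
		uint64_t esym = MB(62);		/* hypothetical end of kernel symbols */

		/* Old sizing: kernel size in 2MB blocks, rounded down, plus 2. */
		uint64_t old_n = ((esym - kern_start) >> L2_SHIFT) + 2;
		/* New sizing: always the rest of the block, i.e. 31 entries. */
		uint64_t new_n = (block_end - kern_start) >> L2_SHIFT;

		printf("old: %llu entries, last mapping ends at %lluMB\n",
		    (unsigned long long)old_n,
		    (unsigned long long)((kern_start + (old_n << L2_SHIFT)) >> 20));
		printf("new: %llu entries, last mapping ends at %lluMB\n",
		    (unsigned long long)new_n,
		    (unsigned long long)((kern_start + (new_n << L2_SHIFT)) >> 20));
		return 0;
	}

With these inputs the old computation maps 32 blocks, ending at 66MB,
2MB past the end of the bootloader's block; the new computation always
maps 31 blocks, ending exactly at 64MB.
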
 sys/arch/arm64/arm64/locore0.S | 47 ++++++++++++----------------------
 sys/arch/arm64/arm64/pmap.c    | 40 +----------------------------
 2 files changed, 17 insertions(+), 70 deletions(-)

diff --git a/sys/arch/arm64/arm64/locore0.S b/sys/arch/arm64/arm64/locore0.S
index 9017bff3ce7..60ef7b280a5 100644
--- a/sys/arch/arm64/arm64/locore0.S
+++ b/sys/arch/arm64/arm64/locore0.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore0.S,v 1.8 2022/12/08 01:25:44 guenther Exp $ */
+/* $OpenBSD: locore0.S,v 1.9 2022/12/09 22:31:31 kettenis Exp $ */
 /*-
  * Copyright (c) 2012-2014 Andrew Turner
  * All rights reserved.
@@ -168,26 +168,29 @@ virtdone:
 	.quad	virtdone
 .Lbss:
 	.quad	__bss_start
-.Lstart:
-	.quad	_start
 .Lend:
 	.quad	_end
 
 /*
- * This builds the page tables containing the identity map, and the kernel
- * virtual map.
+ * This builds the page tables containing the identity map, and the
+ * initial kernel virtual map.
  *
  * It relies on:
  *  We were loaded to an address that is on a 2MiB boundary
  *  All the memory must not cross a 1GiB boundary
  *  x28 contains the physical address we were loaded from
  *
- * There are 3 pages before that address for the page tables
- *  These pages are allocated aligned in .data
- *  The pages used are:
- *   - The identity (PA = VA) table (TTBR0)
- *   - The Kernel L1 table (TTBR1)
- *   - The PA == VA L2 table for kernel
+ * The page table for the identity map starts at L0 and maps the 1GB
+ * of memory that contains the memory block where the kernel was
+ * loaded by the bootloader.  These are loaded into TTBR0.
+ *
+ * The initial kernel page table starts at L1 and maps the 64MB block
+ * that the kernel was initially loaded into by the bootloader using
+ * 2MB (L2) pages.  The first 2MB of this 64MB block is unused and
+ * not mapped.  These are loaded into TTBR1.
+ *
+ * The pages for the page tables are allocated aligned in .data
+ * (see locore.S).
  */
 .Lpagetable:
 	.xword	pagetable
@@ -227,34 +230,16 @@ create_pagetables:
 	 * Build the TTBR1 maps.
 	 */
 
-	/* Find the size of the kernel */
-	adr	x6, .Lstart
-	ldr	x6, [x6]
-	sub	x6, x6, x29
-
-	/* End is the symbol address */
-	adr	x7, .Lesym
-	ldr	x7, [x7]
-	sub	x7, x7, x29
-	ldr	x7, [x7]
-	sub	x7, x7, x29
-
-	/* Find the end - begin */
-	sub	x8, x7, x6
-	/* Get the number of l2 pages to allocate, rounded down */
-	lsr	x10, x8, #(L2_SHIFT)
-	/* Add 4 MiB for any rounding above and the module data */
-	add	x10, x10, #2
-
 	/* Create the kernel space L2 table */
 	mov	x6, x26		// pagetable:
 	mov	x7, #NORMAL_MEM
 	add	x8, x28, x29
 	mov	x9, x28
+	mov	x10, #31	// entries for 64MB - 2MB
 	bl	build_l2_block_pagetable
 
 	/* Move to the l1 table */
-	add	x26, x26, #PAGE_SIZE*2	// pagetable_l1_ttbr1:
+	add	x26, x26, #PAGE_SIZE * 2	// pagetable_l1_ttbr1:
 
 	/* Link the l1 -> l2 table */
 	mov	x9, x6
diff --git a/sys/arch/arm64/arm64/pmap.c b/sys/arch/arm64/arm64/pmap.c
index 47be3f1e216..82216f7c3d5 100644
--- a/sys/arch/arm64/arm64/pmap.c
+++ b/sys/arch/arm64/arm64/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.89 2022/11/21 20:19:21 kettenis Exp $ */
+/* $OpenBSD: pmap.c,v 1.90 2022/12/09 22:31:31 kettenis Exp $ */
 /*
  * Copyright (c) 2008-2009,2014-2016 Dale Rahn
  *
@@ -1295,44 +1295,6 @@ pmap_bootstrap(long kvo, paddr_t lpt1, long kernelstart, long kernelend,
 	memset((void *)pa, 0, Lx_TABLE_ALIGN);
 	pmap_kernel()->pm_pt0pa = pa;
 
-	/* now that we have mapping space for everything, lets map it */
-	/* all of these mappings are ram -> kernel va */
-
-	/*
-	 * enable mappings for existing 'allocated' mapping in the bootstrap
-	 * page tables
-	 */
-	extern uint64_t *pagetable;
-	extern char _end[];
-	vp2 = (void *)((long)&pagetable + kvo);
-	struct mem_region *mp;
-	ssize_t size;
-	for (mp = pmap_allocated; mp->size != 0; mp++) {
-		/* bounds may be kinda messed up */
-		for (pa = mp->start, size = mp->size & ~0xfff;
-		    size > 0;
-		    pa+= L2_SIZE, size -= L2_SIZE)
-		{
-			paddr_t mappa = pa & ~(L2_SIZE-1);
-			vaddr_t mapva = mappa - kvo;
-			int prot = PROT_READ | PROT_WRITE;
-
-			if (mapva < (vaddr_t)_end)
-				continue;
-
-			if (mapva >= (vaddr_t)__text_start &&
-			    mapva < (vaddr_t)_etext)
-				prot = PROT_READ | PROT_EXEC;
-			else if (mapva >= (vaddr_t)__rodata_start &&
-			    mapva < (vaddr_t)_erodata)
-				prot = PROT_READ;
-
-			vp2->l2[VP_IDX2(mapva)] = mappa | L2_BLOCK |
-			    ATTR_IDX(PTE_ATTR_WB) | ATTR_SH(SH_INNER) |
-			    ATTR_nG | ap_bits_kern[prot];
-		}
-	}
-
 	pmap_avail_fixup();
 
 	/*
-- 
2.20.1
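
For context on the pmap_bootstrap() deletion above: the removed loop
walked pmap_allocated and re-entered each 2MB bootstrap mapping,
picking a protection per block.  A condensed userland model of that
protection choice follows (the section bounds are made-up stand-ins
for the kernel's linker symbols; the region walk and the actual PTE
composition are omitted):

	#include <stdint.h>
	#include <stdio.h>

	#define PROT_READ	0x1
	#define PROT_WRITE	0x2
	#define PROT_EXEC	0x4

	/* Made-up section bounds standing in for __text_start, _etext,
	 * __rodata_start and _erodata. */
	static const uint64_t text_start   = 0xffffff8000200000UL;
	static const uint64_t etext        = 0xffffff8000600000UL;
	static const uint64_t rodata_start = 0xffffff8000600000UL;
	static const uint64_t erodata      = 0xffffff8000800000UL;

	/* Mirrors the removed logic: text is RX, rodata R, the rest RW. */
	static int
	bootstrap_prot(uint64_t mapva)
	{
		int prot = PROT_READ | PROT_WRITE;

		if (mapva >= text_start && mapva < etext)
			prot = PROT_READ | PROT_EXEC;
		else if (mapva >= rodata_start && mapva < erodata)
			prot = PROT_READ;
		return prot;
	}

	int
	main(void)
	{
		/* An address in .text yields PROT_READ|PROT_EXEC (5). */
		printf("%d\n", bootstrap_prot(0xffffff8000400000UL));
		return 0;
	}

With locore0.S now mapping the whole 64MB block up front, pmap_bootstrap()
no longer needs to enter any of these mappings itself.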