--- /dev/null
+# $OpenBSD: Makefile,v 1.1 2021/04/23 02:42:16 drahn Exp $
+
+.include <bsd.own.mk> # for KEEPKERNELS
+
+S= ${.CURDIR}/../..
+KFILE= GENERIC
+.if exists(conf/GENERIC.MP)
+KFILE= GENERIC.MP
+.endif
+TDIRS= ${_arch} include
+TAGS= ${.CURDIR}/tags
+
+NOPROG=
+NOMAN=
+NOOBJ=
+SUBDIR= stand
+.if !defined(KEEPKERNELS) || !(make(clean) || make(cleandir))
+SUBDIR+=compile
+.endif
+
+# config the fattest kernel we can find into a temporary dir
+# to create a Makefile. Then use make to pull some variables
+# out and push them into the sub-shell to expand the paths,
+# and finally run ctags.
+tags::
+ TDIR=`mktemp -d /tmp/_tagXXXXXXXXXX` || exit 1; \
+ eval "S=${S}" && \
+ config -s ${S} -b $${TDIR} ${.CURDIR}/conf/${KFILE} && \
+ eval "_arch=\"`make -V _arch -f $${TDIR}/Makefile`\"" && \
+ eval "_mach=\"`make -V _mach -f $${TDIR}/Makefile`\"" && \
+ eval "_machdir=\$S/arch/$${_mach}" && \
+ eval "_archdir=\$S/arch/$${_arch}" && \
+ eval "HFILES=\"`find $S \( -path $S/'arch' -o -path $S/stand -o -path $S/lib/libsa -o -path $S'/lib/libkern/arch' \) -prune -o -name '*.h'; find $${_machdir} $${_archdir} $S/lib/libkern/arch/$${_mach} \( -name boot -o -name stand \) -prune -o -name '*.h'`\"" && \
+ eval "SFILES=\"`make -V SFILES -f $${TDIR}/Makefile`\"" && \
+ eval "CFILES=\"`make -V CFILES -f $${TDIR}/Makefile`\"" && \
+ eval "AFILES=\"`make -V AFILES -f $${TDIR}/Makefile`\"" && \
+ ctags -wd -f ${TAGS} $${CFILES} $${HFILES} && \
+ egrep "^[_A-Z]*ENTRY[_A-Z]*\(.*\)" $${SFILES} $${AFILES} | \
+ sed "s;\\([^:]*\\):\\([^(]*\\)(\\([^, )]*\\)\\(.*\\);\\3 \\1 /^\\2(\\3\\4$$/;" \
+ >> ${TAGS} && \
+ sort -o ${TAGS} ${TAGS} && \
+ rm -rf $${TDIR}
+
+# symlink the generated tags file into each machine source subdirectory
+links:
+	-for i in conf ${TDIRS}; do \
+	    (cd $$i && rm -f tags; ln -s ${TAGS} tags); done
+
+obj: _SUBDIRUSE
+
+.include <bsd.prog.mk>
--- /dev/null
+.include "../Makefile.inc"
--- /dev/null
+# $OpenBSD: Makefile,v 1.1 2021/04/23 02:42:16 drahn Exp $
+
+.if make(obj) || make(clean) || make(cleandir)
+SUBDIR!=find . -type d -maxdepth 1 \! \( -name . -o -name CVS \) | cut -b3-
+.endif
+
+.include <bsd.subdir.mk>
--- /dev/null
+SYSDIR != cd ${.CURDIR}/../../../..; pwd
+CONFDIR != cd ${.CURDIR}/../../conf; pwd
+
+.if ${.CURDIR} == ${.OBJDIR}
+.PHONY: config
+config:
+ @echo make obj required first >&2
+ @false
+.else
+.PHONY: config clean
+config:
+ config ${.CURDIR:M*.PROF:C/.*/-p/} -b ${.OBJDIR} \
+ -s ${SYSDIR} ${CONFDIR}/${.CURDIR:T:S/.PROF$//}
+.endif
+
+cleandir clean:
+
+.include <bsd.obj.mk>
+
--- /dev/null
+.include "../Makefile.inc"
--- /dev/null
+# For further information on compiling OpenBSD kernels, see the config(8)
+# man page.
+#
+# For further information on hardware support for this architecture, see
+# the intro(4) man page. For further information about kernel options
+# for this architecture, see the options(4) man page. For an explanation
+# of each device driver in this file see the section 4 man page for the
+# device.
+
+machine riscv64
+include "../../../conf/GENERIC"
+maxusers 32
+
+makeoptions KERNEL_BASE_PHYS="0x00200000"
+makeoptions KERNEL_BASE_VIRT="0xffffffc000200000"
+#option DEBUG
+#option DEBUG_AUTOCONF
+#option DEBUG_INTC
+#option DEBUG_TIMER
+
+config bsd swap generic
+
+#
+# Definition of system
+#
+
+# mainbus
+mainbus0 at root
+
+# cpu0
+cpu0 at mainbus0
+timer0 at cpu0
+intc0 at cpu0
+
+# NS16550 compatible serial ports
+com* at mainbus0 early 1
+
+virtio* at mainbus0
+vio* at virtio? # Network
+vioblk* at virtio?
+vioscsi* at virtio? # Disk (SCSI)
+#viomb* at virtio? # Memory Ballooning
+#viornd* at virtio? # Random Source
+
+# simplebus0
+simplebus* at mainbus0 early 1
+# Platform Level Interrupt Controller
+plic* at simplebus? early 1
+
+
+scsibus* at scsi?
+sd* at scsibus?
+cd* at scsibus?
+ch* at scsibus?
+uk* at scsibus?
--- /dev/null
+# For instructions on building kernels consult the config(8) and options(4)
+# manual pages.
+#
+# N.B.: NO DEPENDENCIES ON FOLLOWING FLAGS ARE VISIBLE TO MAKEFILE
+# IF YOU CHANGE THE DEFINITION OF ANY OF THESE RECOMPILE EVERYTHING
+# DEBUG is set to -g by config if debugging is requested (config -g).
+# PROF is set to -pg by config if profiling is requested (config -p).
+
+.include <bsd.own.mk>
+
+SIZE?= size
+STRIP?= llvm-strip
+
+# source tree is located via $S relative to the compilation directory
+.ifndef S
+S!= cd ../../../..; pwd
+.endif
+
+_machdir?= $S/arch/${_mach}
+_archdir?= $S/arch/${_arch}
+
+INCLUDES= -nostdinc -I$S -I${.OBJDIR} -I$S/arch \
+ -I$S/dev/pci/drm/include \
+ -I$S/dev/pci/drm/include/uapi \
+ -I$S/dev/pci/drm/amd/include/asic_reg \
+ -I$S/dev/pci/drm/amd/include \
+ -I$S/dev/pci/drm/amd/amdgpu \
+ -I$S/dev/pci/drm/amd/display \
+ -I$S/dev/pci/drm/amd/display/include \
+ -I$S/dev/pci/drm/amd/display/dc \
+ -I$S/dev/pci/drm/amd/display/amdgpu_dm \
+ -I$S/dev/pci/drm/amd/powerplay/inc \
+ -I$S/dev/pci/drm/amd/powerplay/smumgr \
+ -I$S/dev/pci/drm/amd/powerplay/hwmgr \
+ -I$S/dev/pci/drm/amd/display/dc/inc \
+ -I$S/dev/pci/drm/amd/display/dc/inc/hw \
+ -I$S/dev/pci/drm/amd/display/modules/inc
+CPPFLAGS= ${INCLUDES} ${IDENT} ${PARAM} -D_KERNEL -D__${_mach}__ -MD -MP
+# XXX -Werror deliberately left out of CWARNFLAGS so warnings do not
+# XXX abort early bring-up builds; restore it once the port compiles clean.
+CWARNFLAGS= -Wall -Wimplicit-function-declaration \
+ -Wno-uninitialized -Wno-pointer-sign \
+ -Wno-constant-conversion -Wno-address-of-packed-member \
+ -Wframe-larger-than=2047
+
+CMACHFLAGS= --target=riscv64-unknown-openbsd6.6 -march=rv64gc \
+ -fno-omit-frame-pointer -mno-omit-leaf-frame-pointer -mno-relax \
+ -mcmodel=medany
+CMACHFLAGS+= -ffreestanding ${NOPIE_FLAGS}
+SORTR= sort -R
+.if ${IDENT:M-DNO_PROPOLICE}
+CMACHFLAGS+= -fno-stack-protector
+.endif
+.if ${IDENT:M-DSMALL_KERNEL}
+SORTR= cat
+COPTS?= -Oz
+.endif
+
+DEBUG?= -g
+COPTS?=	-O2 # default optimization; SMALL_KERNEL overrides this with -Oz above
+CFLAGS= ${DEBUG} ${CWARNFLAGS} ${CMACHFLAGS} ${COPTS} ${PIPE}
+AFLAGS= -D_LOCORE -x assembler-with-cpp ${CWARNFLAGS} ${CMACHFLAGS}
+LINKFLAGS= -T ld.script -X --warn-common -no-pie
+
+HOSTCC?= ${CC}
+HOSTED_CPPFLAGS=${CPPFLAGS:S/^-nostdinc$//}
+HOSTED_CFLAGS= ${CFLAGS}
+HOSTED_C= ${HOSTCC} ${HOSTED_CFLAGS} ${HOSTED_CPPFLAGS} -c $<
+
+NORMAL_C_NOP= ${CC} ${CFLAGS} ${CPPFLAGS} -c $<
+NORMAL_C= ${CC} ${CFLAGS} ${CPPFLAGS} ${PROF} -c $<
+NORMAL_S= ${CC} ${AFLAGS} ${CPPFLAGS} -c $<
+
+%OBJS
+
+%CFILES
+
+%SFILES
+
+# load lines for config "xxx" will be emitted as:
+# xxx: ${SYSTEM_DEP} swapxxx.o
+# ${SYSTEM_LD_HEAD}
+# ${SYSTEM_LD} swapxxx.o
+# ${SYSTEM_LD_TAIL}
+SYSTEM_HEAD= locore0.o gap.o
+SYSTEM_OBJ= ${SYSTEM_HEAD} ${OBJS} param.o ioconf.o
+SYSTEM_DEP= Makefile ${SYSTEM_OBJ} ld.script
+SYSTEM_LD_HEAD= @rm -f $@
+SYSTEM_LD= @echo ${LD} ${LINKFLAGS} -o $@ '$${SYSTEM_HEAD} vers.o $${OBJS}'; \
+ umask 007; \
+ echo ${OBJS} param.o ioconf.o vers.o | tr " " "\n" | ${SORTR} > lorder; \
+ ${LD} ${LINKFLAGS} -o $@ ${SYSTEM_HEAD} `cat lorder`
+SYSTEM_LD_TAIL= @${SIZE} $@
+
+.if ${DEBUG} == "-g"
+STRIPFLAGS= -S
+SYSTEM_LD_TAIL+=; umask 007; \
+ echo mv $@ $@.gdb; rm -f $@.gdb; mv $@ $@.gdb; \
+ echo ${STRIP} ${STRIPFLAGS} -o $@ $@.gdb; \
+ ${STRIP} ${STRIPFLAGS} -o $@ $@.gdb
+.else
+LINKFLAGS+= -S
+.endif
+
+%LOAD
+
+# cc's -MD puts the source and output paths in the dependency file;
+# since those are temp files here we need to fix it up. It also
+# puts the file in /tmp, so we use -MF to put it in the current
+# directory as assym.P and then generate assym.d from it with a
+# good target name
+assym.h: $S/kern/genassym.sh Makefile \
+ ${_archdir}/${_arch}/genassym.cf ${_machdir}/${_mach}/genassym.cf
+ cat ${_archdir}/${_arch}/genassym.cf ${_machdir}/${_mach}/genassym.cf | \
+ sh $S/kern/genassym.sh ${CC} ${CFLAGS} ${CPPFLAGS} -no-integrated-as -MF assym.P > assym.h.tmp
+ sed '1s/.*/assym.h: \\/' assym.P > assym.d
+ sort -u assym.h.tmp > assym.h
+
+param.c: $S/conf/param.c
+ rm -f param.c
+ cp $S/conf/param.c .
+
+param.o: param.c Makefile
+ ${NORMAL_C}
+
+mcount.o: $S/lib/libkern/mcount.c Makefile
+ ${NORMAL_C_NOP}
+
+ioconf.o: ioconf.c
+ ${NORMAL_C}
+
+ld.script: ${_archdir}/conf/kern.ldscript
+ cat ${_archdir}/conf/kern.ldscript | \
+ sed -e 's/@KERNEL_BASE_PHYS@/${KERNEL_BASE_PHYS}/' \
+ -e 's/@KERNEL_BASE_VIRT@/${KERNEL_BASE_VIRT}/' > ld.script
+gapdummy.o:
+ echo '__asm(".section .rodata,\"a\"");' > gapdummy.c
+ ${CC} -c ${CFLAGS} ${CPPFLAGS} gapdummy.c -o $@
+
+makegap.sh:
+ cp $S/conf/makegap.sh $@
+
+MAKE_GAP = LD="${LD}" sh makegap.sh 0x00000000 gapdummy.o
+
+gap.o: Makefile makegap.sh gapdummy.o vers.o
+ ${MAKE_GAP}
+
+vers.o: ${SYSTEM_DEP:Ngap.o}
+ sh $S/conf/newvers.sh
+ ${CC} ${CFLAGS} ${CPPFLAGS} ${PROF} -c vers.c
+
+clean:
+ rm -f *bsd *bsd.gdb *.[dio] [a-z]*.s assym.* \
+ gap.link gapdummy.c ld.script lorder makegap.sh param.c
+
+cleandir: clean
+ rm -f Makefile *.h ioconf.c options machine ${_mach} vers.c
+
+depend obj:
+
+locore0.o: ${_archdir}/${_arch}/locore0.S assym.h
+copy.o copystr.o copyinout.o: assym.h
+pagezero.o: assym.h
+cpuswitch.o trap.o support.o: assym.h
+locore.o trampoline.o: assym.h
+
+# Keep a hard link to the previous kernel as /obsd (skipped when /bsd
+# is absent or identical to the newly built kernel).
+hardlink-obsd:
+	[[ ! -f /bsd ]] || cmp -s bsd /bsd || ln -f /bsd /obsd
+
+# Install the new kernel and record its hash for sysupgrade/syspatch.
+newinstall:
+	install -F -m 700 bsd /bsd && sha256 -h /var/db/kernel.SHA256 /bsd
+
+# NOTE(review): 'update-link' is not defined anywhere in this file —
+# confirm an included makefile provides it, otherwise 'make install' fails.
+install: update-link hardlink-obsd newinstall
+
+# pull in the dependency information
+.ifnmake clean
+. for o in ${SYSTEM_OBJ:Ngap.o} assym.h
+. if exists(${o:R}.d)
+. include "${o:R}.d"
+. elif exists($o)
+ .PHONY: $o
+. endif
+. endfor
+.endif
+
+## for qemu this is where ram is located
+RAM_ADDR?=0x40000000
+#KERNEL_LOAD_ADDR!=echo "x = hex(${KERNEL_BASE_PHYS} + ${RAM_ADDR}); print x" | python
+KERNEL_LOAD_ADDR=0x40200000
+
+# until we get native booting working, put this in the tree.
+# NOTE(review): mkuboot is invoked with '-a arm' — this looks copied
+# from the arm port; confirm the correct -a architecture value for
+# riscv64 before relying on these images.
+bsdrd.umg: bsd.rd
+	mkuboot -a arm -o linux -e ${KERNEL_LOAD_ADDR} -l ${KERNEL_LOAD_ADDR} bsd.rd bsdrd.umg
+
+bsd.umg: bsd
+	mkuboot -a arm -o linux -e ${KERNEL_LOAD_ADDR} -l ${KERNEL_LOAD_ADDR} bsd bsd.umg
+
+bsd.rd: bsd
+ cp -p bsd bsd.rd
+ rdsetroot bsd.rd $S/../distrib/${_mach}/ramdisk/mr.fs
+
+%RULES
--- /dev/null
+# $OpenBSD: RAMDISK,v 1.1 2021/04/23 02:42:16 drahn Exp $
+#
+# RAMDISK machine description file
+#
+# This machine description file is used to generate the default OpenBSD
+# kernel. The generic kernel does not include all options, subsystems
+# and device drivers, but should be useful for most applications.
+#
+# The machine description file can be customised for your specific
+# machine to reduce the kernel size and improve its performance.
+#
+# For further information on compiling OpenBSD kernels, see the config(8)
+# man page.
+#
+# For further information on hardware support for this architecture, see
+# the intro(4) man page. For further information about kernel options
+# for this architecture, see the options(4) man page. For an explanation
+# of each device driver in this file see the section 4 man page for the
+# device.
+
+machine riscv64
+maxusers 4
+
+option PCIVERBOSE
+option USBVERBOSE
+option SMALL_KERNEL
+option NO_PROPOLICE
+option BOOT_CONFIG
+
+option RAMDISK_HOOKS
+option MINIROOTSIZE=16384
+
+option FFS
+option FFS2
+option MSDOSFS
+option INET6
+option EXT2FS
+option NFSCLIENT
+option CRYPTO
+
+makeoptions KERNEL_BASE_PHYS="0x00200000"
+makeoptions KERNEL_BASE_VIRT="0xffffffc000200000"
+
+config bsd root on rd0a swap on rd0b
+
+# mainbus
+mainbus0 at root
+
+# cpu0
+cpu0 at mainbus0
+timer0 at cpu0
+intc0 at cpu0
+
+# NS16550 compatible serial ports
+com* at mainbus0 early 1
+
+virtio* at mainbus0
+vio* at virtio? # Network
+vioblk* at virtio?
+vioscsi* at virtio? # Disk (SCSI)
+#viomb* at virtio? # Memory Ballooning
+#viornd* at virtio? # Random Source
+
+# simplebus0
+simplebus* at mainbus0 early 1
+# Platform Level Interrupt Controller
+plic* at simplebus? early 1
+
+
+scsibus* at scsi?
+sd* at scsibus?
+cd* at scsibus?
+ch* at scsibus?
+uk* at scsibus?
+
+pseudo-device loop 1
+pseudo-device vlan
+pseudo-device trunk
+pseudo-device bpfilter 1
+pseudo-device rd 1
+pseudo-device bio 1
--- /dev/null
+# Standard stanzas config(8) can't run without
+maxpartitions 16
+maxusers 2 8 64
+
+# Major number for block devices, for ``root on'' lines
+major { sd = 0 }
+#major { cd = 3 }
+#major { wd = 4 }
+major { rd = 8 }
+
+file dev/cninit.c
+
+file arch/riscv64/riscv64/autoconf.c
+file arch/riscv64/riscv64/ast.c
+file arch/riscv64/riscv64/bus_space.c
+file arch/riscv64/riscv64/bus_dma.c
+file arch/riscv64/riscv64/conf.c
+file arch/riscv64/riscv64/disksubr.c disk
+file arch/riscv64/riscv64/locore.S
+file arch/riscv64/riscv64/copy.S
+file arch/riscv64/riscv64/copystr.S
+file arch/riscv64/riscv64/trap.S
+file arch/riscv64/riscv64/cpuswitch.S
+file arch/riscv64/riscv64/machdep.c
+file arch/riscv64/riscv64/intr.c
+file arch/riscv64/riscv64/pmap.c
+file arch/riscv64/riscv64/mem.c
+file arch/riscv64/riscv64/process_machdep.c
+file arch/riscv64/riscv64/vm_machdep.c
+file arch/riscv64/riscv64/sig_machdep.c
+file arch/riscv64/riscv64/softintr.c
+file arch/riscv64/riscv64/support.S
+file arch/riscv64/riscv64/syscall.c
+file arch/riscv64/riscv64/pagezero.S
+file arch/riscv64/riscv64/trap_machdep.c
+file arch/riscv64/riscv64/sbi.c
+file arch/riscv64/riscv64/cpufunc_asm.S
+file arch/riscv64/riscv64/fpu.c
+
+file arch/riscv64/riscv64/db_disasm.c ddb
+file arch/riscv64/riscv64/db_interface.c ddb
+file arch/riscv64/riscv64/db_trace.c ddb
+
+define fdt {[early = 0]}
+
+#
+# mainbus
+#
+define mainbus {[early = 0]}
+device mainbus: fdt
+attach mainbus at root
+file arch/riscv64/dev/mainbus.c
+
+#
+# cpu
+#
+define cpu {}
+device cpu
+attach cpu at mainbus
+file arch/riscv64/riscv64/cpu.c
+
+#
+# timer
+#
+device timer
+attach timer at cpu
+file arch/riscv64/dev/timer.c
+
+#
+# HART-specific interrupt controller
+#
+device intc
+attach intc at cpu
+file arch/riscv64/dev/riscv_cpu_intc.c
+
+#
+# simplebus
+#
+define simplebus {[early = 1]}
+device simplebus
+attach simplebus at mainbus
+file arch/riscv64/dev/simplebus.c
+
+# PLIC
+device plic
+attach plic at simplebus
+file arch/riscv64/dev/plic.c
+
+
+# Paravirtual device bus and virtio
+include "dev/pv/files.pv"
+
+file netinet/in_cksum.c
+file netinet/in4_cksum.c
+
+#
+# Machine-independent HID support
+#
+include "dev/hid/files.hid"
+
+# Machine-independent I2C drivers
+include "dev/i2c/files.i2c"
+
+# FDT now requires drm (which is part of pci)
+include "dev/mii/files.mii"
+include "dev/pci/files.pci"
+
+# FDT support
+include "dev/ofw/files.ofw"
+
+# Machine-independent FDT drivers
+include "dev/fdt/files.fdt"
+
+# Machine-independent SCSI drivers
+include "scsi/files.scsi"
+
+# XXX ofwbus
--- /dev/null
+OUTPUT_ARCH(riscv64)
+
+/* Define how we want our ELF binary to look. */
+PHDRS
+{
+ text PT_LOAD;
+ rodata PT_LOAD FLAGS (4);
+ data PT_LOAD;
+ openbsd_randomize PT_OPENBSD_RANDOMIZE;
+}
+
+__ALIGN_SIZE = 0x200000;
+__kernel_base = @KERNEL_BASE_VIRT@;
+
+ENTRY(_start)
+SECTIONS
+{
+ . = __kernel_base;
+ PROVIDE (__text_start = .);
+ .text :
+ {
+ *(.text .text.*)
+ *(.stub)
+ *(.glue_7t) *(.glue_7)
+ } :text =0
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+
+ /* Move rodata to the next page, so we can nuke X and W bit on it */
+ . = ALIGN(__ALIGN_SIZE);
+ PROVIDE (__rodata_start = .);
+ .rodata :
+ {
+ *(.rodata .rodata.*)
+ } :rodata
+ .openbsd.randomdata :
+ {
+ *(.openbsd.randomdata)
+ } :openbsd_randomize :rodata
+ PROVIDE (_erodata = .);
+
+ /* Move .data to the next page, so we can add W bit on it */
+ . = ALIGN(__ALIGN_SIZE);
+ PROVIDE (__data_start = .);
+ .got :
+ {
+ *(.got .got.*)
+ } :data
+ .data :
+ {
+ *(.data .data.*)
+ } :data
+ .sdata :
+ {
+ *(.sdata .sdata.*)
+ } :data
+ PROVIDE (_edata = .);
+
+ PROVIDE (__bss_start = .);
+ .sbss :
+ {
+ *(.dynsbss)
+ *(.sbss)
+ *(.sbss.*)
+ *(.scommon)
+ } :data
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss)
+ *(.bss.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(64 / 8);
+ } :data
+ PROVIDE (_end = .);
+ PROVIDE (end = .);
+}
--- /dev/null
+/*
+ * Copyright (c) 2016 Patrick Wildt <patrick@blueri.se>
+ * Copyright (c) 2017 Mark Kettenis <kettenis@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/device.h>
+#include <sys/malloc.h>
+
+#include <machine/fdt.h>
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/fdt.h>
+
+#include <machine/riscv64var.h>
+#include <riscv64/dev/mainbus.h>
+
+int mainbus_match(struct device *, void *, void *);
+void mainbus_attach(struct device *, struct device *, void *);
+
+void mainbus_attach_node(struct device *, int, cfmatch_t);
+int mainbus_match_status(struct device *, void *, void *);
+void mainbus_attach_cpus(struct device *, cfmatch_t);
+int mainbus_match_primary(struct device *, void *, void *);
+int mainbus_match_secondary(struct device *, void *, void *);
+void mainbus_attach_efi(struct device *);
+void mainbus_attach_framebuffer(struct device *);
+
+struct mainbus_softc {
+ struct device sc_dev;
+ int sc_node;
+ bus_space_tag_t sc_iot;
+ bus_dma_tag_t sc_dmat;
+ int sc_acells;
+ int sc_scells;
+ int *sc_ranges;
+ int sc_rangeslen;
+ int sc_early;
+};
+
+const struct cfattach mainbus_ca = {
+ sizeof(struct mainbus_softc), mainbus_match, mainbus_attach, NULL,
+ config_activate_children
+};
+
+struct cfdriver mainbus_cd = {
+ NULL, "mainbus", DV_DULL
+};
+
+struct machine_bus_dma_tag mainbus_dma_tag = {
+ NULL,
+ 0,
+ _dmamap_create,
+ _dmamap_destroy,
+ _dmamap_load,
+ _dmamap_load_mbuf,
+ _dmamap_load_uio,
+ _dmamap_load_raw,
+ _dmamap_load_buffer,
+ _dmamap_unload,
+ _dmamap_sync,
+ _dmamem_alloc,
+ _dmamem_free,
+ _dmamem_map,
+ _dmamem_unmap,
+ _dmamem_mmap,
+};
+
+/*
+ * XXX This statement is copied from OpenBSD/arm64. We do not handle EFI.
+ * Mainbus takes care of FDT and non-FDT machines, so we
+ * always attach.
+ */
+int
+mainbus_match(struct device *parent, void *cfdata, void *aux)
+{
+ return (1);
+}
+
+extern char *hw_prod;
+void riscv_timer_init(void);
+
+void
+mainbus_attach(struct device *parent, struct device *self, void *aux)
+{
+ struct mainbus_softc *sc = (struct mainbus_softc *)self;
+ char model[128];
+ int node, len;
+
+ riscv_intr_init_fdt();
+ riscv_timer_init();
+
+ sc->sc_node = OF_peer(0);
+ sc->sc_iot = &riscv64_bs_tag;
+ sc->sc_dmat = &mainbus_dma_tag;
+ sc->sc_acells = OF_getpropint(OF_peer(0), "#address-cells", 1);
+ sc->sc_scells = OF_getpropint(OF_peer(0), "#size-cells", 1);
+
+ len = OF_getprop(sc->sc_node, "model", model, sizeof(model));
+ if (len > 0) {
+ printf(": %s\n", model);
+ hw_prod = malloc(len, M_DEVBUF, M_NOWAIT);
+ if (hw_prod)
+ strlcpy(hw_prod, model, len);
+ } else
+ printf(": unknown model\n");
+
+ /* Attach primary CPU first. */
+#ifdef DEBUG_AUTOCONF
+ printf("Attaching primary CPU...\n");
+#endif
+ mainbus_attach_cpus(self, mainbus_match_primary);
+
+ sc->sc_rangeslen = OF_getproplen(OF_peer(0), "ranges");
+ if (sc->sc_rangeslen > 0 && !(sc->sc_rangeslen % sizeof(uint32_t))) {
+ sc->sc_ranges = malloc(sc->sc_rangeslen, M_TEMP, M_WAITOK);
+ OF_getpropintarray(OF_peer(0), "ranges", sc->sc_ranges,
+ sc->sc_rangeslen);
+ }
+
+ /* Scan the whole tree. */
+ sc->sc_early = 1;
+#ifdef DEBUG_AUTOCONF
+ printf("Attaching node with sc_early == 1 ...\n");
+#endif
+ for (node = OF_child(sc->sc_node); node != 0; node = OF_peer(node))
+ mainbus_attach_node(self, node, NULL);
+
+ sc->sc_early = 0;
+#ifdef DEBUG_AUTOCONF
+ printf("Attaching node with sc_early == 0 ...\n");
+#endif
+ for (node = OF_child(sc->sc_node); node != 0; node = OF_peer(node))
+ mainbus_attach_node(self, node, NULL);
+
+ mainbus_attach_framebuffer(self);
+
+ /* Attach secondary CPUs. */
+ mainbus_attach_cpus(self, mainbus_match_secondary);
+}
+
+/*
+ * Look for a driver that wants to be attached to this node.
+ */
+void
+mainbus_attach_node(struct device *self, int node, cfmatch_t submatch)
+{
+ struct mainbus_softc *sc = (struct mainbus_softc *)self;
+ struct fdt_attach_args fa;
+ int i, len, line;
+ uint32_t *cell, *reg;
+
+ memset(&fa, 0, sizeof(fa));
+ fa.fa_name = "";
+ fa.fa_node = node;
+ fa.fa_iot = sc->sc_iot;
+ fa.fa_dmat = sc->sc_dmat;
+ fa.fa_acells = sc->sc_acells;
+ fa.fa_scells = sc->sc_scells;
+
+ len = OF_getproplen(node, "reg");
+ line = (sc->sc_acells + sc->sc_scells) * sizeof(uint32_t);
+ if (len > 0 && line > 0 && (len % line) == 0) {
+ reg = malloc(len, M_TEMP, M_WAITOK);
+ OF_getpropintarray(node, "reg", reg, len);
+
+ fa.fa_reg = malloc((len / line) * sizeof(struct fdt_reg),
+ M_DEVBUF, M_WAITOK);
+ fa.fa_nreg = (len / line);
+
+ for (i = 0, cell = reg; i < len / line; i++) {
+ if (sc->sc_acells >= 1)
+ fa.fa_reg[i].addr = cell[0];
+ if (sc->sc_acells == 2) {
+ fa.fa_reg[i].addr <<= 32;
+ fa.fa_reg[i].addr |= cell[1];
+ }
+ cell += sc->sc_acells;
+ if (sc->sc_scells >= 1)
+ fa.fa_reg[i].size = cell[0];
+ if (sc->sc_scells == 2) {
+ fa.fa_reg[i].size <<= 32;
+ fa.fa_reg[i].size |= cell[1];
+ }
+ cell += sc->sc_scells;
+ }
+
+ free(reg, M_TEMP, len);
+ }
+
+ len = OF_getproplen(node, "interrupts");
+ if (len > 0 && (len % sizeof(uint32_t)) == 0) {
+ fa.fa_intr = malloc(len, M_DEVBUF, M_WAITOK);
+ fa.fa_nintr = len / sizeof(uint32_t);
+
+ OF_getpropintarray(node, "interrupts", fa.fa_intr, len);
+ }
+
+ if (submatch == NULL)
+ submatch = mainbus_match_status;
+
+#ifdef DEBUG_AUTOCONF
+ char buf[32];
+ if (OF_getprop(fa.fa_node, "name", buf, sizeof(buf)) > 0)
+ printf("\ncurrent parent: %s, current node: %d-%s\n", self->dv_xname, fa.fa_node, buf);
+#endif
+
+ config_found_sm(self, &fa, NULL, submatch);
+
+ free(fa.fa_reg, M_DEVBUF, fa.fa_nreg * sizeof(struct fdt_reg));
+ free(fa.fa_intr, M_DEVBUF, fa.fa_nintr * sizeof(uint32_t));
+}
+
+/*
+ * Default submatch function for mainbus children: reject nodes whose
+ * FDT "status" property is "disabled", and only run the driver's match
+ * function during the scan pass (early vs. late) that the cfdata's
+ * first locator ("early") selects.  Mainbus scans the tree twice, so
+ * every enabled node is offered to drivers exactly once per pass.
+ */
+int
+mainbus_match_status(struct device *parent, void *match, void *aux)
+{
+	struct mainbus_softc *sc = (struct mainbus_softc *)parent;
+	struct fdt_attach_args *fa = aux;
+	struct cfdata *cf = match;
+	char buf[32];
+
+	if (fa->fa_node == 0)
+		return 0;
+
+	if (OF_getprop(fa->fa_node, "status", buf, sizeof(buf)) > 0 &&
+	    strcmp(buf, "disabled") == 0)
+		return 0;
+
+	/* cf_loc[0] is the "early" locator declared in files.riscv64. */
+	if (cf->cf_loc[0] == sc->sc_early)
+		return (*cf->cf_attach->ca_match)(parent, match, aux);
+	return 0;
+}
+
+/*
+ * Scan the /cpus FDT node and offer each child to the given match
+ * function (primary or secondary CPU selection).  Also counts nodes
+ * with device_type "cpu" into the global ncpusfound.
+ *
+ * sc_acells/sc_scells are temporarily replaced with the /cpus node's
+ * own #address-cells/#size-cells so mainbus_attach_node() decodes the
+ * hart "reg" properties correctly; the originals are restored before
+ * returning.  NOTE(review): ncpusfound is reset to 0 on every call,
+ * so the count reflects only the most recent scan — confirm that is
+ * intended when this runs twice (primary, then secondary pass).
+ */
+void
+mainbus_attach_cpus(struct device *self, cfmatch_t match)
+{
+	struct mainbus_softc *sc = (struct mainbus_softc *)self;
+	int node = OF_finddevice("/cpus");
+	int acells, scells;
+	char buf[32];
+
+	if (node == 0)
+		return;
+
+	acells = sc->sc_acells;
+	scells = sc->sc_scells;
+	sc->sc_acells = OF_getpropint(node, "#address-cells", 2);
+	sc->sc_scells = OF_getpropint(node, "#size-cells", 0);
+
+	ncpusfound = 0;
+	for (node = OF_child(node); node != 0; node = OF_peer(node)) {
+		if (OF_getprop(node, "device_type", buf, sizeof(buf)) > 0 &&
+		    strcmp(buf, "cpu") == 0)
+			ncpusfound++;
+
+#ifdef DEBUG_AUTOCONF
+		printf("scanning cpus subnode: %d\n", node);
+#endif
+		mainbus_attach_node(self, node, match);
+	}
+
+	sc->sc_acells = acells;
+	sc->sc_scells = scells;
+}
+
+/*
+ * Submatch function used for the first CPU attach pass.
+ * NOTE(review): this is currently byte-identical to
+ * mainbus_match_secondary() — it never compares the node's "reg"
+ * against the boot hart, so "primary" is simply whichever CPU node
+ * matches first.  Confirm whether boot-hart selection is still TODO.
+ */
+int
+mainbus_match_primary(struct device *parent, void *match, void *aux)
+{
+	struct fdt_attach_args *fa = aux;
+	struct cfdata *cf = match;
+
+	if (fa->fa_nreg < 1)
+		return 0;
+
+	return (*cf->cf_attach->ca_match)(parent, match, aux);
+}
+
+/*
+ * Submatch function used for the secondary-CPU attach pass: require a
+ * "reg" property, then defer to the driver's own match function.
+ * NOTE(review): identical to mainbus_match_primary(); it does not
+ * exclude the already-attached boot hart — verify cpu(4) handles the
+ * duplicate offer.
+ */
+int
+mainbus_match_secondary(struct device *parent, void *match, void *aux)
+{
+	struct fdt_attach_args *fa = aux;
+	struct cfdata *cf = match;
+
+	if (fa->fa_nreg < 1)
+		return 0;
+
+	return (*cf->cf_attach->ca_match)(parent, match, aux);
+}
+
+/*
+ * Offer the children of the /chosen FDT node to drivers; on FDT
+ * systems a firmware-provided framebuffer node lives under /chosen
+ * rather than in the main tree, so it is scanned separately here.
+ */
+void
+mainbus_attach_framebuffer(struct device *self)
+{
+	int node = OF_finddevice("/chosen");
+
+	if (node == 0)
+		return;
+
+	for (node = OF_child(node); node != 0; node = OF_peer(node))
+		mainbus_attach_node(self, node, NULL);
+}
--- /dev/null
+/*
+ * Copyright (c) 2016 Patrick Wildt <patrick@blueri.se>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MAINBUS_H__
+#define __MAINBUS_H__
+
+/* Passed as third arg to attach functions. */
+union mainbus_attach_args {
+ const char *ma_name;
+ struct fdt_attach_args ma_faa;
+};
+
+#endif /* __MAINBUS_H__ */
--- /dev/null
+/*
+ * Copyright (c) 2020, Mars Li <mengshi.li.mars@gmail.com>
+ * Copyright (c) 2020, Brian Bamsch <bbamsch@google.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/queue.h>
+#include <sys/malloc.h>
+#include <sys/device.h>
+#include <sys/evcount.h>
+
+#include <machine/bus.h>
+#include <machine/fdt.h>
+#include <machine/cpu.h>
+#include "riscv64/dev/riscv_cpu_intc.h"
+
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/fdt.h>
+
+/*
+ * This driver implements a version of the RISC-V PLIC with the actual layout
+ * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
+ *
+ * https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
+ *
+ * The largest number supported by devices marked as 'sifive,plic-1.0.0', is
+ * 1024, of which device 0 is defined as non-existent by the RISC-V Privileged
+ * Spec.
+ */
+
+#define PLIC_MAX_IRQS 1024
+
+#define PLIC_PRIORITY_BASE 0x000000U
+
+#define PLIC_ENABLE_BASE 0x002000U
+#define PLIC_ENABLE_STRIDE 0x80U
+#define IRQ_ENABLE 1
+#define IRQ_DISABLE 0
+
+#define PLIC_CONTEXT_BASE 0x200000U
+#define PLIC_CONTEXT_STRIDE 0x1000U
+#define PLIC_CONTEXT_THRESHOLD 0x0U
+#define PLIC_CONTEXT_CLAIM 0x4U
+
+#define PLIC_PRIORITY(n) (PLIC_PRIORITY_BASE + (n) * sizeof(uint32_t))
+#define PLIC_ENABLE(sc, n, h) \
+ (sc->sc_contexts[h].enable_offset + ((n) / 32) * sizeof(uint32_t))
+#define PLIC_THRESHOLD(sc, h) \
+ (sc->sc_contexts[h].context_offset + PLIC_CONTEXT_THRESHOLD)
+#define PLIC_CLAIM(sc, h) \
+ (sc->sc_contexts[h].context_offset + PLIC_CONTEXT_CLAIM)
+
+
+struct plic_intrhand {
+ TAILQ_ENTRY(plic_intrhand) ih_list; /* link on intrq list */
+ int (*ih_func)(void *); /* handler */
+ void *ih_arg; /* arg for handler */
+ int ih_ipl; /* IPL_* */
+ int ih_flags;
+ int ih_irq; /* IRQ number */
+ struct evcount ih_count;
+ char *ih_name;
+};
+
+/*
+ * One interrupt source could have multiple handler attached,
+ * each handler could have different priority level,
+ * we track the max and min priority level.
+ */
+struct plic_irqsrc {
+ TAILQ_HEAD(, plic_intrhand) is_list; /* handler list */
+ int is_irq_max; /* IRQ to mask while handling */
+ int is_irq_min; /* lowest IRQ when shared */
+};
+
+struct plic_context {
+ bus_size_t enable_offset;
+ bus_size_t context_offset;
+};
+
+struct plic_softc {
+ struct device sc_dev;
+ int sc_node;
+ bus_space_tag_t sc_iot;
+ bus_space_handle_t sc_ioh;
+ struct plic_irqsrc *sc_isrcs;
+ struct plic_context sc_contexts[MAXCPUS];
+ int sc_ndev;
+ struct interrupt_controller sc_intc;
+};
+struct plic_softc *plic = NULL;
+
+int plic_match(struct device *, void *, void *);
+void plic_attach(struct device *, struct device *, void *);
+int plic_irq_handler(void *);
+int plic_irq_dispatch(uint32_t, void *);
+void *plic_intr_establish(int, int, int (*)(void *),
+ void *, char *);
+void *plic_intr_establish_fdt(void *, int *, int, int (*)(void *),
+ void *, char *);
+void plic_intr_disestablish(void *);
+void plic_intr_route(void *, int, struct cpu_info *);
+
+void plic_splx(int);
+int plic_spllower(int);
+int plic_splraise(int);
+void plic_setipl(int);
+void plic_calc_mask(void);
+
+/* helper function */
+int plic_get_cpuid(int);
+void plic_set_priority(int, uint32_t);
+void plic_set_threshold(int, uint32_t);
+void plic_intr_route_grid(int, int, int);
+void plic_intr_enable_with_pri(int, uint32_t, int);
+void plic_intr_disable(int, int);
+
+
+struct cfattach plic_ca = {
+ sizeof(struct plic_softc), plic_match, plic_attach,
+};
+
+struct cfdriver plic_cd = {
+ NULL, "plic", DV_DULL
+};
+
+int plic_attached = 0;
+
+/*
+ * Match FDT nodes compatible with "riscv,plic0" or "sifive,plic-1.0.0".
+ * Only a single PLIC instance is supported (the driver state lives in
+ * the global 'plic'), so refuse to match once one has attached.
+ */
+int
+plic_match(struct device *parent, void *cfdata, void *aux)
+{
+	struct fdt_attach_args *faa = aux;
+
+	if (plic_attached)
+		return 0; // Only expect one instance of PLIC
+
+	return (OF_is_compatible(faa->fa_node, "riscv,plic0") ||
+	    OF_is_compatible(faa->fa_node, "sifive,plic-1.0.0"));
+}
+
+void
+plic_attach(struct device *parent, struct device *dev, void *aux)
+{
+ struct plic_softc *sc;
+ struct fdt_attach_args *faa;
+ uint32_t *cells;
+ uint32_t irq;
+ uint32_t cpu;
+ int node;
+ int len;
+ int ncell;
+ int context;
+ int i;
+ struct cpu_info *ci;
+ CPU_INFO_ITERATOR cii;
+
+ if (plic_attached)
+ return;
+
+ plic = sc = (struct plic_softc *)dev;
+ faa = (struct fdt_attach_args *)aux;
+
+ if (faa->fa_nreg < 1)
+ return;
+
+ sc->sc_node = node = faa->fa_node;
+ sc->sc_iot = faa->fa_iot;
+
+ /* determine number of devices sending intr to this ic */
+ sc->sc_ndev = OF_getpropint(faa->fa_node, "riscv,ndev", -1);
+ if (sc->sc_ndev < 0) {
+ printf(": unable to resolve number of devices\n");
+ return;
+ }
+
+ if (sc->sc_ndev >= PLIC_MAX_IRQS) {
+ printf(": invalid ndev (%d)\n", sc->sc_ndev);
+ return;
+ }
+
+ /* map interrupt controller to va space */
+ if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
+ faa->fa_reg[0].size, 0, &sc->sc_ioh))
+ panic("%s: bus_space_map failed!", __func__);
+
+ sc->sc_isrcs = mallocarray(PLIC_MAX_IRQS, sizeof(struct plic_irqsrc),
+ M_DEVBUF, M_ZERO | M_NOWAIT);
+
+ for (irq = 1; irq <= sc->sc_ndev; irq++) {
+ TAILQ_INIT(&sc->sc_isrcs[irq].is_list);
+ plic_set_priority(irq, 0);// Mask interrupt
+ }
+
+ /*
+ * Calculate the per-cpu enable and context register offsets.
+ *
+ * This is tricky for a few reasons. The PLIC divides the interrupt
+ * enable, threshold, and claim bits by "context"
+ *
+ * The tricky part is that the PLIC spec imposes no restrictions on how
+ * these contexts are laid out. So for example, there is no guarantee
+ * that each CPU will have both a machine mode and supervisor context,
+ * or that different PLIC implementations will organize the context
+ * registers in the same way. On top of this, we must handle the fact
+ * that cpuid != hartid, as they may have been renumbered during boot.
+ * We perform the following steps:
+ *
+ * 1. Examine the PLIC's "interrupts-extended" property and skip any
+ * entries that are not for supervisor external interrupts.
+ *
+ * 2. Walk up the device tree to find the corresponding CPU, using node
+ * property to identify the cpuid.
+ *
+ * 3. Calculate the register offsets based on the context number.
+ */
+ len = OF_getproplen(node, "interrupts-extended");
+ if (len <= 0) {
+ printf(": could not find interrupts-extended\n");
+ return;
+ }
+
+ cells = malloc(len, M_TEMP, M_WAITOK);
+ ncell = len / sizeof(*cells);
+ if (OF_getpropintarray(node, "interrupts-extended", cells, len) < 0) {
+ printf(": failed to read interrupts-extended\n");
+ free(cells, M_TEMP, len);
+ return;
+ }
+
+ for (i = 0, context = 0; i < ncell; i += 2, context++) {
+ /* Skip M-mode external interrupts */
+ if (cells[i + 1] != IRQ_EXTERNAL_SUPERVISOR)
+ continue;
+
+ /* Get the corresponding cpuid. */
+ cpu = plic_get_cpuid(OF_getnodebyphandle(cells[i]));
+ if (cpu < 0) {
+ printf(": invalid hart!\n");
+ free(cells, M_TEMP, len);
+ return;
+ }
+
+ /*
+ * Set the enable and context register offsets for the CPU.
+ *
+ * We assume S-mode handler always comes later than M-mode
+ * handler, but this might be a little fragile.
+ *
+ * XXX
+ * sifive spec doesn't list hart0 S-mode enable/contexts
+ * in its memory map, but QEMU emulates hart0 S-mode
+ * enable/contexts? Otherwise the following offset calculation
+ * would point to hart1 M-mode enable/contexts.
+ */
+ sc->sc_contexts[cpu].enable_offset = PLIC_ENABLE_BASE +
+ context * PLIC_ENABLE_STRIDE;
+ sc->sc_contexts[cpu].context_offset = PLIC_CONTEXT_BASE +
+ context * PLIC_CONTEXT_STRIDE;
+ }
+
+ free(cells, M_TEMP, len);
+
+ /* Set CPU interrupt priority thresholds to minimum */
+ CPU_INFO_FOREACH(cii, ci) {
+ plic_set_threshold(ci->ci_cpuid, 0);
+ }
+
+ plic_setipl(IPL_HIGH); /* XXX ??? */
+ plic_calc_mask();
+
+ /*
+ * insert self into the external interrupt handler entry in
+ * global interrupt handler vector
+ */
+ riscv_intc_intr_establish(IRQ_EXTERNAL_SUPERVISOR, 0,
+ plic_irq_handler, NULL, "plic0");
+
+ /*
+ * From now on, spl updates must be enforced through the plic, so
+ * the spl* routines are switched over accordingly.
+ */
+ riscv_set_intr_func(plic_splraise, plic_spllower,
+ plic_splx, plic_setipl);
+
+ plic_attached = 1;
+
+ /* enable external interrupt */
+ csr_set(sie, SIE_SEIE);
+
+ sc->sc_intc.ic_node = faa->fa_node;
+ sc->sc_intc.ic_cookie = sc;
+ sc->sc_intc.ic_establish = plic_intr_establish_fdt;
+ sc->sc_intc.ic_disestablish = plic_intr_disestablish;
+ sc->sc_intc.ic_route = plic_intr_route;
+ // sc->sc_intc.ic_cpu_enable = XXX Per-CPU Initialization?
+
+ riscv_intr_register_fdt(&sc->sc_intc);
+
+ printf("\n");
+}
+
+/*
+ * Claim/complete cycle for the supervisor external interrupt.
+ * Reads this hart's claim register, dispatches the handlers attached
+ * to the claimed source, then completes the claim so the PLIC can
+ * raise that source again.  Returns nonzero if a handler claimed it.
+ */
+int
+plic_irq_handler(void *frame)
+{
+	struct plic_softc *sc;
+	uint32_t pending;
+	uint32_t cpu;
+	int handled = 0;
+
+	sc = plic;
+	cpu = cpu_number();
+
+	pending = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
+	    PLIC_CLAIM(sc, cpu));
+
+	/*
+	 * Valid source IDs are 1..sc_ndev (ID 0 means "no interrupt"),
+	 * so only IDs strictly greater than sc_ndev are bogus.  The
+	 * previous ">=" rejected the highest valid source and, worse,
+	 * never completed its claim, wedging that source forever.
+	 */
+	if (pending > sc->sc_ndev) {
+		printf("plic0: pending %x\n", pending);
+		return 0;
+	}
+
+	if (pending) {
+		handled = plic_irq_dispatch(pending, frame);
+		/* Complete the claim by writing the source ID back. */
+		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
+		    PLIC_CLAIM(sc, cpu), pending);
+
+#ifdef DEBUG_INTC
+		if (handled == 0)
+			printf("plic handled == 0 on pending %d\n", pending);
+#endif /* DEBUG_INTC */
+	}
+
+	return handled;
+}
+
+/*
+ * Run the handler chain for PLIC source 'irq'.
+ * Raises spl to the source's recorded maximum IPL, invokes each
+ * registered handler (taking the kernel lock for non-MPSAFE ones),
+ * bumps the evcount of handlers that claim the interrupt, and
+ * restores the previous spl.  Returns the last handler's result.
+ */
+int
+plic_irq_dispatch(uint32_t irq, void *frame)
+{
+ int pri, s;
+ int handled = 0;
+ struct plic_softc* sc;
+ struct plic_intrhand *ih;
+ void *arg;
+
+#ifdef DEBUG_INTC
+ printf("plic irq %d fired\n", irq);
+#endif
+
+ sc = plic;
+ pri = sc->sc_isrcs[irq].is_irq_max;
+ s = plic_splraise(pri);
+ TAILQ_FOREACH(ih, &sc->sc_isrcs[irq].is_list, ih_list) {
+#ifdef MULTIPROCESSOR
+ int need_lock;
+
+ /* Non-MPSAFE handlers below IPL_SCHED need the kernel lock. */
+ if (ih->ih_flags & IPL_MPSAFE)
+ need_lock = 0;
+ else
+ need_lock = s < IPL_SCHED;
+
+ if (need_lock)
+ KERNEL_LOCK();
+#endif
+
+ /* A handler registered without an argument gets the trapframe. */
+ if (ih->ih_arg != 0)
+ arg = ih->ih_arg;
+ else
+ arg = frame;
+
+ /* XXX handlers currently run with interrupts kept disabled;
+ * re-enabling here to allow preemption is an open question. */
+ handled = ih->ih_func(arg);
+ if (handled)
+ ih->ih_count.ec_count++;
+
+#ifdef MULTIPROCESSOR
+ if (need_lock)
+ KERNEL_UNLOCK();
+#endif
+ }
+
+ plic_splx(s);
+ return handled;
+}
+
+/*
+ * Register an interrupt handler for PLIC source 'irqno' at 'level'
+ * (an IPL possibly or'ed with IPL_* flags).  'name' (may be NULL) is
+ * used for evcount bookkeeping.  Returns an opaque cookie for
+ * plic_intr_disestablish().
+ */
+void *
+plic_intr_establish(int irqno, int level, int (*func)(void *),
+    void *arg, char *name)
+{
+	struct plic_softc *sc = plic;
+	struct plic_intrhand *ih;
+	int sie;
+
+	if (irqno < 0 || irqno >= PLIC_MAX_IRQS)
+		panic("plic_intr_establish: bogus irqnumber %d: %s",
+		    irqno, name);
+
+	/*
+	 * Allocate before blocking interrupts: M_WAITOK may sleep,
+	 * which must not be done with interrupts disabled.
+	 */
+	ih = malloc(sizeof *ih, M_DEVBUF, M_WAITOK);
+	ih->ih_func = func;
+	ih->ih_arg = arg;
+	ih->ih_ipl = level & IPL_IRQMASK;
+	ih->ih_flags = level & IPL_FLAGMASK;
+	ih->ih_irq = irqno;
+	ih->ih_name = name;
+
+	sie = disable_interrupts();
+
+	TAILQ_INSERT_TAIL(&sc->sc_isrcs[irqno].is_list, ih, ih_list);
+
+	if (name != NULL)
+		evcount_attach(&ih->ih_count, name, &ih->ih_irq);
+
+#ifdef DEBUG_INTC
+	printf("%s irq %d level %d [%s]\n", __func__, irqno, level,
+	    name);
+#endif
+
+	/* Recompute per-source IPL bounds and program the hardware. */
+	plic_calc_mask();
+
+	restore_interrupts(sie);
+	return (ih);
+}
+
+/*
+ * FDT-flavoured registration wrapper: cell[0] carries the PLIC
+ * source number; everything else is forwarded unchanged.
+ */
+void *
+plic_intr_establish_fdt(void *cookie, int *cell, int level,
+    int (*func)(void *), void *arg, char *name)
+{
+	int irqno = cell[0];
+
+	return plic_intr_establish(irqno, level, func, arg, name);
+}
+
+/*
+ * Remove a handler registered by plic_intr_establish() and release
+ * its bookkeeping.
+ */
+void
+plic_intr_disestablish(void *cookie)
+{
+	struct plic_softc *sc = plic;
+	struct plic_intrhand *ih = cookie;
+	int irqno = ih->ih_irq;
+	int sie;
+
+	sie = disable_interrupts();
+	TAILQ_REMOVE(&sc->sc_isrcs[irqno].is_list, ih, ih_list);
+	if (ih->ih_name != NULL)
+		evcount_detach(&ih->ih_count);
+	/* Pass the real allocation size so free(9) accounts correctly. */
+	free(ih, M_DEVBUF, sizeof(*ih));
+	restore_interrupts(sie);
+}
+
+/*
+ * Route the source behind 'cookie' to (or away from) CPU 'ci'.
+ * Enabling reprograms priority/threshold from the source's minimum
+ * IPL; disabling only clears the enable bit.
+ */
+void
+plic_intr_route(void *cookie, int enable, struct cpu_info *ci)
+{
+	struct plic_softc *sc = plic;
+	struct plic_intrhand *ih = cookie;
+	int source = ih->ih_irq;
+	int hart = ci->ci_cpuid;
+
+	if (enable == IRQ_ENABLE)
+		plic_intr_enable_with_pri(source,
+		    sc->sc_isrcs[source].is_irq_min, hart);
+	else
+		plic_intr_route_grid(source, IRQ_DISABLE, hart);
+}
+
+/*
+ * Lower the system IPL to 'new'.
+ *
+ * Pending external (PLIC) interrupts need no special handling here:
+ * once plic_setipl() drops the hardware threshold, any pending
+ * source with priority above it is delivered again through a fresh
+ * claim/complete cycle.
+ */
+void
+plic_splx(int new)
+{
+ struct cpu_info *ci = curcpu();
+
+ /* Soft interrupts unmasked by the new level are run here. */
+ if (ci->ci_ipending & riscv_smask[new])
+ riscv_do_pending_intr(new);
+
+ plic_setipl(new);
+}
+
+/*
+ * Lower the IPL to 'new' and return the previous level; pending soft
+ * interrupts uncovered by the drop run via plic_splx().
+ */
+int
+plic_spllower(int new)
+{
+	int old = curcpu()->ci_cpl;
+
+	plic_splx(new);
+	return (old);
+}
+
+/*
+ * Raise the system IPL to at least 'new'; never lowers it.
+ * Returns the previous level.
+ */
+int
+plic_splraise(int new)
+{
+ struct cpu_info *ci = curcpu();
+ int old;
+ old = ci->ci_cpl;
+
+ /*
+ * plic_setipl() must always be called, even when the level does
+ * not rise: ci_cpl is updated before the hardware threshold, and
+ * if an interrupt fires in that window the hardware may be left
+ * out of sync with ci_cpl, defeating spl protection on the next
+ * splraise.
+ */
+ if (old > new)
+ new = old;
+
+ plic_setipl(new);
+
+ return (old);
+}
+
+/*
+ * Set the current IPL: record it in ci_cpl and program this CPU's
+ * PLIC threshold register to match.
+ */
+void
+plic_setipl(int new)
+{
+ struct cpu_info *ci = curcpu();
+ uint64_t sie;
+
+ /* disabling here only keeps the hardware in sync with ci->ci_cpl */
+ sie = disable_interrupts();
+ ci->ci_cpl = new;
+
+ /* higher values are higher priority */
+ plic_set_threshold(ci->ci_cpuid, new);
+
+ restore_interrupts(sie);
+}
+
+ /*
+ * update the max/min priority for an interrupt src,
+ * and enforce the updated priority to plic.
+ * this should be called whenever a new handler is attached.
+ */
+void
+plic_calc_mask(void)
+{
+ struct cpu_info *ci = curcpu();
+ struct plic_softc *sc = plic;
+ struct plic_intrhand *ih;
+ int irq;
+
+ /* PLIC irq 0 is reserved, thus we start from 1 */
+ for (irq = 1; irq <= sc->sc_ndev; irq++) {
+ int max = IPL_NONE;
+ int min = IPL_HIGH;
+ TAILQ_FOREACH(ih, &sc->sc_isrcs[irq].is_list, ih_list) {
+ if (ih->ih_ipl > max)
+ max = ih->ih_ipl;
+
+ if (ih->ih_ipl < min)
+ min = ih->ih_ipl;
+ }
+
+ if (max == IPL_NONE)
+ min = IPL_NONE;
+
+ if (sc->sc_isrcs[irq].is_irq_max == max &&
+ sc->sc_isrcs[irq].is_irq_min == min)
+ continue;
+
+ sc->sc_isrcs[irq].is_irq_max = max;
+ sc->sc_isrcs[irq].is_irq_min = min;
+
+ /* Enable interrupts at lower levels, clear -> enable */
+ /* Set interrupt priority/enable */
+ if (min != IPL_NONE) {
+ plic_intr_enable_with_pri(irq, min, ci->ci_cpuid);
+ } else {
+ plic_intr_disable(irq, ci->ci_cpuid);
+ }
+ }
+
+ plic_setipl(ci->ci_cpl);
+}
+
+/***************** helper functions *****************/
+
+/*
+ * OpenBSD records each CPU's device-tree node in its cpu_info, so
+ * the cpuid behind an interrupt-controller phandle can be found by
+ * walking up to the parent CPU node and matching it against every
+ * attached CPU.  Returns -1 when the phandle is not a valid
+ * cpu-intc node or no CPU matches.
+ */
+int
+plic_get_cpuid(int intc)
+{
+	struct cpu_info *ci;
+	CPU_INFO_ITERATOR cii;
+	uint32_t icells;
+	int cpu_node;
+
+	/* Sanity-check the interrupt controller layout. */
+	if (OF_getpropintarray(intc, "#interrupt-cells", &icells,
+	    sizeof(icells)) < 0) {
+		printf(": could not find #interrupt-cells for phandle %u\n",
+		    intc);
+		return (-1);
+	}
+
+	/*
+	 * The parent of the interrupt-controller is the CPU we are
+	 * interested in; find the cpu_info carrying that node.
+	 */
+	cpu_node = OF_parent(intc);
+	CPU_INFO_FOREACH(cii, ci) {
+		if (ci->ci_node == cpu_node)
+			return (ci->ci_cpuid);
+	}
+	return (-1);
+}
+
+/*
+ * Program the PLIC priority register for source 'irq'.
+ *
+ * The SiFive PLIC knows priorities 0-7 while OpenBSD uses IPLs 0-12
+ * (1-4 are soft interrupt levels and 12 is the IPI level; none of
+ * those may reach the PLIC).  IPLs 5-11 map onto PLIC priorities
+ * 1-7; any other value is written as 0, which disables the source.
+ */
+void
+plic_set_priority(int irq, uint32_t pri)
+{
+	struct plic_softc *sc = plic;
+	uint32_t regval;
+
+	if (pri > 4 && pri < 12)
+		regval = pri - 4;
+	else
+		regval = 0;	/* out of range: disable this source */
+
+	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
+	    PLIC_PRIORITY(irq), regval);
+}
+
+/*
+ * Program the per-context priority threshold for 'cpu'; sources at
+ * or below the threshold are masked.  IPLs translate with the same
+ * -4 offset used by plic_set_priority(): values below 4 unmask
+ * everything and out-of-range values clamp to the maximum.
+ */
+void
+plic_set_threshold(int cpu, uint32_t threshold)
+{
+	struct plic_softc *sc = plic;
+	uint32_t regval;
+
+	if (threshold >= 12)
+		regval = IPL_HIGH - 4;	/* XXX device-specific clamp */
+	else if (threshold >= 4)
+		regval = threshold - 4;	/* XXX device-specific offset */
+	else
+		regval = 0;		/* let everything through */
+
+	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
+	    PLIC_THRESHOLD(sc, cpu), regval);
+}
+
+/*
+ * Set or clear the enable bit that routes source 'irq' to the
+ * context of 'cpu'.  Source 0 is the reserved "no interrupt" ID and
+ * is never touched.
+ */
+void
+plic_intr_route_grid(int irq, int enable, int cpu)
+{
+	struct plic_softc *sc = plic;
+	uint32_t bits, bit;
+
+	if (irq == 0)
+		return;
+
+	KASSERT(cpu < MAXCPUS);
+
+	/* Read-modify-write the 32-bit enable word holding this source. */
+	bit = (1 << (irq % 32));
+	bits = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
+	    PLIC_ENABLE(sc, irq, cpu));
+	if (enable == IRQ_ENABLE)
+		bits |= bit;
+	else
+		bits &= ~bit;
+
+	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
+	    PLIC_ENABLE(sc, irq, cpu), bits);
+}
+
+/*
+ * Enable intr src 'irq' towards cpu 'cpu' by setting:
+ * - priority (the source's minimum IPL)
+ * - threshold (one below that, so the source can be delivered)
+ * - enable bit
+ *
+ * NOTE(review): with min_pri == 0 the unsigned min_pri-1 wraps to a
+ * huge value, which plic_set_threshold() clamps to its maximum
+ * (masking the CPU) — callers currently only pass min_pri != IPL_NONE,
+ * but confirm that invariant holds.
+ */
+void
+plic_intr_enable_with_pri(int irq, uint32_t min_pri, int cpu)
+{
+ plic_set_priority(irq, min_pri);
+ plic_set_threshold(cpu, min_pri-1);
+ plic_intr_route_grid(irq, IRQ_ENABLE, cpu);
+}
+
+/*
+ * Cut source 'irq' off from cpu 'cpu': zero its priority, raise the
+ * CPU threshold to IPL_HIGH and clear the enable bit.
+ *
+ * NOTE(review): raising the per-CPU threshold masks every source on
+ * that CPU, not just 'irq' — verify this side effect is intended.
+ */
+void
+plic_intr_disable(int irq, int cpu)
+{
+ plic_set_priority(irq, 0);
+ plic_set_threshold(cpu, IPL_HIGH);
+ plic_intr_route_grid(irq, IRQ_DISABLE, cpu);
+}
+/***************** end of helper functions *****************/
--- /dev/null
+/*
+ * Copyright (c) 2020, Mars Li <mengshi.li.mars@gmail.com>
+ * Copyright (c) 2020, Brian Bamsch <bbamsch@google.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _RISCV_PLIC_H_
+#define _RISCV_PLIC_H_
+
+#ifndef _LOCORE
+
+/* spl implementation backed by the PLIC per-CPU threshold registers */
+void plic_splx(int);
+int plic_spllower(int);
+int plic_splraise(int);
+void plic_setipl(int);
+
+/* handler (de)registration for PLIC interrupt sources */
+void *plic_intr_establish(int, int, int (*)(void *),
+ void *, char *);
+void *plic_intr_establish_fdt(void *, int *, int,
+ int (*)(void *), void *, char *);
+void plic_intr_disestablish(void *);
+
+#endif /* ! _LOCORE */
+
+#endif /* _RISCV_PLIC_H_*/
--- /dev/null
+/*
+ * Copyright (c) 2020, Mars Li <mengshi.li.mars@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/queue.h>
+#include <sys/malloc.h>
+#include <sys/device.h>
+#include <sys/evcount.h>
+
+#include <machine/bus.h>
+#include <machine/fdt.h>
+#include <machine/riscvreg.h>
+
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/fdt.h>
+
+#include "riscv64/dev/plic.h"
+#include "riscv_cpu_intc.h"
+
+/* Per-cause handler record for the CPU-local interrupt controller. */
+struct intrhand {
+ int (*ih_func)(void *); /* handler */
+ void *ih_arg; /* arg for handler */
+ int ih_irq; /* IRQ number */
+ char *ih_name; /* handler name, for diagnostics */
+};
+
+/* One slot per exception cause; NULL means no handler installed. */
+struct intrhand* intc_handler[INTC_NIRQS] = {NULL};
+struct interrupt_controller intc_ic;
+
+int riscv_intc_match(struct device *, void *, void *);
+void riscv_intc_attach(struct device *, struct device *, void *);
+
+void riscv_intc_irq_handler(void *);
+void *riscv_intc_intr_establish(int, int, int (*)(void *),
+ void *, char *);
+void riscv_intc_intr_disestablish(void *);
+
+
+/* autoconf glue */
+struct cfattach intc_ca = {
+ sizeof (struct device), riscv_intc_match, riscv_intc_attach
+};
+
+struct cfdriver intc_cd = {
+ NULL, "rv_cpu_intc", DV_DULL
+};
+
+/*
+ * Match the per-hart interrupt controller: the node must be an
+ * interrupt-controller compatible with "riscv,cpu-intc".
+ */
+int
+riscv_intc_match(struct device *parent, void *match, void *aux)
+{
+	struct fdt_attach_args *faa = aux;
+	int node = faa->fa_node;
+
+	if (OF_getproplen(node, "interrupt-controller") < 0)
+		return (0);
+
+	return (OF_is_compatible(node, "riscv,cpu-intc"));
+}
+
+/*
+ * Attach the CPU-local interrupt controller: initialize the soft
+ * interrupt masks, hook riscv_intc_irq_handler() in as the low-level
+ * trap-time handler and register the controller with the FDT
+ * interrupt framework.
+ */
+void
+riscv_intc_attach(struct device *parent, struct device *self, void *aux)
+{
+ struct fdt_attach_args *faa = aux;/* should only use fa_node field */
+
+ riscv_init_smask();
+
+ /* hook the intr_handler */
+ riscv_set_intr_handler(riscv_intc_irq_handler);
+
+ intc_ic.ic_node = faa->fa_node;
+ intc_ic.ic_cookie = &intc_ic;
+
+ /*
+ * only allow install/uninstall handler to/from global vector
+ * by calling riscv_intc_intr_establish/disestablish
+ */
+ intc_ic.ic_establish = NULL;
+ intc_ic.ic_disestablish = NULL;
+
+ riscv_intr_register_fdt(&intc_ic);
+
+ /*
+ * XXX right time to enable interrupts ??
+ * might need to postpone until autoconf is finished
+ */
+ enable_interrupts();
+}
+
+
+/*
+ * Global trap-time interrupt handler: look up the handler installed
+ * for the exception cause in tf_scause and run it.
+ */
+void
+riscv_intc_irq_handler(void *frame)
+{
+	struct trapframe *tf = (struct trapframe *)frame;
+	struct intrhand *ih;
+	int irq;
+
+	KASSERTMSG(tf->tf_scause & EXCP_INTR,
+	    "riscv_cpu_intr: wrong frame passed");
+
+	/* NOTE(review): assumes (scause & EXCP_MASK) < INTC_NIRQS. */
+	irq = (tf->tf_scause & EXCP_MASK);
+#ifdef DEBUG_INTC
+	printf("irq %d fired\n", irq);
+#endif
+
+	/*
+	 * Guard against causes with no registered handler: the old
+	 * code dereferenced the NULL slot and crashed on any spurious
+	 * cause.
+	 */
+	ih = intc_handler[irq];
+	if (ih == NULL)
+		return;
+
+	if (ih->ih_func(frame) == 0) {
+#ifdef DEBUG_INTC
+		printf("fail in handleing irq %d %s\n", irq, ih->ih_name);
+#endif /* DEBUG_INTC */
+	}
+}
+
+/*
+ * Install 'func' as the handler for exception cause 'irqno' in the
+ * global vector.  'dummy_level' is accepted only for interface
+ * symmetry with the other establish routines and is unused.
+ * Returns a cookie for riscv_intc_intr_disestablish().
+ */
+void *
+riscv_intc_intr_establish(int irqno, int dummy_level, int (*func)(void *),
+    void *arg, char *name)
+{
+	struct intrhand *ih;
+	int sie;
+
+	if (irqno < 0 || irqno >= INTC_NIRQS)
+		panic("intc_intr_establish: bogus irqnumber %d: %s",
+		    irqno, name);
+
+	/*
+	 * Allocate before blocking interrupts: M_WAITOK may sleep,
+	 * which must not be done with interrupts disabled.
+	 */
+	ih = malloc(sizeof(*ih), M_DEVBUF, M_WAITOK);
+	ih->ih_func = func;
+	ih->ih_arg = arg;
+	ih->ih_irq = irqno;
+	ih->ih_name = name;
+
+	sie = disable_interrupts();
+	intc_handler[irqno] = ih;
+#ifdef DEBUG_INTC
+	printf("\nintc_intr_establish irq %d [%s]\n", irqno, name);
+#endif
+	restore_interrupts(sie);
+	return (ih);
+}
+
+/*
+ * Remove a handler installed by riscv_intc_intr_establish().
+ */
+void
+riscv_intc_intr_disestablish(void *cookie)
+{
+	struct intrhand *ih = cookie;
+	int irqno = ih->ih_irq;
+	int sie;
+
+	sie = disable_interrupts();
+	intc_handler[irqno] = NULL;
+	restore_interrupts(sie);
+
+	/* Pass the real allocation size so free(9) accounts correctly;
+	 * freeing after interrupts are re-enabled is safe since the
+	 * slot has already been cleared. */
+	free(ih, M_DEVBUF, sizeof(*ih));
+}
--- /dev/null
+/*
+ * Copyright (c) 2020 Mars Li <mengshi.li.mars@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _RISCV_CPU_INTC_H_
+#define _RISCV_CPU_INTC_H_
+
+/* install a handler for an exception cause in the global vector */
+void *riscv_intc_intr_establish(int, int, int (*func)(void *),
+ void *, char *);
+/* NOTE(review): no definition of the _fdt variant is visible in
+ * riscv_cpu_intc.c — confirm it is implemented somewhere. */
+void *riscv_intc_intr_establish_fdt(void *, int *, int, int (*)(void *),
+ void *, char *);
+void riscv_intc_intr_disestablish(void *cookie);
+
+#endif /* _RISCV_CPU_INTC_H_ */
+
+
--- /dev/null
+/*
+ * Copyright (c) 2016 Patrick Wildt <patrick@blueri.se>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/device.h>
+#include <sys/malloc.h>
+
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/fdt.h>
+
+#include <riscv64/fdt.h>
+#include <riscv64/dev/simplebusvar.h>
+
+int simplebus_match(struct device *, void *, void *);
+void simplebus_attach(struct device *, struct device *, void *);
+
+void simplebus_attach_node(struct device *, int);
+/* bus_space map hook applying the node's "ranges" translation */
+int simplebus_bs_map(bus_space_tag_t, bus_addr_t, bus_size_t, int,
+ bus_space_handle_t *);
+/* bus_dma load hook applying the node's "dma-ranges" translation */
+int simplebus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
+ bus_size_t, struct proc *, int, paddr_t *, int *, int);
+
+struct cfattach simplebus_ca = {
+ sizeof(struct simplebus_softc), simplebus_match, simplebus_attach
+};
+
+struct cfdriver simplebus_cd = {
+ NULL, "simplebus", DV_DULL
+};
+
+/*
+ * Simplebus is a generic bus with no special casings: match any
+ * valid node compatible with "simple-bus".
+ */
+int
+simplebus_match(struct device *parent, void *cfdata, void *aux)
+{
+	struct fdt_attach_args *fa = (struct fdt_attach_args *)aux;
+
+	return (fa->fa_node != 0 &&
+	    OF_is_compatible(fa->fa_node, "simple-bus"));
+}
+
+/*
+ * Attach: capture the bus geometry (#address-cells/#size-cells for
+ * this bus and its parent), wrap the parent's bus_space and bus_dma
+ * tags with translating hooks, cache the "ranges"/"dma-ranges"
+ * properties, then attach children in two passes ("early" drivers
+ * first, then the rest).
+ */
+void
+simplebus_attach(struct device *parent, struct device *self, void *aux)
+{
+ struct simplebus_softc *sc = (struct simplebus_softc *)self;
+ struct fdt_attach_args *fa = (struct fdt_attach_args *)aux;
+ char name[32];
+ int node;
+
+ sc->sc_node = fa->fa_node;
+ sc->sc_iot = fa->fa_iot;
+ sc->sc_dmat = fa->fa_dmat;
+ sc->sc_acells = OF_getpropint(sc->sc_node, "#address-cells",
+ fa->fa_acells);
+ sc->sc_scells = OF_getpropint(sc->sc_node, "#size-cells",
+ fa->fa_scells);
+ sc->sc_pacells = fa->fa_acells;
+ sc->sc_pscells = fa->fa_scells;
+
+ if (OF_getprop(sc->sc_node, "name", name, sizeof(name)) > 0) {
+ name[sizeof(name) - 1] = 0;
+ printf(": \"%s\"", name);
+ }
+
+ printf("\n");
+
+ /* Wrap the parent bus_space tag so mappings go via simplebus_bs_map. */
+ memcpy(&sc->sc_bus, sc->sc_iot, sizeof(sc->sc_bus));
+ sc->sc_bus.bus_private = sc;
+ sc->sc_bus._space_map = simplebus_bs_map;
+
+ sc->sc_rangeslen = OF_getproplen(sc->sc_node, "ranges");
+ if (sc->sc_rangeslen > 0 &&
+ (sc->sc_rangeslen % sizeof(uint32_t)) == 0) {
+ sc->sc_ranges = malloc(sc->sc_rangeslen, M_TEMP, M_WAITOK);
+ OF_getpropintarray(sc->sc_node, "ranges", sc->sc_ranges,
+ sc->sc_rangeslen);
+ }
+
+ /* Likewise wrap the parent bus_dma tag for "dma-ranges" translation. */
+ memcpy(&sc->sc_dma, sc->sc_dmat, sizeof(sc->sc_dma));
+ sc->sc_dma._dmamap_load_buffer = simplebus_dmamap_load_buffer;
+ sc->sc_dma._cookie = sc;
+
+ sc->sc_dmarangeslen = OF_getproplen(sc->sc_node, "dma-ranges");
+ if (sc->sc_dmarangeslen > 0 &&
+ (sc->sc_dmarangeslen % sizeof(uint32_t)) == 0) {
+ sc->sc_dmaranges = malloc(sc->sc_dmarangeslen,
+ M_TEMP, M_WAITOK);
+ OF_getpropintarray(sc->sc_node, "dma-ranges",
+ sc->sc_dmaranges, sc->sc_dmarangeslen);
+ }
+
+ /* Scan the whole tree. */
+ sc->sc_early = 1;
+ for (node = OF_child(sc->sc_node); node; node = OF_peer(node))
+ simplebus_attach_node(self, node);
+
+ sc->sc_early = 0;
+ for (node = OF_child(sc->sc_node); node; node = OF_peer(node))
+ simplebus_attach_node(self, node);
+}
+
+/*
+ * Only forward a match call to drivers whose "early" locator agrees
+ * with the attach pass the bus is currently running.
+ */
+int
+simplebus_submatch(struct device *self, void *match, void *aux)
+{
+	struct simplebus_softc *sc = (struct simplebus_softc *)self;
+	struct cfdata *cf = match;
+
+	if (cf->cf_loc[0] != sc->sc_early)
+		return 0;
+	return (*cf->cf_attach->ca_match)(self, match, aux);
+}
+
+/*
+ * Autoconf print routine: describe an unconfigured child node by its
+ * "name" property (falling back to the node number).
+ */
+int
+simplebus_print(void *aux, const char *pnp)
+{
+	struct fdt_attach_args *fa = aux;
+	char name[32];
+
+	if (pnp == NULL)
+		return (QUIET);
+
+	if (OF_getprop(fa->fa_node, "name", name, sizeof(name)) > 0) {
+		name[sizeof(name) - 1] = 0;
+		printf("\"%s\"", name);
+	} else {
+		printf("node %u", fa->fa_node);
+	}
+
+	printf(" at %s", pnp);
+
+	return (UNCONF);
+}
+
+/*
+ * Look for a driver that wants to be attached to this node.
+ * Builds an fdt_attach_args from the node's "reg", "interrupts" and
+ * "dma-coherent" properties and runs autoconf matching; nodes
+ * already attached in the early pass are skipped in the late pass.
+ */
+void
+simplebus_attach_node(struct device *self, int node)
+{
+ struct simplebus_softc *sc = (struct simplebus_softc *)self;
+ struct fdt_attach_args fa;
+ char buf[32];
+ int i, len, line;
+ uint32_t *cell, *reg;
+ struct device *child;
+
+ /* Only nodes with a "compatible" property can match a driver. */
+ if (OF_getproplen(node, "compatible") <= 0)
+ return;
+
+ if (OF_getprop(node, "status", buf, sizeof(buf)) > 0 &&
+ strcmp(buf, "disabled") == 0)
+ return;
+
+ /* Skip if already attached early. */
+ for (i = 0; i < nitems(sc->sc_early_nodes); i++) {
+ if (sc->sc_early_nodes[i] == node)
+ return;
+ if (sc->sc_early_nodes[i] == 0)
+ break;
+ }
+
+ memset(&fa, 0, sizeof(fa));
+ fa.fa_name = "";
+ fa.fa_node = node;
+ fa.fa_iot = &sc->sc_bus;
+ fa.fa_dmat = &sc->sc_dma;
+ fa.fa_acells = sc->sc_acells;
+ fa.fa_scells = sc->sc_scells;
+
+ /*
+ * Decode "reg" into fdt_reg entries: each line is sc_acells
+ * address cells followed by sc_scells size cells (cells are
+ * 32-bit, big-endian-ordered words).
+ */
+ len = OF_getproplen(node, "reg");
+ line = (sc->sc_acells + sc->sc_scells) * sizeof(uint32_t);
+ if (len > 0 && line > 0 && (len % line) == 0) {
+ reg = malloc(len, M_TEMP, M_WAITOK);
+ OF_getpropintarray(node, "reg", reg, len);
+
+ fa.fa_reg = malloc((len / line) * sizeof(struct fdt_reg),
+ M_DEVBUF, M_WAITOK | M_ZERO);
+ fa.fa_nreg = (len / line);
+
+ for (i = 0, cell = reg; i < len / line; i++) {
+ if (sc->sc_acells >= 1)
+ fa.fa_reg[i].addr = cell[0];
+ if (sc->sc_acells == 2) {
+ fa.fa_reg[i].addr <<= 32;
+ fa.fa_reg[i].addr |= cell[1];
+ }
+ cell += sc->sc_acells;
+ if (sc->sc_scells >= 1)
+ fa.fa_reg[i].size = cell[0];
+ if (sc->sc_scells == 2) {
+ fa.fa_reg[i].size <<= 32;
+ fa.fa_reg[i].size |= cell[1];
+ }
+ cell += sc->sc_scells;
+ }
+
+ free(reg, M_TEMP, len);
+ }
+
+ len = OF_getproplen(node, "interrupts");
+ if (len > 0 && (len % sizeof(uint32_t)) == 0) {
+ fa.fa_intr = malloc(len, M_DEVBUF, M_WAITOK);
+ fa.fa_nintr = len / sizeof(uint32_t);
+
+ OF_getpropintarray(node, "interrupts", fa.fa_intr, len);
+ }
+
+ /* Coherent children get a private dma tag with BUS_DMA_COHERENT. */
+ if (OF_getproplen(node, "dma-coherent") >= 0) {
+ fa.fa_dmat = malloc(sizeof(sc->sc_dma),
+ M_DEVBUF, M_WAITOK | M_ZERO);
+ memcpy(fa.fa_dmat, &sc->sc_dma, sizeof(sc->sc_dma));
+ fa.fa_dmat->_flags |= BUS_DMA_COHERENT;
+ }
+
+#ifdef DEBUG_AUTOCONF
+ if (OF_getprop(fa.fa_node, "name", buf, sizeof(buf)) > 0)
+ printf("\ncurrent parent: %s, current node: %d-%s\n", self->dv_xname, fa.fa_node, buf);
+#endif
+
+ child = config_found_sm(self, &fa, sc->sc_early ? NULL :
+ simplebus_print, simplebus_submatch);
+
+ /* Record nodes that we attach early. */
+ if (child && sc->sc_early) {
+ for (i = 0; i < nitems(sc->sc_early_nodes); i++) {
+ if (sc->sc_early_nodes[i] != 0)
+ continue;
+ sc->sc_early_nodes[i] = node;
+ break;
+ }
+ }
+
+ free(fa.fa_reg, M_DEVBUF, fa.fa_nreg * sizeof(struct fdt_reg));
+ free(fa.fa_intr, M_DEVBUF, fa.fa_nintr * sizeof(uint32_t));
+}
+
+/*
+ * Translate memory address if needed.
+ * Walks the cached "ranges" property: each entry maps a child
+ * address window (sc_acells cells) onto a parent address
+ * (sc_pacells cells) with a size (sc_scells cells).  The first
+ * window fully containing [bpa, bpa+size) wins; with no parent or an
+ * empty "ranges" the address passes through untranslated.
+ */
+int
+simplebus_bs_map(bus_space_tag_t t, bus_addr_t bpa, bus_size_t size,
+ int flag, bus_space_handle_t *bshp)
+{
+ struct simplebus_softc *sc = t->bus_private;
+ uint64_t addr, rfrom, rto, rsize;
+ uint32_t *range;
+ int parent, rlen, rone;
+
+ addr = bpa;
+ parent = OF_parent(sc->sc_node);
+ if (parent == 0)
+ return bus_space_map(sc->sc_iot, addr, size, flag, bshp);
+
+ /* Negative length: no "ranges" property at all -> cannot translate. */
+ if (sc->sc_rangeslen < 0)
+ return EINVAL;
+ /* Empty "ranges": identity mapping onto the parent. */
+ if (sc->sc_rangeslen == 0)
+ return bus_space_map(sc->sc_iot, addr, size, flag, bshp);
+
+ rlen = sc->sc_rangeslen / sizeof(uint32_t);
+ rone = sc->sc_pacells + sc->sc_acells + sc->sc_scells;
+
+ /* For each range. */
+ for (range = sc->sc_ranges; rlen >= rone; rlen -= rone, range += rone) {
+ /* Extract from and size, so we can see if we fit. */
+ rfrom = range[0];
+ if (sc->sc_acells == 2)
+ rfrom = (rfrom << 32) + range[1];
+ rsize = range[sc->sc_acells + sc->sc_pacells];
+ if (sc->sc_scells == 2)
+ rsize = (rsize << 32) +
+ range[sc->sc_acells + sc->sc_pacells + 1];
+
+ /* Try next, if we're not in the range. */
+ if (addr < rfrom || (addr + size) > (rfrom + rsize))
+ continue;
+
+ /* All good, extract to address and translate. */
+ rto = range[sc->sc_acells];
+ if (sc->sc_pacells == 2)
+ rto = (rto << 32) + range[sc->sc_acells + 1];
+
+ addr -= rfrom;
+ addr += rto;
+
+ return bus_space_map(sc->sc_iot, addr, size, flag, bshp);
+ }
+
+ /* No window covered the request. */
+ return ESRCH;
+}
+
+/*
+ * bus_dma load hook: let the parent tag load the buffer, then apply
+ * this node's "dma-ranges" translation to every new segment.
+ */
+int
+simplebus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
+    bus_size_t buflen, struct proc *p, int flags, paddr_t *lastaddrp,
+    int *segp, int first)
+{
+	struct simplebus_softc *sc = t->_cookie;
+	int rlen, rone, seg;
+	int firstseg = *segp;
+	int error;
+
+	error = sc->sc_dmat->_dmamap_load_buffer(sc->sc_dmat, map, buf, buflen,
+	    p, flags, lastaddrp, segp, first);
+	if (error)
+		return error;
+
+	if (sc->sc_dmaranges == NULL)
+		return 0;
+
+	rlen = sc->sc_dmarangeslen / sizeof(uint32_t);
+	rone = sc->sc_pacells + sc->sc_acells + sc->sc_scells;
+
+	/* For each segment. */
+	for (seg = firstseg; seg <= *segp; seg++) {
+		uint64_t addr, size, rfrom, rto, rsize;
+		uint32_t *range;
+		int n;
+
+		addr = map->dm_segs[seg].ds_addr;
+		size = map->dm_segs[seg].ds_len;
+
+		/*
+		 * For each range.  The scan must restart per segment:
+		 * the old code kept decrementing the shared 'rlen'
+		 * across segments while resetting 'range', so the
+		 * ranges were exhausted after the first segment and
+		 * pointer/counter fell out of sync.
+		 */
+		for (n = rlen, range = sc->sc_dmaranges; n >= rone;
+		    n -= rone, range += rone) {
+			/* Extract from and size, so we can see if we fit. */
+			rfrom = range[sc->sc_acells];
+			if (sc->sc_pacells == 2)
+				rfrom = (rfrom << 32) + range[sc->sc_acells + 1];
+
+			rsize = range[sc->sc_acells + sc->sc_pacells];
+			if (sc->sc_scells == 2)
+				rsize = (rsize << 32) +
+				    range[sc->sc_acells + sc->sc_pacells + 1];
+
+			/* Try next, if we're not in the range. */
+			if (addr < rfrom || (addr + size) > (rfrom + rsize))
+				continue;
+
+			/* All good, extract to address and translate. */
+			rto = range[0];
+			if (sc->sc_acells == 2)
+				rto = (rto << 32) + range[1];
+
+			map->dm_segs[seg].ds_addr -= rfrom;
+			map->dm_segs[seg].ds_addr += rto;
+			break;
+		}
+	}
+
+	return 0;
+}
--- /dev/null
+/*
+ * Copyright (c) 2016 Patrick Wildt <patrick@blueri.se>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* Per-instance state for a simple-bus node. */
+struct simplebus_softc {
+ struct device sc_dev;
+ int sc_node; /* OF node of this bus */
+ bus_space_tag_t sc_iot; /* parent bus_space tag */
+ bus_dma_tag_t sc_dmat; /* parent bus_dma tag */
+ int sc_acells; /* #address-cells of this bus */
+ int sc_scells; /* #size-cells of this bus */
+ int sc_pacells; /* parent #address-cells */
+ int sc_pscells; /* parent #size-cells */
+ struct bus_space sc_bus; /* our tag; wraps sc_iot */
+ struct machine_bus_dma_tag sc_dma; /* our tag; wraps sc_dmat */
+ int *sc_ranges; /* cached "ranges" property */
+ int sc_rangeslen; /* its length; <0 if absent */
+ int *sc_dmaranges; /* cached "dma-ranges" property */
+ int sc_dmarangeslen; /* its length; <0 if absent */
+ int sc_early; /* current attach pass (1 = early) */
+ int sc_early_nodes[64]; /* nodes attached in the early pass */
+};
+
+extern void simplebus_attach(struct device *, struct device *, void *);
--- /dev/null
+/*-
+ * Copyright (c) 2015-2017 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * Portions of this software were developed by SRI International and the
+ * University of Cambridge Computer Laboratory under DARPA/AFRL contract
+ * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Portions of this software were developed by the University of Cambridge
+ * Computer Laboratory as part of the CTSRD Project, with support from the
+ * UK Higher Education Innovation Fund (HEIF).
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * RISC-V Timer
+ */
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/queue.h>
+#include <sys/malloc.h>
+#include <sys/device.h>
+#include <sys/kernel.h>
+#include <sys/timetc.h>
+
+#include <machine/intr.h>
+#include <machine/bus.h>
+#include <machine/cpufunc.h>
+#include <machine/fdt.h>
+#include <machine/sbi.h>
+
+#include "riscv_cpu_intc.h"
+
+#include <dev/ofw/fdt.h>
+#include <dev/ofw/openfirm.h>
+
+
+#define TIMER_COUNTS 0x00
+#define TIMER_MTIMECMP(cpu) (cpu * 8)
+#define TIMER_FREQUENCY 10 * 1000 * 1000 /* RISC-V time clock */
+
unsigned riscv_timer_get_timecount(struct timecounter *);

/*
 * timecounter(9) descriptor for the RISC-V time CSR.
 * tc_frequency and tc_priv are filled in by riscv_timer_attach().
 */
static struct timecounter riscv_timer_timecount = {
	.tc_name = "RISC-V Timecounter",
	.tc_get_timecount = riscv_timer_get_timecount,
	.tc_poll_pps = NULL,
	.tc_counter_mask = ~0u,		/* full 32-bit mask */
	.tc_frequency = 0,		/* set from timebase at attach */
	.tc_quality = 1000,
	.tc_priv = NULL,		/* set to the softc at attach */
};
+
/* Per-cpu tick state, indexed by CPU_INFO_UNIT(). */
struct riscv_timer_pcpu_softc {
	uint64_t pc_nexttickevent;	/* next tick deadline (cycle count) */
	uint64_t pc_nextstatevent;	/* next stat event (cycle count) */
	u_int32_t pc_ticks_err_sum;	/* tick error accumulator */
};

/* Driver state; a single instance is expected (see riscv_timer_sc). */
struct riscv_timer_softc {
	struct device sc_dev;
	int sc_node;

	struct riscv_timer_pcpu_softc sc_pstat[MAXCPUS];

	u_int32_t sc_ticks_err_cnt;	/* sc_ticks_per_second % hz */
	u_int32_t sc_ticks_per_second;	/* timebase frequency, Hz (sc_clkfreq) */
	u_int32_t sc_ticks_per_intr;	/* sc_ticks_per_second / hz */
	u_int32_t sc_statvar;
	u_int32_t sc_statmin;

	void *sc_ih;			/* timer interrupt handle */
};
+
/* Single-instance guard / global handle; set once attach succeeds. */
static struct riscv_timer_softc *riscv_timer_sc = NULL;

int riscv_timer_get_freq();
int riscv_timer_match(struct device *, void *, void *);
void riscv_timer_attach(struct device *, struct device *, void *);
int riscv_timer_intr(void *);
void riscv_timer_cpu_initclocks();
void riscv_timer_delay(u_int);
void riscv_timer_setstatclockrate(int);
void riscv_timer_startclock();

/* autoconf(9) glue */
struct cfattach timer_ca = {
	sizeof (struct riscv_timer_softc), riscv_timer_match,
	riscv_timer_attach
};

struct cfdriver timer_cd = {
	NULL, "riscv_timer", DV_DULL
};
+
/* Read the hart's current time value via rdtime(). */
static inline uint64_t
get_cycles()
{
	return (rdtime());
}
+
/*
 * Return the current counter value for the given timer instance.
 * The softc argument is accepted for interface symmetry but unused;
 * the value read by get_cycles() is not per-device state.
 */
long
get_counts(struct riscv_timer_softc *sc)
{
	return ((long)get_cycles());
}
+
+unsigned
+riscv_timer_get_timecount(struct timecounter *tc)
+{
+ struct riscv_timer_softc *sc;
+
+ sc = tc->tc_priv;
+
+ return (get_counts(sc));
+}
+
/*
 * Read the "timebase-frequency" property (Hz) from the /cpus node of
 * the device tree.
 *
 * Returns the frequency, or 0 if the node or a 32-bit property cannot
 * be found.
 */
int
riscv_timer_get_freq(void)
{
	int node, len;

	node = OF_finddevice("/cpus");
	if (node == -1) {
		printf("Can't find cpus node.\n");
		return (0);
	}

	/* The property must be a single 32-bit cell. */
	len = OF_getproplen(node, "timebase-frequency");
	if (len != 4) {
		printf("Can't find timebase-frequency property.\n");
		return (0);
	}

	return OF_getpropint(node, "timebase-frequency", 0);
}
+
+int
+riscv_timer_match(struct device *parent, void *cfdata, void *aux)
+{
+ if (riscv_timer_sc) //already attached
+ return 0;
+
+ int node;
+ // struct fdt_attach_args *fa = (struct fdt_attach_args *)aux;
+
+ /*
+ * return 1 if:
+ * we can find valid "timebase-frequency" property from cpus
+ */
+ if ( (node = OF_finddevice("/cpus")) == 0)
+ return 0;
+
+ return (OF_getproplen(node, "timebase-frequency") == 4);//32bit uint
+}
+
/*
 * Attach the timer: resolve the timebase frequency, register the
 * clock hooks and the timecounter.  Only the first instance attaches.
 */
void
riscv_timer_attach(struct device *parent, struct device *self, void *aux)
{
	struct riscv_timer_softc *sc = (struct riscv_timer_softc *)self;

	if (riscv_timer_sc)/* already attached */
		return;

	sc->sc_ticks_per_second = riscv_timer_get_freq();
	if (sc->sc_ticks_per_second == 0) {
		printf("Failed to resolve RISC-V Timer timebase\n");
		return;
	}
	printf(": tick rate %d KHz\n", sc->sc_ticks_per_second/1000);

	riscv_timer_sc = sc;
	stathz = 0;	/* set to hz later, in riscv_timer_cpu_initclocks() */

	riscv_clock_register(riscv_timer_cpu_initclocks, riscv_timer_delay,
	    riscv_timer_setstatclockrate, riscv_timer_startclock);

	/* Complete the timecounter descriptor and publish it. */
	riscv_timer_timecount.tc_frequency = sc->sc_ticks_per_second;
	riscv_timer_timecount.tc_priv = sc;

	tc_init(&riscv_timer_timecount);
}
+
+
/*
 * Minimum distance (in timebase ticks) a programmed deadline is placed
 * in the future; grown adaptively when deadlines are missed.
 */
int timer_mindelta = 0; /* what should this be? */

/*
 * Supervisor timer interrupt handler: run hardclock() if the
 * interrupted spl allows it, then re-arm the SBI timer, retrying
 * until the programmed deadline actually lies in the future.
 */
int
riscv_timer_intr(void *frame)
{
	struct riscv_timer_softc *sc;
	uint64_t next, now, newnow;
	int timermissed = 0;	/* passes through the re-arm loop */
	u_int new_hz = 100;	/* fixed re-arm rate */
	int s;

#ifdef DEBUG_TIMER
	printf("RISC-V Timer Interrupt\n");
#endif

	sc = riscv_timer_sc;

	s = splclock();

	if (s < IPL_CLOCK)
		hardclock(frame);

	// XXX should base timer interval from the expected
	// time of expiration, not 'now'
	now = get_cycles();
	next = now + ((sc->sc_ticks_per_second / new_hz));

	/*
	 * NOTE(review): 'next' computed above is unconditionally
	 * overwritten with newnow + timer_mindelta on the first loop
	 * pass; with timer_mindelta == 0 that first deadline is already
	 * in the past, forcing at least one retry.  Looks unintended --
	 * confirm.
	 */
	do {
		newnow = get_cycles();
		if (next < (newnow + timer_mindelta)) {
			/* slowly scale up miss timer. */
			if (timermissed > 1)
				timer_mindelta ++;
		}
		next = newnow + timer_mindelta;
		sbi_set_timer(next);
		/*
		 * NOTE(review): this sets the SIE_STIE bit in the sip
		 * (pending) CSR; the other arming paths in this file use
		 * sie (enable).  Confirm sip vs sie is intended here.
		 */
		csr_set(sip, SIE_STIE);

		/* re-read current time to verify
		 * time hasn't been set into the past
		 */

		newnow = get_cycles();
		/* if we missed more than once, increment the min period */
		timermissed++;
	} while (next <= newnow);

	splx(s);
	return (1);	/* handled */
}
+
+void
+riscv_timer_cpu_initclocks()
+{
+ struct riscv_timer_softc *sc = timer_cd.cd_devs[0];
+ struct riscv_timer_pcpu_softc *pc =
+ &sc->sc_pstat[CPU_INFO_UNIT(curcpu())];
+ uint64_t next;
+
+ stathz = hz;
+ profhz = hz * 10;
+
+ riscv_timer_setstatclockrate(stathz);
+
+ sc->sc_ticks_per_intr = sc->sc_ticks_per_second / hz;
+ sc->sc_ticks_err_cnt = sc->sc_ticks_per_second % hz;
+ pc->pc_ticks_err_sum = 0;
+
+ /* configure virtual timer interupt */
+ sc->sc_ih = riscv_intc_intr_establish(IRQ_TIMER_SUPERVISOR, 0,
+ riscv_timer_intr, NULL, "riscv_timer");
+
+ next = get_cycles() + sc->sc_ticks_per_intr;
+ pc->pc_nexttickevent = pc->pc_nextstatevent = next;
+
+ sbi_set_timer(next);
+ csr_set(sie, SIE_STIE);
+}
+
+void
+riscv_timer_delay(u_int usec)
+{
+ int64_t counts, counts_per_usec;
+ uint64_t first, last;
+
+ /*
+ * Check the timers are setup, if not just
+ * use a for loop for the meantime
+ */
+ if (riscv_timer_sc == NULL) {
+ for (; usec > 0; usec--)
+ for (counts = 200; counts > 0; counts--)
+ /*
+ * Prevent the compiler from optimizing
+ * out the loop
+ */
+ cpufunc_nullop();
+ return;
+ }
+
+ /* Get the number of times to count */
+ counts_per_usec = ((riscv_timer_timecount.tc_frequency / 1000000) + 1);
+
+ /*
+ * Clamp the timeout at a maximum value (about 32 seconds with
+ * a 66MHz clock). *Nobody* should be delay()ing for anywhere
+ * near that length of time and if they are, they should be hung
+ * out to dry.
+ */
+ if (usec >= (0x80000000U / counts_per_usec))
+ counts = (0x80000000U / counts_per_usec) - 1;
+ else
+ counts = usec * counts_per_usec;
+
+ first = get_counts(riscv_timer_sc);
+
+ while (counts > 0) {
+ last = get_counts(riscv_timer_sc);
+ counts -= (int64_t)(last - first);
+ first = last;
+ }
+}
+
/*
 * Stat clock rate hook.  Intentionally a no-op: the RISC-V time
 * counter rate is fixed, so there is nothing to reprogram.
 */
void
riscv_timer_setstatclockrate(int newhz)
{
}
+
+/* is only called from secondary cpu */
+void
+riscv_timer_startclock()
+{
+ struct riscv_timer_softc *sc = timer_cd.cd_devs[0];
+ struct riscv_timer_pcpu_softc *pc =
+ &sc->sc_pstat[CPU_INFO_UNIT(curcpu())];
+ uint64_t nextevent;
+
+ nextevent = get_cycles() + sc->sc_ticks_per_intr;
+ pc->pc_nexttickevent = pc->pc_nextstatevent = nextevent;
+
+ riscv_intr_route(sc->sc_ih, 1, curcpu());
+
+ sbi_set_timer(nextevent);
+ csr_set(sie, SIE_STIE);
+}
+
+/*
+ * called at early mainbus_attach, to provide delay func
+ * before timer and interrupt is ready
+ */
+void
+riscv_timer_init(void)
+{
+ uint64_t cntfrq = 0;
+
+ cntfrq = riscv_timer_get_freq();
+
+ if (cntfrq != 0) {
+ riscv_clock_register(NULL, riscv_timer_delay, NULL, NULL);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2020 Mars Li <mengshi.li.mars@gmail.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
#ifndef _MACHINE_TIMER_H_
#define _MACHINE_TIMER_H_

/* autoconf(9) match hook for the RISC-V timer driver. */
int riscv_timer_match(struct device *, void *, void *);

#endif /* _MACHINE_TIMER_H_ */
--- /dev/null
+/* $OpenBSD: _float.h,v 1.1 2021/04/23 02:42:16 drahn Exp $ */
+
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This software was developed by the Computer Systems Engineering group
+ * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
+ * contributed to Berkeley.
+ *
+ * All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Lawrence Berkeley Laboratory.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
#ifndef _MACHINE__FLOAT_H_
#define _MACHINE__FLOAT_H_

/*
 * <float.h> backing values: IEEE 754 single/double precision
 * parameters.  For long double, the compiler's __LDBL_* built-ins are
 * used when available; otherwise long double falls back to double.
 */
#define __FLT_RADIX 2 /* b */
#define __FLT_ROUNDS __flt_rounds()
#define __FLT_EVAL_METHOD 0 /* no promotions */

#define __FLT_MANT_DIG 24 /* p */
#define __FLT_EPSILON 1.19209290E-7F /* b**(1-p) */
#define __FLT_DIG 6 /* floor((p-1)*log10(b))+(b == 10) */
#define __FLT_MIN_EXP (-125) /* emin */
#define __FLT_MIN 1.17549435E-38F /* b**(emin-1) */
#define __FLT_MIN_10_EXP (-37) /* ceil(log10(b**(emin-1))) */
#define __FLT_MAX_EXP 128 /* emax */
#define __FLT_MAX 3.40282347E+38F /* (1-b**(-p))*b**emax */
#define __FLT_MAX_10_EXP 38 /* floor(log10((1-b**(-p))*b**emax)) */

#define __DBL_MANT_DIG 53
#define __DBL_EPSILON 2.2204460492503131E-16
#define __DBL_DIG 15
#define __DBL_MIN_EXP (-1021)
#define __DBL_MIN 2.2250738585072014E-308
#define __DBL_MIN_10_EXP (-307)
#define __DBL_MAX_EXP 1024
#define __DBL_MAX 1.7976931348623157E+308
#define __DBL_MAX_10_EXP 308

#ifdef __LDBL_MANT_DIG__
#define __LDBL_MANT_DIG __LDBL_MANT_DIG__
#define __LDBL_EPSILON __LDBL_EPSILON__
#define __LDBL_DIG __LDBL_DIG__
#define __LDBL_MIN_EXP __LDBL_MIN_EXP__
#define __LDBL_MIN __LDBL_MIN__
#define __LDBL_MIN_10_EXP __LDBL_MIN_10_EXP__
#define __LDBL_MAX_EXP __LDBL_MAX_EXP__
#define __LDBL_MAX __LDBL_MAX__
#define __LDBL_MAX_10_EXP __LDBL_MAX_10_EXP__
#else
/* No compiler-provided long double parameters: treat it as double. */
#define __LDBL_MANT_DIG DBL_MANT_DIG
#define __LDBL_EPSILON DBL_EPSILON
#define __LDBL_DIG DBL_DIG
#define __LDBL_MIN_EXP DBL_MIN_EXP
#define __LDBL_MIN DBL_MIN
#define __LDBL_MIN_10_EXP DBL_MIN_10_EXP
#define __LDBL_MAX_EXP DBL_MAX_EXP
#define __LDBL_MAX DBL_MAX
#define __LDBL_MAX_10_EXP DBL_MAX_10_EXP
#endif

#define __DECIMAL_DIG 17

#endif /* _MACHINE__FLOAT_H_ */
--- /dev/null
+/* $OpenBSD: _types.h,v 1.1 2021/04/23 02:42:16 drahn Exp $ */
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)types.h 8.3 (Berkeley) 1/5/94
+ * @(#)ansi.h 8.2 (Berkeley) 1/4/94
+ */
+
#ifndef _MACHINE__TYPES_H_
#define _MACHINE__TYPES_H_

#if defined(_KERNEL)
/*
 * Kernel context register save area.
 * NOTE(review): 13 longs -- confirm this matches the register set
 * saved by the riscv64 context-switch/setjmp code.
 */
typedef struct label_t {
	long val[13];
} label_t;
#endif

/*
 * _ALIGN(p) rounds p (pointer or byte index) up to a correctly-aligned
 * value for all data types (int, long, ...). The result is an
 * unsigned long and must be cast to any desired pointer type.
 *
 * _ALIGNED_POINTER is a boolean macro that checks whether an address
 * is valid to fetch data elements of type t from on this architecture.
 * This does not reflect the optimal alignment, just the possibility
 * (within reasonable limits).
 */
#define _ALIGNBYTES (sizeof(long) - 1)
#define _STACKALIGNBYTES 15
#define _ALIGN(p) (((unsigned long)(p) + _ALIGNBYTES) & ~_ALIGNBYTES)
#define _ALIGNED_POINTER(p,t) ((((unsigned long)(p)) & (sizeof(t) - 1)) == 0)
#define _MAX_PAGE_SHIFT 12 /* same as PAGE_SHIFT */

/* 7.18.1.1 Exact-width integer types */
typedef signed char __int8_t;
typedef unsigned char __uint8_t;
typedef short __int16_t;
typedef unsigned short __uint16_t;
typedef int __int32_t;
typedef unsigned int __uint32_t;
/* LONGLONG */
typedef long long __int64_t;
/* LONGLONG */
typedef unsigned long long __uint64_t;

/* 7.18.1.2 Minimum-width integer types */
typedef __int8_t __int_least8_t;
typedef __uint8_t __uint_least8_t;
typedef __int16_t __int_least16_t;
typedef __uint16_t __uint_least16_t;
typedef __int32_t __int_least32_t;
typedef __uint32_t __uint_least32_t;
typedef __int64_t __int_least64_t;
typedef __uint64_t __uint_least64_t;

/* 7.18.1.3 Fastest minimum-width integer types */
/* "fast" types narrower than 32 bits are widened to 32 bits here */
typedef __int32_t __int_fast8_t;
typedef __uint32_t __uint_fast8_t;
typedef __int32_t __int_fast16_t;
typedef __uint32_t __uint_fast16_t;
typedef __int32_t __int_fast32_t;
typedef __uint32_t __uint_fast32_t;
typedef __int64_t __int_fast64_t;
typedef __uint64_t __uint_fast64_t;
#define __INT_FAST8_MIN INT32_MIN
#define __INT_FAST16_MIN INT32_MIN
#define __INT_FAST32_MIN INT32_MIN
#define __INT_FAST64_MIN INT64_MIN
#define __INT_FAST8_MAX INT32_MAX
#define __INT_FAST16_MAX INT32_MAX
#define __INT_FAST32_MAX INT32_MAX
#define __INT_FAST64_MAX INT64_MAX
#define __UINT_FAST8_MAX UINT32_MAX
#define __UINT_FAST16_MAX UINT32_MAX
#define __UINT_FAST32_MAX UINT32_MAX
#define __UINT_FAST64_MAX UINT64_MAX

/* 7.18.1.4 Integer types capable of holding object pointers */
typedef long __intptr_t;
typedef unsigned long __uintptr_t;

/* 7.18.1.5 Greatest-width integer types */
typedef __int64_t __intmax_t;
typedef __uint64_t __uintmax_t;

/* Register size */
typedef long __register_t;

/* VM system types */
typedef unsigned long __vaddr_t;
typedef unsigned long __paddr_t;
typedef unsigned long __vsize_t;
typedef unsigned long __psize_t;

/* Standard system types */
typedef double __double_t;
typedef float __float_t;
typedef long __ptrdiff_t;
typedef unsigned long __size_t;
typedef long __ssize_t;
#if defined(__GNUC__) && __GNUC__ >= 3
typedef __builtin_va_list __va_list;
#else
typedef char * __va_list;
#endif

/* Wide character support types */
#ifndef __cplusplus
#ifdef __WCHAR_UNSIGNED__
typedef unsigned int __wchar_t;
#else
typedef int __wchar_t;
#endif
#endif
typedef int __wint_t;
typedef int __rune_t;
typedef void * __wctrans_t;
typedef void * __wctype_t;

#endif /* _MACHINE__TYPES_H_ */
--- /dev/null
+/*
+ * Copyright (c) 2020 Brian Bamsch <bbamsch@google.com>
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)asm.h 5.5 (Berkeley) 5/7/91
+ */
+
#ifndef _MACHINE_ASM_H_
#define _MACHINE_ASM_H_

/*
 * Assembler support macros for riscv64 .S files: symbol naming,
 * function entry/exit, profiling hooks and fault-handler helpers.
 */
#ifdef __ELF__
# define _C_LABEL(x) x
#else
# ifdef __STDC__
# define _C_LABEL(x) _ ## x
# else
# define _C_LABEL(x) _/**/x
# endif
#endif
#define _ASM_LABEL(x) x

#ifdef __STDC__
# define __CONCAT(x,y) x ## y
# define __STRING(x) #x
#else
# define __CONCAT(x,y) x/**/y
# define __STRING(x) "x"
#endif

#ifndef _ALIGN_TEXT
# define _ALIGN_TEXT .align 0
#endif

#if defined(PROF) || defined(GPROF)
// XXX Profiler Support
/*
 * Call __mcount on function entry, preserving ra/fp in a 16-byte
 * stack frame.  NOTE(review): the epilogue uses "add sp, sp, 16"
 * where the prologue uses addi -- presumably relying on the
 * assembler's add-with-immediate alias; confirm.
 */
#define _PROF_PROLOGUE \
	addi sp, sp, -16; \
	sd ra, 8(sp); \
	sd fp, 0(sp); \
	mv fp, sp; \
	call __mcount; \
	ld ra, 8(sp); \
	ld fp, 0(sp); \
	add sp, sp, 16;
#else
#define _PROF_PROLOGUE
#endif

#if defined(_RET_PROTECTOR)
// XXX Retguard Support
#error RETGUARD not yet supported for riscv64
#else
/* Retguard disabled: all hooks expand to nothing. */
#define RETGUARD_CALC_COOKIE(reg)
#define RETGUARD_LOAD_RANDOM(x, reg)
#define RETGUARD_SETUP(x, reg)
#define RETGUARD_CHECK(x, reg)
#define RETGUARD_PUSH(reg)
#define RETGUARD_POP(reg)
#define RETGUARD_SYMBOL(x)
#endif

/* Function entry: emit the label as a global function symbol. */
#define _ENTRY(x) \
	.text; .globl x; .type x,@function; .p2align 1; x:
#define ENTRY(y) _ENTRY(_C_LABEL(y)); _PROF_PROLOGUE
#define ENTRY_NP(y) _ENTRY(_C_LABEL(y))
#define ASENTRY(y) _ENTRY(_ASM_LABEL(y)); _PROF_PROLOGUE
#define ASENTRY_NP(y) _ENTRY(_ASM_LABEL(y))
#define END(y) .size y, . - y
#define EENTRY(sym) .globl sym; sym:
#define EEND(sym)

#if defined(__ELF__) && defined(__PIC__)
#ifdef __STDC__
#define PIC_SYM(x,y) x ## ( ## y ## )
#else
#define PIC_SYM(x,y) x/**/(/**/y/**/)
#endif
#else
#define PIC_SYM(x,y) x
#endif

#ifdef __ELF__
#define STRONG_ALIAS(alias,sym) \
	.global alias; \
	alias = sym
#define WEAK_ALIAS(alias,sym) \
	.weak alias; \
	alias = sym
#endif

#ifdef __STDC__
#define WARN_REFERENCES(sym,msg) \
	.stabs msg ## ,30,0,0,0 ; \
	.stabs __STRING(_C_LABEL(sym)) ## ,1,0,0,0
#elif defined(__ELF__)
#define WARN_REFERENCES(sym,msg) \
	.stabs msg,30,0,0,0 ; \
	.stabs __STRING(sym),1,0,0,0
#else
#define WARN_REFERENCES(sym,msg) \
	.stabs msg,30,0,0,0 ; \
	.stabs __STRING(_/**/sym),1,0,0,0
#endif /* __STDC__ */

#define WEAK_REFERENCE(sym, alias) \
	.weak alias; \
	.set alias,sym

/*
 * Install a new pcb_onfault handler, leaving the previous one in the
 * 'handler' register so it can be restored later.
 */
#define SWAP_FAULT_HANDLER(handler, tmp0, tmp1) \
	ld tmp0, CI_CURPCB(tp); /* Load the pcb */ \
	ld tmp1, PCB_ONFAULT(tmp0); /* Save old handler */ \
	sd handler, PCB_ONFAULT(tmp0); /* Set the handler */ \
	mv handler, tmp1

#define SET_FAULT_HANDLER(handler, pcb) \
	ld pcb, CI_CURPCB(tp); /* Load the pcb */ \
	sd handler, PCB_ONFAULT(pcb) /* Set the handler */

/* Toggle supervisor access to user memory via the sstatus SUM bit. */
#define ENTER_USER_ACCESS(tmp) \
	li tmp, SSTATUS_SUM; \
	csrs sstatus, tmp

#define EXIT_USER_ACCESS(tmp) \
	li tmp, SSTATUS_SUM; \
	csrc sstatus, tmp

#endif /* _MACHINE_ASM_H_ */
--- /dev/null
+/* Public Domain */
+
#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_

/* Full memory fence; backs every membar flavour below. */
#define __membar() do {__asm __volatile("fence" ::: "memory"); } while (0)

#define membar_enter() __membar()
#define membar_exit() __membar()
#define membar_producer() __membar()
#define membar_consumer() __membar()
#define membar_sync() __membar()

#if defined(_KERNEL)

/* virtio needs MP membars even on SP kernels */
#define virtio_membar_producer() __membar()
#define virtio_membar_consumer() __membar()
#define virtio_membar_sync() __membar()
+
/*
 * Set bits
 * *p = *p | v
 * Atomic OR via AMO; the old value is discarded (rd = zero).
 */
static inline void
atomic_setbits_int(volatile unsigned int *p, unsigned int v)
{
	__asm __volatile("amoor.w zero, %1, %0"
	    : "+A" (*p)
	    : "r" (v)
	    : "memory");
}
+
+static inline void
+atomic_store_64(volatile uint64_t *p, uint64_t v)
+{
+ __asm __volatile("amoor.d zero, %1, %0"
+ : "+A" (*p)
+ : "r" (v)
+ : "memory");
+}
+
/*
 * Clear bits
 * *p = *p & (~v)
 * Implemented as an atomic AND with the complement of v; the old
 * value is discarded (rd = zero).
 */
static inline void
atomic_clearbits_int(volatile unsigned int *p, unsigned int v)
{
	__asm __volatile("amoand.w zero, %1, %0"
	    : "+A" (*p)
	    : "r" (~v)
	    : "memory");
}

#endif /* defined(_KERNEL) */
#endif /* _MACHINE_ATOMIC_H_ */
--- /dev/null
+/*-
+ * Copyright (c) 2015-2017 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * Portions of this software were developed by SRI International and the
+ * University of Cambridge Computer Laboratory under DARPA/AFRL contract
+ * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Portions of this software were developed by the University of Cambridge
+ * Computer Laboratory as part of the CTSRD Project, with support from the
+ * UK Higher Education Innovation Fund (HEIF).
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
#ifndef _MACHINE_BOOTCONFIG_H_
#define _MACHINE_BOOTCONFIG_H_

/* Boot parameters handed from the bootstrap code to initriscv(). */
struct riscv_bootparams {
#if 0
	vaddr_t modulep;
#endif
	vaddr_t kern_l1pt;	/* L1 page table for the kernel */
	uint64_t kern_delta;	/* PA - VA */
	vaddr_t kern_stack;
#if 0
	void *arg0; // passed to kernel in R0
	void *arg1; // passed to kernel in R1
	void *arg2; // passed to kernel in R2
#endif
	vaddr_t dtbp_virt;	/* Device tree blob virtual addr */
	vaddr_t dtbp_phys;	/* Device tree blob physical addr */
};

extern char *boot_file;

// XXX ???
extern paddr_t physmap[];
extern u_int physmap_idx;

// XXX ???
vaddr_t fake_preload_metadata(struct riscv_bootparams *rbp);
void initriscv(struct riscv_bootparams *);

#endif /* _MACHINE_BOOTCONFIG_H_ */
--- /dev/null
+/*
+ * Copyright (c) 2003-2004 Opsycon AB Sweden. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
#ifndef _MACHINE_BUS_H_
#define _MACHINE_BUS_H_

/* Token-pasting helpers used by the macro templates below. */
#ifdef __STDC__
#define CAT(a,b) a##b
#define CAT3(a,b,c) a##b##c
#else
#define CAT(a,b) a/**/b
#define CAT3(a,b,c) a/**/b/**/c
#endif

/*
 * Bus access types.
 */
struct bus_space;
typedef u_long bus_addr_t;
typedef u_long bus_size_t;
typedef u_long bus_space_handle_t;
typedef struct bus_space *bus_space_tag_t;
typedef struct bus_space bus_space_t;

/*
 * Per-bus method table: every bus_space_*() operation below dispatches
 * through these function pointers.
 */
struct bus_space {
	bus_addr_t bus_base;
	void *bus_private;
	u_int8_t (*_space_read_1)(bus_space_tag_t , bus_space_handle_t,
	    bus_size_t);
	void (*_space_write_1)(bus_space_tag_t , bus_space_handle_t,
	    bus_size_t, u_int8_t);
	u_int16_t (*_space_read_2)(bus_space_tag_t , bus_space_handle_t,
	    bus_size_t);
	void (*_space_write_2)(bus_space_tag_t , bus_space_handle_t,
	    bus_size_t, u_int16_t);
	u_int32_t (*_space_read_4)(bus_space_tag_t , bus_space_handle_t,
	    bus_size_t);
	void (*_space_write_4)(bus_space_tag_t , bus_space_handle_t,
	    bus_size_t, u_int32_t);
	u_int64_t (*_space_read_8)(bus_space_tag_t , bus_space_handle_t,
	    bus_size_t);
	void (*_space_write_8)(bus_space_tag_t , bus_space_handle_t,
	    bus_size_t, u_int64_t);
	void (*_space_read_raw_2)(bus_space_tag_t, bus_space_handle_t,
	    bus_addr_t, u_int8_t *, bus_size_t);
	void (*_space_write_raw_2)(bus_space_tag_t, bus_space_handle_t,
	    bus_addr_t, const u_int8_t *, bus_size_t);
	void (*_space_read_raw_4)(bus_space_tag_t, bus_space_handle_t,
	    bus_addr_t, u_int8_t *, bus_size_t);
	void (*_space_write_raw_4)(bus_space_tag_t, bus_space_handle_t,
	    bus_addr_t, const u_int8_t *, bus_size_t);
	void (*_space_read_raw_8)(bus_space_tag_t, bus_space_handle_t,
	    bus_addr_t, u_int8_t *, bus_size_t);
	void (*_space_write_raw_8)(bus_space_tag_t, bus_space_handle_t,
	    bus_addr_t, const u_int8_t *, bus_size_t);
	int (*_space_map)(bus_space_tag_t , bus_addr_t,
	    bus_size_t, int, bus_space_handle_t *);
	void (*_space_unmap)(bus_space_tag_t, bus_space_handle_t,
	    bus_size_t);
	int (*_space_subregion)(bus_space_tag_t, bus_space_handle_t,
	    bus_size_t, bus_size_t, bus_space_handle_t *);
	void * (*_space_vaddr)(bus_space_tag_t, bus_space_handle_t);
	paddr_t (*_space_mmap)(bus_space_tag_t, bus_addr_t, off_t,
	    int, int);
};
+
/* Scalar accessors: dispatch through the per-bus method table. */
#define bus_space_read_1(t, h, o) (*(t)->_space_read_1)((t), (h), (o))
#define bus_space_read_2(t, h, o) (*(t)->_space_read_2)((t), (h), (o))
#define bus_space_read_4(t, h, o) (*(t)->_space_read_4)((t), (h), (o))
#define bus_space_read_8(t, h, o) (*(t)->_space_read_8)((t), (h), (o))

#define bus_space_write_1(t, h, o, v) (*(t)->_space_write_1)((t), (h), (o), (v))
#define bus_space_write_2(t, h, o, v) (*(t)->_space_write_2)((t), (h), (o), (v))
#define bus_space_write_4(t, h, o, v) (*(t)->_space_write_4)((t), (h), (o), (v))
#define bus_space_write_8(t, h, o, v) (*(t)->_space_write_8)((t), (h), (o), (v))

/* Single-item raw accesses reuse the normal (non-raw) methods. */
#define bus_space_read_raw_2(t, h, o) \
	(*(t)->_space_read_2)((t), (h), (o))
#define bus_space_read_raw_4(t, h, o) \
	(*(t)->_space_read_4)((t), (h), (o))
#define bus_space_read_raw_8(t, h, o) \
	(*(t)->_space_read_8)((t), (h), (o))

#define bus_space_write_raw_2(t, h, o, v) \
	(*(t)->_space_write_2)((t), (h), (o), (v))
#define bus_space_write_raw_4(t, h, o, v) \
	(*(t)->_space_write_4)((t), (h), (o), (v))
#define bus_space_write_raw_8(t, h, o, v) \
	(*(t)->_space_write_8)((t), (h), (o), (v))

/* Multi-item raw accesses use the dedicated raw method pointers. */
#define bus_space_read_raw_multi_2(t, h, a, b, l) \
	(*(t)->_space_read_raw_2)((t), (h), (a), (b), (l))
#define bus_space_read_raw_multi_4(t, h, a, b, l) \
	(*(t)->_space_read_raw_4)((t), (h), (a), (b), (l))
#define bus_space_read_raw_multi_8(t, h, a, b, l) \
	(*(t)->_space_read_raw_8)((t), (h), (a), (b), (l))

#define bus_space_write_raw_multi_2(t, h, a, b, l) \
	(*(t)->_space_write_raw_2)((t), (h), (a), (b), (l))
#define bus_space_write_raw_multi_4(t, h, a, b, l) \
	(*(t)->_space_write_raw_4)((t), (h), (a), (b), (l))
#define bus_space_write_raw_multi_8(t, h, a, b, l) \
	(*(t)->_space_write_raw_8)((t), (h), (a), (b), (l))

#define bus_space_map(t, o, s, c, p) (*(t)->_space_map)((t), (o), (s), (c), (p))
#define bus_space_unmap(t, h, s) (*(t)->_space_unmap)((t), (h), (s))
#define bus_space_subregion(t, h, o, s, p) \
	(*(t)->_space_subregion)((t), (h), (o), (s), (p))

/* Mapping flags for bus_space_map(). */
#define BUS_SPACE_MAP_CACHEABLE 0x01
#define BUS_SPACE_MAP_KSEG0 0x02
#define BUS_SPACE_MAP_LINEAR 0x04
#define BUS_SPACE_MAP_PREFETCHABLE 0x08

#define bus_space_vaddr(t, h) (*(t)->_space_vaddr)((t), (h))
#define bus_space_mmap(t, a, o, p, f) \
	(*(t)->_space_mmap)((t), (a), (o), (p), (f))
+
+/*----------------------------------------------------------------------------*/
+#define bus_space_read_multi(n,m) \
+static __inline void \
+CAT(bus_space_read_multi_,n)(bus_space_tag_t bst, bus_space_handle_t bsh, \
+ bus_size_t o, CAT3(u_int,m,_t) *x, size_t cnt) \
+{ \
+ while (cnt--) \
+ *x++ = CAT(bus_space_read_,n)(bst, bsh, o); \
+}
+
+bus_space_read_multi(1,8)
+bus_space_read_multi(2,16)
+bus_space_read_multi(4,32)
+bus_space_read_multi(8,64)
+
+/*----------------------------------------------------------------------------*/
+#define bus_space_read_region(n,m) \
+static __inline void \
+CAT(bus_space_read_region_,n)(bus_space_tag_t bst, bus_space_handle_t bsh, \
+ bus_addr_t ba, CAT3(u_int,m,_t) *x, size_t cnt) \
+{ \
+ while (cnt--) \
+ *x++ = CAT(bus_space_read_,n)(bst, bsh, ba++); \
+}
+
+bus_space_read_region(1,8)
+bus_space_read_region(2,16)
+bus_space_read_region(4,32)
+bus_space_read_region(8,64)
+
+/*----------------------------------------------------------------------------*/
+#define bus_space_read_raw_region(n,m) \
+static __inline void \
+CAT(bus_space_read_raw_region_,n)(bus_space_tag_t bst, \
+ bus_space_handle_t bsh, \
+ bus_addr_t ba, u_int8_t *x, size_t cnt) \
+{ \
+ cnt >>= ((n) >> 1); \
+ while (cnt--) { \
+ CAT(bus_space_read_raw_multi_,n)(bst, bsh, ba, x, (n)); \
+ ba += (n); \
+ x += (n); \
+ } \
+}
+
+bus_space_read_raw_region(2,16)
+bus_space_read_raw_region(4,32)
+bus_space_read_raw_region(8,64)
+
+/*----------------------------------------------------------------------------*/
+#define bus_space_write_multi(n,m) \
+static __inline void \
+CAT(bus_space_write_multi_,n)(bus_space_tag_t bst, bus_space_handle_t bsh, \
+ bus_size_t o, const CAT3(u_int,m,_t) *x, size_t cnt) \
+{ \
+ while (cnt--) { \
+ CAT(bus_space_write_,n)(bst, bsh, o, *x++); \
+ } \
+}
+
+bus_space_write_multi(1,8)
+bus_space_write_multi(2,16)
+bus_space_write_multi(4,32)
+bus_space_write_multi(8,64)
+
+/*----------------------------------------------------------------------------*/
+#define bus_space_write_region(n,m) \
+static __inline void \
+CAT(bus_space_write_region_,n)(bus_space_tag_t bst, bus_space_handle_t bsh, \
+ bus_addr_t ba, const CAT3(u_int,m,_t) *x, size_t cnt) \
+{ \
+ while (cnt--) { \
+ CAT(bus_space_write_,n)(bst, bsh, ba, *x++); \
+ ba += sizeof(x); \
+ } \
+}
+
+bus_space_write_region(1,8)
+bus_space_write_region(2,16)
+bus_space_write_region(4,32)
+bus_space_write_region(8,64)
+
+/*----------------------------------------------------------------------------*/
+#define bus_space_write_raw_region(n,m) \
+static __inline void \
+CAT(bus_space_write_raw_region_,n)(bus_space_tag_t bst, \
+ bus_space_handle_t bsh, \
+ bus_addr_t ba, const u_int8_t *x, size_t cnt) \
+{ \
+ cnt >>= ((n) >> 1); \
+ while (cnt--) { \
+ CAT(bus_space_write_raw_multi_,n)(bst, bsh, ba, x, (n)); \
+ ba += (n); \
+ x += (n); \
+ } \
+}
+
+bus_space_write_raw_region(2,16)
+bus_space_write_raw_region(4,32)
+bus_space_write_raw_region(8,64)
+
+/*----------------------------------------------------------------------------*/
+#define bus_space_set_region(n,m) \
+static __inline void \
+CAT(bus_space_set_region_,n)(bus_space_tag_t bst, bus_space_handle_t bsh, \
+ bus_addr_t ba, CAT3(u_int,m,_t) x, size_t cnt) \
+{ \
+ while (cnt--) { \
+ CAT(bus_space_write_,n)(bst, bsh, ba, x); \
+ ba += sizeof(x); \
+ } \
+}
+
+bus_space_set_region(1,8)
+bus_space_set_region(2,16)
+bus_space_set_region(4,32)
+bus_space_set_region(8,64)
+
+/*----------------------------------------------------------------------------*/
+/*
+ * Copy c bytes from (h1 + o1) to (h2 + o2).  The handles plus offsets
+ * are treated directly as CPU virtual addresses; the tag argument v is
+ * unused.  The copy runs forward only, so overlapping ranges with
+ * dst > src are not handled (no memmove semantics).
+ */
+static __inline void
+bus_space_copy_1(void *v, bus_space_handle_t h1, bus_size_t o1,
+    bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
+{
+	char *src = (char *)(h1 + o1);
+	char *dst = (char *)(h2 + o2);
+
+	for (; c != 0; c--)
+		*dst++ = *src++;
+}
+
+
+/*
+ * Copy c 16-bit items from (h1 + o1) to (h2 + o2).  Handles plus
+ * offsets are used as CPU virtual addresses; v is unused.  Forward
+ * copy only -- overlapping ranges with dst > src are not handled.
+ */
+static __inline void
+bus_space_copy_2(void *v, bus_space_handle_t h1, bus_size_t o1,
+    bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
+{
+	short *src = (short *)(h1 + o1);
+	short *dst = (short *)(h2 + o2);
+
+	for (; c != 0; c--)
+		*dst++ = *src++;
+}
+
+/*
+ * Copy c 32-bit items from (h1 + o1) to (h2 + o2).  Handles plus
+ * offsets are used as CPU virtual addresses; v is unused.  Forward
+ * copy only -- overlapping ranges with dst > src are not handled.
+ */
+static __inline void
+bus_space_copy_4(void *v, bus_space_handle_t h1, bus_size_t o1,
+    bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
+{
+	int *src = (int *)(h1 + o1);
+	int *dst = (int *)(h2 + o2);
+
+	for (; c != 0; c--)
+		*dst++ = *src++;
+}
+
+/*
+ * Copy c 64-bit items from (h1 + o1) to (h2 + o2).  Handles plus
+ * offsets are used as CPU virtual addresses; v is unused.  Forward
+ * copy only -- overlapping ranges with dst > src are not handled.
+ */
+static __inline void
+bus_space_copy_8(void *v, bus_space_handle_t h1, bus_size_t o1,
+    bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
+{
+	int64_t *src = (int64_t *)(h1 + o1);
+	int64_t *dst = (int64_t *)(h2 + o2);
+
+	for (; c != 0; c--)
+		*dst++ = *src++;
+}
+
+/*----------------------------------------------------------------------------*/
+/*
+ * Bus read/write barrier method.
+ *
+ * void bus_space_barrier(bus_space_tag_t tag,
+ *	bus_space_handle_t bsh, bus_size_t offset,
+ *	bus_size_t len, int flags);
+ *
+ * The offset/length/flags are currently ignored: a full "fence" orders
+ * all prior loads/stores (including device accesses) before all later
+ * ones, which satisfies both BUS_SPACE_BARRIER_READ and _WRITE.
+ */
+static inline void
+bus_space_barrier(bus_space_tag_t t, bus_space_handle_t h, bus_size_t offset,
+    bus_size_t length, int flags)
+{
+	/*
+	 * Fix: this used "sfence.vma", which only orders address-
+	 * translation updates (TLB maintenance), not memory accesses.
+	 * The RISC-V memory-ordering instruction is "fence".
+	 */
+	__asm__ volatile ("fence" ::: "memory");
+}
+#define BUS_SPACE_BARRIER_READ	0x01	/* force read barrier */
+#define BUS_SPACE_BARRIER_WRITE	0x02	/* force write barrier */
+
+/* Flag bits for bus_dmamap_create(), bus_dmamap_load*(), dmamem_alloc(). */
+#define BUS_DMA_WAITOK		0x0000
+#define BUS_DMA_NOWAIT		0x0001
+#define BUS_DMA_ALLOCNOW	0x0002
+#define BUS_DMA_COHERENT	0x0008
+#define BUS_DMA_BUS1		0x0010	/* placeholders for bus functions... */
+#define BUS_DMA_BUS2		0x0020
+#define BUS_DMA_BUS3		0x0040
+#define BUS_DMA_BUS4		0x0080
+#define BUS_DMA_READ		0x0100	/* mapping is device -> memory only */
+#define BUS_DMA_WRITE		0x0200	/* mapping is memory -> device only */
+#define BUS_DMA_STREAMING	0x0400	/* hint: sequential, unidirectional */
+#define BUS_DMA_ZERO		0x0800	/* zero memory in dmamem_alloc */
+#define BUS_DMA_NOCACHE		0x1000
+#define BUS_DMA_64BIT		0x2000	/* device handles 64bit dva */
+
+/* Forwards needed by prototypes below. */
+struct mbuf;
+struct proc;
+struct uio;
+
+/* Operation bits for the "op" argument of bus_dmamap_sync(). */
+#define BUS_DMASYNC_POSTREAD	0x0001
+#define BUS_DMASYNC_POSTWRITE	0x0002
+#define BUS_DMASYNC_PREREAD	0x0004
+#define BUS_DMASYNC_PREWRITE	0x0008
+
+typedef struct machine_bus_dma_tag	*bus_dma_tag_t;
+typedef struct machine_bus_dmamap	*bus_dmamap_t;
+
+/*
+ * bus_dma_segment_t
+ *
+ *	Describes a single contiguous DMA transaction.  Values
+ *	are suitable for programming into DMA registers.
+ */
+struct machine_bus_dma_segment {
+	bus_addr_t	ds_addr;	/* DMA address */
+	bus_size_t	ds_len;		/* length of transfer */
+
+	paddr_t		_ds_paddr;	/* CPU address */
+	vaddr_t		_ds_vaddr;	/* CPU address */
+};
+typedef struct machine_bus_dma_segment	bus_dma_segment_t;
+
+/*
+ * bus_dma_tag_t
+ *
+ *	A machine-dependent opaque type describing the implementation of
+ *	DMA for a given bus.  Each member is a method pointer invoked
+ *	through the bus_dmamap_*/bus_dmamem_* wrapper macros below.
+ */
+
+struct machine_bus_dma_tag {
+	void	*_cookie;		/* cookie used in the guts */
+	int	_flags;			/* misc. flags */
+
+	/*
+	 * DMA mapping methods.
+	 */
+	int	(*_dmamap_create)(bus_dma_tag_t , bus_size_t, int,
+		    bus_size_t, bus_size_t, int, bus_dmamap_t *);
+	void	(*_dmamap_destroy)(bus_dma_tag_t , bus_dmamap_t);
+	int	(*_dmamap_load)(bus_dma_tag_t , bus_dmamap_t, void *,
+		    bus_size_t, struct proc *, int);
+	int	(*_dmamap_load_mbuf)(bus_dma_tag_t , bus_dmamap_t,
+		    struct mbuf *, int);
+	int	(*_dmamap_load_uio)(bus_dma_tag_t , bus_dmamap_t,
+		    struct uio *, int);
+	int	(*_dmamap_load_raw)(bus_dma_tag_t , bus_dmamap_t,
+		    bus_dma_segment_t *, int, bus_size_t, int);
+	int	(*_dmamap_load_buffer)(bus_dma_tag_t, bus_dmamap_t, void *,
+		    bus_size_t, struct proc *, int, paddr_t *, int *, int);
+	void	(*_dmamap_unload)(bus_dma_tag_t , bus_dmamap_t);
+	void	(*_dmamap_sync)(bus_dma_tag_t , bus_dmamap_t,
+		    bus_addr_t, bus_size_t, int);
+
+	/*
+	 * DMA memory utility functions.
+	 */
+	int	(*_dmamem_alloc)(bus_dma_tag_t, bus_size_t, bus_size_t,
+		    bus_size_t, bus_dma_segment_t *, int, int *, int);
+	void	(*_dmamem_free)(bus_dma_tag_t, bus_dma_segment_t *, int);
+	int	(*_dmamem_map)(bus_dma_tag_t, bus_dma_segment_t *,
+		    int, size_t, caddr_t *, int);
+	void	(*_dmamem_unmap)(bus_dma_tag_t, caddr_t, size_t);
+	paddr_t	(*_dmamem_mmap)(bus_dma_tag_t, bus_dma_segment_t *,
+		    int, off_t, int, int);
+
+	/*
+	 * internal memory address translation information.
+	 */
+	bus_addr_t _dma_mask;
+};
+
+/*
+ * Convenience wrappers dispatching through the tag's method table.
+ */
+#define bus_dmamap_create(t, s, n, m, b, f, p) \
+	(*(t)->_dmamap_create)((t), (s), (n), (m), (b), (f), (p))
+#define bus_dmamap_destroy(t, p) \
+	(*(t)->_dmamap_destroy)((t), (p))
+#define bus_dmamap_load(t, m, b, s, p, f) \
+	(*(t)->_dmamap_load)((t), (m), (b), (s), (p), (f))
+#define bus_dmamap_load_mbuf(t, m, b, f) \
+	(*(t)->_dmamap_load_mbuf)((t), (m), (b), (f))
+#define bus_dmamap_load_uio(t, m, u, f) \
+	(*(t)->_dmamap_load_uio)((t), (m), (u), (f))
+#define bus_dmamap_load_raw(t, m, sg, n, s, f) \
+	(*(t)->_dmamap_load_raw)((t), (m), (sg), (n), (s), (f))
+#define bus_dmamap_unload(t, p) \
+	(*(t)->_dmamap_unload)((t), (p))
+/* _dmamap_sync may be left NULL, in which case the sync is a no-op. */
+#define bus_dmamap_sync(t, p, a, l, o) \
+	(void)((t)->_dmamap_sync ? \
+	    (*(t)->_dmamap_sync)((t), (p), (a), (l), (o)) : (void)0)
+
+#define bus_dmamem_alloc(t, s, a, b, sg, n, r, f) \
+	(*(t)->_dmamem_alloc)((t), (s), (a), (b), (sg), (n), (r), (f))
+#define bus_dmamem_free(t, sg, n) \
+	(*(t)->_dmamem_free)((t), (sg), (n))
+#define bus_dmamem_map(t, sg, n, s, k, f) \
+	(*(t)->_dmamem_map)((t), (sg), (n), (s), (k), (f))
+#define bus_dmamem_unmap(t, k, s) \
+	(*(t)->_dmamem_unmap)((t), (k), (s))
+#define bus_dmamem_mmap(t, sg, n, o, p, f) \
+	(*(t)->_dmamem_mmap)((t), (sg), (n), (o), (p), (f))
+
+/*
+ * Baseline implementations; the names mirror the _dmamap_*/_dmamem_*
+ * members of struct machine_bus_dma_tag above (presumably used to
+ * populate default tags -- the definitions live elsewhere).
+ */
+int	_dmamap_create(bus_dma_tag_t, bus_size_t, int,
+	    bus_size_t, bus_size_t, int, bus_dmamap_t *);
+void	_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
+int	_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
+	    bus_size_t, struct proc *, int);
+int	_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t, struct mbuf *, int);
+int	_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t, struct uio *, int);
+int	_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
+	    bus_dma_segment_t *, int, bus_size_t, int);
+int	_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
+	    bus_size_t, struct proc *, int, paddr_t *, int *, int);
+void	_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
+void	_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
+	    bus_size_t, int);
+
+int	_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t,
+	    bus_size_t, bus_dma_segment_t *, int, int *, int);
+void	_dmamem_free(bus_dma_tag_t, bus_dma_segment_t *, int);
+int	_dmamem_map(bus_dma_tag_t, bus_dma_segment_t *,
+	    int, size_t, caddr_t *, int);
+void	_dmamem_unmap(bus_dma_tag_t, caddr_t, size_t);
+paddr_t	_dmamem_mmap(bus_dma_tag_t, bus_dma_segment_t *, int, off_t, int, int);
+/* like _dmamem_alloc() but constrained to the [low, high] paddr range */
+int	_dmamem_alloc_range(bus_dma_tag_t, bus_size_t, bus_size_t, bus_size_t,
+	    bus_dma_segment_t *, int, int *, int, paddr_t, paddr_t);
+
+/*
+ * bus_dmamap_t
+ *
+ *	Describes a DMA mapping.  Allocated with dm_segs[] extended to
+ *	_dm_segcnt entries (variable-length trailing array idiom).
+ */
+struct machine_bus_dmamap {
+	/*
+	 * PRIVATE MEMBERS: not for use by machine-independent code.
+	 */
+	bus_size_t	_dm_size;	/* largest DMA transfer mappable */
+	int		_dm_segcnt;	/* number of segs this map can map */
+	bus_size_t	_dm_maxsegsz;	/* largest possible segment */
+	bus_size_t	_dm_boundary;	/* don't cross this */
+	int		_dm_flags;	/* misc. flags */
+
+	void		*_dm_cookie;	/* cookie for bus-specific functions */
+
+	/*
+	 * PUBLIC MEMBERS: these are used by machine-independent code.
+	 */
+	bus_size_t	dm_mapsize;	/* size of the mapping */
+	int		dm_nsegs;	/* # valid segments in mapping */
+	bus_dma_segment_t dm_segs[1];	/* segments; variable length */
+};
+
+/*
+ * "generic" bus_space backend: implementations suitable for plugging
+ * into the _space_* method pointers of a bus_space tag.
+ */
+int	generic_space_map(bus_space_tag_t, bus_addr_t, bus_size_t, int,
+	    bus_space_handle_t *);
+void	generic_space_unmap(bus_space_tag_t, bus_space_handle_t, bus_size_t);
+int	generic_space_region(bus_space_tag_t, bus_space_handle_t, bus_size_t,
+	    bus_size_t, bus_space_handle_t *);
+void	*generic_space_vaddr(bus_space_tag_t, bus_space_handle_t);
+paddr_t	generic_space_mmap(bus_space_tag_t, bus_addr_t, off_t, int, int);
+uint8_t	generic_space_read_1(bus_space_tag_t, bus_space_handle_t, bus_size_t);
+uint16_t generic_space_read_2(bus_space_tag_t, bus_space_handle_t, bus_size_t);
+uint32_t generic_space_read_4(bus_space_tag_t, bus_space_handle_t, bus_size_t);
+uint64_t generic_space_read_8(bus_space_tag_t, bus_space_handle_t, bus_size_t);
+void	generic_space_read_raw_2(bus_space_tag_t, bus_space_handle_t,
+	    bus_addr_t, uint8_t *, bus_size_t);
+void	generic_space_write_1(bus_space_tag_t, bus_space_handle_t, bus_size_t,
+	    uint8_t);
+void	generic_space_write_2(bus_space_tag_t, bus_space_handle_t, bus_size_t,
+	    uint16_t);
+void	generic_space_write_4(bus_space_tag_t, bus_space_handle_t, bus_size_t,
+	    uint32_t);
+void	generic_space_write_8(bus_space_tag_t, bus_space_handle_t, bus_size_t,
+	    uint64_t);
+void	generic_space_write_raw_2(bus_space_tag_t, bus_space_handle_t,
+	    bus_addr_t, const uint8_t *, bus_size_t);
+void	generic_space_read_raw_4(bus_space_tag_t, bus_space_handle_t,
+	    bus_addr_t, uint8_t *, bus_size_t);
+void	generic_space_write_raw_4(bus_space_tag_t, bus_space_handle_t,
+	    bus_addr_t, const uint8_t *, bus_size_t);
+void	generic_space_read_raw_8(bus_space_tag_t, bus_space_handle_t,
+	    bus_addr_t, uint8_t *, bus_size_t);
+void	generic_space_write_raw_8(bus_space_tag_t, bus_space_handle_t,
+	    bus_addr_t, const uint8_t *, bus_size_t);
+
+#endif /* _MACHINE_BUS_H_ */
--- /dev/null
+/* $OpenBSD: cdefs.h,v 1.1 2021/04/23 02:42:16 drahn Exp $ */
+
+#ifndef _MACHINE_CDEFS_H_
+#define _MACHINE_CDEFS_H_
+
+/* Emit a global assembler-level alias: "alias" resolves to "sym". */
+#define __strong_alias(alias,sym) \
+	__asm__(".global " __STRING(alias) " ; " __STRING(alias) \
+	    " = " __STRING(sym))
+/* Same, but the alias is a weak symbol and may be overridden. */
+#define __weak_alias(alias,sym) \
+	__asm__(".weak " __STRING(alias) " ; " __STRING(alias) \
+	    " = " __STRING(sym))
+/* Attach a link-time warning (.gnu.warning section) to references of sym. */
+#define __warn_references(sym,msg) \
+	__asm__(".section .gnu.warning." __STRING(sym) \
+	    " ; .ascii \"" msg "\" ; .text")
+
+#endif /* !_MACHINE_CDEFS_H_ */
--- /dev/null
+/*
+ * Copyright (c) 1996 Christos Zoulas. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Christos Zoulas.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_CONF_H_
+#define _MACHINE_CONF_H_
+
+#include <sys/conf.h>
+
+/* /dev/mem and /dev/kmem share one read/write entry point. */
+#define mmread	mmrw
+#define mmwrite	mmrw
+cdev_decl(mm);
+
+/*
+ * cdevsw initializer for the openprom device: only open, close and
+ * ioctl are provided; read/write/mmap are enodev and stop is a nullop.
+ */
+/* open, close, ioctl */
+#define cdev_openprom_init(c,n) { \
+	dev_init(c,n,open), dev_init(c,n,close), (dev_type_read((*))) enodev, \
+	(dev_type_write((*))) enodev, dev_init(c,n,ioctl), \
+	(dev_type_stop((*))) nullop, 0, selfalse, \
+	(dev_type_mmap((*))) enodev }
+
+cdev_decl(openprom);
+
+/*
+ * These numbers have to be in sync with bdevsw/cdevsw.
+ */
+
+/* block-device major numbers */
+#define BMAJ_WD		0
+#define BMAJ_SW		1
+#define BMAJ_SD		4
+#define BMAJ_ST		5
+
+/* character-device major numbers */
+#define CMAJ_MM		2
+#define CMAJ_PTS	5
+#define CMAJ_PTC	6
+#define CMAJ_COM	8
+#define CMAJ_WSDISPLAY	12
+#define CMAJ_ST		14
+#define CMAJ_LPT	16
+#define CMAJ_CH		17
+#define CMAJ_UK		20
+#define CMAJ_BPF	23
+#define CMAJ_TUN	40
+#define CMAJ_AUDIO	42
+#define CMAJ_VIDEO	44
+#define CMAJ_BKTR	49
+#define CMAJ_MIDI	52
+#define CMAJ_USB	61
+#define CMAJ_UHID	62
+#define CMAJ_UGEN	63
+#define CMAJ_ULPT	64
+#define CMAJ_UCOM	66
+#define CMAJ_WSKBD	67
+#define CMAJ_WSMOUSE	68
+#ifdef USER_PCICONF
+#define CMAJ_PCI	72
+#endif
+#define CMAJ_RADIO	76
+#define CMAJ_DRM	87
+#define CMAJ_GPIO	88
+#define CMAJ_VSCSI	89
+
+#endif /* _MACHINE_CONF_H_ */
--- /dev/null
+/*
+ * Copyright (c) 2019 Mike Larkin <mlarkin@openbsd.org>
+ * Copyright (c) 2016 Dale Rahn <drahn@dalerahn.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _MACHINE_CPU_H_
+#define _MACHINE_CPU_H_
+
+/*
+ * User-visible definitions
+ */
+
+/* CTL_MACHDEP definitions. */
+/* None for now */
+#define CPU_MAXID		0	/* number of valid machdep ids */
+
+#define CTL_MACHDEP_NAMES { \
+}
+
+#ifdef _KERNEL
+
+/*
+ * Kernel-only definitions
+ */
+#include <machine/intr.h>
+#include <machine/frame.h>
+#include <machine/riscvreg.h>
+
+/* All the CLKF_* macros take a struct clockframe * as an argument. */
+
+#define clockframe trapframe
+/*
+ * CLKF_USERMODE: Return TRUE/FALSE (1/0) depending on whether the
+ * frame came from USR mode or not.  SSTATUS_SPP clear means the trap
+ * was taken from user mode.
+ *
+ * Fix: the macro parameter is now parenthesized so that passing a
+ * cast or conditional expression as "frame" parses correctly.
+ */
+#define CLKF_USERMODE(frame)	(((frame)->tf_sstatus & SSTATUS_SPP) == 0)
+
+/*
+ * CLKF_INTR: True if we took the interrupt from inside another
+ * interrupt handler.  (The "frame" argument is unused.)
+ */
+#define CLKF_INTR(frame)	(curcpu()->ci_idepth > 1)
+
+/*
+ * CLKF_PC: Extract the program counter (saved sepc) from a clockframe.
+ */
+#define CLKF_PC(frame)		((frame)->tf_sepc)
+
+/*
+ * PROC_PC: Find out the program counter for the given process.
+ */
+#define PROC_PC(p)	((p)->p_addr->u_pcb.pcb_tf->tf_sepc)
+#define PROC_STACK(p)	((p)->p_addr->u_pcb.pcb_tf->tf_sp)
+
+#include <sys/device.h>
+#include <sys/sched.h>
+#include <sys/srp.h>
+
+/* Per-CPU state; curcpu() returns the running CPU's instance (via tp). */
+struct cpu_info {
+	struct device *ci_dev;		/* Device corresponding to this CPU */
+	struct cpu_info *ci_next;
+	struct schedstate_percpu ci_schedstate;	/* scheduler state */
+
+	u_int32_t ci_cpuid;
+#if 0
+	uint64_t ci_mpidr;
+	u_int ci_acpi_proc_id;
+#endif
+	int ci_node;			/* presumably the FDT node -- confirm */
+	struct cpu_info *ci_self;
+
+	struct proc *ci_curproc;
+	struct pmap *ci_curpm;
+#if 0
+	struct proc *ci_fpuproc;
+#endif
+	u_int32_t ci_randseed;
+
+	struct pcb *ci_curpcb;
+	struct pcb *ci_idle_pcb;
+
+	u_int32_t ci_ctrl;		/* The CPU control register */
+
+	/* interrupt state: priority level, pending, nesting depth */
+	uint32_t ci_cpl;
+	uint32_t ci_ipending;
+	uint32_t ci_idepth;		/* see CLKF_INTR() above */
+#ifdef DIAGNOSTIC
+	int	ci_mutex_level;
+#endif
+	int	ci_want_resched;	/* cleared via clear_resched() */
+
+	/* currently loaded fpu proc ctx */
+	struct proc *ci_fpuproc;
+
+#if 0
+	void			(*ci_flush_bp)(void);
+
+	struct opp_table	*ci_opp_table;
+	volatile int		ci_opp_idx;
+	volatile int		ci_opp_max;
+	uint32_t		ci_cpu_supply;
+#endif
+
+#ifdef MULTIPROCESSOR
+	struct srp_hazard	ci_srp_hazards[SRP_HAZARD_NUM];
+	volatile int		ci_flags;	/* CPUF_* bits below */
+#if 0
+	uint64_t		ci_ttbr1;
+	vaddr_t			ci_el1_stkend;
+#endif
+
+	volatile int		ci_ddb_paused;	/* CI_DDB_* state below */
+#define CI_DDB_RUNNING		0
+#define CI_DDB_SHOULDSTOP	1
+#define CI_DDB_STOPPED		2
+#define CI_DDB_ENTERDDB		3
+#define CI_DDB_INDDB		4
+
+#endif
+
+#ifdef GPROF
+	struct gmonparam	*ci_gmon;
+#endif
+};
+
+/* ci_flags bits (presumably the MP bring-up handshake -- confirm). */
+#define CPUF_PRIMARY		(1<<0)
+#define CPUF_AP			(1<<1)
+#define CPUF_IDENTIFY		(1<<2)
+#define CPUF_IDENTIFIED		(1<<3)
+#define CPUF_PRESENT		(1<<4)
+#define CPUF_GO			(1<<5)
+#define CPUF_RUNNING		(1<<6)
+
+/*
+ * Return the current CPU's cpu_info.  The kernel keeps the per-CPU
+ * cpu_info pointer in the tp (thread pointer) register.
+ */
+static inline struct cpu_info *
+curcpu(void)
+{
+	struct cpu_info *ci = NULL;
+
+	__asm __volatile("mv %0, tp" : "=&r"(ci));
+	return (ci);
+}
+
+extern uint32_t boot_hart;	/* The hart we booted on. */
+extern struct cpu_info cpu_info_primary;
+extern struct cpu_info *cpu_info_list;
+
+#ifndef MULTIPROCESSOR
+
+/* Uniprocessor: trivial single-CPU versions of the iteration macros. */
+#define cpu_number()	0
+#define CPU_IS_PRIMARY(ci)	1
+#define CPU_INFO_ITERATOR	int
+#define CPU_INFO_FOREACH(cii, ci) \
+	for (cii = 0, ci = curcpu(); ci != NULL; ci = NULL)
+#define CPU_INFO_UNIT(ci)	0
+#define MAXCPUS	1
+#define cpu_unidle(ci)
+
+#else
+
+/* MP: walk the ci_next-linked list headed by cpu_info_list. */
+#define cpu_number()		(curcpu()->ci_cpuid)
+#define CPU_IS_PRIMARY(ci)	((ci) == &cpu_info_primary)
+#define CPU_INFO_ITERATOR	int
+#define CPU_INFO_FOREACH(cii, ci)	for (cii = 0, ci = cpu_info_list; \
+					    ci != NULL; ci = ci->ci_next)
+#define CPU_INFO_UNIT(ci)	((ci)->ci_dev ? (ci)->ci_dev->dv_unit : 0)
+#define MAXCPUS	32
+
+extern struct cpu_info *cpu_info[MAXCPUS];
+
+void cpu_boot_secondary_processors(void);
+#endif /* !MULTIPROCESSOR */
+
+#define CPU_BUSY_CYCLE()	do {} while (0)
+
+#define curpcb		curcpu()->ci_curpcb
+
+#define curpcb curcpu()->ci_curpcb
+
+/*
+ * Cheap timing-based entropy for the scheduler: currently just the
+ * truncated time CSR.
+ * Should do bit reversal ^ with csr_read(time);
+ */
+static inline unsigned int
+cpu_rnd_messybits(void)
+{
+	return csr_read(time);
+}
+
+/*
+ * Scheduling glue
+ */
+/* request an AST (trap on the way back to user mode) for proc p */
+#define aston(p)	((p)->p_md.md_astpending = 1)
+#define setsoftast()	aston(curcpu()->ci_curproc)
+
+/*
+ * Notify the current process (p) that it has a signal pending,
+ * process as soon as possible.
+ */
+
+#ifdef MULTIPROCESSOR
+void cpu_unidle(struct cpu_info *ci);
+#define signotify(p)	(aston(p), cpu_unidle((p)->p_cpu))
+void cpu_kick(struct cpu_info *);
+#else
+#define cpu_kick(ci)
+#define cpu_unidle(ci)
+#define signotify(p)	setsoftast()
+#endif
+
+/*
+ * Preempt the current process if in interrupt from user mode,
+ * or after the current trap/syscall if in system mode.
+ */
+void	need_resched(struct cpu_info *);
+#define clear_resched(ci)	((ci)->ci_want_resched = 0)
+
+/*
+ * Give a profiling tick to the current process when the user profiling
+ * buffer pages are invalid.  On the i386, request an ast to send us
+ * through trap(), marking the proc as needing a profiling tick.
+ */
+#define	need_proftick(p)	aston(p)
+
+/* asm code to start new kernel contexts. */
+void	proc_trampoline(void);
+void	child_trampoline(void);
+
+/*
+ * Random cruft
+ */
+void	dumpconf(void);
+
+/* cpuswitch.S */
+struct pcb;
+void	savectx (struct pcb *pcb);
+
+/* Enable supervisor interrupts: set SSTATUS_SIE (csrsi, immediate form). */
+static inline void
+intr_enable(void)
+{
+	__asm __volatile("csrsi sstatus, %0" :: "i" (SSTATUS_SIE));
+}
+
+/*
+ * Disable supervisor interrupts.  csrrci atomically clears SSTATUS_SIE
+ * and returns the previous sstatus; only the SIE bit is kept, so the
+ * return value is suitable for a later intr_restore().
+ */
+static inline u_long
+intr_disable(void)
+{
+	uint64_t ret;
+
+	__asm __volatile(
+		"csrrci %0, sstatus, %1"
+		: "=&r" (ret) : "i" (SSTATUS_SIE)
+	);
+
+	return (ret & (SSTATUS_SIE));
+}
+
+/*
+ * Restore the interrupt state saved by intr_disable().  csrs only sets
+ * bits, so if interrupts were disabled (s == 0) this is a no-op.
+ */
+static inline void
+intr_restore(u_long s)
+{
+	__asm __volatile("csrs sstatus, %0" :: "r" (s));
+}
+
+/* busy-wait (presumably microseconds, per DELAY() convention -- confirm) */
+void	delay (unsigned);
+#define	DELAY(x)	delay(x)
+
+/* FPU context management (defined in machdep/trap code) */
+int	fpu_valid_opcode(uint32_t);
+void	fpu_save(struct proc *, struct trapframe *);
+void	fpu_load(struct proc *);
+void	fpu_discard(struct proc *p);
+
+#endif /* _KERNEL */
+
+#ifdef MULTIPROCESSOR
+#include <sys/mplock.h>
+#endif /* MULTIPROCESSOR */
+
+#endif /* !_MACHINE_CPU_H_ */
--- /dev/null
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: head/sys/cpu/include/cpufunc.h 299683 2016-05-13 16:03:50Z andrew $
+ */
+
+#ifndef _MACHINE_CPUFUNC_H_
+#define _MACHINE_CPUFUNC_H_
+
+/* Trigger a breakpoint exception via the ebreak instruction. */
+static __inline void
+breakpoint(void)
+{
+	__asm("ebreak");
+}
+
+#ifdef _KERNEL
+
+#include <machine/riscvreg.h>
+
+/* Read the unprivileged counter CSRs. */
+#define	rdcycle()		csr_read(cycle)
+#define	rdtime()		csr_read(time)
+#define	rdinstret()		csr_read(instret)
+#define	rdhpmcounter(n)		csr_read(hpmcounter##n)
+
+/* fence.i: synchronize the instruction stream with preceding stores. */
+static __inline void
+fence_i(void)
+{
+	__asm __volatile("fence.i" ::: "memory");
+}
+
+/* Flush all address-translation caches (sfence.vma, no operands). */
+static __inline void
+sfence_vma(void)
+{
+	__asm __volatile("sfence.vma" ::: "memory");
+}
+
+/* Flush translations for one virtual address (all ASIDs). */
+static __inline void
+sfence_vma_page(uintptr_t addr)
+{
+	__asm __volatile("sfence.vma %0"
+			:
+			: "r" (addr)
+			: "memory");
+}
+
+/* XXX ASIDs in riscv64 are only 16 bits. */
+/* Flush all translations belonging to one ASID (rs1 = x0). */
+static __inline void
+sfence_vma_asid(uint64_t asid)
+{
+	__asm __volatile("sfence.vma x0, %0"
+			:
+			: "r" (asid)
+			: "memory");
+}
+
+/* Flush the translation for one virtual address in one ASID. */
+static __inline void
+sfence_vma_page_asid(uintptr_t addr, uint64_t asid)
+{
+	__asm __volatile("sfence.vma %0, %1"
+			 :
+			 : "r" (addr), "r" (asid)
+			 : "memory");
+}
+
+extern int64_t dcache_line_size;
+extern int64_t icache_line_size;
+
+/* Cache maintenance is a no-op here (coherent I/O assumed -- confirm). */
+#define	cpu_dcache_wbinv_range(a, s)
+#define	cpu_dcache_inv_range(a, s)
+#define	cpu_dcache_wb_range(a, s)
+
+#define	cpu_idcache_wbinv_range(a, s)
+#define	cpu_icache_sync_range(a, s)
+#define	cpu_icache_sync_range_checked(a, s)
+
+/* Write the supervisor address-translation register (satp CSR). */
+static __inline void
+load_satp(uint64_t val)
+{
+	__asm __volatile("csrw satp, %0" :: "r"(val));
+}
+
+/* Generic do-nothing cpu function hook. */
+#define cpufunc_nullop()		riscv_nullop()
+
+void riscv_nullop(void);
+
+#endif /* _KERNEL */
+#endif /* _MACHINE_CPUFUNC_H_ */
--- /dev/null
+/*
+ * Copyright (c) 2019 Brian Bamsch <bbamsch@google.com>
+ * Copyright (c) 2015-2016 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * Portions of this software were developed by SRI International and the
+ * University of Cambridge Computer Laboratory under DARPA/AFRL contract
+ * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Portions of this software were developed by the University of Cambridge
+ * Computer Laboratory as part of the CTSRD Project, with support from the
+ * UK Higher Education Innovation Fund (HEIF).
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_DB_MACHDEP_H_
+#define _MACHINE_DB_MACHDEP_H_
+
+#include <sys/param.h>
+#include <uvm/uvm_extern.h>
+#include <machine/riscvreg.h>
+#include <machine/frame.h>
+#include <machine/trap.h>
+
+#define T_BREAKPOINT	(EXCP_BREAKPOINT)
+#define T_WATCHPOINT	(0)
+
+typedef vaddr_t		db_addr_t;
+typedef long		db_expr_t;
+
+typedef trapframe_t	db_regs_t;
+
+extern db_regs_t	ddb_regs;
+#define DDB_REGS	(&ddb_regs)
+
+/*
+ * NOTE(review): PC_REGS returns tf_ra (the return-address register);
+ * elsewhere the saved exception PC is tf_sepc -- confirm which one
+ * ddb expects here.
+ */
+#define PC_REGS(regs)	((db_addr_t)(regs)->tf_ra)
+#define SET_PC_REGS(regs, value)	(regs)->tf_ra = (register_t)(value)
+
+#define BKPT_INST	(KERNEL_BREAKPOINT)
+#define BKPT_SIZE	(INSN_SIZE)
+#define BKPT_SET(inst)	(BKPT_INST)	/* original instruction is ignored */
+
+#define IS_BREAKPOINT_TRAP(type, code)	((type) == T_BREAKPOINT)
+#define IS_WATCHPOINT_TRAP(type, code)	((type) == T_WATCHPOINT)
+
+/* Instruction classification by opcode/encoding. */
+#define inst_trap_return(ins)	(ins == 0x10000073)	/* eret */
+#define inst_return(ins)	(ins == 0x00008067)	/* ret */
+#define inst_call(ins)		(((ins) & 0x7f) == 0x6f || \
+				 ((ins) & 0x7f) == 0x67)	/* jal, jalr */
+#define inst_branch(ins)	(((ins) & 0x7f) == 0x63)	/* branch */
+
+#define next_instr_address(pc, bd)	((bd) ? (pc) : ((pc) + INSN_SIZE))
+
+#define DB_MACHINE_COMMANDS
+
+/* single stepping is implemented in software */
+#define SOFTWARE_SSTEP
+
+int db_trapper(vaddr_t, u_int, trapframe_t *, int);
+void db_machine_init (void);
+db_addr_t db_branch_taken(u_int inst, db_addr_t pc, db_regs_t *regs);
+
+/* "fun" (function-call flag) is unused on this architecture */
+#define branch_taken(ins, pc, fun, regs) \
+	db_branch_taken((ins), (pc), (regs))
+
+/* For ddb_state */
+#define DDB_STATE_NOT_RUNNING	0
+#define DDB_STATE_RUNNING	1
+#define DDB_STATE_EXITING	2
+
+#endif /* !_MACHINE_DB_MACHDEP_H_ */
--- /dev/null
+/* $OpenBSD: disklabel.h,v 1.1 2021/04/23 02:42:16 drahn Exp $ */
+/*
+ * Copyright (c) 2014 Patrick Wildt <patrick@blueri.se>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _MACHINE_DISKLABEL_H_
+#define _MACHINE_DISKLABEL_H_
+
+#define LABELSECTOR	1		/* sector containing label */
+#define LABELOFFSET	0		/* offset of label in sector */
+#define MAXPARTITIONS	16		/* number of partitions */
+
+#endif /* _MACHINE_DISKLABEL_H_ */
--- /dev/null
+/*-
+ * Copyright (c) 1996-1997 John D. Polstra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_ELF_H_
+#define _MACHINE_ELF_H_
+
+/*
+ * ELF definitions for the RISC-V architecture.
+ */
+#if 0 /* FreeBSD-specific block, disabled; kept for reference only */
+#include <sys/elf32.h> /* Definitions common to all 32 bit architectures. */
+#include <sys/elf64.h> /* Definitions common to all 64 bit architectures. */
+
+#define __ELF_WORD_SIZE 64 /* Used by <sys/elf_generic.h> */
+#include <sys/elf_generic.h>
+
+#define ELF_ARCH EM_RISCV
+
+#define ELF_MACHINE_OK(x) ((x) == (ELF_ARCH))
+
+/* Define "machine" characteristics */
+#define ELF_TARG_CLASS ELFCLASS64
+#define ELF_TARG_DATA ELFDATA2LSB
+#define ELF_TARG_MACH EM_RISCV
+#define ELF_TARG_VER 1
+
+/* TODO: set correct value */
+#define ET_DYN_LOAD_ADDR 0x100000
+#endif
+
+/* Flags passed in AT_HWCAP */
+/*
+ * Each single-letter RISC-V ISA extension is advertised to userland as
+ * bit (letter - 'A') of AT_HWCAP: 'A' -> bit 0, ..., 'I' -> bit 8, etc.
+ */
+#define HWCAP_ISA_BIT(c) (1 << ((c) - 'A'))
+#define HWCAP_ISA_I HWCAP_ISA_BIT('I')
+#define HWCAP_ISA_M HWCAP_ISA_BIT('M')
+#define HWCAP_ISA_A HWCAP_ISA_BIT('A')
+#define HWCAP_ISA_F HWCAP_ISA_BIT('F')
+#define HWCAP_ISA_D HWCAP_ISA_BIT('D')
+#define HWCAP_ISA_C HWCAP_ISA_BIT('C')
+/* "G" is the IMAFD base set; note 'C' is advertised separately. */
+#define HWCAP_ISA_G \
+ (HWCAP_ISA_I | HWCAP_ISA_M | HWCAP_ISA_A | HWCAP_ISA_F | HWCAP_ISA_D)
+
+#endif /* !_MACHINE_ELF_H_ */
--- /dev/null
+/* $OpenBSD: endian.h,v 1.1 2021/04/23 02:42:16 drahn Exp $ */
+
+/*
+ * Copyright (c) 2015 David Gwynne <dlg@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _MACHINE_ENDIAN_H_
+#define _MACHINE_ENDIAN_H_
+
+#ifndef __FROM_SYS__ENDIAN
+#include <sys/_types.h>
+#endif
+
+/*
+ * Machine-dependent 16-bit byte swap: exchanges the two bytes of _x.
+ * The intermediate is held in 32 bits and truncated back on return.
+ */
+static __inline __uint16_t
+__swap16md(__uint16_t _x)
+{
+ __uint32_t ret;
+ ret = ((_x >> 8) | ((_x << 8) & 0xff00));
+
+ return ((__uint16_t)ret);
+}
+
+/*
+ * Machine-dependent 32-bit byte swap: reverses the four bytes of _x.
+ */
+static __inline __uint32_t
+__swap32md(__uint32_t _x)
+{
+ return ((_x >> 24) | ((_x >> 8) & 0xff00) | ((_x << 8) & 0xff0000) |
+ ((_x << 24) & 0xff000000));
+}
+
+/*
+ * Machine-dependent 64-bit byte swap: assembles the result one byte at
+ * a time, moving byte N of the input to byte (7 - N) of the output.
+ */
+static __inline __uint64_t
+__swap64md(__uint64_t _x)
+{
+ __uint64_t ret;
+
+ ret = (_x >> 56);
+ ret |= ((_x >> 40) & 0xff00);
+ ret |= ((_x >> 24) & 0xff0000);
+ ret |= ((_x >> 8) & 0xff000000);
+ ret |= ((_x << 8) & ((__uint64_t)0xff << 32));
+ ret |= ((_x << 24) & ((__uint64_t)0xff << 40));
+ ret |= ((_x << 40) & ((__uint64_t)0xff << 48));
+ ret |= (_x << 56);
+
+ return (ret);
+}
+
+/* Tell sys/endian.h we have MD variants of the swap macros. */
+#define __HAVE_MD_SWAP
+
+
+/* riscv64 runs little-endian. */
+#define _BYTE_ORDER _LITTLE_ENDIAN
+/* Tell MI code not to rely on unaligned loads/stores being safe. */
+#define __STRICT_ALIGNMENT
+
+#ifndef __FROM_SYS__ENDIAN
+#include <sys/endian.h>
+#endif
+#endif /* _MACHINE_ENDIAN_H_ */
--- /dev/null
+/* $OpenBSD: exec.h,v 1.1 2021/04/23 02:42:16 drahn Exp $ */
+/*
+ * Copyright (c) 2014 Patrick Wildt <patrick@blueri.se>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _MACHINE_EXEC_H_
+#define _MACHINE_EXEC_H_
+
+/* Page size assumed by the executable/ld.so layout. */
+#define __LDPGSZ 4096
+
+/* riscv64 executables are 64-bit ELF. */
+#define ARCH_ELFSIZE 64
+
+/* Expected ELF header identification: 64-bit class, little-endian, EM_RISCV. */
+#define ELF_TARG_CLASS ELFCLASS64
+#define ELF_TARG_DATA ELFDATA2LSB
+#define ELF_TARG_MACH EM_RISCV
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2016 Patrick Wildt <patrick@blueri.se>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __RISCV_FDT_H__
+#define __RISCV_FDT_H__
+
+#define _RISCV64_BUS_DMA_PRIVATE
+#include <machine/bus.h>
+
+/*
+ * Autoconf attach arguments handed to FDT ("flattened device tree")
+ * device drivers: the node's name and OFW node handle, the bus space
+ * and DMA tags to use, the decoded "reg" and interrupt properties,
+ * and the parent's #address-cells/#size-cells values.
+ */
+struct fdt_attach_args {
+ const char *fa_name; /* node name */
+ int fa_node; /* OFW node handle */
+ bus_space_tag_t fa_iot; /* bus space tag for mappings */
+ bus_dma_tag_t fa_dmat; /* bus DMA tag */
+ struct fdt_reg *fa_reg; /* decoded "reg" entries */
+ int fa_nreg; /* number of fa_reg entries */
+ uint32_t *fa_intr; /* raw interrupt specifier cells */
+ int fa_nintr; /* number of fa_intr cells */
+ int fa_acells; /* parent #address-cells */
+ int fa_scells; /* parent #size-cells */
+};
+
+/* Console discovered from the device tree "stdout-path" — see fdt_find_cons(). */
+extern int stdout_node;
+extern int stdout_speed;
+extern bus_space_tag_t fdt_cons_bs_tag;
+
+void *fdt_find_cons(const char *);
+
+/*
+ * MI drivers use the fdt_intr_* names; map them onto the riscv
+ * interrupt glue implemented in MD code.
+ */
+#define fdt_intr_enable riscv_intr_enable
+#define fdt_intr_establish riscv_intr_establish_fdt
+#define fdt_intr_establish_idx riscv_intr_establish_fdt_idx
+#define fdt_intr_establish_imap riscv_intr_establish_fdt_imap
+#define fdt_intr_establish_msi riscv_intr_establish_fdt_msi
+#define fdt_intr_disable riscv_intr_disable
+#define fdt_intr_disestablish riscv_intr_disestablish_fdt
+#define fdt_intr_get_parent riscv_intr_get_parent
+#define fdt_intr_parent_establish riscv_intr_parent_establish_fdt
+#define fdt_intr_parent_disestablish riscv_intr_parent_disestablish_fdt
+#define fdt_intr_register riscv_intr_register_fdt
+
+#endif /* __RISCV_FDT_H__ */
--- /dev/null
+/* $OpenBSD: fenv.h,v 1.1 2021/04/23 02:42:16 drahn Exp $ */
+
+/*
+ * Copyright (c) 2011 Martynas Venckus <martynas@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _MACHINE_FENV_H_
+#define _MACHINE_FENV_H_
+
+/*
+ * Each symbol representing a floating point exception expands to an integer
+ * constant expression with values, such that bitwise-inclusive ORs of _all
+ * combinations_ of the constants result in distinct values.
+ *
+ * We use such values that allow direct bitwise operations on FPU registers.
+ *
+ * NOTE(review): the values appear to follow the RISC-V fcsr fflags bit
+ * layout (NV=0x10, DZ=0x08, OF=0x04, UF=0x02, NX=0x01) — confirm against
+ * the ISA manual.
+ */
+#define FE_INVALID 0x10
+#define FE_DIVBYZERO 0x08
+#define FE_OVERFLOW 0x04
+#define FE_UNDERFLOW 0x02
+#define FE_INEXACT 0x01
+
+/*
+ * The following symbol is simply the bitwise-inclusive OR of all floating-point
+ * exception constants defined above.
+ */
+#define FE_ALL_EXCEPT (FE_INVALID | FE_DIVBYZERO | FE_OVERFLOW | \
+ FE_UNDERFLOW | FE_INEXACT )
+
+/*
+ * Each symbol representing the rounding direction, expands to an integer
+ * constant expression whose value is distinct non-negative value.
+ *
+ * We use such values that allow direct bitwise operations on FPU registers.
+ */
+#define FE_TONEAREST 0x0
+#define FE_UPWARD 0x1
+#define FE_DOWNWARD 0x2
+#define FE_TOWARDZERO 0x3
+/*
+ * NOTE(review): 0x4 looks like the RISC-V RMM encoding (round to nearest,
+ * ties to max magnitude); it is deliberately not part of _ROUND_MASK below.
+ */
+#define FE_TONEAREST_MAX 0x4
+
+/*
+ * The following symbol is simply the bitwise-inclusive OR of all floating-point
+ * rounding direction constants defined above.
+ */
+#define _ROUND_MASK (FE_TONEAREST | FE_UPWARD | FE_DOWNWARD | \
+ FE_TOWARDZERO)
+#define _ROUND_SHIFT 0
+
+/*
+ * fenv_t represents the entire floating-point environment.
+ */
+typedef unsigned long long fenv_t;
+
+/*
+ * The following constant represents the default floating-point environment
+ * (that is, the one installed at program startup) and has type pointer to
+ * const-qualified fenv_t.
+ *
+ * It can be used as an argument to the functions within the <fenv.h> header
+ * that manage the floating-point environment, namely fesetenv() and
+ * feupdateenv().
+ */
+__BEGIN_DECLS
+extern fenv_t __fe_dfl_env;
+__END_DECLS
+#define FE_DFL_ENV ((const fenv_t *)&__fe_dfl_env)
+
+/*
+ * fexcept_t represents the floating-point status flags collectively, including
+ * any status the implementation associates with the flags.
+ *
+ * A floating-point status flag is a system variable whose value is set (but
+ * never cleared) when a floating-point exception is raised, which occurs as a
+ * side effect of exceptional floating-point arithmetic to provide auxiliary
+ * information.
+ *
+ * A floating-point control mode is a system variable whose value may be set by
+ * the user to affect the subsequent behavior of floating-point arithmetic.
+ */
+typedef unsigned long long fexcept_t;
+
+#endif /* !_MACHINE_FENV_H_ */
--- /dev/null
+/*
+ * Copyright (c) 2019 Brian Bamsch <bbamsch@google.com>
+ * Copyright (c) 2016 Dale Rahn <drahn@dalerahn.com>
+ * Copyright (c) 2015 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * Portions of this software were developed by SRI International and the
+ * University of Cambridge Computer Laboratory under DARPA/AFRL contract
+ * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Portions of this software were developed by the University of Cambridge
+ * Computer Laboratory as part of the CTSRD Project, with support from the
+ * UK Higher Education Innovation Fund (HEIF).
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_FRAME_H_
+#define _MACHINE_FRAME_H_
+
+#ifndef _LOCORE
+
+#include <sys/signal.h>
+
+/*
+ * Exception/Trap Stack Frame
+ *
+ * Register state saved on the kernel stack on entry to the trap handler,
+ * in RISC-V ABI register order, followed by the supervisor trap CSRs.
+ * This layout is ABI between locore.S and C code — do not reorder.
+ */
+#define clockframe trapframe
+typedef struct trapframe {
+ /* Standard Registers */
+ register_t tf_ra; /* return address */
+ register_t tf_sp; /* stack pointer */
+ register_t tf_gp; /* global pointer */
+ register_t tf_tp; /* thread pointer */
+ register_t tf_t[7]; /* temporaries t0-t6 */
+ register_t tf_s[12]; /* callee-saved s0-s11 */
+ register_t tf_a[8]; /* argument/return a0-a7 */
+ /* Supervisor Trap CSRs */
+ register_t tf_sepc; /* exception program counter */
+ register_t tf_sstatus; /* supervisor status */
+ register_t tf_stval; /* trap value (e.g. faulting address) */
+ register_t tf_scause; /* trap cause */
+} trapframe_t;
+
+/*
+ * pushed on stack for signal delivery
+ */
+struct sigframe {
+ int sf_signum; /* signal number */
+ struct sigcontext sf_sc; /* saved context to restore on sigreturn */
+ siginfo_t sf_si; /* signal information */
+};
+
+/*
+ * System stack frames.
+ */
+
+/*
+ * Stack frame inside cpu_switch()
+ */
+struct switchframe {
+ register_t sf_s[12]; /* callee-saved s0-s11 */
+ register_t sf_ra; /* return address to resume at */
+};
+
+/* Standard call frame: previous frame pointer and saved return address. */
+struct callframe {
+ struct callframe *f_frame;
+ register_t f_ra;
+};
+
+#endif /* !_LOCORE */
+
+#endif /* !_MACHINE_FRAME_H_ */
--- /dev/null
+/* $OpenBSD: ieee.h,v 1.1 2021/04/23 02:42:16 drahn Exp $ */
+/* $NetBSD: ieee.h,v 1.1 1996/09/30 16:34:25 ws Exp $ */
+
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This software was developed by the Computer Systems Engineering group
+ * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
+ * contributed to Berkeley.
+ *
+ * All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Lawrence Berkeley Laboratory.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ieee.h 8.1 (Berkeley) 6/11/93
+ */
+
+/*
+ * ieee.h defines the machine-dependent layout of the machine's IEEE
+ * floating point. It does *not* define (yet?) any of the rounding
+ * mode bits, exceptions, and so forth.
+ */
+
+/*
+ * Define the number of bits in each fraction and exponent.
+ *
+ * k k+1
+ * Note that 1.0 x 2 == 0.1 x 2 and that denorms are represented
+ *
+ * (-exp_bias+1)
+ * as fractions that look like 0.fffff x 2 . This means that
+ *
+ * -126
+ * the number 0.10000 x 2 , for instance, is the same as the normalized
+ *
+ * -127 -128
+ * float 1.0 x 2 . Thus, to represent 2 , we need one leading zero
+ *
+ * -129
+ * in the fraction; to represent 2 , we need two, and so on. This
+ *
+ * (-exp_bias-fracbits+1)
+ * implies that the smallest denormalized number is 2
+ *
+ * for whichever format we are talking about: for single precision, for
+ *
+ * -126 -149
+ * instance, we get .00000000000000000000001 x 2 , or 1.0 x 2 , and
+ *
+ * -149 == -127 - 23 + 1.
+ */
+/* IEEE 754 binary32 (single precision): 1 sign + 8 exponent + 23 fraction. */
+#define SNG_EXPBITS 8
+#define SNG_FRACBITS 23
+
+/* IEEE 754 binary64 (double precision): 1 sign + 11 exponent + 52 fraction. */
+#define DBL_EXPBITS 11
+#define DBL_FRACHBITS 20
+#define DBL_FRACLBITS 32
+#define DBL_FRACBITS 52
+
+/* Extended precision is binary128 (quad): 1 sign + 15 exponent + 112 fraction. */
+#define EXT_EXPBITS 15
+#define EXT_FRACHBITS 16
+#define EXT_FRACHMBITS 32
+#define EXT_FRACLMBITS 32
+#define EXT_FRACLBITS 32
+#define EXT_FRACBITS 112
+
+/* The extended format uses an implicit leading integer bit. */
+#define EXT_IMPLICIT_NBIT
+
+/* Copy the 112-bit extended fraction into four 32-bit words, LSW first. */
+#define EXT_TO_ARRAY32(p, a) do { \
+ (a)[0] = (uint32_t)(p)->ext_fracl; \
+ (a)[1] = (uint32_t)(p)->ext_fraclm; \
+ (a)[2] = (uint32_t)(p)->ext_frachm; \
+ (a)[3] = (uint32_t)(p)->ext_frach; \
+} while(0)
+
+/* Bitfield views of the formats; fields run LSB-first on this LE target. */
+struct ieee_single {
+ u_int sng_frac:23;
+ u_int sng_exp:8;
+ u_int sng_sign:1;
+};
+
+struct ieee_double {
+ u_int dbl_fracl; /* low 32 fraction bits */
+ u_int dbl_frach:20; /* high 20 fraction bits */
+ u_int dbl_exp:11;
+ u_int dbl_sign:1;
+};
+
+struct ieee_ext {
+ u_int ext_fracl; /* low 32 fraction bits */
+ u_int ext_fraclm; /* low-middle 32 fraction bits */
+ u_int ext_frachm; /* high-middle 32 fraction bits */
+ u_int ext_frach:16; /* high 16 fraction bits */
+ u_int ext_exp:15;
+ u_int ext_sign:1;
+};
+
+/*
+ * Floats whose exponent is in [1..INFNAN) (of whatever type) are
+ * `normal'. Floats whose exponent is INFNAN are either Inf or NaN.
+ * Floats whose exponent is zero are either zero (iff all fraction
+ * bits are zero) or subnormal values.
+ *
+ * A NaN is a `signalling NaN' if its QUIETNAN bit is clear in its
+ * high fraction; if the bit is set, it is a `quiet NaN'.
+ */
+#define SNG_EXP_INFNAN 255
+#define DBL_EXP_INFNAN 2047
+#define EXT_EXP_INFNAN 32767
+
+#if 0
+#define SNG_QUIETNAN (1 << 22)
+#define DBL_QUIETNAN (1 << 19)
+#define EXT_QUIETNAN (1 << 15)
+#endif
+
+/*
+ * Exponent biases.
+ */
+#define SNG_EXP_BIAS 127
+#define DBL_EXP_BIAS 1023
+#define EXT_EXP_BIAS 16383
--- /dev/null
+/* $OpenBSD: ieeefp.h,v 1.1 2021/04/23 02:42:16 drahn Exp $ */
+/* $NetBSD: ieeefp.h,v 1.1 2001/01/10 19:02:06 bjh21 Exp $ */
+
+/*
+ * Based on ieeefp.h written by J.T. Conklin, Apr 28, 1995
+ * Public domain.
+ */
+
+#ifndef _MACHINE_IEEEFP_H_
+#define _MACHINE_IEEEFP_H_
+
+/*
+ * Exception-bit numbers and rounding modes used by the fpgetmask()/
+ * fpsetmask() family of interfaces.
+ */
+
+/* FP exception codes */
+
+#define FP_EXCEPT_INV 0
+#define FP_EXCEPT_DZ 1
+#define FP_EXCEPT_OFL 2
+#define FP_EXCEPT_UFL 3
+#define FP_EXCEPT_IMP 4
+
+/* Exception type (used by fpsetmask() et al.) */
+
+typedef int fp_except;
+
+/* Bit defines for fp_except */
+
+#define FP_X_INV (1 << FP_EXCEPT_INV) /* invalid operation exception */
+#define FP_X_DZ (1 << FP_EXCEPT_DZ) /* divide-by-zero exception */
+#define FP_X_OFL (1 << FP_EXCEPT_OFL) /* overflow exception */
+#define FP_X_UFL (1 << FP_EXCEPT_UFL) /* underflow exception */
+#define FP_X_IMP (1 << FP_EXCEPT_IMP) /* imprecise (loss of precision; "inexact") */
+#define FP_X_MASK 0x1f
+
+/* Rounding modes */
+
+typedef enum {
+ FP_RN=0, /* round to nearest representable number */
+ FP_RP=1, /* round toward positive infinity */
+ FP_RM=2, /* round toward negative infinity */
+ FP_RZ=3 /* round to zero (truncate) */
+} fp_rnd;
+
+#endif /* _MACHINE_IEEEFP_H_ */
--- /dev/null
+/*
+ * Copyright (c) 2001-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef _MACHINE_INTR_H_
+#define _MACHINE_INTR_H_
+
+/*
+ * The interrupt level ipl is a logical level; per-platform interrupt
+ * code will turn it into the appropriate hardware interrupt masks
+ * values.
+ *
+ * Interrupt sources on the CPU are kept enabled regardless of the
+ * current ipl value; individual hardware sources interrupting while
+ * logically masked are masked on the fly, remembered as pending, and
+ * unmasked at the first splx() opportunity.
+ */
+#ifdef _KERNEL
+
+/* Interrupt priority `levels'; not mutually exclusive. */
+#define IPL_NONE 0 /* nothing */
+#define IPL_SOFT 1 /* soft interrupts */
+#define IPL_SOFTCLOCK 2 /* soft clock interrupts */
+#define IPL_SOFTNET 3 /* soft network interrupts */
+#define IPL_SOFTTTY 4 /* soft terminal interrupts */
+#define IPL_BIO 5 /* block I/O */
+#define IPL_NET 6 /* network */
+#define IPL_TTY 7 /* terminal */
+#define IPL_VM 8 /* memory allocation */
+#define IPL_AUDIO 9 /* audio */
+#define IPL_CLOCK 10 /* clock */
+#define IPL_SCHED IPL_CLOCK
+#define IPL_STATCLOCK IPL_CLOCK
+#define IPL_HIGH 11 /* everything */
+#define IPL_IPI 12 /* interprocessor interrupt */
+#define NIPL 13 /* number of levels */
+
+/* Lowest IPL at which MP-safe handlers may run with the kernel lock dropped. */
+#define IPL_MPFLOOR IPL_TTY
+/* Interrupt priority 'flags'. */
+#define IPL_IRQMASK 0xf /* priority only */
+#define IPL_FLAGMASK 0xf00 /* flags only*/
+#define IPL_MPSAFE 0x100 /* 'mpsafe' interrupt, no kernel lock */
+
+/* Interrupt sharing types. */
+#define IST_NONE 0 /* none */
+#define IST_PULSE 1 /* pulsed */
+#define IST_EDGE 2 /* edge-triggered */
+#define IST_LEVEL 3 /* level-triggered */
+
+#define IST_LEVEL_LOW IST_LEVEL
+#define IST_LEVEL_HIGH 4
+#define IST_EDGE_FALLING IST_EDGE
+#define IST_EDGE_RISING 5
+#define IST_EDGE_BOTH 6
+
+/* RISCV interrupt mcause, from freebsd */
+#define RISCV_NIRQ 1024
+
+#ifndef NIRQ
+#define NIRQ RISCV_NIRQ
+#endif
+
+/*
+ * Interrupt causes, in mcause/scause code order (software, timer,
+ * external — each for user/supervisor/hypervisor/machine mode).
+ */
+enum {
+ IRQ_SOFTWARE_USER,
+ IRQ_SOFTWARE_SUPERVISOR,
+ IRQ_SOFTWARE_HYPERVISOR,
+ IRQ_SOFTWARE_MACHINE,
+ IRQ_TIMER_USER,
+ IRQ_TIMER_SUPERVISOR,
+ IRQ_TIMER_HYPERVISOR,
+ IRQ_TIMER_MACHINE,
+ IRQ_EXTERNAL_USER,
+ IRQ_EXTERNAL_SUPERVISOR,
+ IRQ_EXTERNAL_HYPERVISOR,
+ IRQ_EXTERNAL_MACHINE,
+ INTC_NIRQS
+};
+
+#ifndef _LOCORE
+#include <sys/device.h>
+#include <sys/queue.h>
+
+#include <machine/frame.h>
+
+/*
+ * spl interface. Declared here, but the macros below redirect the
+ * calls through the riscv_intr_func function-pointer table so platform
+ * code can install its own implementation (riscv_set_intr_func()).
+ */
+int splraise(int);
+int spllower(int);
+void splx(int);
+
+void riscv_cpu_intr(void *);
+void riscv_do_pending_intr(int);
+void riscv_set_intr_func(int (*raise)(int), int (*lower)(int),
+ void (*x)(int), void (*setipl)(int));
+void riscv_set_intr_handler(void (*intr_handle)(void *));
+
+/* Pluggable spl implementation installed by riscv_set_intr_func(). */
+struct riscv_intr_func {
+ int (*raise)(int);
+ int (*lower)(int);
+ void (*x)(int);
+ void (*setipl)(int);
+};
+
+extern struct riscv_intr_func riscv_intr_func;
+
+/* These macros intentionally shadow the prototypes above. */
+#define splraise(cpl) (riscv_intr_func.raise(cpl))
+#define _splraise(cpl) (riscv_intr_func.raise(cpl))
+#define spllower(cpl) (riscv_intr_func.lower(cpl))
+#define splx(cpl) (riscv_intr_func.x(cpl))
+
+/* Conventional per-subsystem spl helpers. */
+#define splsoft() splraise(IPL_SOFT)
+#define splsoftclock() splraise(IPL_SOFTCLOCK)
+#define splsoftnet() splraise(IPL_SOFTNET)
+#define splsofttty() splraise(IPL_SOFTTTY)
+#define splbio() splraise(IPL_BIO)
+#define splnet() splraise(IPL_NET)
+#define spltty() splraise(IPL_TTY)
+#define splvm() splraise(IPL_VM)
+#define splaudio() splraise(IPL_AUDIO)
+#define splclock() splraise(IPL_CLOCK)
+#define splsched() splraise(IPL_SCHED)
+#define splstatclock() splraise(IPL_STATCLOCK)
+#define splhigh() splraise(IPL_HIGH)
+
+#define spl0() spllower(IPL_NONE)
+
+#include <machine/riscvreg.h>
+
+void intr_barrier(void *);
+
+/*
+ * Enable supervisor-mode interrupt delivery by setting SSTATUS_SIE
+ * (csrsi: CSR set-bits, immediate operand).
+ */
+static inline void
+enable_interrupts(void)
+{
+ __asm volatile(
+ "csrsi sstatus, %0"
+ :: "i" (SSTATUS_SIE)
+ );
+}
+
+/*
+ * Disable supervisor-mode interrupt delivery by clearing SSTATUS_SIE
+ * (csrrci: atomic CSR read-and-clear). Returns the previous state of
+ * the SIE bit only, suitable for restore_interrupts().
+ */
+static inline uint64_t
+disable_interrupts(void)
+{
+ uint64_t ret;
+
+ __asm volatile(
+ "csrrci %0, sstatus, %1"
+ : "=&r" (ret) : "i" (SSTATUS_SIE)
+ );
+
+ return (ret & (SSTATUS_SIE));
+}
+
+/*
+ * Re-enable interrupts iff the SIE bit was set in the value previously
+ * returned by disable_interrupts() (csrs sets bits, never clears).
+ */
+static inline void
+restore_interrupts(uint64_t s)
+{
+ __asm volatile(
+ "csrs sstatus, %0"
+ :: "r" (s & (SSTATUS_SIE))
+ );
+}
+
+void riscv_init_smask(void); /* XXX */
+extern uint32_t riscv_smask[NIPL];
+
+#include <machine/softintr.h>
+
+/* Register the clock driver's init/setstatclock/rearm/trigger callbacks. */
+void riscv_clock_register(void (*)(void), void (*)(u_int), void (*)(int),
+ void (*)(void));
+
+/*
+ **** interrupt controller structure and routines ****
+ */
+struct cpu_info;
+/*
+ * One registered FDT interrupt controller: its device tree node and
+ * phandle, the driver cookie, and the driver's establish/enable/route
+ * entry points used by the riscv_intr_*_fdt() wrappers below.
+ */
+struct interrupt_controller {
+ int ic_node; /* OFW node handle */
+ void *ic_cookie; /* driver-private argument */
+ void *(*ic_establish)(void *, int *, int, int (*)(void *),
+ void *, char *);
+ void (*ic_disestablish)(void *);
+ void (*ic_enable)(void *);
+ void (*ic_disable)(void *);
+ void (*ic_route)(void *, int, struct cpu_info *);
+ void (*ic_cpu_enable)(void);
+
+ LIST_ENTRY(interrupt_controller) ic_list;
+ uint32_t ic_phandle; /* interrupt-parent phandle */
+ uint32_t ic_cells; /* #interrupt-cells */
+};
+
+void riscv_intr_init_fdt(void);
+void riscv_intr_register_fdt(struct interrupt_controller *);
+void *riscv_intr_establish_fdt(int, int, int (*)(void *),
+ void *, char *);
+void *riscv_intr_establish_fdt_idx(int, int, int, int (*)(void *),
+ void *, char *);
+void riscv_intr_disestablish_fdt(void *);
+void riscv_intr_enable(void *);
+void riscv_intr_disable(void *);
+void riscv_intr_route(void *, int, struct cpu_info *);
+void riscv_intr_cpu_enable(void);
+
+/* Inter-processor interrupts; the send hook is installed by the IPI driver. */
+void riscv_send_ipi(struct cpu_info *, int);
+extern void (*intr_send_ipi_func)(struct cpu_info *, int);
+
+#define riscv_IPI_NOP 0
+#define riscv_IPI_DDB 1
+
+#ifdef DIAGNOSTIC
+/*
+ * Although this function is implemented in MI code, it must be in this MD
+ * header because we don't want this header to include MI includes.
+ */
+void splassert_fail(int, int, const char *);
+extern int splassert_ctl;
+void riscv_splassert_check(int, const char *);
+/* Assert that the current IPL is at least __wantipl (when splassert_ctl > 0). */
+#define splassert(__wantipl) do { \
+ if (splassert_ctl > 0) { \
+ riscv_splassert_check(__wantipl, __func__); \
+ } \
+} while (0)
+#define splsoftassert(wantipl) splassert(wantipl)
+#else
+#define splassert(wantipl) do { /* nothing */ } while (0)
+#define splsoftassert(wantipl) do { /* nothing */ } while (0)
+#endif
+
+#endif /* ! _LOCORE */
+
+#endif /* _KERNEL */
+
+#endif /* _MACHINE_INTR_H_ */
+
+
--- /dev/null
+/* $OpenBSD: kcore.h,v 1.1 2021/04/23 02:42:16 drahn Exp $ */
+/* public domain */
+
+/*
+ * Must be at least the number of physical RAM segments the platform
+ * reports. (Comment inherited from the arm64 port, which compares this
+ * against DRAM_BLOCKS — TODO confirm the bound for riscv64.)
+ */
+#define NPHYS_RAM_SEGS 8
+
+/*
+ * Kernel core dump header: enough information for savecore(8)/libkvm
+ * to translate kernel virtual addresses in the dump.
+ */
+typedef struct cpu_kcore_hdr {
+ u_int64_t kernelbase; /* value of KERNEL_BASE */
+ u_int64_t kerneloffs; /* offset of kernel in RAM */
+ u_int64_t staticsize; /* size of contiguous mapping */
+ u_int64_t pmap_kernel_l1; /* pmap_kernel()->pm_l1 */
+ u_int64_t pmap_kernel_l2; /* pmap_kernel()->pm_l2 */
+ u_int64_t reserved[11]; /* padding for future use */
+ phys_ram_seg_t ram_segs[NPHYS_RAM_SEGS]; /* physical RAM ranges */
+} cpu_kcore_hdr_t;
--- /dev/null
+/* $OpenBSD: limits.h,v 1.1 2021/04/23 02:42:16 drahn Exp $ */
+/* $NetBSD: limits.h,v 1.4 2003/04/28 23:16:18 bjh21 Exp $ */
+
+/*
+ * Copyright (c) 1988 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)limits.h 7.2 (Berkeley) 6/28/90
+ */
+
+#ifndef _MACHINE_LIMITS_H_
+#define _MACHINE_LIMITS_H_
+
+#include <sys/cdefs.h>
+
+/* ssize_t is a long on this platform, so its maximum is LONG_MAX. */
+#if __POSIX_VISIBLE || __XPG_VISIBLE
+#define SSIZE_MAX LONG_MAX /* max value for a ssize_t */
+#endif
+
+#if __BSD_VISIBLE
+#define SIZE_T_MAX ULONG_MAX /* max value for a size_t (historic) */
+
+/* Historic BSD "quad" limits; quads are plain longs on this 64-bit port. */
+#define UQUAD_MAX (ULONG_MAX) /* max unsigned quad */
+#define QUAD_MAX (LONG_MAX) /* max signed quad */
+#define QUAD_MIN (LONG_MIN) /* min signed quad */
+
+#endif /* __BSD_VISIBLE */
+
+#endif /* _MACHINE_LIMITS_H_ */
--- /dev/null
+/* $OpenBSD: loadfile_machdep.h,v 1.1 2021/04/23 02:42:16 drahn Exp $ */
+/* $NetBSD: loadfile_machdep.h,v 1.1 1999/04/29 03:17:12 tsubai Exp $ */
+
+/*-
+ * Copyright (c) 1999 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Christos Zoulas.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* The bootloader loads 64-bit ELF kernels. */
+#define BOOT_ELF
+#define ELFSIZE 64
+
+#define LOAD_KERNEL (LOAD_ALL & ~LOAD_TEXTA)
+#define COUNT_KERNEL (COUNT_ALL & ~COUNT_TEXTA)
+
+extern u_long efi_loadaddr;
+/*
+ * Rebase a kernel link address into the region the firmware loaded us
+ * at: keep the low 38 bits of (a + offset) and add efi_loadaddr.
+ * NOTE(review): `offset' is a variable expected to be in scope at each
+ * use site (loadfile convention) — confirm against the callers.
+ */
+#define LOADADDR(a) (((((u_long)(a)) + offset)&0x3fffffffff) + \
+ efi_loadaddr)
+#define ALIGNENTRY(a) ((u_long)(a))
+/* I/O primitives used by the MI loadfile code, routed through LOADADDR(). */
+#define READ(f, b, c) read((f), (void *)LOADADDR(b), (c))
+#define BCOPY(s, d, c) memcpy((void *)LOADADDR(d), (void *)(s), (c))
+#define BZERO(d, c) memset((void *)LOADADDR(d), 0, (c))
+#define WARN(a) (void)(printf a, \
+ printf((errno ? ": %s\n" : "\n"), \
+ strerror(errno)))
+#define PROGRESS(a) (void) printf a
+#define ALLOC(a) alloc(a)
+#define FREE(a, b) free(a, b)
+
+void run_loadfile(uint64_t *, int);
--- /dev/null
+/* $OpenBSD: loadfile_machdep.h,v 1.1 2021/04/23 02:42:16 drahn Exp $ */
+/* $NetBSD: loadfile_machdep.h,v 1.1 1999/04/29 03:17:12 tsubai Exp $ */
+
+/*-
+ * Copyright (c) 1999 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Christos Zoulas.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Definitions consumed by the MI sys/lib/libsa loadfile code. */
+#define BOOT_ELF
+#define ELFSIZE 64
+
+#define LOAD_KERNEL (LOAD_ALL & ~LOAD_TEXTA)
+#define COUNT_KERNEL (COUNT_ALL & ~COUNT_TEXTA)
+
+extern u_long efi_loadaddr;
+/* Rebase a kernel link address (low 38 bits) into the staging area
+ * at efi_loadaddr; "offset" comes from the including file. */
+#define LOADADDR(a) (((((u_long)(a)) + offset)&0x3fffffffff) + \
+ efi_loadaddr)
+#define ALIGNENTRY(a) ((u_long)(a))
+#define READ(f, b, c) read((f), (void *)LOADADDR(b), (c))
+#define BCOPY(s, d, c) memcpy((void *)LOADADDR(d), (void *)(s), (c))
+#define BZERO(d, c) memset((void *)LOADADDR(d), 0, (c))
+#define WARN(a) (void)(printf a, \
+ printf((errno ? ": %s\n" : "\n"), \
+ strerror(errno)))
+#define PROGRESS(a) (void) printf a
+#define ALLOC(a) alloc(a)
+#define FREE(a, b) free(a, b)
+
+void run_loadfile(uint64_t *, int);
--- /dev/null
+/* $OpenBSD: mutex.h,v 1.1 2021/04/23 02:42:16 drahn Exp $ */
+
+/* Use the machine-independent mutex implementation (<sys/mutex.h>). */
+#define __USE_MI_MUTEX
--- /dev/null
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_PARAM_H_
+#define _MACHINE_PARAM_H_
+
+#ifdef _KERNEL
+#ifndef _LOCORE
+#include <machine/cpu.h>
+#endif
+#endif
+
+/*
+ * Machine and architecture identification, consumed by <sys/param.h>
+ * and friends.  (Was misspelled "_MACHINE_ARC"; all ports pair
+ * _MACHINE_ARCH with MACHINE_ARCH.)
+ */
+#define _MACHINE riscv64
+#define MACHINE "riscv64"
+#define _MACHINE_ARCH riscv64
+#define MACHINE_ARCH "riscv64"
+#define MID_MACHINE MID_RISCV64
+
+/* 4KB base pages */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1 << PAGE_SHIFT)
+#define PAGE_MASK (PAGE_SIZE - 1)
+
+#ifdef _KERNEL
+
+#define NBPG PAGE_SIZE /* bytes/page */
+#define PGSHIFT PAGE_SHIFT /* LOG2(PAGE_SIZE) */
+#define PGOFSET PAGE_MASK /* byte offset into page */
+
+#define UPAGES 5 /* XXX pages of u-area */
+#define USPACE (UPAGES * PAGE_SIZE) /* XXX total size of u-area */
+#define USPACE_ALIGN 0 /* XXX u-area alignment 0-none */
+
+#define NMBCLUSTERS (64 * 1024) /* XXX max cluster allocation */
+
+#ifndef MSGBUFSIZE
+#define MSGBUFSIZE (16 * PAGE_SIZE) /* XXX default message buffer size */
+#endif
+
+#ifndef KSTACK_PAGES
+#define KSTACK_PAGES 4 /* pages of kernel stack, with pcb */
+#endif
+
+/*
+ * XXX Maximum size of the kernel malloc arena in PAGE_SIZE-sized
+ * logical pages.
+ */
+#define NKMEMPAGES_MAX_DEFAULT ((128 * 1024 * 1024) >> PAGE_SHIFT)
+
+/* Stacks are kept 16-byte aligned. */
+#define STACKALIGNBYTES (16 - 1)
+#define STACKALIGN(p) ((u_long)(p) &~ STACKALIGNBYTES)
+
+// XXX Advanced Configuration and Power Interface
+#define __HAVE_ACPI
+// XXX Flattened Device Tree
+#define __HAVE_FDT
+
+#endif /* _KERNEL */
+
+#endif /* _MACHINE_PARAM_H_ */
--- /dev/null
+/*
+ * Copyright (c) 2016 Dale Rahn <drahn@dalerahn.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _MACHINE_PCB_H_
+#define _MACHINE_PCB_H_
+
+#include <machine/frame.h>
+
+#include <machine/pte.h>
+#include <machine/reg.h>
+
+struct trapframe;
+
+/*
+ * Per-thread kernel state (process control block).
+ * Warning: certain fields must be within 256 bytes of the beginning
+ * of this structure.
+ */
+struct pcb {
+ u_int pcb_flags;
+#define PCB_FPU 0x00000001 /* Process had FPU initialized */
+#define PCB_SINGLESTEP 0x00000002 /* Single step process */
+ struct trapframe *pcb_tf;
+
+ register_t pcb_sp; // stack pointer of switchframe
+
+ caddr_t pcb_onfault; // On fault handler
+ struct fpreg pcb_fpstate; // Floating Point state
+ struct cpu_info *pcb_fpcpu; // presumably the CPU holding this
+ // FPU context, if any -- verify
+};
+#endif /* _MACHINE_PCB_H_ */
--- /dev/null
+/*
+ * Copyright (c) 2019-2020 Brian Bamsch <bbamsch@google.com>
+ * Copyright (c) 2008,2009,2014 Dale Rahn <drahn@dalerahn.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _MACHINE_PMAP_H_
+#define _MACHINE_PMAP_H_
+
+#ifndef _LOCORE
+#include <sys/mutex.h>
+#include <sys/queue.h>
+#include <machine/pte.h>
+#endif
+
+
+/* V->P mapping data */
+// XXX Only targeting compatibility with SV39
+/*
+ * Virtual-address index fields, one per page-table level: CNT entries
+ * per table, POS = low bit of the field within the VA.  VP_IDX0
+ * (bits 39+) would only come into play with Sv48.
+ */
+#define VP_IDX0_CNT 512
+#define VP_IDX0_MASK (VP_IDX0_CNT-1)
+#define VP_IDX0_POS 39
+#define VP_IDX1_CNT 512
+#define VP_IDX1_MASK (VP_IDX1_CNT-1)
+#define VP_IDX1_POS 30
+#define VP_IDX2_CNT 512
+#define VP_IDX2_MASK (VP_IDX2_CNT-1)
+#define VP_IDX2_POS 21
+#define VP_IDX3_CNT 512
+#define VP_IDX3_MASK (VP_IDX3_CNT-1)
+#define VP_IDX3_POS 12
+
+/* cache flags, encoded in the MD bits of pmap_enter() flags */
+// XXX These are duplicated from arm64 and may need some reworking
+#define PMAP_CACHE_CI (PMAP_MD0) /* cache inhibit */
+#define PMAP_CACHE_WT (PMAP_MD1) /* writethru */
+#define PMAP_CACHE_WB (PMAP_MD1|PMAP_MD0) /* writeback */
+#define PMAP_CACHE_DEV (PMAP_MD2) /* device mapping */
+#define PMAP_CACHE_BITS (PMAP_MD0|PMAP_MD1|PMAP_MD2)
+
+/* software attribute bits kept per pte descriptor */
+#define PTED_VA_MANAGED_M (PMAP_MD3)
+#define PTED_VA_WIRED_M (PMAP_MD3 << 1)
+#define PTED_VA_EXEC_M (PMAP_MD3 << 2)
+#if defined(_KERNEL) && !defined(_LOCORE)
+/*
+ * Pmap stuff
+ */
+
+typedef struct pmap *pmap_t;
+
+struct pmap {
+ struct mutex pm_mtx; // pmap lock
+ union {
+ // XXX Sv48 not yet supported
+ // XXX Consider inverting Lx
+ struct pmapvp1 *l1; /* virtual to physical table 3 lvl */
+ } pm_vp;
+ uint64_t pm_satp; // presumably the satp value for this
+ // pmap (mode/ASID/root PPN) -- verify
+ int pm_privileged;
+ int pm_refs; /* ref count */
+ struct pmap_statistics pm_stats; /* pmap statistics */
+};
+
+#define PMAP_PA_MASK ~((paddr_t)PAGE_MASK) /* to remove the flags */
+#define PMAP_NOCACHE 0x1 /* non-cacheable memory */
+#define PMAP_DEVICE 0x2 /* device memory */
+
+/* MD usage of the MI vm_page pg_flags bits */
+#define PG_PMAP_MOD PG_PMAP0
+#define PG_PMAP_REF PG_PMAP1
+#define PG_PMAP_EXE PG_PMAP2
+
+// [NCPUS]
+/* special-purpose pages used by the pmap copy/zero primitives */
+extern paddr_t zero_page;
+extern paddr_t copy_src_page;
+extern paddr_t copy_dst_page;
+
+void pagezero(vaddr_t);
+
+extern struct pmap kernel_pmap_;
+#define pmap_kernel() (&kernel_pmap_)
+#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
+#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count)
+
+/* early bootstrap: returns the first available kernel VA */
+vaddr_t pmap_bootstrap(long kvo, paddr_t lpt1,
+ vaddr_t kernelstart, vaddr_t kernelend,
+ paddr_t fdt_start, paddr_t fdt_end,
+ paddr_t ram_start, paddr_t ram_end);
+void pmap_kenter_cache(vaddr_t va, paddr_t pa, vm_prot_t prot, int cacheable);
+void pmap_page_ro(pmap_t pm, vaddr_t va, vm_prot_t prot);
+
+/* boot-time physical memory allocator */
+paddr_t pmap_steal_avail(size_t size, int align, void **kva);
+/* NOTE(review): empty parens declare unspecified args in C; (void) preferred */
+void pmap_avail_fixup();
+void pmap_physload_avail();
+
+#define PMAP_GROWKERNEL
+
+struct pv_entry;
+
+/* investigate */
+#define pmap_unuse_final(p) do { /* nothing */ } while (0)
+int pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t, int);
+void pmap_postinit(void);
+
+#endif /* _KERNEL && !_LOCORE */
+
+#ifndef _LOCORE
+#define __HAVE_VM_PAGE_MD
+/* MD per-vm_page data: list of pte descriptors mapping the page */
+struct vm_page_md {
+ struct mutex pv_mtx;
+ LIST_HEAD(,pte_desc) pv_list;
+};
+
+#define VM_MDPAGE_INIT(pg) do { \
+ mtx_init(&(pg)->mdpage.pv_mtx, IPL_VM); \
+ LIST_INIT(&((pg)->mdpage.pv_list)); \
+} while (0)
+#endif /* _LOCORE */
+
+#endif /* _MACHINE_PMAP_H_ */
--- /dev/null
+/* $NetBSD: proc.h,v 1.1 2003/04/26 18:39:46 fvdl Exp $ */
+
+/*
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)proc.h 7.1 (Berkeley) 5/15/91
+ */
+
+#ifndef _MACHINE_PROC_H_
+#define _MACHINE_PROC_H_
+
+/*
+ * Machine-dependent part of the proc structure for riscv64.
+ */
+struct mdproc {
+ volatile int md_astpending; /* AST (asynchronous system trap) pending */
+};
+
+#endif /* _MACHINE_PROC_H_ */
--- /dev/null
+/* $OpenBSD: profile.h,v 1.1 2021/04/23 02:42:16 drahn Exp $ */
+/*
+ * Copyright (c) 2015 Dale Rahn <drahn@dalerahn.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* C-level profiling handler; __mcount below is the asm entry stub. */
+#define _MCOUNT_DECL void _mcount
+
+#define MCOUNT_ASM_NAME "__mcount"
+
+#ifdef __PIC__
+#define PLTSYM "" /* XXX -aarch64 defaults to PLT? */
+#else
+#define PLTSYM ""
+#endif
+
+/*
+ * __mcount: save ra and all argument/saved registers, then call
+ * _mcount with a0 = 8(fp) (presumably the caller's saved return
+ * address, i.e. frompc -- verify frame layout) and a1 = our own ra
+ * (x1, the call site in the instrumented function), then restore
+ * everything and return.  NOTE(review): slot 16(sp) is left unused.
+ */
+#define MCOUNT \
+__asm__ (".text \n;" \
+ ".align 3 \n;" \
+ ".globl " MCOUNT_ASM_NAME " \n;" \
+ ".type " MCOUNT_ASM_NAME ",@function \n;" \
+ MCOUNT_ASM_NAME ": \n;" \
+ " addi sp, sp, -176 \n" \
+ " sd fp, 0(sp) \n" \
+ " sd ra, 8(sp) \n" \
+ " sd s1, 24(sp) \n" \
+ " sd a0, 32(sp) \n" \
+ " sd a1, 40(sp) \n" \
+ " sd a2, 48(sp) \n" \
+ " sd a3, 56(sp) \n" \
+ " sd a4, 64(sp) \n" \
+ " sd a5, 72(sp) \n" \
+ " sd a6, 80(sp) \n" \
+ " sd a7, 88(sp) \n" \
+ " sd s2, 96(sp) \n" \
+ " sd s3, 104(sp) \n" \
+ " sd s4, 112(sp) \n" \
+ " sd s5, 120(sp) \n" \
+ " sd s6, 128(sp) \n" \
+ " sd s7, 136(sp) \n" \
+ " sd s8, 144(sp) \n" \
+ " sd s9, 152(sp) \n" \
+ " sd s10, 160(sp) \n" \
+ " sd s11, 168(sp) \n" \
+ " ld a0, 8(fp) \n" \
+ " mv a1, x1 \n" \
+ " call " __STRING(_mcount) PLTSYM " \n" \
+ /* restore argument registers */ \
+ " ld fp, 0(sp) \n" \
+ " ld ra, 8(sp) \n" \
+ " ld s1, 24(sp) \n" \
+ " ld a0, 32(sp) \n" \
+ " ld a1, 40(sp) \n" \
+ " ld a2, 48(sp) \n" \
+ " ld a3, 56(sp) \n" \
+ " ld a4, 64(sp) \n" \
+ " ld a5, 72(sp) \n" \
+ " ld a6, 80(sp) \n" \
+ " ld a7, 88(sp) \n" \
+ " ld s2, 96(sp) \n" \
+ " ld s3, 104(sp) \n" \
+ " ld s4, 112(sp) \n" \
+ " ld s5, 120(sp) \n" \
+ " ld s6, 128(sp) \n" \
+ " ld s7, 136(sp) \n" \
+ " ld s8, 144(sp) \n" \
+ " ld s9, 152(sp) \n" \
+ " ld s10, 160(sp) \n" \
+ " ld s11, 168(sp) \n" \
+ " addi sp, sp, 176 \n" \
+ " jr ra \n");
+
+#ifdef _KERNEL
+/*
+ * _mcount is not re-entrant: block supervisor interrupts around it by
+ * clearing sstatus.SIE (bit 1), saving the previous sstatus in `s'
+ * and restoring it on exit.  (The previous code used the arm64
+ * `daif' system registers, which do not exist on RISC-V.)
+ */
+#define MCOUNT_ENTER \
+__asm__ ("csrrci %0, sstatus, 0x2": "=r"(s));
+#define MCOUNT_EXIT \
+__asm__ ("csrw sstatus, %0":: "r"(s));
+
+#endif // _KERNEL
--- /dev/null
+/*
+ * Copyright (c) 2019 Brian Bamsch <bbamsch@google.com>
+ * Copyright (c) 2014 Dale Rahn <drahn@dalerahn.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _RISCV64_PTE_H_
+#define _RISCV64_PTE_H_
+
+#include "machine/vmparam.h"
+
+#define Lx_TABLE_ALIGN (4096)
+
+/* Block and Page attributes */
+/* Bits 9:8 are reserved for software */
+#define PTE_ATTR_MASK (0x3ffUL)
+#define PTE_SW_MANAGED (1 << 9)
+#define PTE_SW_WIRED (1 << 8)
+#define PTE_D (1 << 7) /* Dirty */
+#define PTE_A (1 << 6) /* Accessed */
+#define PTE_G (1 << 5) /* Global */
+#define PTE_U (1 << 4) /* User */
+#define PTE_X (1 << 3) /* Execute */
+#define PTE_W (1 << 2) /* Write */
+#define PTE_R (1 << 1) /* Read */
+#define PTE_V (1 << 0) /* Valid */
+#define PTE_RWX (PTE_R | PTE_W | PTE_X)
+#define PTE_RX (PTE_R | PTE_X)
+#define PTE_KERN (PTE_V | PTE_R | PTE_W | PTE_A | PTE_D)
+/* attribute bits carried along when promoting a mapping
+ * (the closing parenthesis was missing, breaking any use) */
+#define PTE_PROMOTE (PTE_V | PTE_RWX | PTE_D | PTE_A | PTE_G | PTE_U | \
+ PTE_SW_MANAGED | PTE_SW_WIRED)
+
+/* Level 0 table, 512GiB per entry (Sv48 top level) */
+#define L0_SHIFT 39
+
+/* Level 1 table, 1GiB per entry */
+#define L1_SHIFT 30
+#define L1_SIZE (1UL << L1_SHIFT)
+#define L1_OFFSET (L1_SIZE - 1)
+
+/* Level 2 table, 2MiB per entry */
+#define L2_SHIFT 21
+#define L2_SIZE (1UL << L2_SHIFT)
+#define L2_OFFSET (L2_SIZE - 1)
+
+/* Level 3 table, 4KiB per entry */
+#define L3_SHIFT 12
+#define L3_SIZE (1UL << L3_SHIFT)
+#define L3_OFFSET (L3_SIZE - 1)
+
+/* page mapping */
+#define Ln_ENTRIES_SHIFT 9
+#define Ln_ENTRIES (1 << Ln_ENTRIES_SHIFT) /* 512 PTEs per table */
+#define Ln_ADDR_MASK (Ln_ENTRIES - 1)
+#define Ln_TABLE_MASK ((1 << 12) - 1)
+
+/* physical page number mask: 56-bit PA space minus page offset */
+#define PTE_RPGN (((1ULL << 56) - 1) & ~PAGE_MASK)
+
+/* bit positions of the PPN[0..3] fields within a PTE */
+#define PTE_PPN0_S 10
+#define PTE_PPN1_S 19
+#define PTE_PPN2_S 28
+#define PTE_PPN3_S 37
+#define PTE_SIZE 8 /* bytes per PTE */
+
+#ifndef _LOCORE
+typedef uint64_t pt_entry_t; /* page table entry */
+typedef uint64_t pn_t; /* page number */
+#endif /* !_LOCORE */
+
+#endif /* _RISCV64_PTE_H_ */
--- /dev/null
+/*
+ * Copyright (c) 2014 Patrick Wildt <patrick@blueri.se>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* riscv64-specific ptrace(2) request numbers */
+#define PT_STEP (PT_FIRSTMACH + 0)
+#define PT_GETREGS (PT_FIRSTMACH + 1)
+#define PT_SETREGS (PT_FIRSTMACH + 2)
+#if 0 // XXX ptrace fpreg support
+#define PT_GETFPREGS (PT_FIRSTMACH + 3)
+#define PT_SETFPREGS (PT_FIRSTMACH + 4)
+#endif
--- /dev/null
+/*-
+ * Copyright (c) 2019 Brian Bamsch <bbamsch@google.com>
+ * Copyright (c) 2015-2016 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * Portions of this software were developed by SRI International and the
+ * University of Cambridge Computer Laboratory under DARPA/AFRL contract
+ * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Portions of this software were developed by the University of Cambridge
+ * Computer Laboratory as part of the CTSRD Project, with support from the
+ * UK Higher Education Innovation Fund (HEIF).
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_REG_H_
+#define _MACHINE_REG_H_
+
+/* General-purpose registers, grouped by role, plus sepc/sstatus. */
+struct reg {
+ uint64_t r_ra; /* return address */
+ uint64_t r_sp; /* stack pointer */
+ uint64_t r_gp; /* global pointer */
+ uint64_t r_tp; /* thread pointer */
+ uint64_t r_t[7]; /* temporary registers */
+ uint64_t r_s[12]; /* saved registers */
+ uint64_t r_a[8]; /* argument registers */
+ uint64_t r_sepc; /* exception program counter */
+ uint64_t r_sstatus; /* status register */
+};
+
+/* Floating-point register file f0-f31 plus fcsr. */
+struct fpreg {
+ uint64_t fp_f[32]; /* floating-point registers */
+ uint64_t fp_fcsr; /* floating-point control register */
+};
+
+#endif /* !_MACHINE_REG_H_ */
--- /dev/null
+/* $OpenBSD: reloc.h,v 1.1 2021/04/23 02:42:16 drahn Exp $ */
+/*
+ * RISCV64 static relocation types.
+ */
+
+/* Relocation types used by the dynamic linker. */
+/* Values follow the RISC-V ELF psABI dynamic relocation table. */
+#define R_RISCV_NONE 0
+#define R_RISCV_32 1
+#define R_RISCV_64 2
+#define R_RISCV_RELATIVE 3
+#define R_RISCV_COPY 4
+#define R_RISCV_JUMP_SLOT 5
+#define R_RISCV_TLS_DTPMOD32 6
+#define R_RISCV_TLS_DTPMOD64 7
+#define R_RISCV_TLS_DTPREL32 8
+#define R_RISCV_TLS_DTPREL64 9
+#define R_RISCV_TLS_TPREL32 10
+#define R_RISCV_TLS_TPREL64 11
+
--- /dev/null
+/*
+ * Copyright (c) 2020 Mengshi Li <mengshi.li.mars@gmail.com>
+ * Copyright (c) 2005,2008 Dale Rahn <drahn@openbsd.com>
+ * Copyright (c) 2012-2013 Patrick Wildt <patrick@blueri.se>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __RISCV64VAR_H__
+#define __RISCV64VAR_H__
+
+/* presumably the default bus_space tag, defined in machdep code -- verify */
+extern bus_space_t riscv64_bs_tag;
+
+#endif /* __RISCV64VAR_H__ */
--- /dev/null
+/*-
+ * Copyright (c) 2019 Brian Bamsch <bbamsch@google.com>
+ * Copyright (c) 2015-2017 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * Portions of this software were developed by SRI International and the
+ * University of Cambridge Computer Laboratory under DARPA/AFRL contract
+ * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Portions of this software were developed by the University of Cambridge
+ * Computer Laboratory as part of the CTSRD Project, with support from the
+ * UK Higher Education Innovation Fund (HEIF).
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_RISCVREG_H_
+#define _MACHINE_RISCVREG_H_
+
+/* scause/mcause exception codes (when the interrupt bit is clear) */
+#define EXCP_SHIFT 0
+#define EXCP_MASK (0xf << EXCP_SHIFT)
+#define EXCP_MISALIGNED_FETCH 0
+#define EXCP_FAULT_FETCH 1
+#define EXCP_ILLEGAL_INSTRUCTION 2
+#define EXCP_BREAKPOINT 3
+#define EXCP_MISALIGNED_LOAD 4
+#define EXCP_FAULT_LOAD 5
+#define EXCP_MISALIGNED_STORE 6
+#define EXCP_FAULT_STORE 7
+#define EXCP_USER_ECALL 8
+#define EXCP_SUPERVISOR_ECALL 9
+#define EXCP_HYPERVISOR_ECALL 10
+#define EXCP_MACHINE_ECALL 11
+#define EXCP_INST_PAGE_FAULT 12
+#define EXCP_LOAD_PAGE_FAULT 13
+#define EXCP_STORE_PAGE_FAULT 15 /* 14 is reserved */
+#define EXCP_INTR (1ul << 63) /* cause is an interrupt */
+
+/* Machine status register (mstatus) bits */
+#define MSTATUS_UIE (1 << 0)
+#define MSTATUS_SIE (1 << 1)
+#define MSTATUS_MIE (1 << 3)
+#define MSTATUS_UPIE (1 << 4)
+#define MSTATUS_SPIE (1 << 5)
+#define MSTATUS_MPIE (1 << 7)
+#define MSTATUS_SPP (1 << 8)
+#define MSTATUS_MPP_SHIFT 11
+#define MSTATUS_MPP_MASK (0x3 << MSTATUS_MPP_SHIFT)
+#define MSTATUS_FS_SHIFT 13
+#define MSTATUS_FS_MASK (0x3 << MSTATUS_FS_SHIFT)
+#define MSTATUS_XS_SHIFT 15
+#define MSTATUS_XS_MASK (0x3 << MSTATUS_XS_SHIFT)
+#define MSTATUS_MPRV (1 << 17)
+#define MSTATUS_SUM (1 << 18)
+#define MSTATUS_MXR (1 << 19)
+#define MSTATUS_TVM (1 << 20)
+#define MSTATUS_TW (1 << 21)
+#define MSTATUS_TSR (1 << 22)
+#define MSTATUS_UXL_SHIFT 32
+/*
+ * Fields at bit 32 and above need 64-bit constants: shifting a plain
+ * int by >= 32 is undefined behaviour and yields the wrong mask.
+ */
+#define MSTATUS_UXL_MASK (0x3ULL << MSTATUS_UXL_SHIFT)
+#define MSTATUS_SXL_SHIFT 34
+#define MSTATUS_SXL_MASK (0x3ULL << MSTATUS_SXL_SHIFT)
+#define MSTATUS_SD (1ULL << (MXLEN - 1))
+
+/* Supervisor status register (sstatus) bits */
+#define SSTATUS_UIE (1 << 0)
+#define SSTATUS_SIE (1 << 1)
+#define SSTATUS_UPIE (1 << 4)
+#define SSTATUS_SPIE (1 << 5)
+#define SSTATUS_SPP (1 << 8)
+#define SSTATUS_FS_SHIFT 13
+#define SSTATUS_FS_MASK (0x3 << SSTATUS_FS_SHIFT)
+#define SSTATUS_FS_OFF (0x0 << SSTATUS_FS_SHIFT)
+#define SSTATUS_FS_INITIAL (0x1 << SSTATUS_FS_SHIFT)
+#define SSTATUS_FS_CLEAN (0x2 << SSTATUS_FS_SHIFT)
+#define SSTATUS_FS_DIRTY (0x3 << SSTATUS_FS_SHIFT)
+#define SSTATUS_XS_SHIFT 15
+#define SSTATUS_XS_MASK (0x3 << SSTATUS_XS_SHIFT)
+#define SSTATUS_SUM (1 << 18)
+#define SSTATUS_MXR (1 << 19)
+#define SSTATUS_UXL_SHIFT 32
+#define SSTATUS_UXL_MASK (0x3ULL << SSTATUS_UXL_SHIFT)
+#define SSTATUS_SD (1ULL << (SXLEN - 1))
+
+#define USTATUS_UIE (1 << 0)
+#define USTATUS_UPIE (1 << 4)
+
+/* mstatus.MPP privilege-level encodings */
+#define MSTATUS_PRV_U 0 /* user */
+#define MSTATUS_PRV_S 1 /* supervisor */
+#define MSTATUS_PRV_H 2 /* hypervisor */
+#define MSTATUS_PRV_M 3 /* machine */
+
+/* machine interrupt-enable (mie) bits */
+#define MIE_USIE (1 << 0)
+#define MIE_SSIE (1 << 1)
+#define MIE_MSIE (1 << 3)
+#define MIE_UTIE (1 << 4)
+#define MIE_STIE (1 << 5)
+#define MIE_MTIE (1 << 7)
+#define MIE_UEIE (1 << 8)
+#define MIE_SEIE (1 << 9)
+#define MIE_MEIE (1 << 11)
+
+/* machine interrupt-pending (mip) bits */
+#define MIP_USIP (1 << 0)
+#define MIP_SSIP (1 << 1)
+#define MIP_MSIP (1 << 3)
+#define MIP_UTIP (1 << 4)
+#define MIP_STIP (1 << 5)
+#define MIP_MTIP (1 << 7)
+#define MIP_UEIP (1 << 8)
+#define MIP_SEIP (1 << 9)
+#define MIP_MEIP (1 << 11)
+
+/* supervisor interrupt-enable (sie) bits */
+#define SIE_USIE (1 << 0)
+#define SIE_SSIE (1 << 1)
+#define SIE_UTIE (1 << 4)
+#define SIE_STIE (1 << 5)
+#define SIE_UEIE (1 << 8)
+#define SIE_SEIE (1 << 9)
+
+/* supervisor interrupt-pending (sip) bits */
+#define SIP_USIP (1 << 0)
+#define SIP_SSIP (1 << 1)
+#define SIP_UTIP (1 << 4)
+#define SIP_STIP (1 << 5)
+#define SIP_UEIP (1 << 8)
+#define SIP_SEIP (1 << 9)
+
+/* user interrupt-enable (uie) bits */
+#define UIE_USIE (1 << 0)
+#define UIE_UTIE (1 << 4)
+#define UIE_UEIE (1 << 8)
+
+/* user interrupt-pending (uip) bits */
+#define UIP_USIP (1 << 0)
+#define UIP_UTIP (1 << 4)
+#define UIP_UEIP (1 << 8)
+
+#define PPN(pa) ((pa) >> PAGE_SHIFT)
+/*
+ * satp CSR fields: PPN of the root page table, address-space ID and
+ * translation mode.  The extractor macros take a full satp value.
+ */
+#define SATP_PPN_SHIFT 0
+#define SATP_PPN_MASK (0xfffffffffffULL << SATP_PPN_SHIFT)
+#define SATP_PPN(satp) (((satp) & SATP_PPN_MASK) >> SATP_PPN_SHIFT)
+#define SATP_FORMAT_PPN(ppn) (((uint64_t)(ppn) << SATP_PPN_SHIFT) & SATP_PPN_MASK)
+#define SATP_ASID_SHIFT 44
+#define SATP_ASID_MASK (0xffffULL << SATP_ASID_SHIFT)
+#define SATP_ASID(satp) (((satp) & SATP_ASID_MASK) >> SATP_ASID_SHIFT)
+#define SATP_FORMAT_ASID(asid) (((uint64_t)(asid) << SATP_ASID_SHIFT) & SATP_ASID_MASK)
+#define SATP_MODE_SHIFT 60
+#define SATP_MODE_MASK (0xfULL << SATP_MODE_SHIFT)
+/* parameter was misnamed "mode" while the body expanded "satp" */
+#define SATP_MODE(satp) (((satp) & SATP_MODE_MASK) >> SATP_MODE_SHIFT)
+
+#define SATP_MODE_SV39 (8ULL << SATP_MODE_SHIFT)
+#define SATP_MODE_SV48 (9ULL << SATP_MODE_SHIFT)
+#define SATP_MODE_SV57 (10ULL << SATP_MODE_SHIFT)
+#define SATP_MODE_SV64 (11ULL << SATP_MODE_SHIFT)
+
+/**
+ * As of RISC-V Machine ISA v1.11, the XLEN can vary between
+ * Machine, Supervisor, and User modes. The Machine XLEN (MXLEN)
+ * is resolved from the MXL field of the 'misa' CSR. The
+ * Supervisor XLEN (SXLEN) and User XLEN (UXLEN) are resolved
+ * from the SXL and UXL fields of the 'mstatus' CSR, respectively.
+ *
+ * The Machine XLEN is reset to the widest supported ISA variant
+ * at machine reset. For now, assume that all modes will always
+ * use the same, static XLEN of 64 bits.
+ */
+#define XLEN 64
+#define XLEN_BYTES (XLEN / 8)
+#define MXLEN XLEN
+#define SXLEN XLEN
+#define UXLEN XLEN
+#define INSN_SIZE 4 /* bytes per base instruction */
+#define INSN_C_SIZE 2 /* bytes per compressed instruction */
+
+// Check if val can fit in the CSR immediate form
+#define CSR_ZIMM(val) \
+ (__builtin_constant_p(val) && ((u_long)(val) < 32))
+
+/*
+ * CSR accessors.  "csr" must be a symbolic CSR name (it is pasted
+ * into the instruction string); the immediate (csr*i) forms are used
+ * when the value is a compile-time constant below 32.
+ * NOTE(review): in csr_swap's immediate branch, val is used both as
+ * "=r" output and "i" input -- verify it is never passed a literal,
+ * which would not be a valid output lvalue.
+ */
+#define csr_swap(csr, val) \
+({ if (CSR_ZIMM(val)) \
+ __asm __volatile("csrrwi %0, " #csr ", %1" \
+ : "=r" (val) : "i" (val)); \
+ else \
+ __asm __volatile("csrrw %0, " #csr ", %1" \
+ : "=r" (val) : "r" (val)); \
+ val; \
+})
+
+#define csr_write(csr, val) \
+({ if (CSR_ZIMM(val)) \
+ __asm __volatile("csrwi " #csr ", %0" :: "i" (val)); \
+ else \
+ __asm __volatile("csrw " #csr ", %0" :: "r" (val)); \
+})
+
+#define csr_set(csr, val) \
+({ if (CSR_ZIMM(val)) \
+ __asm __volatile("csrsi " #csr ", %0" :: "i" (val)); \
+ else \
+ __asm __volatile("csrs " #csr ", %0" :: "r" (val)); \
+})
+
+#define csr_clear(csr, val) \
+({ if (CSR_ZIMM(val)) \
+ __asm __volatile("csrci " #csr ", %0" :: "i" (val)); \
+ else \
+ __asm __volatile("csrc " #csr ", %0" :: "r" (val)); \
+})
+
+#define csr_read(csr) \
+({ u_long val; \
+ __asm __volatile("csrr %0, " #csr : "=r" (val)); \
+ val; \
+})
+
+#endif /* !_MACHINE_RISCVREG_H_ */
--- /dev/null
+/*-
+ * Copyright (c) 2016-2017 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ * Copyright (c) 2019 Mitchell Horne <mhorne@FreeBSD.org>
+ *
+ * Portions of this software were developed by SRI International and the
+ * University of Cambridge Computer Laboratory under DARPA/AFRL contract
+ * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Portions of this software were developed by the University of Cambridge
+ * Computer Laboratory as part of the CTSRD Project, with support from the
+ * UK Higher Education Innovation Fund (HEIF).
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_SBI_H_
+#define _MACHINE_SBI_H_
+
+/* SBI Specification Version */
+#define SBI_SPEC_VERS_MAJOR_OFFSET 24
+#define SBI_SPEC_VERS_MAJOR_MASK (0x7F << SBI_SPEC_VERS_MAJOR_OFFSET)
+#define SBI_SPEC_VERS_MINOR_OFFSET 0
+#define SBI_SPEC_VERS_MINOR_MASK (0xFFFFFF << SBI_SPEC_VERS_MINOR_OFFSET)
+
+/* SBI Implementation IDs */
+#define SBI_IMPL_ID_BBL 0
+#define SBI_IMPL_ID_OPENSBI 1
+
+/* SBI Error Codes */
+#define SBI_SUCCESS 0
+#define SBI_ERR_FAILURE -1
+#define SBI_ERR_NOT_SUPPORTED -2
+#define SBI_ERR_INVALID_PARAM -3
+#define SBI_ERR_DENIED -4
+#define SBI_ERR_INVALID_ADDRESS -5
+
+/* SBI Base Extension */
+#define SBI_EXT_ID_BASE 0x10
+#define SBI_BASE_GET_SPEC_VERSION 0
+#define SBI_BASE_GET_IMPL_ID 1
+#define SBI_BASE_GET_IMPL_VERSION 2
+#define SBI_BASE_PROBE_EXTENSION 3
+#define SBI_BASE_GET_MVENDORID 4
+#define SBI_BASE_GET_MARCHID 5
+#define SBI_BASE_GET_MIMPID 6
+
+/* Legacy Extensions */
+#define SBI_SET_TIMER 0
+#define SBI_CONSOLE_PUTCHAR 1
+#define SBI_CONSOLE_GETCHAR 2
+#define SBI_CLEAR_IPI 3
+#define SBI_SEND_IPI 4
+#define SBI_REMOTE_FENCE_I 5
+#define SBI_REMOTE_SFENCE_VMA 6
+#define SBI_REMOTE_SFENCE_VMA_ASID 7
+#define SBI_SHUTDOWN 8
+
+/*
+ * SBI_CALLn(e, f, ...): invoke sbi_call() with extension ID "e",
+ * function ID "f" and n arguments, zero-filling the remainder.
+ */
+#define SBI_CALL0(e, f) SBI_CALL4(e, f, 0, 0, 0, 0)
+#define SBI_CALL1(e, f, p1) SBI_CALL4(e, f, p1, 0, 0, 0)
+#define SBI_CALL2(e, f, p1, p2) SBI_CALL4(e, f, p1, p2, 0, 0)
+#define SBI_CALL3(e, f, p1, p2, p3) SBI_CALL4(e, f, p1, p2, p3, 0)
+#define SBI_CALL4(e, f, p1, p2, p3, p4) sbi_call(e, f, p1, p2, p3, p4)
+
+/*
+ * Documentation available at
+ * https://github.com/riscv/riscv-sbi-doc/blob/master/riscv-sbi.adoc
+ */
+
+/* Result of an SBI call: error code (from a0) and value (from a1). */
+struct sbi_ret {
+ long error;
+ long value;
+};
+
+/*
+ * Trap into the SBI firmware with an ecall.
+ *
+ * arg7 is the extension ID (register a7) and arg6 the function ID
+ * (register a6); arg0..arg3 are the call arguments in a0..a3.  On
+ * return a0 holds the error code and a1 the value.  Legacy (v0.1)
+ * extensions ignore a6 and return their result in a0.
+ */
+static __inline struct sbi_ret
+sbi_call(uint64_t arg7, uint64_t arg6, uint64_t arg0, uint64_t arg1,
+ uint64_t arg2, uint64_t arg3)
+{
+ struct sbi_ret ret;
+
+ /* Pin each argument to the register the SBI calling convention uses. */
+ register uintptr_t a0 __asm ("a0") = (uintptr_t)(arg0);
+ register uintptr_t a1 __asm ("a1") = (uintptr_t)(arg1);
+ register uintptr_t a2 __asm ("a2") = (uintptr_t)(arg2);
+ register uintptr_t a3 __asm ("a3") = (uintptr_t)(arg3);
+ register uintptr_t a6 __asm ("a6") = (uintptr_t)(arg6);
+ register uintptr_t a7 __asm ("a7") = (uintptr_t)(arg7);
+
+ /* a0/a1 are in-out operands; "memory" keeps the call ordered. */
+ __asm __volatile( \
+ "ecall" \
+ :"+r"(a0), "+r"(a1) \
+ :"r"(a2), "r"(a3), "r"(a6), "r"(a7) \
+ :"memory");
+
+ ret.error = a0;
+ ret.value = a1;
+ return (ret);
+}
+
+/* Base extension functions and variables. */
+extern u_long sbi_spec_version;
+extern u_long sbi_impl_id;
+extern u_long sbi_impl_version;
+
+/*
+ * Probe for SBI extension "id" via the Base extension.  Returns 0 if
+ * the extension is unavailable, nonzero (extension-specific) if it is.
+ */
+static __inline long
+sbi_probe_extension(long id)
+{
+ return (SBI_CALL1(SBI_EXT_ID_BASE, SBI_BASE_PROBE_EXTENSION, id).value);
+}
+
+/* Legacy extension functions. */
+
+/* Write the character ch to the firmware console. */
+static __inline void
+sbi_console_putchar(int ch)
+{
+
+ (void)SBI_CALL1(SBI_CONSOLE_PUTCHAR, 0, ch);
+}
+
+/* Read one character from the firmware console. */
+static __inline int
+sbi_console_getchar(void)
+{
+
+ /*
+  * XXX: The "error" is returned here because legacy SBI functions
+  * continue to return their value in a0.
+  */
+ return (SBI_CALL0(SBI_CONSOLE_GETCHAR, 0).error);
+}
+
+/* Program the next timer event for the calling hart. */
+static __inline void
+sbi_set_timer(uint64_t val)
+{
+
+ (void)SBI_CALL1(SBI_SET_TIMER, 0, val);
+}
+
+/* Ask the firmware to shut the system down. */
+static __inline void
+sbi_shutdown(void)
+{
+
+ (void)SBI_CALL0(SBI_SHUTDOWN, 0);
+}
+
+/* Clear any pending inter-processor interrupt on the calling hart. */
+static __inline void
+sbi_clear_ipi(void)
+{
+
+ (void)SBI_CALL0(SBI_CLEAR_IPI, 0);
+}
+
+/* Send an IPI to every hart whose bit is set in *hart_mask. */
+static __inline void
+sbi_send_ipi(const unsigned long *hart_mask)
+{
+
+ (void)SBI_CALL1(SBI_SEND_IPI, 0, (uint64_t)hart_mask);
+}
+
+/* Execute FENCE.I on the harts selected by *hart_mask. */
+static __inline void
+sbi_remote_fence_i(const unsigned long *hart_mask)
+{
+
+ (void)SBI_CALL1(SBI_REMOTE_FENCE_I, 0, (uint64_t)hart_mask);
+}
+
+/* Execute SFENCE.VMA for [start, start+size) on the selected harts. */
+static __inline void
+sbi_remote_sfence_vma(const unsigned long *hart_mask,
+ unsigned long start, unsigned long size)
+{
+
+ (void)SBI_CALL3(SBI_REMOTE_SFENCE_VMA, 0, (uint64_t)hart_mask, start,
+ size);
+}
+
+/* As sbi_remote_sfence_vma(), restricted to the given address-space ID. */
+static __inline void
+sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
+ unsigned long start, unsigned long size,
+ unsigned long asid)
+{
+
+ (void)SBI_CALL4(SBI_REMOTE_SFENCE_VMA_ASID, 0, (uint64_t)hart_mask,
+ start, size, asid);
+}
+
+void sbi_print_version(void);
+void sbi_init(void);
+
+#endif /* !_MACHINE_SBI_H_ */
--- /dev/null
+/* $OpenBSD: setjmp.h,v 1.1 2021/04/23 02:42:16 drahn Exp $ */
+
+/*
+ * machine/setjmp.h: machine dependent setjmp-related information.
+ */
+
+/*
+ * _JBLEN is the jmp_buf size in longs: room for sp, ra, the
+ * callee-saved [f]s0-s11 registers, a magic value and the signal
+ * mask.  _JB_SIGMASK is the index where the signal mask is kept.
+ */
+#define _JBLEN 256 /* sp, ra, [f]s0-11, magic val, sigmask */
+#define _JB_SIGMASK 27
--- /dev/null
+/*-
+ * Copyright (c) 1986, 1989, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)signal.h 8.1 (Berkeley) 6/11/93
+ * from: FreeBSD: src/sys/i386/include/signal.h,v 1.13 2000/11/09
+ * from: FreeBSD: src/sys/sparc64/include/signal.h,v 1.6 2001/09/30 18:52:17
+ * $FreeBSD: head/sys/riscv/include/signal.h 292407 2015-12-17 18:44:30Z br $
+ */
+
+#ifndef _MACHINE_SIGNAL_H_
+#define _MACHINE_SIGNAL_H_
+
+#include <sys/cdefs.h>
+
+typedef long sig_atomic_t;
+
+#if __BSD_VISIBLE || __XPG_VISIBLE >= 420
+
+#include <sys/_types.h>
+
+/*
+ * The sigcontext records the machine state at the time of signal
+ * delivery and is restored by sigreturn(2).
+ */
+struct sigcontext {
+ int __sc_unused;
+ int sc_mask; /* signal mask to restore */
+
+ __register_t sc_ra; /* return address */
+ __register_t sc_sp; /* stack pointer */
+ __register_t sc_gp; /* global pointer */
+ __register_t sc_tp; /* thread pointer */
+ __register_t sc_t[7]; /* temporaries t0-t6 */
+ __register_t sc_s[12]; /* callee-saved s0-s11 */
+ __register_t sc_a[8]; /* argument registers a0-a7 */
+ __register_t sc_sepc; /* pc at the time of the signal */
+
+ long sc_cookie; /* verification cookie checked by sigreturn(2) */
+};
+
+#endif /* __BSD_VISIBLE || __XPG_VISIBLE >= 420 */
+
+#endif /* !_MACHINE_SIGNAL_H_ */
--- /dev/null
+/* $NetBSD: softintr.h,v 1.1 2002/01/29 22:54:14 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Charles M. Hannum, and by Jason R. Thorpe.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_SOFTINTR_H_
+#define _MACHINE_SOFTINTR_H_
+
+#ifdef _KERNEL
+
+#include <sys/mutex.h>
+#include <sys/queue.h>
+
+/*
+ * Generic software interrupt support for all riscv64 platforms.
+ *
+ * To use this code, include <machine/softintr.h> from your platform's
+ * <machine/intr.h>.
+ */
+
+#define SIR_SOFT 0 /* for IPL_SOFT */
+#define SIR_CLOCK 1 /* for IPL_SOFTCLOCK */
+#define SIR_NET 2 /* for IPL_SOFTNET */
+#define SIR_TTY 3 /* for IPL_SOFTTTY */
+
+#define SI_NSOFTINTR 4
+
+/* A registered soft interrupt handler. */
+struct soft_intrhand {
+ TAILQ_ENTRY(soft_intrhand)
+ sih_q; /* entry on the owning level's pending queue */
+ struct soft_intr *sih_intrhead; /* back pointer to our level */
+ void (*sih_fn)(void *); /* handler function */
+ void (*sih_fnwrap)(void *); /* wrapper hook (presumably set in softintr.c) */
+ void *sih_arg; /* argument passed to sih_fn */
+ void *sih_argwrap; /* argument passed to sih_fnwrap */
+ int sih_pending; /* nonzero while queued for dispatch */
+};
+
+/* Per-level soft interrupt state. */
+struct soft_intr {
+ TAILQ_HEAD(, soft_intrhand)
+ softintr_q; /* queue of pending handlers */
+ int softintr_ssir; /* SIR_* index passed to softintr() */
+ struct mutex softintr_lock; /* protects the queue and sih_pending */
+};
+
+#define SOFTINTR_ESTABLISH_MPSAFE 0x01
+/* Register a handler at soft IPL "i"; flags may include ..._MPSAFE. */
+void *softintr_establish_flags(int, void (*)(void *), void *, int);
+#define softintr_establish(i, f, a) \
+ softintr_establish_flags(i, f, a, 0)
+#define softintr_establish_mpsafe(i, f, a) \
+ softintr_establish_flags(i, f, a, SOFTINTR_ESTABLISH_MPSAFE)
+void softintr_disestablish(void *); /* remove a registered handler */
+void softintr_init(void); /* initialize the soft interrupt framework */
+void softintr_dispatch(int); /* run the pending handlers of one level */
+void softintr(int); /* MD hook: raise the given soft interrupt */
+
+/*
+ * Schedule a previously established handler: under the level's lock,
+ * queue it (at most once, guarded by sih_pending) and raise the
+ * level's soft interrupt via softintr().
+ */
+#define softintr_schedule(arg) \
+do { \
+ struct soft_intrhand *__sih = (arg); \
+ struct soft_intr *__si = __sih->sih_intrhead; \
+ \
+ mtx_enter(&__si->softintr_lock); \
+ if (__sih->sih_pending == 0) { \
+ TAILQ_INSERT_TAIL(&__si->softintr_q, __sih, sih_q); \
+ __sih->sih_pending = 1; \
+ softintr(__si->softintr_ssir); \
+ } \
+ mtx_leave(&__si->softintr_lock); \
+} while (/*CONSTCOND*/ 0)
+
+#endif /* _KERNEL */
+
+#endif /* _MACHINE_SOFTINTR_H_ */
--- /dev/null
+#ifndef _MACHINE_SPINLOCK_H_
+#define _MACHINE_SPINLOCK_H_
+
+/* Values an _atomic_lock_t word may hold. */
+#define _ATOMIC_LOCK_UNLOCKED (0)
+#define _ATOMIC_LOCK_LOCKED (1)
+/* The lock word itself is a plain int. */
+typedef int _atomic_lock_t;
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2020 Brian Bamsch <bbamsch@google.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _MACHINE_SYSCALL_H_
+#define _MACHINE_SYSCALL_H_
+
+#ifdef _KERNEL
+
+/* System call trap handler; implemented in syscall.c. */
+void svc_handler (trapframe_t *);
+
+#endif
+
+#endif /* !_MACHINE_SYSCALL_H_ */
--- /dev/null
+/*
+ * Copyright (c) 2019 Brian Bamsch <bbamsch@google.com>
+ * Copyright (c) 2011 Philip Guenther <guenther@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _MACHINE_TCB_H_
+#define _MACHINE_TCB_H_
+
+#ifdef _KERNEL
+
+#include <machine/pcb.h>
+
+/*
+ * Kernel view: a process's TCB pointer lives in the tf_tp slot of its
+ * saved trapframe, reached through the pcb.
+ */
+#define TCB_GET(p) \
+ ((struct pcb *)(p)->p_addr)->pcb_tf->tf_tp
+
+/* Store a new TCB pointer into the process's saved trapframe. */
+#define TCB_SET(p, addr) \
+ do { \
+ ((struct pcb *)(p)->p_addr)->pcb_tf->tf_tp = (long)(addr); \
+ } while (0)
+
+#else /* _KERNEL */
+
+/* ELF TLS ABI calls for small TCB, with static TLS data after it */
+#define TLS_VARIANT 1
+
+/* Userland view: the TCB pointer is held directly in register tp. */
+static inline void *
+__riscv64_read_tcb(void)
+{
+ void *tcb;
+ __asm volatile("mv %0, tp": "=r" (tcb));
+ return tcb;
+}
+
+#define TCB_GET() __riscv64_read_tcb()
+
+#endif /* _KERNEL */
+
+#endif /* _MACHINE_TCB_H_ */
--- /dev/null
+/* $OpenBSD: timetc.h,v 1.1 2021/04/23 02:42:16 drahn Exp $ */
+/*
+ * Copyright (c) 2020 Paul Irofti <paul@irofti.net>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _MACHINE_TIMETC_H_
+#define _MACHINE_TIMETC_H_
+
+
+#endif /* _MACHINE_TIMETC_H_ */
--- /dev/null
+/*
+ * Copyright (c) 2019 Brian Bamsch <bbamsch@google.com>
+ * Copyright (c) 2016 Dale Rahn <drahn@dalerahn.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Breakpoint instruction encodings.  RISC-V uses the same EBREAK
+ * encoding (0x00100073) for kernel (DDB) and user breakpoints.
+ */
+#define KERNEL_BREAKPOINT 0x00100073 /* EBREAK -- Used by DDB */
+
+#define KBPT_ASM "ebreak"
+
+#define USER_BREAKPOINT 0x00100073 /* EBREAK */
--- /dev/null
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)vmparam.h 5.9 (Berkeley) 5/12/91
+ */
+
+#ifndef _MACHINE_VMPARAM_H_
+#define _MACHINE_VMPARAM_H_
+
+/*
+ * Machine dependent constants for riscv64.
+ */
+
+#define USRSTACK VM_MAXUSER_ADDRESS
+
+/*
+ * Virtual memory related constants, all in bytes
+ */
+#ifndef MAXTSIZ
+#define MAXTSIZ ((paddr_t)1*1024*1024*1024) /* max text size */
+#endif
+#ifndef DFLDSIZ
+#define DFLDSIZ ((paddr_t)128*1024*1024) /* initial data size limit */
+#endif
+#ifndef MAXDSIZ
+#define MAXDSIZ ((paddr_t)1*1024*1024*1024) /* max data size */
+#endif
+#ifndef BRKSIZ
+#define BRKSIZ ((paddr_t)1*1024*1024*1024) /* heap gap size */
+#endif
+#ifndef DFLSSIZ
+#define DFLSSIZ ((paddr_t)128*1024*1024) /* initial stack size limit */
+#endif
+#ifndef MAXSSIZ
+#define MAXSSIZ ((paddr_t)1*1024*1024*1024) /* max stack size */
+#endif
+
+#define STACKGAP_RANDOM 256*1024
+
+/*
+ * Size of shared memory map
+ */
+#ifndef SHMMAXPGS
+#define SHMMAXPGS 1024
+#endif
+
+/*
+ * Size of User Raw I/O map
+ */
+#define USRIOSIZE 300
+
+/**
+ * Address space layout.
+ *
+ * RISC-V implements multiple paging modes with different virtual address space
+ * sizes: SV32, SV39 and SV48. SV39 permits a virtual address space size of
+ * 512GB and uses a three-level page table. Since this is large enough for most
+ * purposes, we currently use SV39 for both userland and the kernel, avoiding
+ * the extra translation step required by SV48.
+ *
+ * The address space is split into two regions at each end of the 64-bit address
+ * space:
+ *
+ * 0x0000000000000000 - 0x0000003fffffffff 256GB user map
+ * 0x0000004000000000 - 0xffffffbfffffffff unmappable
+ * 0xffffffc000000000 - 0xffffffc7ffffffff 32GB kernel map
+ * 0xffffffc800000000 - 0xffffffcfffffffff 32GB unused
+ * 0xffffffd000000000 - 0xffffffefffffffff 128GB direct map
+ * 0xfffffff000000000 - 0xffffffffffffffff 64GB unused
+ *
+ * The kernel is loaded at the beginning of the kernel map.
+ *
+ * We define some interesting address constants:
+ *
+ * VM_MIN_ADDRESS and VM_MAX_ADDRESS define the start and end of the entire
+ * 64 bit address space, mostly just for convenience.
+ *
+ * VM_MIN_KERNEL_ADDRESS and VM_MAX_KERNEL_ADDRESS define the start and end of
+ * mappable kernel virtual address space.
+ *
+ * VM_MIN_USER_ADDRESS and VM_MAX_USER_ADDRESS define the start and end of the
+ * user address space.
+ */
+// XXX OpenBSD/arm64 starts VM_MIN_ADDRESS from PAGE_SIZE. Why?
+#define VM_MIN_ADDRESS (0x0000000000000000UL)
+#define VM_MAX_ADDRESS (0xffffffffffffffffUL)
+
+#define VM_MIN_KERNEL_ADDRESS (0xffffffc000000000UL)
+#define VM_MAX_KERNEL_ADDRESS (0xffffffc800000000UL)
+
+// Kernel L1 Page Table Range
+// (SV39: each L1 slot maps 1GB; slots 256..287 cover the 32GB kernel
+// map at 0xffffffc000000000-0xffffffc7ffffffff described above)
+#define L1_KERN_BASE (256)
+#define L1_KERN_ENTRIES (288 - L1_KERN_BASE)
+
+/* 128GB direct map of physical memory. */
+#define DMAP_MIN_ADDRESS (0xffffffd000000000UL)
+#define DMAP_MAX_ADDRESS (0xfffffff000000000UL)
+
+// DMAP L1 Page Table Range
+// (slots 320..447 cover DMAP_MIN_ADDRESS..DMAP_MAX_ADDRESS)
+#define L1_DMAP_BASE (320)
+#define L1_DMAP_ENTRIES (448 - L1_DMAP_BASE)
+
+/* Physical range the direct map currently covers (set up at boot). */
+#define DMAP_MIN_PHYSADDR (dmap_phys_base)
+#define DMAP_MAX_PHYSADDR (dmap_phys_max)
+
+/* True if pa is in the dmap range */
+#define PHYS_IN_DMAP(pa) ((pa) >= DMAP_MIN_PHYSADDR && \
+ (pa) < DMAP_MAX_PHYSADDR)
+/* True if va is in the dmap range */
+// NOTE(review): dmap_max_addr is not declared in this file -- the
+// externs below declare dmap_virt_max; confirm which symbol is intended.
+#define VIRT_IN_DMAP(va) ((va) >= DMAP_MIN_ADDRESS && \
+ (va) < (dmap_max_addr))
+
+#define PMAP_HAS_DMAP 1
+/*
+ * PHYS_TO_DMAP()/DMAP_TO_PHYS() translate between physical addresses
+ * and direct-map virtual addresses.  The active variants perform no
+ * bounds checking; the #if 0 variants preserve the intended
+ * (FreeBSD-style) KASSERT guards that are not wired up yet.
+ */
+#if 0 // XXX KASSERT missing. Find a better way to enforce boundary.
+#define PHYS_TO_DMAP(pa) \
+({ \
+ KASSERT(PHYS_IN_DMAP(pa), \
+ ("%s: PA out of range, PA: 0x%lx", __func__, \
+ (vm_paddr_t)(pa))); \
+ ((pa) - dmap_phys_base) + DMAP_MIN_ADDRESS; \
+})
+#else
+#define PHYS_TO_DMAP(pa) \
+({ \
+ ((pa) - dmap_phys_base) + DMAP_MIN_ADDRESS; \
+})
+#endif
+
+#if 0 // XXX KASSERT missing. Find a better way to enforce boundary.
+#define DMAP_TO_PHYS(va) \
+({ \
+ KASSERT(VIRT_IN_DMAP(va), \
+ ("%s: VA out of range, VA: 0x%lx", __func__, \
+ (vm_offset_t)(va))); \
+ ((va) - DMAP_MIN_ADDRESS) + dmap_phys_base; \
+})
+#else
+#define DMAP_TO_PHYS(va) \
+({ \
+ ((va) - DMAP_MIN_ADDRESS) + dmap_phys_base; \
+})
+#endif
+
+#define VM_MIN_USER_ADDRESS (0x0000000000000000UL)
+#define VM_MAX_USER_ADDRESS (0x0000004000000000UL) // 39 User Space Bits
+
+#define VM_MINUSER_ADDRESS (VM_MIN_USER_ADDRESS)
+// XXX OpenBSD/arm64 saves 8 * PAGE_SIZE at top of VM_MAXUSER_ADDRESS. Why?
+#define VM_MAXUSER_ADDRESS (VM_MAX_USER_ADDRESS)
+
+#define KERNBASE (VM_MIN_KERNEL_ADDRESS)
+
+#ifndef _LOCORE
+extern paddr_t dmap_phys_base;
+extern paddr_t dmap_phys_max;
+extern vaddr_t dmap_virt_max;
+extern vaddr_t vm_max_kernel_address;
+extern vaddr_t init_pt_va;
+#endif
+
+/* virtual sizes (bytes) for various kernel submaps */
+#define VM_PHYS_SIZE (USRIOSIZE*PAGE_SIZE)
+
+#define VM_PHYSSEG_MAX 32
+#define VM_PHYSSEG_STRAT VM_PSTRAT_BSEARCH
+#define VM_PHYSSEG_NOADD /* can't add RAM after vm_mem_init */
+
+#endif /* _MACHINE_VMPARAM_H_ */
--- /dev/null
+/*
+ * Copyright (c) 2015 Dale Rahn <drahn@dalerahn.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/signalvar.h>
+#include <sys/proc.h>
+#include <sys/user.h>
+#include <sys/signal.h>
+#include <sys/syscall.h>
+#include <sys/syscall_mi.h>
+#include <machine/pcb.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <machine/frame.h>
+
+/*
+ * Transform cpu ast to mi_ast
+ */
+
+/* Handle an asynchronous system trap for the current process. */
+void
+ast(struct trapframe *tf)
+{
+ struct proc *p = curcpu()->ci_curproc;
+
+ /* Expose the current trapframe through the PCB. */
+ p->p_addr->u_pcb.pcb_tf = tf;
+
+ refreshcreds(p);
+ uvmexp.softs++; /* statistics: counted as a soft interrupt */
+ mi_ast(p, curcpu()->ci_want_resched);
+ userret(p);
+}
--- /dev/null
+/*
+ * Copyright (c) 2009 Miodrag Vallat.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/device.h>
+#include <sys/reboot.h>
+#include <sys/socket.h>
+#include <sys/hibernate.h>
+#include <uvm/uvm.h>
+
+#include <net/if.h>
+#include <net/if_types.h>
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+
+#include <machine/bootconfig.h>
+
+extern void dumpconf(void);
+void parsepmonbp(void);
+
+struct device *bootdv = NULL;
+enum devclass bootdev_class = DV_DULL;
+
+/*
+ * Unmap the boot-time startup code, page by page, from _start up to
+ * endboot.  Note the do/while: at least one page is always removed.
+ */
+void
+unmap_startup(void)
+{
+ /* Linker-provided symbols; only their addresses are meaningful. */
+ extern void *_start, *endboot;
+ vaddr_t p = (vaddr_t)&_start;
+
+ do {
+ pmap_kremove(p, PAGE_SIZE);
+ p += PAGE_SIZE;
+ } while (p < (vaddr_t)&endboot);
+}
+
+/*
+ * Autoconfiguration entry point: attach the device tree rooted at
+ * mainbus, drop the startup mappings, then enable interrupts.
+ */
+void
+cpu_configure(void)
+{
+ /* Keep interrupts blocked while devices attach. */
+ (void)splhigh();
+
+ softintr_init();
+ (void)config_rootfound("mainbus", NULL);
+
+ /* Boot code is no longer needed; remove its mappings. */
+ unmap_startup();
+
+ /* Configuration finished: leave "cold" and lower the IPL. */
+ cold = 0;
+ spl0();
+}
+
+/*
+ * Determine the boot device and hand it to setroot().  Candidates, in
+ * order: a hooked-in ramdisk, the device named in boot_file, and (for
+ * NFS) the network interface matching the boot MAC address.
+ */
+void
+diskconf(void)
+{
+ size_t len;
+ char *p;
+ dev_t tmpdev;
+ extern uint8_t *bootmac;
+
+ if (*boot_file != '\0')
+ printf("bootfile: %s\n", boot_file);
+
+#if RAMDISK_HOOKS
+ bootdv = parsedisk("rd", 2, 0, &tmpdev);
+#endif /* RAMDISK_HOOKS */
+ if (bootdv == NULL) {
+
+ // boot_file has the form "<device>:/bsd"; we want the device part
+ if ((p = strchr(boot_file, ':')) != NULL)
+ len = p - boot_file;
+ else
+ len = strlen(boot_file);
+ bootdv = parsedisk(boot_file, len, 0, &tmpdev);
+ }
+
+#if defined(NFSCLIENT)
+ if (bootmac) {
+ struct ifnet *ifp;
+
+ /* Find the interface whose MAC matches the one we booted from. */
+ TAILQ_FOREACH(ifp, &ifnet, if_list) {
+ if (ifp->if_type == IFT_ETHER &&
+ memcmp(bootmac, ((struct arpcom *)ifp)->ac_enaddr,
+ ETHER_ADDR_LEN) == 0)
+ break;
+ }
+ if (ifp)
+ bootdv = parsedisk(ifp->if_xname, strlen(ifp->if_xname),
+ 0, &tmpdev);
+ }
+#endif
+
+ if (bootdv != NULL)
+ printf("boot device: %s\n", bootdv->dv_xname);
+ else
+ printf("boot device: lookup %s failed \n", boot_file);
+
+ setroot(bootdv, 0, RB_USERREQ);
+ dumpconf();
+
+#ifdef HIBERNATE
+ hibernate_resume();
+#endif /* HIBERNATE */
+}
+
+/* MD hook called for every attached device; nothing to record here yet. */
+void
+device_register(struct device *dev, void *aux)
+{
+}
+
+/* Map device name prefixes to block device major numbers. */
+struct nam2blk nam2blk[] = {
+ { "wd", 0 },
+ { "sd", 4 },
+ { "cd", 6 },
+ { "vnd", 14 },
+ { "rd", 8 },
+ { NULL, -1 }
+};
--- /dev/null
+/*
+ * Copyright (c) 2003-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+/*-
+ * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <machine/bus.h>
+#include <machine/cpu.h>
+#include <machine/cpufunc.h>
+
+/*
+ * Common function for DMA map creation. May be called by bus-specific
+ * DMA map creation functions.
+ */
+int
+_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
+ bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
+{
+ struct machine_bus_dmamap *dm;
+ size_t allocsz;
+ int mflags;
+
+ /*
+  * One allocation holds the map header plus its variable-length
+  * tail of segment descriptors.  The header already embeds one
+  * bus_dma_segment_t, hence nsegments - 1 extra entries.
+  */
+ allocsz = sizeof(struct machine_bus_dmamap) +
+ sizeof(bus_dma_segment_t) * (nsegments - 1);
+
+ /* Honor the caller's sleep/no-sleep preference for the allocation. */
+ mflags = (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK;
+ dm = malloc(allocsz, M_DEVBUF, mflags | M_ZERO);
+ if (dm == NULL)
+ return (ENOMEM);
+
+ dm->_dm_size = size;
+ dm->_dm_segcnt = nsegments;
+ dm->_dm_maxsegsz = maxsegsz;
+ dm->_dm_boundary = boundary;
+ /*
+  * WAITOK/NOWAIT only matter at allocation time and are dropped;
+  * ALLOCNOW (if present) stays to advertise reserved resources.
+  */
+ dm->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
+
+ *dmamp = dm;
+ return (0);
+}
+
+/*
+ * Common function for DMA map destruction. May be called by bus-specific
+ * DMA map destruction functions.
+ */
+void
+_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
+{
+ /*
+  * Release the map, computing exactly the size that
+  * _dmamap_create() allocated for it.
+  */
+ free(map, M_DEVBUF, sizeof(struct machine_bus_dmamap) +
+ sizeof(bus_dma_segment_t) * (map->_dm_segcnt - 1));
+}
+
+/*
+ * Common function for loading a DMA map with a linear buffer. May
+ * be called by bus-specific DMA map load functions.
+ */
+int
+_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf, bus_size_t buflen,
+ struct proc *p, int flags)
+{
+ paddr_t lastaddr;
+ int nseg, rv;
+
+ /* Begin with an empty map so any failure leaves no stale state. */
+ map->dm_mapsize = 0;
+ map->dm_nsegs = 0;
+
+ /* Reject buffers larger than the map was created for. */
+ if (buflen > map->_dm_size)
+ return (EINVAL);
+
+ nseg = 0;
+ rv = (*t->_dmamap_load_buffer)(t, map, buf, buflen, p, flags,
+ &lastaddr, &nseg, 1);
+ if (rv != 0)
+ return (rv);
+
+ /* Success: publish the segment count and mapped size. */
+ map->dm_nsegs = nseg + 1;
+ map->dm_mapsize = buflen;
+ return (0);
+}
+
/*
 * Like _dmamap_load(), but for mbuf chains.
 */
int
_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0, int flags)
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;

#ifdef DIAGNOSTIC
	/* Only a packet-header mbuf carries a valid m_pkthdr.len. */
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_dmamap_load_mbuf: no packet header");
#endif

	/* Reject chains larger than the map was created to hold. */
	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	/*
	 * Walk the chain, loading each non-empty mbuf's data area.
	 * seg and lastaddr carry coalescing state across calls;
	 * mbuf data always lives in kernel space, hence p == NULL.
	 */
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = (*t->_dmamap_load_buffer)(t, map, m->m_data, m->m_len,
		    NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		/* seg is the index of the last segment used. */
		map->dm_nsegs = seg + 1;
		map->dm_mapsize = m0->m_pkthdr.len;
	}

	return (error);
}
+
/*
 * Like _dmamap_load(), but for uios.
 */
int
_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio, int flags)
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	/* User-space buffers need the owning process for translation. */
	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_dmamap_load_uio: USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = (*t->_dmamap_load_buffer)(t, map, addr, minlen,
		    p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_nsegs = seg + 1;
		/*
		 * Only the local residual copy was decremented above, so
		 * uio->uio_resid is still the total byte count at entry.
		 */
		map->dm_mapsize = uio->uio_resid;
	}

	return (error);
}
+
/*
 * Like _dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags)
{
	bus_addr_t paddr, baddr, bmask, lastaddr = 0;
	bus_size_t plen, sgsize, mapsize;
	vaddr_t vaddr;
	int first = 1;
	int i, seg = 0;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (nsegs > map->_dm_segcnt || size > map->_dm_size)
		return (EINVAL);

	mapsize = size;
	/* Only meaningful when _dm_boundary != 0; guarded below. */
	bmask = ~(map->_dm_boundary - 1);

	for (i = 0; i < nsegs && size > 0; i++) {
		paddr = segs[i].ds_addr;
		vaddr = segs[i]._ds_vaddr;
		plen = MIN(segs[i].ds_len, size);

		/* Chop this input segment into page-at-most chunks. */
		while (plen > 0) {
			/*
			 * Compute the segment size, and adjust counts.
			 */
			sgsize = PAGE_SIZE - ((u_long)paddr & PGOFSET);
			if (plen < sgsize)
				sgsize = plen;

			/*
			 * Make sure we don't cross any boundaries.
			 */
			if (map->_dm_boundary > 0) {
				baddr = (paddr + map->_dm_boundary) & bmask;
				if (sgsize > (baddr - paddr))
					sgsize = (baddr - paddr);
			}

			/*
			 * Insert chunk into a segment, coalescing with
			 * previous segment if possible.
			 */
			if (first) {
				map->dm_segs[seg].ds_addr = paddr;
				map->dm_segs[seg].ds_len = sgsize;
				map->dm_segs[seg]._ds_paddr = paddr;
				map->dm_segs[seg]._ds_vaddr = vaddr;
				first = 0;
			} else {
				/*
				 * Coalesce only when physically contiguous,
				 * within the segment size limit, and on the
				 * same boundary-aligned window.
				 */
				if (paddr == lastaddr &&
				    (map->dm_segs[seg].ds_len + sgsize) <=
				     map->_dm_maxsegsz &&
				     (map->_dm_boundary == 0 ||
				     (map->dm_segs[seg].ds_addr & bmask) ==
				     (paddr & bmask)))
					map->dm_segs[seg].ds_len += sgsize;
				else {
					/* Out of map segments: give up. */
					if (++seg >= map->_dm_segcnt)
						return (EINVAL);
					map->dm_segs[seg].ds_addr = paddr;
					map->dm_segs[seg].ds_len = sgsize;
					map->dm_segs[seg]._ds_paddr = paddr;
					map->dm_segs[seg]._ds_vaddr = vaddr;
				}
			}

			paddr += sgsize;
			vaddr += sgsize;
			plen -= sgsize;
			size -= sgsize;

			/* Remember where the next chunk must start to merge. */
			lastaddr = paddr;
		}
	}

	map->dm_mapsize = mapsize;
	map->dm_nsegs = seg + 1;
	return (0);
}
+
+/*
+ * Common function for unloading a DMA map. May be called by
+ * bus-specific DMA map unload functions.
+ */
+void
+_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
+{
+ /*
+ * No resources to free; just mark the mappings as
+ * invalid.
+ */
+ map->dm_nsegs = 0;
+ map->dm_mapsize = 0;
+}
+
/*
 * Sync one virtually-contiguous chunk of a DMA map for the given
 * operation(s).  Unlisted combinations (e.g. POSTWRITE alone) need
 * no cache maintenance and fall through.
 */
static void
_dmamap_sync_segment(vaddr_t va, vsize_t len, int ops)
{
	switch (ops) {
	case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
	case BUS_DMASYNC_PREREAD:
		/* Push dirty lines out and drop them before the device DMAs. */
		cpu_dcache_wbinv_range(va, len);
		break;

	case BUS_DMASYNC_PREWRITE:
		/* Device only reads the buffer: write-back is sufficient. */
		cpu_dcache_wb_range(va, len);
		break;

	/*
	 * CPUs may perform speculative loads, so we need to clean the cache
	 * after a DMA read to deal with any speculatively loaded cache lines.
	 * Since these can't be dirty, we can just invalidate them and don't
	 * have to worry about having to write back their contents.
	 * (NOTE(review): this rationale was inherited from the arm64 port,
	 * where it refers to Cortex cores — presumably it applies to riscv64
	 * implementations as well; confirm.)
	 */
	case BUS_DMASYNC_POSTREAD:
	case BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE:
		membar_sync();
		cpu_dcache_inv_range(va, len);
		break;
	}
}
+
/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 *
 * addr is the byte offset into the map at which to start syncing,
 * size the number of bytes to sync, op the BUS_DMASYNC_* operation(s).
 */
void
_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t addr,
    bus_size_t size, int op)
{
	int nsegs;
	int curseg;

	/*
	 * If our tag tells us that the device we are doing DMA
	 * with is coherent, make sure the write buffer is synced
	 * and return.
	 */
	if (t->_flags & BUS_DMA_COHERENT) {
		membar_sync();
		return;
	}

	nsegs = map->dm_nsegs;
	curseg = 0;

	/* Walk the segments, skipping until the offset is consumed. */
	while (size && nsegs) {
		vaddr_t vaddr;
		bus_size_t ssize;

		ssize = map->dm_segs[curseg].ds_len;
		vaddr = map->dm_segs[curseg]._ds_vaddr;

		if (addr != 0) {
			if (addr >= ssize) {
				/* Offset covers this whole segment: skip it. */
				addr -= ssize;
				ssize = 0;
			} else {
				/* Offset lands inside this segment. */
				vaddr += addr;
				ssize -= addr;
				addr = 0;
			}
		}
		/* Don't sync past the requested length. */
		if (ssize > size)
			ssize = size;

		if (ssize != 0) {
			_dmamap_sync_segment(vaddr, ssize, op);
			size -= ssize;
		}
		curseg++;
		nsegs--;
	}

	/* Caller asked for more bytes than the map contains. */
	if (size != 0) {
		panic("_dmamap_sync: ran off map!");
	}
}
+
+/*
+ * Common function for DMA-safe memory allocation. May be called
+ * by bus-specific DMA memory allocation functions.
+ */
+int
+_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
+ bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
+ int flags)
+{
+ return _dmamem_alloc_range(t, size, alignment, boundary,
+ segs, nsegs, rsegs, flags, (paddr_t)0, (paddr_t)-1);
+}
+
/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	vm_page_t m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		/* Each segment covers a physically contiguous page run. */
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	/* Return all pages to UVM in one call. */
	uvm_pglistfree(&mlist);
}
+
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 *
 * Maps the physical segments in segs[0..nsegs-1] into a fresh,
 * virtually contiguous kernel region and returns its address in *kvap.
 */
int
_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size,
    caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	bus_addr_t addr;
	int curseg, pmap_flags, cache;
	const struct kmem_dyn_mode *kd;

	size = round_page(size);
	/* Honour the caller's blocking preference in km_alloc(9). */
	kd = flags & BUS_DMA_NOWAIT ? &kd_trylock : &kd_waitok;
	va = (vaddr_t)km_alloc(size, &kv_any, &kp_none, kd);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	pmap_flags = PMAP_WIRED | PMAP_CANFAIL;
	/*
	 * Default to write-back caching; fall back to cache-inhibited
	 * when the tag isn't coherent but the caller asked for coherent
	 * memory, or when NOCACHE was requested explicitly.
	 */
	cache = PMAP_CACHE_WB;
	if (((t->_flags & BUS_DMA_COHERENT) == 0 &&
	    (flags & BUS_DMA_COHERENT)) || (flags & BUS_DMA_NOCACHE))
		cache = PMAP_CACHE_CI;
	for (curseg = 0; curseg < nsegs; curseg++) {
		/* Record where this segment begins in the new mapping. */
		segs[curseg]._ds_vaddr = va;
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
			/* Segments must not cover more than `size' bytes. */
			if (size == 0)
				panic("_dmamem_map: size botch");
			pmap_kenter_cache(va, addr,
			    PROT_READ | PROT_WRITE | pmap_flags,
			    cache);
		}
		pmap_update(pmap_kernel());
	}

	return (0);
}
+
+/*
+ * Common function for unmapping DMA-safe memory. May be called by
+ * bus-specific DMA memory unmapping functions.
+ */
+void
+_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
+{
+ km_free(kva, round_page(size), &kv_any, &kp_none);
+}
+
/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 *
 * Returns the physical address backing byte `off' of the memory
 * described by segs[0..nsegs-1], or -1 if off is past the end.
 */
paddr_t
_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, off_t off,
    int prot, int flags)
{
	int i;
	paddr_t pa;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		/* Skip whole segments until off falls inside one. */
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		/*
		 * NOTE(review): this looks up ds_addr through the kernel
		 * pmap as if it were a virtual address; other ports return
		 * ds_addr + off directly.  Presumably ds_addr is a KVA on
		 * this platform — confirm against how segments are filled.
		 */
		(void)pmap_extract (pmap_kernel(), segs[i].ds_addr, &pa);
		return pa + off;
	}

	/* Page not found. */
	return (-1);
}
+
+/**********************************************************************
+ * DMA utility functions
+ **********************************************************************/
+
+/*
+ * Utility function to load a linear buffer. lastaddrp holds state
+ * between invocations (for multiple-buffer loads). segp contains
+ * the starting segment on entrance, and the ending segment on exit.
+ * first indicates if this is the first invocation of this function.
+ */
+int
+_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
+ bus_size_t buflen, struct proc *p, int flags, paddr_t *lastaddrp,
+ int *segp, int first)
+{
+ bus_size_t sgsize;
+ bus_addr_t lastaddr, baddr, bmask;
+ paddr_t curaddr;
+ vaddr_t vaddr = (vaddr_t)buf;
+ int seg;
+ pmap_t pmap;
+
+ if (p != NULL)
+ pmap = p->p_vmspace->vm_map.pmap;
+ else
+ pmap = pmap_kernel();
+
+ lastaddr = *lastaddrp;
+ bmask = ~(map->_dm_boundary - 1);
+ if (t->_dma_mask != 0)
+ bmask &= t->_dma_mask;
+
+ for (seg = *segp; buflen > 0; ) {
+ /*
+ * Get the physical address for this segment.
+ */
+ if (pmap_extract(pmap, vaddr, &curaddr) == FALSE)
+ panic("_dmapmap_load_buffer: pmap_extract(%p, %lx) failed!",
+ pmap, vaddr);
+
+ /*
+ * Compute the segment size, and adjust counts.
+ */
+ sgsize = NBPG - ((u_long)vaddr & PGOFSET);
+ if (buflen < sgsize)
+ sgsize = buflen;
+
+ /*
+ * Make sure we don't cross any boundaries.
+ */
+ if (map->_dm_boundary > 0) {
+ baddr = ((bus_addr_t)curaddr + map->_dm_boundary) &
+ bmask;
+ if (sgsize > (baddr - (bus_addr_t)curaddr))
+ sgsize = (baddr - (bus_addr_t)curaddr);
+ }
+
+ /*
+ * Insert chunk into a segment, coalescing with
+ * previous segment if possible.
+ */
+ if (first) {
+ map->dm_segs[seg].ds_addr = curaddr;
+ map->dm_segs[seg].ds_len = sgsize;
+ map->dm_segs[seg]._ds_paddr = curaddr;
+ map->dm_segs[seg]._ds_vaddr = vaddr;
+ first = 0;
+ } else {
+ if ((bus_addr_t)curaddr == lastaddr &&
+ (map->dm_segs[seg].ds_len + sgsize) <=
+ map->_dm_maxsegsz &&
+ (map->_dm_boundary == 0 ||
+ (map->dm_segs[seg].ds_addr & bmask) ==
+ ((bus_addr_t)curaddr & bmask)))
+ map->dm_segs[seg].ds_len += sgsize;
+ else {
+ if (++seg >= map->_dm_segcnt)
+ break;
+ map->dm_segs[seg].ds_addr = curaddr;
+ map->dm_segs[seg].ds_len = sgsize;
+ map->dm_segs[seg]._ds_paddr = curaddr;
+ map->dm_segs[seg]._ds_vaddr = vaddr;
+ }
+ }
+
+ lastaddr = (bus_addr_t)curaddr + sgsize;
+ vaddr += sgsize;
+ buflen -= sgsize;
+ }
+
+ *segp = seg;
+ *lastaddrp = lastaddr;
+
+ /*
+ * Did we fit?
+ */
+ if (buflen != 0)
+ return (EFBIG); /* XXX better return value here? */
+
+ return (0);
+}
+
/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 *
 * On success, segs[0..*rsegs-1] describe physically contiguous runs
 * covering `size' bytes between `low' and `high'.
 */
int
_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	vm_page_t m;
	struct pglist mlist;
	int curseg, error, plaflag;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	plaflag = flags & BUS_DMA_NOWAIT ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
	if (flags & BUS_DMA_ZERO)
		plaflag |= UVM_PLA_ZERO;

	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, plaflag);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq);

	/* Coalesce physically adjacent pages into segments. */
	for (; m != NULL; m = TAILQ_NEXT(m, pageq)) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("vm_page_alloc_memory returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_dmamem_alloc_range");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			/* Gap in the physical run: start a new segment. */
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
--- /dev/null
+/*
+ * Copyright (c) 2001-2003 Opsycon AB (www.opsycon.se / www.opsycon.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+/*
+ * Simple generic bus access primitives.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <machine/bus.h>
+#include <uvm/uvm_extern.h>
+
/*
 * Generic bus space tag for memory-mapped device access: handles are
 * plain kernel virtual addresses, so every accessor below is a direct
 * volatile load/store at (handle + offset).
 */
bus_space_t riscv64_bs_tag = {
	.bus_base = 0ULL, // XXX
	.bus_private = NULL,
	._space_read_1 = generic_space_read_1,
	._space_write_1 = generic_space_write_1,
	._space_read_2 = generic_space_read_2,
	._space_write_2 = generic_space_write_2,
	._space_read_4 = generic_space_read_4,
	._space_write_4 = generic_space_write_4,
	._space_read_8 = generic_space_read_8,
	._space_write_8 = generic_space_write_8,
	._space_read_raw_2 = generic_space_read_raw_2,
	._space_write_raw_2 = generic_space_write_raw_2,
	._space_read_raw_4 = generic_space_read_raw_4,
	._space_write_raw_4 = generic_space_write_raw_4,
	._space_read_raw_8 = generic_space_read_raw_8,
	._space_write_raw_8 = generic_space_write_raw_8,
	._space_map = generic_space_map,
	._space_unmap = generic_space_unmap,
	._space_subregion = generic_space_region,
	._space_vaddr = generic_space_vaddr,
	._space_mmap = generic_space_mmap
};
/* Tag used for the FDT console device — same generic accessors. */
bus_space_t *fdt_cons_bs_tag = &riscv64_bs_tag;
+
+uint8_t
+generic_space_read_1(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
+{
+ return *(volatile uint8_t *)(h + o);
+}
+
+uint16_t
+generic_space_read_2(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
+{
+ return *(volatile uint16_t *)(h + o);
+}
+
+uint32_t
+generic_space_read_4(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
+{
+ return *(volatile uint32_t *)(h + o);
+}
+
+uint64_t
+generic_space_read_8(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
+{
+ return *(volatile uint64_t *)(h + o);
+}
+
+void
+generic_space_write_1(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
+ uint8_t v)
+{
+ *(volatile uint8_t *)(h + o) = v;
+}
+
+void
+generic_space_write_2(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
+ uint16_t v)
+{
+ *(volatile uint16_t *)(h + o) = v;
+}
+
+void
+generic_space_write_4(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
+ uint32_t v)
+{
+ *(volatile uint32_t *)(h + o) = v;
+}
+
+void
+generic_space_write_8(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
+ uint64_t v)
+{
+ *(volatile uint64_t *)(h + o) = v;
+}
+
+void
+generic_space_read_raw_2(bus_space_tag_t t, bus_space_handle_t h, bus_addr_t o,
+ uint8_t *buf, bus_size_t len)
+{
+ volatile uint16_t *addr = (volatile uint16_t *)(h + o);
+ len >>= 1;
+ while (len-- != 0) {
+ *(uint16_t *)buf = *addr;
+ buf += 2;
+ }
+}
+
+void
+generic_space_write_raw_2(bus_space_tag_t t, bus_space_handle_t h, bus_addr_t o,
+ const uint8_t *buf, bus_size_t len)
+{
+ volatile uint16_t *addr = (volatile uint16_t *)(h + o);
+ len >>= 1;
+ while (len-- != 0) {
+ *addr = *(uint16_t *)buf;
+ buf += 2;
+ }
+}
+
+void
+generic_space_read_raw_4(bus_space_tag_t t, bus_space_handle_t h, bus_addr_t o,
+ uint8_t *buf, bus_size_t len)
+{
+ volatile uint32_t *addr = (volatile uint32_t *)(h + o);
+ len >>= 2;
+ while (len-- != 0) {
+ *(uint32_t *)buf = *addr;
+ buf += 4;
+ }
+}
+
+void
+generic_space_write_raw_4(bus_space_tag_t t, bus_space_handle_t h, bus_addr_t o,
+ const uint8_t *buf, bus_size_t len)
+{
+ volatile uint32_t *addr = (volatile uint32_t *)(h + o);
+ len >>= 2;
+ while (len-- != 0) {
+ *addr = *(uint32_t *)buf;
+ buf += 4;
+ }
+}
+
+void
+generic_space_read_raw_8(bus_space_tag_t t, bus_space_handle_t h, bus_addr_t o,
+ uint8_t *buf, bus_size_t len)
+{
+ volatile uint64_t *addr = (volatile uint64_t *)(h + o);
+ len >>= 3;
+ while (len-- != 0) {
+ *(uint64_t *)buf = *addr;
+ buf += 8;
+ }
+}
+
+void
+generic_space_write_raw_8(bus_space_tag_t t, bus_space_handle_t h, bus_addr_t o,
+ const uint8_t *buf, bus_size_t len)
+{
+ volatile uint64_t *addr = (volatile uint64_t *)(h + o);
+ len >>= 3;
+ while (len-- != 0) {
+ *addr = *(uint64_t *)buf;
+ buf += 8;
+ }
+}
+
+int
+generic_space_map(bus_space_tag_t t, bus_addr_t offs, bus_size_t size,
+ int flags, bus_space_handle_t *bshp)
+{
+ u_long startpa, endpa, pa;
+ vaddr_t va;
+ int cache = flags & BUS_SPACE_MAP_CACHEABLE ?
+ PMAP_CACHE_WB : PMAP_CACHE_DEV;
+
+ startpa = trunc_page(offs);
+ endpa = round_page(offs + size);
+
+ va = (vaddr_t)km_alloc(endpa - startpa, &kv_any, &kp_none, &kd_nowait);
+ if (! va)
+ return(ENOMEM);
+
+ *bshp = (bus_space_handle_t)(va + (offs - startpa));
+
+ for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
+ pmap_kenter_cache(va, pa, PROT_READ | PROT_WRITE, cache);
+ }
+ pmap_update(pmap_kernel());
+
+ return(0);
+}
+
+void
+generic_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size)
+{
+ vaddr_t va, endva;
+
+ va = trunc_page((vaddr_t)bsh);
+ endva = round_page((vaddr_t)bsh + size);
+
+ pmap_kremove(va, endva - va);
+ pmap_update(pmap_kernel());
+ km_free((void *)va, endva - va, &kv_any, &kp_none);
+}
+
+int
+generic_space_region(bus_space_tag_t t, bus_space_handle_t bsh,
+ bus_size_t offset, bus_size_t size, bus_space_handle_t *nbshp)
+{
+ *nbshp = bsh + offset;
+ return 0;
+}
+
+void *
+generic_space_vaddr(bus_space_tag_t t, bus_space_handle_t h)
+{
+ return (void *)h;
+}
+
+paddr_t
+generic_space_mmap(bus_space_tag_t t, bus_addr_t addr, off_t off,
+ int prot, int flags)
+{
+ return (addr + off);
+}
--- /dev/null
+/*
+ * Copyright (c) 1994, 1995 Charles M. Hannum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Charles Hannum.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/device.h>
+#include <sys/disklabel.h>
+#include <sys/ioctl.h>
+#include <sys/tty.h>
+#include <sys/vnode.h>
+
+#include <machine/conf.h>
+
+#include "sd.h"
+#include "cd.h"
+#include "uk.h"
+#include "vnd.h"
+#include "rd.h"
+//#include "apm.h"
+
/*
 * Block device switch: the array index is the block device major
 * number.  Unpopulated majors are wired to bdev_notdef().
 */
struct bdevsw bdevsw[] =
{
	/* XXX no riscv machine will have this but we are using arm64 as a template */
	bdev_notdef(),			/* */
	bdev_swap_init(1,sw),		/* 1: swap pseudo-device */
	bdev_notdef(),			/* 2: was floppy diskette */
	bdev_notdef(),			/* 3 */
	bdev_disk_init(NSD,sd),		/* 4: SCSI disk */
	bdev_notdef(),			/* 5: was: SCSI tape */
	bdev_disk_init(NCD,cd),		/* 6: SCSI CD-ROM */
	bdev_notdef(),			/* 7 */
	bdev_disk_init(NRD,rd),		/* 8: ram disk driver */
	bdev_notdef(),			/* 9 */
	bdev_notdef(),			/* 10 */
	bdev_notdef(),			/* 11 */
	bdev_notdef(),			/* 12 */
	bdev_notdef(),			/* 13 */
	bdev_disk_init(NVND,vnd),	/* 14: vnode disk driver */
	bdev_notdef(),			/* 15: was: Sony CD-ROM */
	bdev_notdef(),			/* 16: was: concatenated disk driver */
	bdev_notdef(),			/* 17 */
	bdev_notdef(),			/* 18 */
};
/* Number of valid block majors (used for bounds checking). */
int nblkdev = nitems(bdevsw);
+
/*
 * NOTE(review): the cdev_*_init() helper macros below are not referenced
 * by the cdevsw table in this file — presumably inherited from the
 * i386/arm64 template this port was copied from; confirm before removing.
 */

/* open, close, read, write, ioctl, tty, mmap */
#define cdev_pc_init(c,n) { \
	dev_init(c,n,open), dev_init(c,n,close), dev_init(c,n,read), \
	dev_init(c,n,write), dev_init(c,n,ioctl), dev_init(c,n,stop), \
	dev_init(c,n,tty), ttselect, dev_init(c,n,mmap), D_TTY }

/* open, close, read, ioctl */
#define cdev_joy_init(c,n) { \
	dev_init(c,n,open), dev_init(c,n,close), dev_init(c,n,read), \
	(dev_type_write((*))) enodev, dev_init(c,n,ioctl), \
	(dev_type_stop((*))) enodev, 0, seltrue, \
	(dev_type_mmap((*))) enodev }

/* open, close, ioctl, select -- XXX should be a generic device */
#define cdev_ocis_init(c,n) { \
	dev_init(c,n,open), dev_init(c,n,close), (dev_type_read((*))) enodev, \
	(dev_type_write((*))) enodev, dev_init(c,n,ioctl), \
	(dev_type_stop((*))) enodev, 0, dev_init(c,n,poll), \
	(dev_type_mmap((*))) enodev, 0 }

/* open, close, read */
#define cdev_nvram_init(c,n) { \
	dev_init(c,n,open), dev_init(c,n,close), dev_init(c,n,read), \
	(dev_type_write((*))) enodev, (dev_type_ioctl((*))) enodev, \
	(dev_type_stop((*))) enodev, 0, seltrue, \
	(dev_type_mmap((*))) enodev, 0 }
+
+
+#define mmread mmrw
+#define mmwrite mmrw
+cdev_decl(mm);
+#include "bio.h"
+#include "pty.h"
+#include "com.h"
+cdev_decl(com);
+#include "lpt.h"
+cdev_decl(lpt);
+#include "ch.h"
+#include "bpfilter.h"
+cdev_decl(spkr);
+#include "tun.h"
+#include "audio.h"
+#include "video.h"
+#include "midi.h"
+//#include "bktr.h"
+#include "ksyms.h"
+//#include "usb.h"
+//#include "uhid.h"
+//#include "fido.h"
+//#include "ugen.h"
+//#include "ulpt.h"
+//#include "ucom.h"
+#include "radio.h"
+//#include "drm.h"
+cdev_decl(drm);
+
+//#include "wsdisplay.h"
+//#include "wskbd.h"
+//#include "wsmouse.h"
+//#include "wsmux.h"
+
+#ifdef USER_PCICONF
+#include "pci.h"
+cdev_decl(pci);
+#endif
+
+//#include "dt.h"
+#include "pf.h"
+#include "hotplug.h"
+#include "vscsi.h"
+#include "pppx.h"
+#include "fuse.h"
+//#include "openprom.h"
+#include "gpio.h"
+#include "ipmi.h"
+#include "switch.h"
+
/*
 * Character device switch: the array index is the character device
 * major number.  Entries left as cdev_notdef() are unpopulated; the
 * commented-out initializers inside some comments record what the
 * slot holds on other ports.
 */
struct cdevsw cdevsw[] =
{
	cdev_cn_init(1,cn),		/* 0: virtual console */
	cdev_ctty_init(1,ctty),		/* 1: controlling terminal */
	cdev_mm_init(1,mm),		/* 2: /dev/{null,mem,kmem,...} */
	cdev_notdef(),			/* 3: */
	cdev_notdef(),			/* 4 was /dev/drum */
	cdev_tty_init(NPTY,pts),	/* 5: pseudo-tty slave */
	cdev_ptc_init(NPTY,ptc),	/* 6: pseudo-tty master */
	cdev_log_init(1,log),		/* 7: /dev/klog */
	cdev_tty_init(NCOM,com),	/* 8: serial port */
	cdev_notdef(),			/* 9: was floppy disk */
	cdev_notdef(),			/* 10 */
	cdev_notdef(),			/* 11: Sony CD-ROM */
	cdev_notdef(),			/* 12: frame buffers, etc. \
	cdev_wsdisplay_init(NWSDISPLAY, \
	wsdisplay), */
	cdev_disk_init(NSD,sd),		/* 13: SCSI disk */
	cdev_notdef(),			/* 14: was: SCSI tape */
	cdev_disk_init(NCD,cd),		/* 15: SCSI CD-ROM */
	cdev_lpt_init(NLPT,lpt),	/* 16: parallel printer */
	cdev_ch_init(NCH,ch),		/* 17: SCSI autochanger */
	cdev_notdef(),			/* 18: was: concatenated disk driver */
	cdev_notdef(),			/* 19 */
	cdev_uk_init(NUK,uk),		/* 20: unknown SCSI */
	cdev_notdef(),			/* 21 */
	cdev_fd_init(1,filedesc),	/* 22: file descriptor pseudo-device */
	cdev_bpf_init(NBPFILTER,bpf),	/* 23: Berkeley packet filter */
	cdev_notdef(),			/* 24 */
	cdev_notdef(),			/* 25 */
	cdev_notdef(),			/* 26 */
	cdev_notdef(),			/* 27 */
	cdev_notdef(),			/* 28 was LKM */
	cdev_notdef(),			/* 29 */
	cdev_notdef(),			/* 30: dynamic tracer \
	cdev_dt_init(NDT,dt), */
	cdev_notdef(),			/* 31 */
	cdev_notdef(),			/* 32 */
	cdev_notdef(),			/* 33 */
	cdev_notdef(),			/* 34 */
	cdev_notdef(),			/* 35: Microsoft mouse */
	cdev_notdef(),			/* 36: Logitech mouse */
	cdev_notdef(),			/* 37: Extended PS/2 mouse */
	cdev_notdef(),			/* 38: was: Cyclom serial port */
	cdev_notdef(),			/* 39: Mitsumi CD-ROM */
	cdev_tun_init(NTUN,tun),	/* 40: network tunnel */
	cdev_disk_init(NVND,vnd),	/* 41: vnode disk driver */
	cdev_audio_init(NAUDIO,audio),	/* 42: generic audio I/O */
	cdev_notdef(),			/* 43 */
	cdev_video_init(NVIDEO,video),	/* 44: generic video I/O */
	cdev_random_init(1,random),	/* 45: random data source */
	cdev_notdef(),			/* 46 */
	cdev_disk_init(NRD,rd),		/* 47: ram disk driver */
	cdev_notdef(),			/* 48 */
	cdev_notdef(),			/* 49: Bt848 video capture device \
	cdev_bktr_init(NBKTR,bktr), */
	cdev_ksyms_init(NKSYMS,ksyms),	/* 50: Kernel symbols device */
	cdev_notdef(),			/* 51 */
	cdev_midi_init(NMIDI,midi),	/* 52: MIDI I/O */
	cdev_notdef(),			/* 53 was: sequencer I/O */
	cdev_notdef(),			/* 54 was: RAIDframe disk driver */
	cdev_notdef(),			/* 55: */
	/* The following slots are reserved for isdn4bsd. */
	cdev_notdef(),			/* 56: i4b main device */
	cdev_notdef(),			/* 57: i4b control device */
	cdev_notdef(),			/* 58: i4b raw b-channel access */
	cdev_notdef(),			/* 59: i4b trace device */
	cdev_notdef(),			/* 60: i4b phone device */
	/* End of reserved slots for isdn4bsd. */
	cdev_notdef(),			/* 61: USB controller \
	cdev_usb_init(NUSB,usb), */
	cdev_notdef(),			/* 62: USB generic HID \
	cdev_usbdev_init(NUHID,uhid), */
	cdev_notdef(),			/* 63: USB generic driver \
	cdev_usbdev_init(NUGEN,ugen), */
	cdev_notdef(),			/* 64: USB printers \
	cdev_ulpt_init(NULPT,ulpt), */
	cdev_notdef(),			/* 65: urio */
	cdev_notdef(),			/* 66: USB tty \
	cdev_tty_init(NUCOM,ucom), */
	cdev_notdef(),			/* 67: keyboards \
	cdev_mouse_init(NWSKBD, wskbd), */
	cdev_notdef(),			/* 68: mice \
	cdev_mouse_init(NWSMOUSE, \
	wsmouse), */
	cdev_notdef(),			/* 69: ws multiplexor \
	cdev_mouse_init(NWSMUX, wsmux), */
	cdev_notdef(),			/* 70: /dev/openprom \
	cdev_openprom_init(NOPENPROM,openprom), */
	cdev_notdef(),			/* 71: was: Cyclades-Z serial port */
#ifdef USER_PCICONF
	cdev_pci_init(NPCI,pci),	/* 72: PCI user */
#else
	cdev_notdef(),
#endif
	cdev_pf_init(NPF,pf),		/* 73: packet filter */
	cdev_notdef(),			/* 74: ALTQ (deprecated) */
	cdev_notdef(),
	cdev_radio_init(NRADIO, radio), /* 76: generic radio I/O */
	cdev_notdef(),			/* 77: was USB scanners */
	cdev_notdef(),			/* 78: was: system call tracing */
	cdev_bio_init(NBIO,bio),	/* 79: ioctl tunnel */
	cdev_notdef(),			/* 80 */
	cdev_ptm_init(NPTY,ptm),	/* 81: pseudo-tty ptm device */
	cdev_hotplug_init(NHOTPLUG,hotplug), /* 82: devices hot plugging */
	cdev_notdef(),			/* 83: apm \
	cdev_acpiapm_init(NAPM,acpiapm), */
	cdev_notdef(),			/* 84 */
	cdev_notdef(),			/* 85 */
	cdev_notdef(),			/* 86 */
	cdev_notdef(),			/* 87: drm \
	cdev_drm_init(NDRM,drm), */
	cdev_notdef(),			/* 88: GPIO interface \
	cdev_gpio_init(NGPIO,gpio), */
	cdev_vscsi_init(NVSCSI,vscsi),	/* 89: vscsi */
	cdev_disk_init(1,diskmap),	/* 90: disk mapper */
	cdev_pppx_init(NPPPX,pppx),	/* 91: pppx */
	cdev_fuse_init(NFUSE,fuse),	/* 92: fuse */
	cdev_tun_init(NTUN,tap),	/* 93: Ethernet network tunnel */
	cdev_notdef(),			/* 94 */
	cdev_notdef(),			/* 95 */
	cdev_ipmi_init(NIPMI,ipmi),	/* 96: ipmi */
	cdev_switch_init(NSWITCH,switch), /* 97: switch(4) control interface */
	cdev_notdef(),			/* 98: FIDO/U2F security key \
	cdev_fido_init(NFIDO,fido), */
	cdev_notdef(),			/* 99: PPP Access Concentrator \
	cdev_pppx_init(NPPPX,pppac), */
};
/* Number of valid character majors (used for bounds checking). */
int nchrdev = nitems(cdevsw);
+
/*
 * Swapdev is a fake device implemented
 * in sw.c used only internally to get to swstrategy.
 * It cannot be provided to the users, because the
 * swstrategy routine munches the b_dev and b_blkno entries
 * before calling the appropriate driver. This would horribly
 * confuse, e.g. the hashing routines. Instead, /dev/drum is
 * provided as a character (raw) device.
 */
/* Block major BMAJ_SW, minor 0 — matches slot 1 in bdevsw above. */
dev_t swapdev = makedev(BMAJ_SW, 0);
+
+/*
+ * Returns true if dev is /dev/mem or /dev/kmem.
+ */
+int
+iskmemdev(dev_t dev)
+{
+
+ return (major(dev) == CMAJ_MM && (minor(dev) < 2 || minor(dev) == 14));
+}
+
+/*
+ * Returns true if dev is /dev/zero.
+ */
+int
+iszerodev(dev_t dev)
+{
+
+ return (major(dev) == CMAJ_MM && minor(dev) == 12);
+}
+
+dev_t
+getnulldev(void)
+{
+ return makedev(CMAJ_MM, 2);
+}
+
/*
 * Map character device majors to their block device counterparts;
 * indexed by character major, value is the block major (or NODEV
 * when the device has no block flavour).  Must stay in sync with
 * cdevsw and bdevsw above.
 */
int chrtoblktbl[] = {
	/*VCHR*/	/*VBLK*/
	/* 0 */		NODEV,
	/* 1 */		NODEV,
	/* 2 */		NODEV,
	/* 3 */		NODEV,
	/* 4 */		NODEV,
	/* 5 */		NODEV,
	/* 6 */		NODEV,
	/* 7 */		NODEV,
	/* 8 */		NODEV,
	/* 9 */		NODEV,	/* was fd */
	/* 10 */	NODEV,
	/* 11 */	NODEV,
	/* 12 */	NODEV,
	/* 13 */	4,	/* sd */
	/* 14 */	NODEV,
	/* 15 */	6,	/* cd */
	/* 16 */	NODEV,
	/* 17 */	NODEV,
	/* 18 */	NODEV,
	/* 19 */	NODEV,
	/* 20 */	NODEV,
	/* 21 */	NODEV,
	/* 22 */	NODEV,
	/* 23 */	NODEV,
	/* 24 */	NODEV,
	/* 25 */	NODEV,
	/* 26 */	NODEV,
	/* 27 */	NODEV,
	/* 28 */	NODEV,
	/* 29 */	NODEV,
	/* 30 */	NODEV,
	/* 31 */	NODEV,
	/* 32 */	NODEV,
	/* 33 */	NODEV,
	/* 34 */	NODEV,
	/* 35 */	NODEV,
	/* 36 */	NODEV,
	/* 37 */	NODEV,
	/* 38 */	NODEV,
	/* 39 */	NODEV,
	/* 40 */	NODEV,
	/* 41 */	14,	/* vnd */
	/* 42 */	NODEV,
	/* 43 */	NODEV,
	/* 44 */	NODEV,
	/* 45 */	NODEV,
	/* 46 */	NODEV,
	/* 47 */	8,	/* rd */
};

/* Number of entries in chrtoblktbl (bounds check for lookups). */
int nchrtoblktbl = nitems(chrtoblktbl);
+
/*
 * In order to map BSD bdev numbers of disks to their firmware equivalents
 * we use several heuristics, one being using checksums of the first
 * few blocks of a disk to get a signature we can match with /boot's
 * computed signatures.  To know where from to read, we must provide a
 * disk driver name -> bdev major number table, which follows.
 * Note: this comment was inherited from the x86 ports (it originally
 * spoke of the BIOS and floppies); the details may not apply on riscv64.
 */
+int findblkmajor(struct device *dv);
+dev_t dev_rawpart(struct device *); /* XXX */
+
+/*
+ * Return the raw-partition dev_t of a disk device so /boot's block
+ * checksum can be matched, or NODEV for devices we don't checksum.
+ */
+dev_t
+dev_rawpart(struct device *dv)
+{
+ int majdev;
+
+ majdev = findblkmajor(dv);
+
+ switch (majdev) {
+ /* add here any device you want to be checksummed on boot */
+ case BMAJ_WD:
+ case BMAJ_SD:
+  return (MAKEDISKDEV(majdev, dv->dv_unit, RAW_PART));
+ default:
+  return (NODEV);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2020 Brian Bamsch <bbamsch@google.com>
+ * Copyright (c) 2015 Dale Rahn <drahn@dalerahn.com>
+ * Copyright (c) 2014 Patrick Wildt <patrick@blueri.se>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "assym.h"
+#include <sys/syscall.h>
+#include <machine/asm.h>
+#include <machine/vmparam.h>
+#include <machine/riscvreg.h>
+#include <sys/errno.h>
+
+
+ .text
+ .align 4
+
+/*
+ * a0 = user space address
+ * a1 = kernel space address
+ * a2 = length
+ *
+ * Copies bytes from user space to kernel space
+ */
+ENTRY(copyin)
+ RETGUARD_SETUP(copyio, a6)
+ beqz a2, .Lcopyiodone /* If len = 0, skip loop. */
+ /* Check whether source+len overflows. */
+ add a3, a0, a2
+ bltu a3, a0, .Lcopyiofault_nopcb
+ /* Check that source+len is in userspace. */
+ li a4, VM_MAXUSER_ADDRESS
+ bgt a3, a4, .Lcopyiofault_nopcb
+
+ /* Install the temporary fault handler; a3 then holds the previous
+  * handler (swapped), restored on every exit path below. */
+ la a3, .Lcopyiofault_user
+ SWAP_FAULT_HANDLER(a3, a4, a5)
+ ENTER_USER_ACCESS(a4)
+
+/* Shared byte-copy tail; copyout and kcopy jump here too. */
+// XXX optimize?
+.Lcopyio:
+1: lb a4, 0(a0)
+ addi a0, a0, 1
+ sb a4, 0(a1)
+ addi a1, a1, 1
+ addi a2, a2, -1
+ bnez a2, 1b
+
+ EXIT_USER_ACCESS(a4)
+ SET_FAULT_HANDLER(a3, a4) /* restore previous fault handler */
+.Lcopyiodone:
+ mv a0, x0 /* return 0 on success */
+ RETGUARD_CHECK(copyio, a6)
+ ret
+
+.Lcopyiofault_user:
+ EXIT_USER_ACCESS(a4)
+.Lcopyiofault:
+ SET_FAULT_HANDLER(a3, a4)
+.Lcopyiofault_nopcb:
+ li a0, EFAULT /* any failure returns EFAULT */
+ RETGUARD_CHECK(copyio, a6)
+ ret
+END(copyin)
+
+/*
+ * a0 = kernel space address
+ * a1 = user space address
+ * a2 = length
+ *
+ * Copies bytes from kernel space to user space
+ */
+
+ENTRY(copyout)
+ RETGUARD_SETUP(copyio, a6)
+ beqz a2, .Lcopyiodone /* If len = 0, skip loop. */
+ /* Check whether dest+len overflows. */
+ add a3, a1, a2
+ bltu a3, a1, .Lcopyiofault_nopcb
+ /* Check that dest+len is in userspace. */
+ li a4, VM_MAXUSER_ADDRESS
+ bgt a3, a4, .Lcopyiofault_nopcb
+
+ /*
+  * The handler address must be loaded into a3, matching copyin:
+  * the shared .Lcopyio tail and the fault paths restore the
+  * previous handler from a3 after SWAP_FAULT_HANDLER swaps it in.
+  */
+ la a3, .Lcopyiofault_user
+ SWAP_FAULT_HANDLER(a3, a4, a5)
+ ENTER_USER_ACCESS(a4)
+
+ j .Lcopyio
+END(copyout)
+
+/*
+ * a0 = kernel space source address
+ * a1 = kernel space destination address
+ * a2 = length
+ *
+ * Copies bytes from kernel space to kernel space, aborting on page fault
+ */
+
+ENTRY(kcopy)
+ RETGUARD_SETUP(copyio, a6)
+ beqz a2, .Lcopyiodone /* If len = 0, skip loop. */
+
+ /* No bounds checks: both addresses are kernel addresses. */
+ la a3, .Lcopyiofault
+ SWAP_FAULT_HANDLER(a3, a4, a5)
+
+ /*
+  * NOTE(review): the shared .Lcopyio tail executes EXIT_USER_ACCESS
+  * even though kcopy never entered user access -- confirm harmless.
+  */
+ j .Lcopyio
+END(kcopy)
--- /dev/null
+/*-
+ * Copyright (c) 2015-2018 Ruslan Bukin <br@bsdpad.com>
+ * Copyright (c) 2019 Mitchell Horne
+ * Copyright (c) 2020 Brian Bamsch <bbamsch@google.com>
+ * All rights reserved.
+ *
+ * Portions of this software were developed by SRI International and the
+ * University of Cambridge Computer Laboratory under DARPA/AFRL contract
+ * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Portions of this software were developed by the University of Cambridge
+ * Computer Laboratory as part of the CTSRD Project, with support from the
+ * UK Higher Education Innovation Fund (HEIF).
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "assym.h"
+#include <machine/asm.h>
+#include <machine/riscvreg.h>
+#include <sys/errno.h>
+
+/*
+ * Fault handler for the copy{in,out} functions below.
+ */
+ENTRY(copyio_fault)
+ SET_FAULT_HANDLER(x0, a1) /* Clear the handler */
+ EXIT_USER_ACCESS(a1)
+copyio_fault_nopcb:
+ /* Common error exit for copyin/copyout/copyinstr in this file. */
+ li a0, EFAULT
+ RETGUARD_CHECK(copyio, t6)
+ ret
+END(copyio_fault)
+
+/*
+ * copycommon - common copy routine
+ *
+ * a0 - Source address
+ * a1 - Destination address
+ * a2 - Size of copy
+ *
+ * Installs copyio_fault for the duration of the copy and enables
+ * kernel access to user pages.  Clobbers t0-t2, a4, a6 and a7.
+ */
+ .macro copycommon
+ la a6, copyio_fault /* Get the handler address */
+ SET_FAULT_HANDLER(a6, a7) /* Set the handler */
+ ENTER_USER_ACCESS(a7)
+
+ li t2, XLEN_BYTES
+ blt a2, t2, 4f /* Byte-copy if len < XLEN_BYTES */
+
+ /*
+ * Compare lower bits of src and dest.
+ * If they are aligned with each other, we can do word copy.
+ */
+ andi t0, a0, (XLEN_BYTES-1) /* Low bits of src */
+ andi t1, a1, (XLEN_BYTES-1) /* Low bits of dest */
+ bne t0, t1, 4f /* Misaligned. Go to byte copy */
+ beqz t0, 2f /* Already word-aligned, skip ahead */
+
+ /* Byte copy until the first word-aligned address */
+1: lb a4, 0(a0) /* Load byte from src */
+ addi a0, a0, 1
+ sb a4, 0(a1) /* Store byte in dest */
+ addi a1, a1, 1
+ addi a2, a2, -1 /* len-- */
+ andi t0, a0, (XLEN_BYTES-1)
+ bnez t0, 1b
+ j 3f
+
+ /* Copy words */
+2: ld a4, 0(a0) /* Load word from src */
+ addi a0, a0, XLEN_BYTES
+ sd a4, 0(a1) /* Store word in dest */
+ addi a1, a1, XLEN_BYTES
+ addi a2, a2, -XLEN_BYTES /* len -= XLEN_BYTES */
+3: bgeu a2, t2, 2b /* Again if len >= XLEN_BYTES */
+
+ /* Check if we're finished */
+ beqz a2, 5f
+
+ /* Copy any remaining bytes */
+4: lb a4, 0(a0) /* Load byte from src */
+ addi a0, a0, 1
+ sb a4, 0(a1) /* Store byte in dest */
+ addi a1, a1, 1
+ addi a2, a2, -1 /* len-- */
+ bnez a2, 4b
+
+5: EXIT_USER_ACCESS(a7)
+ SET_FAULT_HANDLER(x0, a7) /* Clear the handler */
+ .endm
+
+/*
+ * Copies from a kernel to user address
+ *
+ * int copyout(const void *kaddr, void *udaddr, size_t len)
+ */
+ENTRY(copyout)
+ RETGUARD_SETUP(copyio, t6)
+ beqz a2, copyout_end /* If len == 0 then skip loop */
+ add a3, a1, a2
+ /*
+  * Reject udaddr+len wrap-around: a wrapped sum looks negative to
+  * the signed bgt below and would pass the userspace bound check.
+  */
+ bltu a3, a1, copyio_fault_nopcb
+ li a4, VM_MAXUSER_ADDRESS
+ bgt a3, a4, copyio_fault_nopcb
+
+ copycommon
+
+copyout_end:
+ li a0, 0 /* return 0 */
+ RETGUARD_CHECK(copyio, t6)
+ ret
+END(copyout)
+
+/*
+ * Copies from a user to kernel address
+ *
+ * int copyin(const void *uaddr, void *kaddr, size_t len)
+ */
+ENTRY(copyin)
+ RETGUARD_SETUP(copyio, t6)
+ beqz a2, copyin_end /* If len == 0 then skip loop */
+ add a3, a0, a2
+ /*
+  * Reject uaddr+len wrap-around: a wrapped sum looks negative to
+  * the signed bgt below and would pass the userspace bound check.
+  */
+ bltu a3, a0, copyio_fault_nopcb
+ li a4, VM_MAXUSER_ADDRESS
+ bgt a3, a4, copyio_fault_nopcb
+
+ copycommon
+
+copyin_end:
+ li a0, 0 /* return 0 */
+ RETGUARD_CHECK(copyio, t6)
+ ret
+END(copyin)
+
+/*
+ * Copies a string from a user to kernel address
+ *
+ * int copyinstr(const void *udaddr, void *kaddr, size_t len, size_t *done)
+ */
+ENTRY(copyinstr)
+ RETGUARD_SETUP(copyinstr, t6)
+ mv a5, x0 /* count = 0 */
+ beqz a2, 3f /* If len == 0 then skip loop */
+
+ la a6, copyio_fault /* Get the handler address */
+ SET_FAULT_HANDLER(a6, a7) /* Set the handler */
+ ENTER_USER_ACCESS(a7)
+
+ li a7, VM_MAXUSER_ADDRESS
+1: bgt a0, a7, copyio_fault /* source must stay in userspace */
+ lb a4, 0(a0) /* Load from uaddr */
+ addi a0, a0, 1
+ sb a4, 0(a1) /* Store in kaddr */
+ addi a1, a1, 1
+ beqz a4, 2f /* NUL copied: success */
+ addi a2, a2, -1 /* len-- */
+ addi a5, a5, 1 /* count++ */
+ bnez a2, 1b
+
+2: EXIT_USER_ACCESS(a7)
+ SET_FAULT_HANDLER(x0, a7) /* Clear the handler */
+
+3: beqz a3, 4f /* Check if done != NULL */
+ /* NOTE(review): the stored count includes the NUL -- confirm */
+ addi a5, a5, 1 /* count++ */
+ sd a5, 0(a3) /* done = count */
+
+4: mv a0, x0 /* return 0 */
+ /*
+  * NOTE(review): if len was 0 on entry, a4 is never written before
+  * this test, so the return value is unpredictable -- confirm
+  * callers never pass len == 0 or initialize a4.
+  */
+ beqz a4, 5f
+ li a0, ENAMETOOLONG
+
+5: RETGUARD_CHECK(copyinstr, t6)
+ ret
+END(copyinstr)
--- /dev/null
+/* $OpenBSD: copystr.S,v 1.1 2021/04/23 02:42:17 drahn Exp $ */
+/*
+ * Copyright (c) 2015 Dale Rahn <drahn@dalerahn.com>
+ * Copyright (c) 2014 Patrick Wildt <patrick@blueri.se>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "assym.h"
+#include <machine/asm.h>
+#include <machine/param.h>
+#include <machine/vmparam.h>
+#include <machine/riscvreg.h>
+#include <sys/errno.h>
+
+ .text
+ .align 4
+
+/*
+ * a0 - from
+ * a1 - to
+ * a2 - maxlens
+ * a3 - lencopied
+ *
+ * Copy string from a0 to a1
+ */
+ENTRY(copystr)
+ RETGUARD_SETUP(copyiostr, a6)
+ beqz a2, .Lcopyiostrtoolong_early /* maxlen == 0: ENAMETOOLONG */
+ la a4, .Lcopyiostrfault
+ SWAP_FAULT_HANDLER(a4, t0, t1)
+
+ mv a5, x0 /* copied length = 0 */
+
+1: lb t0, 0(a0)
+ addi a0, a0, 1
+ sb t0, 0(a1)
+ addi a1, a1, 1
+ addi a2, a2, -1
+ addi a5, a5, 1 /* length counted includes the NUL */
+ beqz t0, .Lcopyiostrsuccess
+ bnez a2, 1b
+
+/*
+ * Shared exit paths; the _user variants (targeted by copyinstr and
+ * copyoutstr below) additionally leave user access.
+ * NOTE(review): copystr never enters user access but falls through
+ * into .Lcopyiostrtoolong_user on overflow, executing
+ * EXIT_USER_ACCESS -- confirm this is intended/harmless.
+ */
+.Lcopyiostrtoolong_user:
+ EXIT_USER_ACCESS(t0)
+.Lcopyiostrtoolong:
+ SET_FAULT_HANDLER(a4, t0) /* restore previous handler */
+.Lcopyiostrtoolong_early:
+ li a0, ENAMETOOLONG
+ j .Lcopyiostrcleanup
+
+.Lcopyiostrfault_user:
+ EXIT_USER_ACCESS(t0)
+.Lcopyiostrfault:
+ SET_FAULT_HANDLER(a4, t0)
+ li a0, EFAULT
+ j .Lcopyiostrcleanup
+
+.Lcopyiostrsuccess_user:
+ EXIT_USER_ACCESS(t0)
+.Lcopyiostrsuccess:
+ SET_FAULT_HANDLER(a4, t0)
+ mv a0, x0
+
+.Lcopyiostrcleanup:
+ beqz a3, 2f /* store copied length if lencopied != NULL */
+ sd a5, 0(a3)
+2:
+ RETGUARD_CHECK(copyiostr, a6)
+ ret
+
+/*
+ * a0 - user space address
+ * a1 - kernel space address
+ * a2 - maxlens
+ * a3 - lencopied
+ *
+ * Copy string from user space to kernel space
+ */
+ENTRY(copyinstr)
+ RETGUARD_SETUP(copyiostr, a6)
+ beqz a2, .Lcopyiostrtoolong_early /* maxlen == 0: ENAMETOOLONG */
+ la a4, .Lcopyiostrfault_user
+ SWAP_FAULT_HANDLER(a4, t0, t1)
+ ENTER_USER_ACCESS(t0)
+
+ mv a5, x0 /* copied length = 0 */
+
+ li t1, VM_MAXUSER_ADDRESS
+1: bgt a0, t1, .Lcopyiostrfault_user /* source must stay in userspace */
+ lb t0, 0(a0)
+ addi a0, a0, 1
+ sb t0, 0(a1)
+ addi a1, a1, 1
+ addi a2, a2, -1
+ addi a5, a5, 1
+ beqz t0, .Lcopyiostrsuccess_user /* NUL reached: success */
+ bnez a2, 1b
+
+ j .Lcopyiostrtoolong_user /* buffer exhausted */
+END(copyinstr)
+
+/*
+ * a0 - kernel space address
+ * a1 - user space address
+ * a2 - maxlens
+ * a3 - lencopied
+ *
+ * Copy string from kernel space to user space
+ */
+ENTRY(copyoutstr)
+ RETGUARD_SETUP(copyiostr, a6)
+ beqz a2, .Lcopyiostrtoolong_early /* maxlen == 0: ENAMETOOLONG */
+ la a4, .Lcopyiostrfault_user
+ SWAP_FAULT_HANDLER(a4, t0, t1)
+ ENTER_USER_ACCESS(t0)
+
+ mv a5, x0 /* copied length = 0 */
+
+ li t1, VM_MAXUSER_ADDRESS
+1: bgt a1, t1, .Lcopyiostrfault_user /* dest must stay in userspace */
+ lb t0, 0(a0)
+ addi a0, a0, 1
+ sb t0, 0(a1)
+ addi a1, a1, 1
+ addi a2, a2, -1
+ addi a5, a5, 1
+ beqz t0, .Lcopyiostrsuccess_user /* NUL reached: success */
+ bnez a2, 1b
+
+ j .Lcopyiostrtoolong_user /* buffer exhausted */
+END(copyoutstr)
--- /dev/null
+/*
+ * Copyright (c) 2016 Dale Rahn <drahn@dalerahn.com>
+ * Copyright (c) 2017 Mark Kettenis <kettenis@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/device.h>
+#include <sys/sysctl.h>
+#include <sys/task.h>
+
+#include <uvm/uvm.h>
+
+#include <machine/fdt.h>
+#include <machine/elf.h>
+#include <machine/cpufunc.h>
+#include <machine/riscvreg.h>
+#include "../dev/timer.h"
+
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/ofw_clock.h>
+#include <dev/ofw/ofw_regulator.h>
+#include <dev/ofw/ofw_thermal.h>
+#include <dev/ofw/fdt.h>
+
+#if 0
+#include "psci.h"
+#if NPSCI > 0
+#include <dev/fdt/pscivar.h>
+#endif
+#endif
+
+/* CPU Identification */
+
+// from FreeBSD
+/*
+ * 0x0000 CPU ID unimplemented
+ * 0x0001 UC Berkeley Rocket repo
+ * 0x0002-0x7FFE Reserved for open-source repos
+ * 0x7FFF Reserved for extension
+ * 0x8000 Reserved for anonymous source
+ * 0x8001-0xFFFE Reserved for proprietary implementations
+ * 0xFFFF Reserved for extension
+ */
+
+#define CPU_IMPL_SHIFT 0
+#define CPU_IMPL_MASK (0xffff << CPU_IMPL_SHIFT)
+#define CPU_IMPL(mimpid) ((mimpid & CPU_IMPL_MASK) >> CPU_IMPL_SHIFT)
+
+#define CPU_PART_SHIFT 62
+#define CPU_PART_MASK (0x3ul << CPU_PART_SHIFT)
+#define CPU_PART(misa) ((misa & CPU_PART_MASK) >> CPU_PART_SHIFT)
+
+#define CPU_IMPL_UNIMPLEMEN 0x00
+#define CPU_IMPL_QEMU 0x01
+#define CPU_IMPL_UCB_ROCKET 0x02
+#define CPU_IMPL_SIFIVE 0x03
+
+#define CPU_PART_RV32 0x01
+#define CPU_PART_RV64 0x02
+#define CPU_PART_RV128 0x03
+
+/*
+ * PART ID has only 2 bits
+ *
+#define CPU_PART_QEMU_SPIKE_V1_9 0x0
+#define CPU_PART_QEMU_SPIKE_V1_10 0x0
+*/
+#define CPU_PART_QEMU_SIFIVE_E 0x01
+#define CPU_PART_QEMU_SIFIVE_U 0x02
+#define CPU_PART_QEMU_VIRT 0x03
+
+/* Hardware implementation info. These values may be empty. */
+register_t mvendorid; /* The CPU's JEDEC vendor ID */
+register_t marchid; /* The architecture ID */
+register_t mimpid; /* The implementation ID */
+
+/* Per-cpu identification results, filled in by cpu_identify(). */
+struct cpu_desc {
+ int cpu_impl; /* implementer id, CPU_IMPL(mimpid) */
+ int cpu_part_num; /* part id, CPU_PART(misa) */
+ const char *cpu_impl_name;
+ const char *cpu_part_name;
+};
+
+struct cpu_desc cpu_desc[MAXCPUS];
+
+/* Mapping of a part id to a printable name. */
+struct cpu_parts {
+ int part_id;
+ char *part_name;
+};
+
+/* List terminator; part_id -1 marks the end of a part table. */
+#define CPU_PART_NONE { -1, "Unknown Processor" }
+
+struct cpu_parts cpu_parts_std[] = {
+ { CPU_PART_RV32, "RV32" },
+ { CPU_PART_RV64, "RV64" },
+ { CPU_PART_RV128, "RV128" },
+ CPU_PART_NONE,
+};
+
+struct cpu_parts cpu_parts_qemu[] = {
+/*
+ { CPU_PART_QEMU_SPIKE_V1_9, "qemu-spike-V1.9.1" },
+ { CPU_PART_QEMU_SPIKE_V1_10, "qemu-spike-V1.10" },
+*/
+ { CPU_PART_QEMU_SIFIVE_E, "qemu-sifive-e" },
+ { CPU_PART_QEMU_SIFIVE_U, "qemu-sifive-u" },
+ { CPU_PART_QEMU_VIRT, "qemu-virt" },
+ CPU_PART_NONE,
+};
+
+struct cpu_parts cpu_parts_rocket[] = { /* placeholder */
+ CPU_PART_NONE,
+};
+
+struct cpu_parts cpu_parts_sifive[] = { /* placeholder */
+ CPU_PART_NONE,
+};
+
+/* riscv parts makers, each with its part table */
+const struct implementers {
+ int impl_id;
+ char *impl_name;
+ struct cpu_parts *impl_partlist;
+} cpu_implementers[] = {
+ { CPU_IMPL_QEMU, "QEMU", cpu_parts_qemu },
+ { CPU_IMPL_UCB_ROCKET, "UC Berkeley Rocket", cpu_parts_rocket },
+ { CPU_IMPL_SIFIVE, "SiFive", cpu_parts_sifive },
+ { CPU_IMPL_UNIMPLEMEN, "Unknown Implementer", cpu_parts_std },
+};
+
+char cpu_model[64]; /* NOTE(review): never written in this file -- confirm */
+int cpu_node; /* DT node used by cpu_clockspeed() */
+uint64_t elf_hwcap;//will need it for multiple heter-processors
+
+struct cpu_info *cpu_info_list = &cpu_info_primary;
+
+int cpu_match(struct device *, void *, void *);
+void cpu_attach(struct device *, struct device *, void *);
+
+/* autoconf glue for the "cpu" device */
+struct cfattach cpu_ca = {
+ sizeof(struct device), cpu_match, cpu_attach
+};
+
+struct cfdriver cpu_cd = {
+ NULL, "cpu", DV_DULL
+};
+#if 0 //XXX
+void cpu_flush_bp_psci(void);
+#endif
+
+/*
+ * The ISA string is made up of a small prefix (e.g. rv64) and up to 26 letters
+ * indicating the presence of the 26 possible standard extensions. Therefore 32
+ * characters will be sufficient.
+ */
+#define ISA_NAME_MAXLEN 32
+#define ISA_PREFIX "rv64" // ("rv" __XSTRING(XLEN))
+#define ISA_PREFIX_LEN (sizeof(ISA_PREFIX) - 1)
+
+/*
+ * Identify the implementer, part and supported ISA extensions of a
+ * cpu; accumulate the extension set common to all harts in elf_hwcap.
+ *
+ * XXX mimpid/misa are hardcoded for qemu-virt until they can be read
+ * from the hardware.
+ */
+void
+cpu_identify(struct cpu_info *ci)
+{
+ const struct cpu_parts *cpu_partsp;
+ int part_id;
+ int impl_id;
+ uint64_t mimpid;
+ uint64_t misa;
+ int cpu, i, node, len;
+
+ uint64_t *caps;
+ uint64_t hwcap;
+ char isa[ISA_NAME_MAXLEN];
+
+ cpu_partsp = NULL;
+
+ /* TODO: can we get mimpid and misa somewhere ? */
+ mimpid = 1; /* for qemu */
+ misa = (0x3ul << CPU_PART_SHIFT); /* for virt */
+
+ cpu = cpu_number();
+
+ /* Per-character lookup table: ISA letter -> HWCAP bit. */
+ caps = mallocarray(256, sizeof(uint64_t), M_DEVBUF, M_ZERO | M_WAITOK);
+
+ /* identify vendor */
+ impl_id = CPU_IMPL(mimpid);
+ for (i = 0; i < nitems(cpu_implementers); i++) {
+  if (impl_id == cpu_implementers[i].impl_id) {
+   cpu_desc[cpu].cpu_impl = impl_id;
+   cpu_desc[cpu].cpu_impl_name = cpu_implementers[i].impl_name;
+   cpu_partsp = cpu_implementers[i].impl_partlist;
+   break;
+  }
+ }
+
+ /*
+  * identify part number; part lists end with the CPU_PART_NONE
+  * sentinel (part_id == -1), which bounds the walk.
+  */
+ part_id = CPU_PART(misa);
+ if (cpu_partsp != NULL) {
+  for (i = 0; cpu_partsp[i].part_id != -1; i++) {
+   if (part_id == cpu_partsp[i].part_id) {
+    cpu_desc[cpu].cpu_part_num = part_id;
+    cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name;
+    break;
+   }
+  }
+ }
+
+ /* identify supported isa set */
+ node = OF_finddevice("/cpus");
+ if (node == -1) {
+  printf("fill_elf_hwcap: Can't find cpus node\n");
+  goto out;
+ }
+
+ caps['i'] = caps['I'] = HWCAP_ISA_I;
+ caps['m'] = caps['M'] = HWCAP_ISA_M;
+ caps['a'] = caps['A'] = HWCAP_ISA_A;
+ caps['f'] = caps['F'] = HWCAP_ISA_F;
+ caps['d'] = caps['D'] = HWCAP_ISA_D;
+ caps['c'] = caps['C'] = HWCAP_ISA_C;
+
+ /*
+  * Iterate through the CPUs and examine their ISA string. While we
+  * could assign elf_hwcap to be whatever the boot CPU supports, to
+  * handle the (unusual) case of running a system with hetergeneous
+  * ISAs, keep only the extension bits that are common to all harts.
+  */
+ for (node = OF_child(node); node > 0; node = OF_peer(node)) {
+  /* Skip any non-CPU nodes, such as cpu-map. */
+  if (!OF_is_compatible(node, "riscv"))
+   continue;
+
+  len = OF_getprop(node, "riscv,isa", isa, sizeof(isa));
+  KASSERT(len <= ISA_NAME_MAXLEN);
+  if (len == -1) {
+   printf("Can't find riscv,isa property\n");
+   goto out;
+  } else if (strncmp(isa, ISA_PREFIX, ISA_PREFIX_LEN) != 0) {
+   printf("Unsupported ISA string: %s\n", isa);
+   goto out;
+  }
+
+  hwcap = 0;
+  for (i = ISA_PREFIX_LEN; i < len; i++)
+   hwcap |= caps[(unsigned char)isa[i]];
+
+  if (elf_hwcap != 0)
+   elf_hwcap &= hwcap;
+  else
+   elf_hwcap = hwcap;
+ }
+
+ /* Print details for boot CPU */
+ if (cpu == 0) {
+  printf(": cpu%d: %s %s %s\n", cpu,
+      cpu_desc[cpu].cpu_impl_name,
+      cpu_desc[cpu].cpu_part_name,
+      isa);
+ }
+
+out:
+ /* caps was previously leaked on every call and early return */
+ free(caps, M_DEVBUF, 256 * sizeof(uint64_t));
+}
+
+#if 0//XXX
+int cpu_hatch_secondary(struct cpu_info *ci, int, uint64_t);
+#endif
+int cpu_clockspeed(int *);
+
+/*
+ * Match a node with device_type "cpu".  Secondary harts are only
+ * accepted while there is room; the boot hart is always accepted.
+ */
+int
+cpu_match(struct device *parent, void *cfdata, void *aux)
+{
+ struct fdt_attach_args *faa = aux;
+
+ char buf[32];
+
+ if (OF_getprop(faa->fa_node, "device_type", buf, sizeof(buf)) <= 0 ||
+     strcmp(buf, "cpu") != 0)
+  return 0;
+
+ /* Guard fa_nreg before touching fa_reg[0] (cpu_attach asserts it). */
+ if (ncpus < MAXCPUS ||
+     (faa->fa_nreg > 0 && faa->fa_reg[0].addr == boot_hart))
+  return 1;
+
+ return 0;
+}
+
+/*
+ * Attach a cpu device.  The boot hart uses the statically allocated
+ * cpu_info_primary; under MULTIPROCESSOR, secondary harts get a
+ * fresh cpu_info and are spun up and identified here.
+ */
+void
+cpu_attach(struct device *parent, struct device *dev, void *aux)
+{
+ struct fdt_attach_args *faa = aux;
+ struct cpu_info *ci;
+
+ KASSERT(faa->fa_nreg > 0);
+
+ if (faa->fa_reg[0].addr == boot_hart) {/* the primary cpu */
+ ci = &cpu_info_primary;
+#ifdef MULTIPROCESSOR
+ ci->ci_flags |= CPUF_RUNNING | CPUF_PRESENT | CPUF_PRIMARY;
+#endif
+ }
+#ifdef MULTIPROCESSOR
+ else {
+ /* Secondary hart: allocate and link a new cpu_info. */
+ ci = malloc(sizeof(*ci), M_DEVBUF, M_WAITOK | M_ZERO);
+ cpu_info[dev->dv_unit] = ci;
+ ci->ci_next = cpu_info_list->ci_next;
+ cpu_info_list->ci_next = ci;
+ ci->ci_flags |= CPUF_AP;
+ ncpus++;
+ }
+#endif
+
+ ci->ci_dev = dev;
+ ci->ci_cpuid = dev->dv_unit;
+ ci->ci_node = faa->fa_node;
+ ci->ci_self = ci;
+
+#ifdef MULTIPROCESSOR // XXX TBD: CMPE
+ if (ci->ci_flags & CPUF_AP) {
+ char buf[32];
+ uint64_t spinup_data = 0;
+ int spinup_method = 0;
+ int timeout = 10000;
+ int len;
+
+ /* Determine how this hart is released: psci or spin-table. */
+ len = OF_getprop(ci->ci_node, "enable-method",
+     buf, sizeof(buf));
+ if (strcmp(buf, "psci") == 0) {
+ spinup_method = 1;
+ } else if (strcmp(buf, "spin-table") == 0) {
+ spinup_method = 2;
+ spinup_data = OF_getpropint64(ci->ci_node,
+     "cpu-release-addr", 0);
+ }
+
+ sched_init_cpu(ci);
+ if (cpu_hatch_secondary(ci, spinup_method, spinup_data)) {
+ atomic_setbits_int(&ci->ci_flags, CPUF_IDENTIFY);
+ /*
+  * NOTE(review): "dsb sy; sev" are AArch64 instructions;
+  * RISC-V has no sev -- this cannot assemble if
+  * MULTIPROCESSOR is ever defined; confirm and replace.
+  */
+ __asm volatile("dsb sy; sev");
+
+ /* Wait up to ~10s for the hart to identify itself. */
+ while ((ci->ci_flags & CPUF_IDENTIFIED) == 0 &&
+     --timeout)
+ delay(1000);
+ if (timeout == 0) {
+ printf(" failed to identify");
+ ci->ci_flags = 0;
+ }
+ } else {
+ printf(" failed to spin up");
+ ci->ci_flags = 0;
+ }
+ } else {
+#endif
+ cpu_identify(ci);
+
+ if (OF_getproplen(ci->ci_node, "clocks") > 0) {
+ cpu_node = ci->ci_node;
+ cpu_cpuspeed = cpu_clockspeed;
+ }
+
+ /*
+  * attach cpu-embedded timer
+  * Trick: timer has no fdt node to match,
+  * riscv_timer_match will always return 1 at first call,
+  * and return 0 for all following calls,
+  * therefore, must attach timer before any node
+  */
+ config_found_sm(dev, NULL, NULL, riscv_timer_match);
+ printf("\n");
+
+ /*
+  * attach cpu's children node, so far there is only the
+  * cpu-embedded interrupt controller
+  */
+ struct fdt_attach_args fa_intc;
+ int node;
+ for (node = OF_child(faa->fa_node); node; node = OF_peer(node)) {
+ fa_intc.fa_node = node;
+ /* no specifying match func, will call cfdata's match func*/
+ config_found(dev, &fa_intc, NULL);
+ printf("\n");
+ }
+
+#ifdef MULTIPROCESSOR
+ }
+#endif
+}
+
+/*
+ * Report the CPU clock frequency in MHz, taken from the DT clocks of
+ * cpu_node.
+ */
+int
+cpu_clockspeed(int *freq)
+{
+ int mhz = clock_get_frequency(cpu_node, NULL) / 1000000;
+
+ *freq = mhz;
+ return 0;
+}
--- /dev/null
+/*-
+ * Copyright (c) 2015-2017 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * Portions of this software were developed by SRI International and the
+ * University of Cambridge Computer Laboratory under DARPA/AFRL contract
+ * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Portions of this software were developed by the University of Cambridge
+ * Computer Laboratory as part of the CTSRD Project, with support from the
+ * UK Higher Education Innovation Fund (HEIF).
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+
+ .text
+ .align 2
+
+/* Do-nothing function, usable wherever a no-op callback is needed. */
+ENTRY(riscv_nullop)
+ ret
+END(riscv_nullop)
--- /dev/null
+/*
+ * Copyright (c) 2015 Dale Rahn <drahn@dalerahn.com>
+ * Copyright (c) 2020 Brian Bamsch <bbamsch@google.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "machine/asm.h"
+#include "assym.h"
+
+/*
+ * cpu_switchto(struct proc *oldproc, struct proc *newproc)
+ * a0 'struct proc *' of the old context
+ * a1 'struct proc *' of the new context
+ */
+ENTRY(cpu_switchto)
+ // check if old context needs to be saved
+ beqz a0, 1f
+
+ // create switchframe
+ addi sp, sp, -SWITCHFRAME_SIZEOF
+ sd s0, (SF_S + 0 * 8)(sp)
+ sd s1, (SF_S + 1 * 8)(sp)
+ sd s2, (SF_S + 2 * 8)(sp)
+ sd s3, (SF_S + 3 * 8)(sp)
+ sd s4, (SF_S + 4 * 8)(sp)
+ sd s5, (SF_S + 5 * 8)(sp)
+ sd s6, (SF_S + 6 * 8)(sp)
+ sd s7, (SF_S + 7 * 8)(sp)
+ sd s8, (SF_S + 8 * 8)(sp)
+ sd s9, (SF_S + 9 * 8)(sp)
+ sd s10, (SF_S + 10 * 8)(sp)
+ sd s11, (SF_S + 11 * 8)(sp)
+ sd ra, SF_RA(sp)
+
+ // store switchframe
+ ld a5, CI_CURPCB(tp)
+ sd sp, PCB_SP(a5)
+
+ // XXX store fpu, if necessary
+
+1:
+ RETGUARD_SYMBOL(cpu_switchto)
+ RETGUARD_LOAD_RANDOM(cpu_switchto, s0)
+
+ li a5, SONPROC
+ sb a5, P_STAT(a1) // Mark new on cpu
+ sd tp, P_CPU(a1) // Store curcpu
+ ld a5, P_ADDR(a1) // Load new pcb
+ sd a5, CI_CURPCB(tp)
+ sd a1, CI_CURPROC(tp)
+
+ // Unlike AArch64, RISC-V does not have a dedicated register in which
+ // we can also store pcb_tcb. Supervisor must access tcb indirectly.
+
+ ld s1, PCB_SP(a5) // load new stack pointer
+ mv a0, a1
+ la t0, pmap_set_satp
+ jalr t0
+
+ mv a7, s0 // move retguard random
+ mv sp, s1 // restore stack pointer
+
+ ld s0, (SF_S + 0 * 8)(sp)
+ ld s1, (SF_S + 1 * 8)(sp)
+ ld s2, (SF_S + 2 * 8)(sp)
+ ld s3, (SF_S + 3 * 8)(sp)
+ ld s4, (SF_S + 4 * 8)(sp)
+ ld s5, (SF_S + 5 * 8)(sp)
+ ld s6, (SF_S + 6 * 8)(sp)
+ ld s7, (SF_S + 7 * 8)(sp)
+ ld s8, (SF_S + 8 * 8)(sp)
+ ld s9, (SF_S + 9 * 8)(sp)
+ ld s10, (SF_S + 10 * 8)(sp)
+ ld s11, (SF_S + 11 * 8)(sp)
+ ld ra, SF_RA(sp)
+
+ // XXX restore fpu, if necessary
+
+ RETGUARD_CALC_COOKIE(a7)
+ addi sp, sp, SWITCHFRAME_SIZEOF
+ RETGUARD_CHECK(cpu_switchto, a7)
+ ret
+// END symbol must match the ENTRY symbol (was END(cpu_switch))
+END(cpu_switchto)
+
+
+ENTRY(proc_trampoline)
+#ifdef MULTIPROCESSOR
+ // was "t0 _C_LABEL(...)": the la opcode was missing
+ la t0, _C_LABEL(proc_trampoline_mp)
+ jr t0
+#endif
+ // call it or just set the variable?
+ li a0, IPL_NONE
+ la t0, spllower
+ jalr t0
+ mv a0, s1 // s1: argument -- presumably set up by cpu_fork; confirm
+ jalr s0 // s0: function to call -- presumably set up by cpu_fork
+ la t0, syscall_return
+ jr t0
+// END symbol must match the ENTRY symbol (was END(cpu_switch))
+END(proc_trampoline)
+
--- /dev/null
+/*
+ * Copyright (c) 2016 Dale Rahn <drahn@dalerahn.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/param.h>
+#include <machine/db_machdep.h>
+#include <ddb/db_interface.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_output.h>
+#include <ddb/db_access.h>
+
+#define RV64_MASK0 0xFC00707F /* 11111100000000000111000001111111 */
+#define RV64_MASK1 0x0600007F /* 00000110000000000000000001111111 */
+#define RV64_MASK2 0xFFF0707F /* 11111111111100000111000001111111 */
+#define RV64_MASK3 0xFFF0007F /* 11111111111100000000000001111111 */
+#define RV64_MASK4 0xFE00007F /* 11111110000000000000000001111111 */
+#define RV64_MASK5 0x0C00007F /* 00001100000000000000000001111111 */
+#define RV64_MASK6 0xF800707F /* 11111000000000000111000001111111 */
+#define RV64_MASK7 0xF9F0707F /* 11111001111100000111000001111111 */
+#define RV64_MASK8 0xFE007FFF /* 11111110000000000111111111111111 */
+#define RV64_MASK9 0xFFFFFFFF /* 11111111111111111111111111111111 */
+#define RV64_MASK10 0xF01FFFFF /* 11110000000111111111111111111111 */
+#define RV64_MASK11 0xFE00707F /* 11111110000000000111000001111111 */
+#define RV64_MASK12 0x0000707F /* 00000000000000000111000001111111 */
+#define RV64_MASK13 0x0000007F /* 00000000000000000000000001111111 */
+
+#define RV64I_LUI_OPCODE 0x00000037 /* lui */
+#define RV64I_AUIPC_OPCODE 0x00000017 /* auipc */
+#define RV64I_JAL_OPCODE 0x0000006F /* jal */
+#define RV64I_JALR_OPCODE 0x00000067 /* jalr */
+#define RV64I_BEQ_OPCODE 0x00000063 /* beq */
+#define RV64I_BNE_OPCODE 0x00001063 /* bne */
+#define RV64I_BLT_OPCODE 0x00004063 /* blt */
+#define RV64I_BGE_OPCODE 0x00005063 /* bge */
+#define RV64I_BLTU_OPCODE 0x00006063 /* bltu */
+#define RV64I_BGEU_OPCODE 0x00007063 /* bgeu */
+#define RV64I_LB_OPCODE 0x00000003 /* lb */
+#define RV64I_LH_OPCODE 0x00001003 /* lh */
+#define RV64I_LHU_OPCODE 0x00005003 /* lhu */
+#define RV64I_LW_OPCODE 0x00002003 /* lw */
+#define RV64I_LBU_OPCODE 0x00004003 /* lbu */
+#define RV64I_SB_OPCODE 0x00000023 /* sb */
+#define RV64I_SH_OPCODE 0x00001023 /* sh */
+#define RV64I_SW_OPCODE 0x00002023 /* sw */
+#define RV64I_ADDI_OPCODE 0x00000013 /* addi */
+#define RV64I_SLTI_OPCODE 0x00002013 /* slti */
+#define RV64I_SLTIU_OPCODE 0x00003013 /* sltiu */
+#define RV64I_XORI_OPCODE 0x00004013 /* xori */
+#define RV64I_ORI_OPCODE 0x00006013 /* ori */
+#define RV64I_ANDI_OPCODE 0x00007013 /* andi */
+#define RV64I_ADD_OPCODE 0x00000033 /* add */
+#define RV64I_SUB_OPCODE 0x40000033 /* sub */
+#define RV64I_SLL_OPCODE 0x00001033 /* sll */
+#define RV64I_SLT_OPCODE 0x00002033 /* slt */
+#define RV64I_SLTU_OPCODE 0x00003033 /* sltu */
+#define RV64I_XOR_OPCODE 0x00004033 /* xor */
+#define RV64I_SRL_OPCODE 0x00005033 /* srl */
+#define RV64I_SRA_OPCODE 0x40005033 /* sra */
+#define RV64I_OR_OPCODE 0x00006033 /* or */
+#define RV64I_AND_OPCODE 0x00007033 /* and */
+#define RV64I_FENCE_OPCODE 0x0000000F /* fence */
+#define RV64I_FENCE_I_OPCODE 0x0000100F /* fence.i */
+#define RV64I_WFI_OPCODE 0x10500073 /* wfi */
+#define RV64I_SFENCE_VMA_OPCODE 0x120000E7 /* sfence.vma */
+#define RV64I_ECALL_OPCODE 0x00000073 /* ecall */
+#define RV64I_EBREAK_OPCODE 0x00100073 /* ebreak */
+#define RV64I_CSRRW_OPCODE 0x00001073 /* csrrw */
+#define RV64I_CSRRS_OPCODE 0x00002073 /* csrrs */
+#define RV64I_CSRRC_OPCODE 0x00003073 /* csrrc */
+#define RV64I_CSRRWI_OPCODE 0x00005073 /* csrrwi */
+#define RV64I_CSRRSI_OPCODE 0x00006073 /* csrrsi */
+#define RV64I_CSRRCI_OPCODE 0x00007073 /* csrrci */
+#define RV64I_SRET_OPCODE 0x10200073 /* sret */
+#define RV64I_MRET_OPCODE 0x30200073 /* mret */
+#define RV64M_MUL_OPCODE 0x02000033 /* mul */
+#define RV64M_MULH_OPCODE 0x02001033 /* mulh */
+#define RV64M_MULHSU_OPCODE 0x02002033 /* mulhsu */
+#define RV64M_MULHU_OPCODE 0x02003033 /* mulhu */
+#define RV64M_DIV_OPCODE 0x02004033 /* div */
+#define RV64M_DIVU_OPCODE 0x02005033 /* divu */
+#define RV64M_REM_OPCODE 0x02006033 /* rem */
+#define RV64M_REMU_OPCODE 0x02007033 /* remu */
+#define RV64A_LR_W_OPCODE 0x1000202F /* lr.w */
+#define RV64A_SC_W_OPCODE 0x1800202F /* sc.w */
+#define RV64A_AMOSWAP_W_OPCODE 0x0800202F /* amoswap.w */
+#define RV64A_AMOADD_W_OPCODE 0x0000202F /* amoadd.w */
+#define RV64A_AMOXOR_W_OPCODE 0x2000202F /* amoxor.w */
+#define RV64A_AMOAND_W_OPCODE 0x6000202F /* amoand.w */
+#define RV64A_AMOOR_W_OPCODE 0x4000202F /* amoor.w */
+#define RV64A_AMOMIN_W_OPCODE 0x8000202F /* amomin.w */
+#define RV64A_AMOMAX_W_OPCODE 0xA000202F /* amomax.w */
+#define RV64A_AMOMINU_W_OPCODE 0xC000202F /* amominu.w */
+#define RV64A_AMOMAXU_W_OPCODE 0xE000202F /* amomaxu.w */
+#define RV64F_FLW_OPCODE 0x00002007 /* flw */
+#define RV64F_FSW_OPCODE 0x00002027 /* fsw */
+#define RV64F_FMADD_S_OPCODE 0x00000043 /* fmadd.s */
+#define RV64F_FMSUB_S_OPCODE 0x00000047 /* fmsub.s */
+#define RV64F_FNMSUB_S_OPCODE 0x0000004B /* fnmsub.s */
+#define RV64F_FNMADD_S_OPCODE 0x0000004F /* fnmadd.s */
+#define RV64F_FADD_S_OPCODE 0x00000053 /* fadd.s */
+#define RV64F_FSUB_S_OPCODE 0x08000053 /* fsub.s */
+#define RV64F_FMUL_S_OPCODE 0x10000053 /* fmul.s */
+#define RV64F_FDIV_S_OPCODE 0x18000053 /* fdiv.s */
+#define RV64F_FSQRT_S_OPCODE 0x58000053 /* fsqrt.s */
+#define RV64F_FSGNJ_S_OPCODE 0x20000053 /* fsgnj.s */
+#define RV64F_FSGNJN_S_OPCODE 0x20001053 /* fsgnjn.s */
+#define RV64F_FSGNJX_S_OPCODE 0x20002053 /* fsgnjx.s */
+#define RV64F_FMIN_S_OPCODE 0x28000053 /* fmin.s */
+#define RV64F_FMAX_S_OPCODE 0x28001053 /* fmax.s */
+#define RV64F_FCVT_W_S_OPCODE 0xC0000053 /* fcvt.w.s */
+#define RV64F_FCVT_WU_S_OPCODE 0xC0100053 /* fcvt.wu.s */
+#define RV64F_FMV_X_W_OPCODE 0xE0000053 /* fmv.x.w */
+#define RV64F_FEQ_S_OPCODE 0xA0002053 /* feq.s */
+#define RV64F_FLT_S_OPCODE 0xA0001053 /* flt.s */
+#define RV64F_FLE_S_OPCODE 0xA0000053 /* fle.s */
+#define RV64F_FCLASS_S_OPCODE 0xE0001053 /* fclass.s */
+#define RV64F_FCVT_S_W_OPCODE 0xD0000053 /* fcvt.s.w */
+#define RV64F_FCVT_S_WU_OPCODE 0xD0100053 /* fcvt.s.wu */
+#define RV64F_FMV_W_X_OPCODE 0xF0000053 /* fmv.w.x */
+#define RV64D_FLD_OPCODE 0x00003007 /* fld */
+#define RV64D_FSD_OPCODE 0x00003027 /* fsd */
+#define RV64D_FMADD_D_OPCODE 0x00000043 /* fmadd.d */
+#define RV64D_FMSUB_D_OPCODE 0x00000047 /* fmsub.d */
+#define RV64D_FNMSUB_D_OPCODE 0x0000004B /* fnmsub.d */
+#define RV64D_FNMADD_D_OPCODE 0x0000004F /* fnmadd.d */
+#define RV64D_FADD_D_OPCODE 0x02000053 /* fadd.d */
+#define RV64D_FSUB_D_OPCODE 0x0A000053 /* fsub.d */
+#define RV64D_FMUL_D_OPCODE 0x12000053 /* fmul.d */
+#define RV64D_FDIV_D_OPCODE 0x1A000053 /* fdiv.d */
+#define RV64D_FSQRT_D_OPCODE 0x5A000053 /* fsqrt.d */
+#define RV64D_FSGNJ_D_OPCODE 0x22000053 /* fsgnj.d */
+#define RV64D_FSGNJN_D_OPCODE 0x22001053 /* fsgnjn.d */
+#define RV64D_FSGNJX_D_OPCODE 0x22002053 /* fsgnjx.d */
+#define RV64D_FMIN_D_OPCODE 0x2A000053 /* fmin.d */
+#define RV64D_FMAX_D_OPCODE 0x2A001053 /* fmax.d */
+#define RV64D_FCVT_S_D_OPCODE 0x40100053 /* fcvt.s.d */
+#define RV64D_FCVT_D_S_OPCODE 0x42000053 /* fcvt.d.s */
+#define RV64D_FEQ_D_OPCODE 0xA2002053 /* feq.d */
+#define RV64D_FLT_D_OPCODE 0xA2001053 /* flt.d */
+#define RV64D_FLE_D_OPCODE 0xA2000053 /* fle.d */
+#define RV64D_FCLASS_D_OPCODE 0xE2001053 /* fclass.d */
+#define RV64D_FCVT_W_D_OPCODE 0xC2000053 /* fcvt.w.d */
+#define RV64D_FCVT_WU_D_OPCODE 0xC2100053 /* fcvt.wu.d */
+#define RV64D_FCVT_D_W_OPCODE 0xD2000053 /* fcvt.d.w */
+#define RV64D_FCVT_D_WU_OPCODE 0xD2100053 /* fcvt.d.wu */
+#define RV64I_LWU_OPCODE 0x00006003 /* lwu */
+#define RV64I_LD_OPCODE 0x00003003 /* ld */
+#define RV64I_SD_OPCODE 0x00003023 /* sd */
+#define RV64I_SLLI_OPCODE 0x00001013 /* slli */
+#define RV64I_SRLI_OPCODE 0x00005013 /* srli */
+#define RV64I_SRAI_OPCODE 0x40005013 /* srai */
+#define RV64I_ADDIW_OPCODE 0x0000001B /* addiw */
+#define RV64I_SLLIW_OPCODE 0x0000101B /* slliw */
+#define RV64I_SRLIW_OPCODE 0x0000501B /* srliw */
+#define RV64I_SRAIW_OPCODE 0x4000501B /* sraiw */
+#define RV64I_ADDW_OPCODE 0x0000003B /* addw */
+#define RV64I_SUBW_OPCODE 0x4000003B /* subw */
+#define RV64I_SLLW_OPCODE 0x0000103B /* sllw */
+#define RV64I_SRLW_OPCODE 0x0000503B /* srlw */
+#define RV64I_SRAW_OPCODE 0x4000503B /* sraw */
+#define RV64M_MULW_OPCODE 0x0200003B /* mulw */
+#define RV64M_DIVW_OPCODE 0x0200403B /* divw */
+#define RV64M_DIVUW_OPCODE 0x0200503B /* divuw */
+#define RV64M_REMW_OPCODE 0x0200603B /* remw */
+#define RV64M_REMUW_OPCODE 0x0200703B /* remuw */
+#define RV64A_LR_D_OPCODE 0x1000302F /* lr.d */
+#define RV64A_SC_D_OPCODE 0x1800302F /* sc.d */
+#define RV64A_AMOSWAP_D_OPCODE 0x0800302F /* amoswap.d */
+#define RV64A_AMOADD_D_OPCODE 0x0000302F /* amoadd.d */
+#define RV64A_AMOXOR_D_OPCODE 0x2000302F /* amoxor.d */
+#define RV64A_AMOAND_D_OPCODE 0x6000302F /* amoand.d */
+#define RV64A_AMOOR_D_OPCODE 0x4000302F /* amoor.d */
+#define RV64A_AMOMIN_D_OPCODE 0x8000302F /* amomin.d */
+#define RV64A_AMOMAX_D_OPCODE 0xA000302F /* amomax.d */
+#define RV64A_AMOMINU_D_OPCODE 0xC000302F /* amominu.d */
+#define RV64A_AMOMAXU_D_OPCODE 0xE000302F /* amomaxu.d */
+#define RV64F_FCVT_L_S_OPCODE 0xC0200053 /* fcvt.l.s */
+#define RV64F_FCVT_LU_S_OPCODE 0xC0300053 /* fcvt.lu.s */
+#define RV64F_FCVT_S_L_OPCODE 0xD0200053 /* fcvt.s.l */
+#define RV64F_FCVT_S_LU_OPCODE 0xD0300053 /* fcvt.s.lu */
+#define RV64D_FCVT_L_D_OPCODE 0xC2200053 /* fcvt.l.d */
+#define RV64D_FCVT_LU_D_OPCODE 0xC2300053 /* fcvt.lu.d */
+#define RV64D_FMV_X_D_OPCODE 0xE2000053 /* fmv.x.d */
+#define RV64D_FCVT_D_L_OPCODE 0xD2200053 /* fcvt.d.l */
+#define RV64D_FCVT_D_LU_OPCODE 0xD2300053 /* fcvt.d.lu */
+#define RV64D_FMV_D_X_OPCODE 0xF2000053 /* fmv.d.x */
+#define RV64Q_URET_OPCODE 0x00200073 /* uret */
+#define RV64Q_DRET_OPCODE 0x7B200073 /* dret */
+#define RV64Q_FADD_Q_OPCODE 0x06000053 /* fadd.q */
+#define RV64Q_FSUB_Q_OPCODE 0x0E000053 /* fsub.q */
+#define RV64Q_FMUL_Q_OPCODE 0x16000053 /* fmul.q */
+#define RV64Q_FDIV_Q_OPCODE 0x1E000053 /* fdiv.q */
+#define RV64Q_FSGNJ_Q_OPCODE 0x26000053 /* fsgnj.q */
+#define RV64Q_FSGNJN_Q_OPCODE 0x26001053 /* fsgnjn.q */
+#define RV64Q_FSGNJX_Q_OPCODE 0x26002053 /* fsgnjx.q */
+#define RV64Q_FMIN_Q_OPCODE 0x2E000053 /* fmin.q */
+#define RV64Q_FMAX_Q_OPCODE 0x2E001053 /* fmax.q */
+#define RV64Q_FCVT_S_Q_OPCODE 0x40300053 /* fcvt.s.q */
+#define RV64Q_FCVT_Q_S_OPCODE 0x46000053 /* fcvt.q.s */
+#define RV64Q_FCVT_D_Q_OPCODE 0x42300053 /* fcvt.d.q */
+#define RV64Q_FCVT_Q_D_OPCODE 0x46100053 /* fcvt.q.d */
+#define RV64Q_FSQRT_Q_OPCODE 0x5E000053 /* fsqrt.q */
+#define RV64Q_FLE_Q_OPCODE 0xA6000053 /* fle.q */
+#define RV64Q_FLT_Q_OPCODE 0xA6001053 /* flt.q */
+#define RV64Q_FEQ_Q_OPCODE 0xA6002053 /* feq.q */
+#define RV64Q_FCVT_W_Q_OPCODE 0xC6000053 /* fcvt.w.q */
+#define RV64Q_FCVT_WU_Q_OPCODE 0xC6100053 /* fcvt.wu.q */
+#define RV64Q_FCVT_L_Q_OPCODE 0xC6200053 /* fcvt.l.q */
+#define RV64Q_FCVT_LU_Q_OPCODE 0xC6300053 /* fcvt.lu.q */
+#define RV64Q_FMV_X_Q_OPCODE 0xE6000053 /* fmv.x.q */
+#define RV64Q_FCLASS_Q_OPCODE 0xE6001053 /* fclass.q */
+#define RV64Q_FCVT_Q_W_OPCODE 0xD6000053 /* fcvt.q.w */
+#define RV64Q_FCVT_Q_WU_OPCODE 0xD6100053 /* fcvt.q.wu */
+#define RV64Q_FCVT_Q_L_OPCODE 0xD6200053 /* fcvt.q.l */
+#define RV64Q_FCVT_Q_LU_OPCODE 0xD6300053 /* fcvt.q.lu */
+#define RV64Q_FMV_Q_X_OPCODE 0xF6000053 /* fmv.q.x */
+#define RV64Q_FLQ_OPCODE 0x00004007 /* flq */
+#define RV64Q_FSQ_OPCODE 0x00004027 /* fsq */
+#define RV64Q_FMADD_Q_OPCODE 0x06000043 /* fmadd.q */
+#define RV64Q_FMSUB_Q_OPCODE 0x06000047 /* fmsub.q */
+#define RV64Q_FNMSUB_Q_OPCODE 0x0600004B /* fnmsub.q */
+#define RV64Q_FNMADD_Q_OPCODE 0x0600004F /* fnmadd.q */
+
+struct rv64_op {
+ char *opcode;
+ uint32_t num_op;
+ uint32_t num_mask;
+} rv64_opcodes[] = {
+ { "lui", RV64I_LUI_OPCODE, RV64_MASK13 },
+ { "auipc", RV64I_AUIPC_OPCODE, RV64_MASK13 },
+ { "jal", RV64I_JAL_OPCODE, RV64_MASK13 },
+ { "jalr", RV64I_JALR_OPCODE, RV64_MASK12 },
+ { "beq", RV64I_BEQ_OPCODE, RV64_MASK12 },
+ { "bne", RV64I_BNE_OPCODE, RV64_MASK12 },
+ { "blt", RV64I_BLT_OPCODE, RV64_MASK12 },
+ { "bge", RV64I_BGE_OPCODE, RV64_MASK12 },
+ { "bltu", RV64I_BLTU_OPCODE, RV64_MASK12 },
+ { "bgeu", RV64I_BGEU_OPCODE, RV64_MASK12 },
+ { "lb", RV64I_LB_OPCODE, RV64_MASK12 },
+ { "lh", RV64I_LH_OPCODE, RV64_MASK12 },
+ { "lhu", RV64I_LHU_OPCODE, RV64_MASK12 },
+ { "lw", RV64I_LW_OPCODE, RV64_MASK12 },
+ { "lbu", RV64I_LBU_OPCODE, RV64_MASK12 },
+ { "sb", RV64I_SB_OPCODE, RV64_MASK12 },
+ { "sh", RV64I_SH_OPCODE, RV64_MASK12 },
+ { "sw", RV64I_SW_OPCODE, RV64_MASK12 },
+ { "addi", RV64I_ADDI_OPCODE, RV64_MASK12 },
+ { "slti", RV64I_SLTI_OPCODE, RV64_MASK12 },
+ { "sltiu", RV64I_SLTIU_OPCODE, RV64_MASK12 },
+ { "xori", RV64I_XORI_OPCODE, RV64_MASK12 },
+ { "ori", RV64I_ORI_OPCODE, RV64_MASK12 },
+ { "andi", RV64I_ANDI_OPCODE, RV64_MASK12 },
+ { "add", RV64I_ADD_OPCODE, RV64_MASK11 },
+ { "sub", RV64I_SUB_OPCODE, RV64_MASK11 },
+ { "sll", RV64I_SLL_OPCODE, RV64_MASK11 },
+ { "slt", RV64I_SLT_OPCODE, RV64_MASK11 },
+ { "sltu", RV64I_SLTU_OPCODE, RV64_MASK11 },
+ { "xor", RV64I_XOR_OPCODE, RV64_MASK11 },
+ { "srl", RV64I_SRL_OPCODE, RV64_MASK11 },
+ { "sra", RV64I_SRA_OPCODE, RV64_MASK11 },
+ { "or", RV64I_OR_OPCODE, RV64_MASK11 },
+ { "and", RV64I_AND_OPCODE, RV64_MASK11 },
+ { "fence", RV64I_FENCE_OPCODE, RV64_MASK10 },
+ { "fence.i", RV64I_FENCE_I_OPCODE, RV64_MASK9 },
+ { "wfi", RV64I_WFI_OPCODE, RV64_MASK9 },
+ { "sfence.vma", RV64I_SFENCE_VMA_OPCODE, RV64_MASK8 },
+ { "ecall", RV64I_ECALL_OPCODE, RV64_MASK9 },
+ { "ebreak", RV64I_EBREAK_OPCODE, RV64_MASK9 },
+ { "csrrw", RV64I_CSRRW_OPCODE, RV64_MASK12 },
+ { "csrrs", RV64I_CSRRS_OPCODE, RV64_MASK12 },
+ { "csrrc", RV64I_CSRRC_OPCODE, RV64_MASK12 },
+ { "csrrwi", RV64I_CSRRWI_OPCODE, RV64_MASK12 },
+ { "csrrsi", RV64I_CSRRSI_OPCODE, RV64_MASK12 },
+ { "csrrci", RV64I_CSRRCI_OPCODE, RV64_MASK12 },
+ { "sret", RV64I_SRET_OPCODE, RV64_MASK9 },
+ { "mret", RV64I_MRET_OPCODE, RV64_MASK9 },
+ { "mul", RV64M_MUL_OPCODE, RV64_MASK11 },
+ { "mulh", RV64M_MULH_OPCODE, RV64_MASK11 },
+ { "mulhsu", RV64M_MULHSU_OPCODE, RV64_MASK11 },
+ { "mulhu", RV64M_MULHU_OPCODE, RV64_MASK11 },
+ { "div", RV64M_DIV_OPCODE, RV64_MASK11 },
+ { "divu", RV64M_DIVU_OPCODE, RV64_MASK11 },
+ { "rem", RV64M_REM_OPCODE, RV64_MASK11 },
+ { "remu", RV64M_REMU_OPCODE, RV64_MASK11 },
+ { "lr.w", RV64A_LR_W_OPCODE, RV64_MASK7 },
+ { "sc.w", RV64A_SC_W_OPCODE, RV64_MASK6 },
+ { "amoswap.w", RV64A_AMOSWAP_W_OPCODE, RV64_MASK6 },
+ { "amoadd.w", RV64A_AMOADD_W_OPCODE, RV64_MASK6 },
+ { "amoxor.w", RV64A_AMOXOR_W_OPCODE, RV64_MASK6 },
+ { "amoand.w", RV64A_AMOAND_W_OPCODE, RV64_MASK6 },
+ { "amoor.w", RV64A_AMOOR_W_OPCODE, RV64_MASK6 },
+ { "amomin.w", RV64A_AMOMIN_W_OPCODE, RV64_MASK6 },
+ { "amomax.w", RV64A_AMOMAX_W_OPCODE, RV64_MASK6 },
+ { "amominu.w", RV64A_AMOMINU_W_OPCODE, RV64_MASK6 },
+ { "amomaxu.w", RV64A_AMOMAXU_W_OPCODE, RV64_MASK6 },
+ { "flw", RV64F_FLW_OPCODE, RV64_MASK12 },
+ { "fsw", RV64F_FSW_OPCODE, RV64_MASK12 },
+ { "fmadd.s", RV64F_FMADD_S_OPCODE, RV64_MASK5 },
+ { "fmsub.s", RV64F_FMSUB_S_OPCODE, RV64_MASK5 },
+ { "fnmsub.s", RV64F_FNMSUB_S_OPCODE, RV64_MASK5 },
+ { "fnmadd.s", RV64F_FNMADD_S_OPCODE, RV64_MASK5 },
+ { "fadd.s", RV64F_FADD_S_OPCODE, RV64_MASK4 },
+ { "fsub.s", RV64F_FSUB_S_OPCODE, RV64_MASK4 },
+ { "fmul.s", RV64F_FMUL_S_OPCODE, RV64_MASK4 },
+ { "fdiv.s", RV64F_FDIV_S_OPCODE, RV64_MASK4 },
+ { "fsqrt.s", RV64F_FSQRT_S_OPCODE, RV64_MASK3 },
+ { "fsgnj.s", RV64F_FSGNJ_S_OPCODE, RV64_MASK11 },
+ { "fsgnjn.s", RV64F_FSGNJN_S_OPCODE, RV64_MASK11 },
+ { "fsgnjx.s", RV64F_FSGNJX_S_OPCODE, RV64_MASK11 },
+ { "fmin.s", RV64F_FMIN_S_OPCODE, RV64_MASK11 },
+ { "fmax.s", RV64F_FMAX_S_OPCODE, RV64_MASK11 },
+ { "fcvt.w.s", RV64F_FCVT_W_S_OPCODE, RV64_MASK3 },
+ { "fcvt.wu.s", RV64F_FCVT_WU_S_OPCODE, RV64_MASK3 },
+ { "fmv.x.w", RV64F_FMV_X_W_OPCODE, RV64_MASK2 },
+ { "feq.s", RV64F_FEQ_S_OPCODE, RV64_MASK11 },
+ { "flt.s", RV64F_FLT_S_OPCODE, RV64_MASK11 },
+ { "fle.s", RV64F_FLE_S_OPCODE, RV64_MASK11 },
+ { "fclass.s", RV64F_FCLASS_S_OPCODE, RV64_MASK2 },
+ { "fcvt.s.w", RV64F_FCVT_S_W_OPCODE, RV64_MASK3 },
+ { "fcvt.s.wu", RV64F_FCVT_S_WU_OPCODE, RV64_MASK3 },
+ { "fmv.w.x", RV64F_FMV_W_X_OPCODE, RV64_MASK2 },
+ { "fld", RV64D_FLD_OPCODE, RV64_MASK12 },
+ { "fsd", RV64D_FSD_OPCODE, RV64_MASK12 },
+ { "fmadd.d", RV64D_FMADD_D_OPCODE, RV64_MASK1 },
+ { "fmsub.d", RV64D_FMSUB_D_OPCODE, RV64_MASK1 },
+ { "fnmsub.d", RV64D_FNMSUB_D_OPCODE, RV64_MASK1 },
+ { "fnmadd.d", RV64D_FNMADD_D_OPCODE, RV64_MASK1 },
+ { "fadd.d", RV64D_FADD_D_OPCODE, RV64_MASK4 },
+ { "fsub.d", RV64D_FSUB_D_OPCODE, RV64_MASK4 },
+ { "fmul.d", RV64D_FMUL_D_OPCODE, RV64_MASK4 },
+ { "fdiv.d", RV64D_FDIV_D_OPCODE, RV64_MASK4 },
+ { "fsqrt.d", RV64D_FSQRT_D_OPCODE, RV64_MASK3 },
+ { "fsgnj.d", RV64D_FSGNJ_D_OPCODE, RV64_MASK11 },
+ { "fsgnjn.d", RV64D_FSGNJN_D_OPCODE, RV64_MASK11 },
+ { "fsgnjx.d", RV64D_FSGNJX_D_OPCODE, RV64_MASK11 },
+ { "fmin.d", RV64D_FMIN_D_OPCODE, RV64_MASK11 },
+ { "fmax.d", RV64D_FMAX_D_OPCODE, RV64_MASK11 },
+ { "fcvt.s.d", RV64D_FCVT_S_D_OPCODE, RV64_MASK3 },
+ { "fcvt.d.s", RV64D_FCVT_D_S_OPCODE, RV64_MASK3 },
+ { "feq.d", RV64D_FEQ_D_OPCODE, RV64_MASK11 },
+ { "flt.d", RV64D_FLT_D_OPCODE, RV64_MASK11 },
+ { "fle.d", RV64D_FLE_D_OPCODE, RV64_MASK11 },
+ { "fclass.d", RV64D_FCLASS_D_OPCODE, RV64_MASK2 },
+ { "fcvt.w.d", RV64D_FCVT_W_D_OPCODE, RV64_MASK3 },
+ { "fcvt.wu.d", RV64D_FCVT_WU_D_OPCODE, RV64_MASK3 },
+ { "fcvt.d.w", RV64D_FCVT_D_W_OPCODE, RV64_MASK3 },
+ { "fcvt.d.wu", RV64D_FCVT_D_WU_OPCODE, RV64_MASK3 },
+ { "lwu", RV64I_LWU_OPCODE, RV64_MASK12 },
+ { "ld", RV64I_LD_OPCODE, RV64_MASK12 },
+ { "sd", RV64I_SD_OPCODE, RV64_MASK12 },
+ { "slli", RV64I_SLLI_OPCODE, RV64_MASK0 },
+ { "srli", RV64I_SRLI_OPCODE, RV64_MASK0 },
+ { "srai", RV64I_SRAI_OPCODE, RV64_MASK0 },
+ { "addiw", RV64I_ADDIW_OPCODE, RV64_MASK12 },
+ { "slliw", RV64I_SLLIW_OPCODE, RV64_MASK11 },
+ { "srliw", RV64I_SRLIW_OPCODE, RV64_MASK11 },
+ { "sraiw", RV64I_SRAIW_OPCODE, RV64_MASK11 },
+ { "addw", RV64I_ADDW_OPCODE, RV64_MASK11 },
+ { "subw", RV64I_SUBW_OPCODE, RV64_MASK11 },
+ { "sllw", RV64I_SLLW_OPCODE, RV64_MASK11 },
+ { "srlw", RV64I_SRLW_OPCODE, RV64_MASK11 },
+ { "sraw", RV64I_SRAW_OPCODE, RV64_MASK11 },
+ { "mulw", RV64M_MULW_OPCODE, RV64_MASK11 },
+ { "divw", RV64M_DIVW_OPCODE, RV64_MASK11 },
+ { "divuw", RV64M_DIVUW_OPCODE, RV64_MASK11 },
+ { "remw", RV64M_REMW_OPCODE, RV64_MASK11 },
+ { "remuw", RV64M_REMUW_OPCODE, RV64_MASK11 },
+ { "lr.d", RV64A_LR_D_OPCODE, RV64_MASK7 },
+ { "sc.d", RV64A_SC_D_OPCODE, RV64_MASK6 },
+ { "amoswap.d", RV64A_AMOSWAP_D_OPCODE, RV64_MASK6 },
+ { "amoadd.d", RV64A_AMOADD_D_OPCODE, RV64_MASK6 },
+ { "amoxor.d", RV64A_AMOXOR_D_OPCODE, RV64_MASK6 },
+ { "amoand.d", RV64A_AMOAND_D_OPCODE, RV64_MASK6 },
+ { "amoor.d", RV64A_AMOOR_D_OPCODE, RV64_MASK6 },
+ { "amomin.d", RV64A_AMOMIN_D_OPCODE, RV64_MASK6 },
+ { "amomax.d", RV64A_AMOMAX_D_OPCODE, RV64_MASK6 },
+ { "amominu.d", RV64A_AMOMINU_D_OPCODE, RV64_MASK6 },
+ { "amomaxu.d", RV64A_AMOMAXU_D_OPCODE, RV64_MASK6 },
+ { "fcvt.l.s", RV64F_FCVT_L_S_OPCODE, RV64_MASK3 },
+ { "fcvt.lu.s", RV64F_FCVT_LU_S_OPCODE, RV64_MASK3 },
+ { "fcvt.s.l", RV64F_FCVT_S_L_OPCODE, RV64_MASK3 },
+ { "fcvt.s.lu", RV64F_FCVT_S_LU_OPCODE, RV64_MASK3 },
+ { "fcvt.l.d", RV64D_FCVT_L_D_OPCODE, RV64_MASK3 },
+ { "fcvt.lu.d", RV64D_FCVT_LU_D_OPCODE, RV64_MASK3 },
+ { "fmv.x.d", RV64D_FMV_X_D_OPCODE, RV64_MASK2 },
+ { "fcvt.d.l", RV64D_FCVT_D_L_OPCODE, RV64_MASK3 },
+ { "fcvt.d.lu", RV64D_FCVT_D_LU_OPCODE, RV64_MASK3 },
+ { "fmv.d.x", RV64D_FMV_D_X_OPCODE, RV64_MASK2 },
+ { "uret", RV64Q_URET_OPCODE, RV64_MASK9 },
+ { "dret", RV64Q_DRET_OPCODE, RV64_MASK9 },
+ { "fadd.q", RV64Q_FADD_Q_OPCODE, RV64_MASK4 },
+ { "fsub.q", RV64Q_FSUB_Q_OPCODE, RV64_MASK4 },
+ { "fmul.q", RV64Q_FMUL_Q_OPCODE, RV64_MASK4 },
+ { "fdiv.q", RV64Q_FDIV_Q_OPCODE, RV64_MASK4 },
+ { "fsgnj.q", RV64Q_FSGNJ_Q_OPCODE, RV64_MASK11 },
+ { "fsgnjn.q", RV64Q_FSGNJN_Q_OPCODE, RV64_MASK11 },
+ { "fsgnjx.q", RV64Q_FSGNJX_Q_OPCODE, RV64_MASK11 },
+ { "fmin.q", RV64Q_FMIN_Q_OPCODE, RV64_MASK11 },
+ { "fmax.q", RV64Q_FMAX_Q_OPCODE, RV64_MASK11 },
+ { "fcvt.s.q", RV64Q_FCVT_S_Q_OPCODE, RV64_MASK3 },
+ { "fcvt.q.s", RV64Q_FCVT_Q_S_OPCODE, RV64_MASK3 },
+ { "fcvt.d.q", RV64Q_FCVT_D_Q_OPCODE, RV64_MASK3 },
+ { "fcvt.q.d", RV64Q_FCVT_Q_D_OPCODE, RV64_MASK3 },
+ { "fsqrt.q", RV64Q_FSQRT_Q_OPCODE, RV64_MASK3 },
+ { "fle.q", RV64Q_FLE_Q_OPCODE, RV64_MASK11 },
+ { "flt.q", RV64Q_FLT_Q_OPCODE, RV64_MASK11 },
+ { "feq.q", RV64Q_FEQ_Q_OPCODE, RV64_MASK11 },
+ { "fcvt.w.q", RV64Q_FCVT_W_Q_OPCODE, RV64_MASK3 },
+ { "fcvt.wu.q", RV64Q_FCVT_WU_Q_OPCODE, RV64_MASK3 },
+ { "fcvt.l.q", RV64Q_FCVT_L_Q_OPCODE, RV64_MASK3 },
+ { "fcvt.lu.q", RV64Q_FCVT_LU_Q_OPCODE, RV64_MASK3 },
+ { "fmv.x.q", RV64Q_FMV_X_Q_OPCODE, RV64_MASK2 },
+ { "fclass.q", RV64Q_FCLASS_Q_OPCODE, RV64_MASK2 },
+ { "fcvt.q.w", RV64Q_FCVT_Q_W_OPCODE, RV64_MASK3 },
+ { "fcvt.q.wu", RV64Q_FCVT_Q_WU_OPCODE, RV64_MASK3 },
+ { "fcvt.q.l", RV64Q_FCVT_Q_L_OPCODE, RV64_MASK3 },
+ { "fcvt.q.lu", RV64Q_FCVT_Q_LU_OPCODE, RV64_MASK3 },
+ { "fmv.q.x", RV64Q_FMV_Q_X_OPCODE, RV64_MASK2 },
+ { "flq", RV64Q_FLQ_OPCODE, RV64_MASK12 },
+ { "fsq", RV64Q_FSQ_OPCODE, RV64_MASK12 },
+ { "fmadd.q", RV64Q_FMADD_Q_OPCODE, RV64_MASK1 },
+ { "fmsub.q", RV64Q_FMSUB_Q_OPCODE, RV64_MASK1 },
+ { "fnmsub.q", RV64Q_FNMSUB_Q_OPCODE, RV64_MASK1 },
+ { "fnmadd.q", RV64Q_FNMADD_Q_OPCODE, RV64_MASK1 },
+ { NULL, 0, 0 }
+};
+
+/*
+ * Print the mnemonic of the instruction at loc and return the
+ * address of the following instruction.  altfmt is ignored.
+ */
+vaddr_t
+db_disasm(vaddr_t loc, int altfmt)
+{
+ uint32_t insn;
+ struct rv64_op *op;
+
+ db_read_bytes(loc, sizeof(insn), (char *)&insn);
+
+ /* Linear scan of the opcode table; first match wins. */
+ for (op = rv64_opcodes; op->opcode != NULL; op++) {
+ if ((insn & op->num_mask) != op->num_op)
+ continue;
+ db_printf("%s\n", op->opcode);
+ return loc + 4;
+ }
+
+ /*
+ * No 32-bit encoding matched; pretend it is a 16-bit
+ * compressed instruction then (for now) and step over it.
+ */
+ db_printf("[not displaying compressed instruction]\n");
+ return loc + 2;
+}
--- /dev/null
+/*
+ * Copyright (c) 1996 Scott K. Stevens
+ *
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ *
+ * From: db_interface.c,v 2.4 1991/02/05 17:11:13 mrt (CMU)
+ */
+
+/*
+ * Interface to new debugger.
+ */
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/reboot.h>
+#include <sys/exec.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <machine/db_machdep.h>
+#include <ddb/db_access.h>
+#include <ddb/db_command.h>
+#include <ddb/db_output.h>
+#include <ddb/db_run.h>
+#include <ddb/db_variables.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_extern.h>
+#include <ddb/db_interface.h>
+#include <dev/cons.h>
+
+
+/*
+ * ddb-visible machine registers x1..x31 (x0 is the hardwired zero
+ * register and is omitted).  Each entry points into the saved
+ * trapframe through DDB_REGS.
+ */
+struct db_variable db_regs[] = {
+ { "ra", (long *)&DDB_REGS->tf_ra, FCN_NULL, }, /* x1 */
+ { "sp", (long *)&DDB_REGS->tf_sp, FCN_NULL, }, /* x2 */
+ { "gp", (long *)&DDB_REGS->tf_gp, FCN_NULL, }, /* x3 */
+ { "tp", (long *)&DDB_REGS->tf_tp, FCN_NULL, }, /* x4 */
+ { "t0", (long *)&DDB_REGS->tf_t[0], FCN_NULL, }, /* x5 */
+ { "t1", (long *)&DDB_REGS->tf_t[1], FCN_NULL, }, /* x6 */
+ { "t2", (long *)&DDB_REGS->tf_t[2], FCN_NULL, }, /* x7 */
+ { "s0", (long *)&DDB_REGS->tf_s[0], FCN_NULL, }, /* x8 */
+ { "s1", (long *)&DDB_REGS->tf_s[1], FCN_NULL, }, /* x9 */
+ { "a0", (long *)&DDB_REGS->tf_a[0], FCN_NULL, }, /* x10 */
+ { "a1", (long *)&DDB_REGS->tf_a[1], FCN_NULL, }, /* x11 */
+ { "a2", (long *)&DDB_REGS->tf_a[2], FCN_NULL, }, /* x12 */
+ { "a3", (long *)&DDB_REGS->tf_a[3], FCN_NULL, }, /* x13 */
+ { "a4", (long *)&DDB_REGS->tf_a[4], FCN_NULL, }, /* x14 */
+ { "a5", (long *)&DDB_REGS->tf_a[5], FCN_NULL, }, /* x15 */
+ { "a6", (long *)&DDB_REGS->tf_a[6], FCN_NULL, }, /* x16 */
+ { "a7", (long *)&DDB_REGS->tf_a[7], FCN_NULL, }, /* x17 */
+ { "s2", (long *)&DDB_REGS->tf_s[2], FCN_NULL, }, /* x18 */
+ { "s3", (long *)&DDB_REGS->tf_s[3], FCN_NULL, }, /* x19 */
+ { "s4", (long *)&DDB_REGS->tf_s[4], FCN_NULL, }, /* x20 */
+ { "s5", (long *)&DDB_REGS->tf_s[5], FCN_NULL, }, /* x21 */
+ { "s6", (long *)&DDB_REGS->tf_s[6], FCN_NULL, }, /* x22 */
+ { "s7", (long *)&DDB_REGS->tf_s[7], FCN_NULL, }, /* x23 */
+ { "s8", (long *)&DDB_REGS->tf_s[8], FCN_NULL, }, /* x24 */
+ { "s9", (long *)&DDB_REGS->tf_s[9], FCN_NULL, }, /* x25 */
+ { "s10", (long *)&DDB_REGS->tf_s[10], FCN_NULL, }, /* x26 */
+ { "s11", (long *)&DDB_REGS->tf_s[11], FCN_NULL, }, /* x27 */
+ { "t3", (long *)&DDB_REGS->tf_t[3], FCN_NULL, }, /* x28 */
+ { "t4", (long *)&DDB_REGS->tf_t[4], FCN_NULL, }, /* x29 */
+ { "t5", (long *)&DDB_REGS->tf_t[5], FCN_NULL, }, /* x30 */
+ { "t6", (long *)&DDB_REGS->tf_t[6], FCN_NULL, } /* x31 */
+};
+
+extern label_t *db_recover;
+
+struct db_variable * db_eregs = db_regs + nitems(db_regs);
+
+#ifdef DDB
+/*
+ * kdb_trap - field a TRACE or BPT trap
+ */
+/*
+ * kdb_trap - field a TRACE or BPT trap.  Snapshots the register
+ * state into ddb_regs, runs the ddb command loop with the console
+ * in polling mode, then copies the (possibly modified) registers
+ * back.  Returns 1 to indicate the trap was handled.
+ */
+int
+kdb_trap(int type, db_regs_t *regs)
+{
+ int spl;
+
+ /* Anything but a breakpoint or keyboard interrupt (-1) ... */
+ if (type != T_BREAKPOINT && type != -1) {
+ /* ... that happens while inside ddb is a ddb fault. */
+ if (db_recover != 0) {
+ db_error("Faulted in DDB; continuing...\n");
+ /* NOTREACHED */
+ }
+ }
+
+ ddb_regs = *regs;
+
+ spl = splhigh();
+ db_active++;
+ cnpollc(1);
+ db_trap(type, 0/*code*/);
+ cnpollc(0);
+ db_active--;
+ splx(spl);
+
+ *regs = ddb_regs;
+
+ return (1);
+}
+#endif
+
+#define INKERNEL(va) (((vaddr_t)(va)) & (1ULL << 63))
+
+static int db_validate_address(vaddr_t addr);
+
+/*
+ * Return non-zero if addr has no valid mapping in the pmap ddb
+ * would use for it: the kernel pmap for kernel addresses (or when
+ * there is no usable current process), otherwise the pmap of the
+ * current process.
+ */
+static int
+db_validate_address(vaddr_t addr)
+{
+ struct proc *p = curproc;
+ struct pmap *pmap = pmap_kernel();
+
+ if (!INKERNEL(addr) && p && p->p_vmspace &&
+ p->p_vmspace->vm_map.pmap)
+ pmap = p->p_vmspace->vm_map.pmap;
+
+ return (pmap_extract(pmap, addr, NULL) == FALSE);
+}
+
+/*
+ * Read bytes from kernel (or current process) address space for the
+ * debugger.  Source addresses are validated before being touched so
+ * a bad "examine" command does not fault the kernel; on an invalid
+ * address the read is abandoned with a diagnostic (data may then be
+ * only partially filled).
+ */
+void
+db_read_bytes(db_addr_t addr, size_t size, char *data)
+{
+ char *src = (char *)addr;
+
+ if (db_validate_address((vaddr_t)src)) {
+ db_printf("address %p is invalid\n", src);
+ return;
+ }
+
+ /*
+ * Mutually aligned 8/4/2-byte requests are performed as a
+ * single load of that width.  An aligned access cannot cross
+ * a page boundary, so the single validation above suffices.
+ */
+ if (size == 8 && (addr & 7) == 0 && ((vaddr_t)data & 7) == 0) {
+ *((uint64_t*)data) = *((uint64_t*)src);
+ return;
+ }
+
+ if (size == 4 && (addr & 3) == 0 && ((vaddr_t)data & 3) == 0) {
+ *((int*)data) = *((int*)src);
+ return;
+ }
+
+ if (size == 2 && (addr & 1) == 0 && ((vaddr_t)data & 1) == 0) {
+ *((short*)data) = *((short*)src);
+ return;
+ }
+
+ /* Byte-wise fallback; revalidate every byte (may cross pages). */
+ while (size-- > 0) {
+ if (db_validate_address((vaddr_t)src)) {
+ db_printf("address %p is invalid\n", src);
+ return;
+ }
+ *data++ = *src++;
+ }
+}
+
+
+/*
+ * Write bytes to kernel address space for debugger.
+ * Not implemented yet on riscv64: requests are silently ignored,
+ * so ddb write commands have no effect.
+ */
+void
+db_write_bytes(db_addr_t addr, size_t size, char *data)
+{
+ // XXX
+}
+
+/*
+ * Enter the debugger from C code by raising a breakpoint exception
+ * with the ebreak instruction (fielded by db_trapper()).
+ */
+void
+db_enter(void)
+{
+ asm("ebreak");
+}
+
+/* Machine-dependent ddb commands; none yet, table terminator only. */
+struct db_command db_machine_command_table[] = {
+ { NULL, NULL, 0, NULL }
+};
+
+/*
+ * Trap hook: dispatch debugger-related exceptions into ddb.
+ * A breakpoint exception enters ddb and then skips the ebreak by
+ * advancing sepc; any other fault code enters ddb as a keyboard
+ * interrupt (-1).  Always returns 0.
+ *
+ * NOTE(review): sepc is advanced by 4 unconditionally, which would
+ * mis-step a 2-byte c.ebreak -- confirm compressed breakpoints
+ * cannot reach here.
+ */
+int
+db_trapper(vaddr_t addr, u_int inst, trapframe_t *frame, int fault_code)
+{
+
+ if (fault_code == EXCP_BREAKPOINT) {
+ kdb_trap(T_BREAKPOINT, frame);
+ frame->tf_sepc += 4;
+ } else
+ kdb_trap(-1, frame);
+
+ return (0);
+}
+
+
+extern vaddr_t esym;
+extern vaddr_t end;
+
+/*
+ * One-time machine-dependent ddb setup: register the (currently
+ * empty) machine command table with the ddb command loop.
+ */
+void
+db_machine_init(void)
+{
+ db_machine_commands_install(db_machine_command_table);
+}
+
+/*
+ * Return the address execution continues at if the instruction at
+ * pc is a taken branch.  Not implemented: currently assumes
+ * straight-line execution of 4-byte instructions.
+ */
+db_addr_t
+db_branch_taken(u_int insn, db_addr_t pc, db_regs_t *db_regs)
+{
+ // XXX
+ return pc + 4;
+}
--- /dev/null
+/*
+ * Copyright (c) 2000, 2001 Ben Harris
+ * Copyright (c) 1996 Scott K. Stevens
+ *
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#include <sys/param.h>
+
+#include <sys/proc.h>
+#include <sys/stacktrace.h>
+#include <sys/user.h>
+#include <machine/db_machdep.h>
+
+#include <ddb/db_access.h>
+#include <ddb/db_interface.h>
+#include <ddb/db_variables.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_output.h>
+
+db_regs_t ddb_regs;
+
+#define INKERNEL(va) (((vaddr_t)(va)) & (1ULL << 63))
+
+/*
+ * Print a stack trace by walking saved frame pointers.
+ *
+ * Frame layout assumed: the prologue stores the caller's frame
+ * pointer at frame-16 and the return address at frame-8.
+ *
+ * modif: 'u' also follows frames into user space; 't' (trace a
+ * thread) is not supported yet.
+ *
+ * Fix: count was previously decremented both in the loop condition
+ * and again at the bottom of the loop, halving the requested depth;
+ * the extra decrement is removed.
+ */
+void
+db_stack_trace_print(db_expr_t addr, int have_addr, db_expr_t count,
+ char *modif, int (*pr)(const char *, ...))
+{
+ u_int64_t frame, lastframe, ra, lastra, sp;
+ char c, *cp = modif;
+ db_expr_t offset;
+ Elf_Sym * sym;
+ char *name;
+ int kernel_only = 1;
+
+ while ((c = *cp++) != 0) {
+ if (c == 'u')
+ kernel_only = 0;
+ if (c == 't') {
+ db_printf("tracing threads not yet supported\n");
+ return;
+ }
+ }
+
+ if (!have_addr) {
+ /* Start from the trapframe ddb saved on entry. */
+ sp = ddb_regs.tf_sp;
+ ra = ddb_regs.tf_ra;
+ lastra = ddb_regs.tf_ra;
+ frame = ddb_regs.tf_s[0];
+ } else {
+ /* Caller supplied a stack pointer to start from. */
+ sp = addr;
+ db_read_bytes(sp - 16, sizeof(vaddr_t), (char *)&frame);
+ db_read_bytes(sp - 8, sizeof(vaddr_t), (char *)&ra);
+ lastra = 0;
+ }
+
+ while (count-- && frame != 0) {
+ lastframe = frame;
+
+ sym = db_search_symbol(lastra, DB_STGY_ANY, &offset);
+ db_symbol_values(sym, &name, NULL);
+
+ if (name == NULL || strcmp(name, "end") == 0) {
+ (*pr)("%llx at 0x%lx", lastra, ra - 4);
+ } else {
+ (*pr)("%s() at ", name);
+ db_printsym(ra - 4, DB_STGY_PROC, pr);
+ }
+ (*pr)("\n");
+
+ // can we detect traps ?
+ db_read_bytes(frame - 16, sizeof(vaddr_t), (char *)&frame);
+ if (frame == 0)
+ break;
+ lastra = ra;
+ db_read_bytes(frame - 8, sizeof(vaddr_t), (char *)&ra);
+
+#if 0
+ if (name != NULL) {
+ if ((strcmp (name, "handle_el0_irq") == 0) ||
+ (strcmp (name, "handle_el1_irq") == 0)) {
+ (*pr)("--- interrupt ---\n");
+ } else if (
+ (strcmp (name, "handle_el0_sync") == 0) ||
+ (strcmp (name, "handle_el1_sync") == 0)) {
+ (*pr)("--- trap ---\n");
+ }
+ }
+#endif
+ /* Kernel frames must grow strictly upward in address. */
+ if (INKERNEL(frame)) {
+ if (frame <= lastframe) {
+ (*pr)("Bad frame pointer: 0x%lx\n", frame);
+ break;
+ }
+ } else {
+ if (kernel_only)
+ break;
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 1996 Theo de Raadt
+ * Copyright (c) 1982, 1986, 1988 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/buf.h>
+#include <sys/disklabel.h>
+#include <sys/disk.h>
+
+/*
+ * Attempt to read a disk label from a device
+ * using the indicated strategy routine.
+ * The label must be partly set up before this:
+ * secpercyl, secsize and anything required for a block i/o read
+ * operation in the driver's strategy/start routines
+ * must be filled in before calling us.
+ *
+ * If dos partition table requested, attempt to load it and
+ * find disklabel inside a DOS partition.
+ *
+ * We would like to check if each MBR has a valid DOSMBR_SIGNATURE, but
+ * we cannot because it doesn't always exist. So.. we assume the
+ * MBR is valid.
+ *
+ * Returns 0 on success, otherwise an errno; with spoofonly set no
+ * label is read from disk, one is only spoofed from the partition
+ * tables.
+ */
+int
+readdisklabel(dev_t dev, void (*strat)(struct buf *),
+ struct disklabel *lp, int spoofonly)
+{
+ struct buf *bp = NULL;
+ int error;
+
+ if ((error = initdisklabel(lp)))
+ goto done;
+
+ /* get a buffer and initialize it */
+ bp = geteblk(lp->d_secsize);
+ bp->b_dev = dev;
+
+ /* MBR/GPT first; error == 0 means a label was found or spoofed. */
+ error = readdoslabel(bp, strat, lp, NULL, spoofonly);
+ if (error == 0)
+ goto done;
+
+ /* Fall back to filesystem-spoofed labels where configured. */
+#if defined(CD9660)
+ error = iso_disklabelspoof(dev, strat, lp);
+ if (error == 0)
+ goto done;
+#endif
+#if defined(UDF)
+ error = udf_disklabelspoof(dev, strat, lp);
+ if (error == 0)
+ goto done;
+#endif
+
+done:
+ if (bp) {
+ bp->b_flags |= B_INVAL;
+ brelse(bp);
+ }
+ disk_change = 1;
+ return (error);
+}
+
+/*
+ * Write disk label back to device after modification.
+ * Locates the label sector via the partition tables, reads it,
+ * overwrites the embedded label with *lp and writes the sector
+ * back.  Returns 0 on success or an errno (EIO if the label
+ * sector cannot be located).
+ */
+int
+writedisklabel(dev_t dev, void (*strat)(struct buf *), struct disklabel *lp)
+{
+ daddr_t partoff = -1;
+ int error = EIO;
+ int offset;
+ struct disklabel *dlp;
+ struct buf *bp = NULL;
+
+ /* get a buffer and initialize it */
+ bp = geteblk(lp->d_secsize);
+ bp->b_dev = dev;
+
+ /* spoofonly: only locate the OpenBSD partition, don't read a label */
+ if (readdoslabel(bp, strat, lp, &partoff, 1) != 0)
+ goto done;
+
+ /* Read it in, slap the new label in, and write it back out */
+ error = readdisksector(bp, strat, lp, DL_BLKTOSEC(lp, partoff +
+ DOS_LABELSECTOR));
+ if (error)
+ goto done;
+ offset = DL_BLKOFFSET(lp, partoff + DOS_LABELSECTOR);
+
+ dlp = (struct disklabel *)(bp->b_data + offset);
+ *dlp = *lp;
+ CLR(bp->b_flags, B_READ | B_WRITE | B_DONE);
+ SET(bp->b_flags, B_BUSY | B_WRITE | B_RAW);
+ (*strat)(bp);
+ error = biowait(bp);
+
+done:
+ if (bp) {
+ bp->b_flags |= B_INVAL;
+ brelse(bp);
+ }
+ disk_change = 1;
+ return (error);
+}
--- /dev/null
+/*
+ * Copyright (c) 2020 Dale Rahn <drahn@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/user.h>
+#include "machine/asm.h"
+
+void fpu_clear(struct fpreg *fp)
+{
+ /* rounding mode set to 0, should be RND_NEAREST */
+ bzero(fp, sizeof (*fp));
+}
+
+// may look into optimizing this, bit map lookup ?
+
/*
 * Decide whether 'instr' is a floating-point instruction, i.e. one
 * that should cause the lazy FPU context to be loaded for the
 * faulting process.  Returns non-zero for FP opcodes.
 *
 * Fixes vs. the previous version: the compressed-instruction cases
 * tested bits (11 and 6) that the 0xe003 mask had already cleared
 * (so they were always false), the C.FLD case used the unreachable
 * value 0x1000 instead of 0x2000, and a missing "break" let the
 * C.FLDSP result be clobbered.  The compressed double-precision
 * load/store encodings implicitly name FP registers, so no further
 * register check is needed.
 */
int fpu_valid_opcode(uint32_t instr)
{
	int opcode = instr & 0x7f;
	int valid = 0;

	if ((opcode & 0x3) == 0x3) {
		/* 32-bit instruction */
		switch (opcode) {
		case 0x07: // LOAD-FP
		case 0x27: // STORE-FP
		case 0x53: // OP-FP
			valid = 1;
			break;
		default:
			break;
		}
	} else {
		/* 16-bit instruction: key on funct3 (bits 15:13) + quadrant */
		switch (instr & 0xe003) {
		case 0x2000: // C.FLD
		case 0xa000: // C.FSD
		case 0x2002: // C.FLDSP
		case 0xa002: // C.FSDSP
			valid = 1;
			break;
		default:
			break;
		}
	}
	return valid;
}
+
+void
+fpu_discard(struct proc *p)
+{
+ if (p->p_addr->u_pcb.pcb_fpcpu == curcpu())
+ curcpu()->ci_fpuproc = NULL;
+ p->p_addr->u_pcb.pcb_fpcpu = NULL;
+}
+
+void
+fpu_disable()
+{
+ __asm volatile ("csrc sstatus, %0" :: "r"(SSTATUS_FS_MASK));
+}
+
+void
+fpu_enable_clean()
+{
+ __asm volatile ("csrc sstatus, %0" :: "r"(SSTATUS_FS_MASK));
+ __asm volatile ("csrs sstatus, %0" :: "r"(SSTATUS_FS_CLEAN));
+}
+
+void
+fpu_save(struct proc *p, struct trapframe *frame)
+{
+ struct cpu_info *ci = curcpu();
+ struct pcb *pcb = &p->p_addr->u_pcb;
+ struct fpreg *fp = &p->p_addr->u_pcb.pcb_fpstate;
+ register void *ptr = fp->fp_f;
+ uint64_t fcsr;
+
+ if (ci->ci_fpuproc != p) {
+ return;
+ }
+
+ if (pcb->pcb_fpcpu == NULL || ci->ci_fpuproc == NULL ||
+ !(pcb->pcb_fpcpu == ci && ci->ci_fpuproc == p)) {
+ /* disable fpu */
+ panic("FPU enabled but curproc and curcpu do not agree %p %p",
+ pcb->pcb_fpcpu, ci->ci_fpuproc);
+ }
+
+
+ switch (p->p_addr->u_pcb.pcb_tf->tf_sstatus & SSTATUS_FS_MASK) {
+ case SSTATUS_FS_OFF:
+ /* Fallthru */
+ case SSTATUS_FS_CLEAN:
+ p->p_addr->u_pcb.pcb_tf->tf_sstatus &= ~SSTATUS_FS_MASK;
+ return;
+ case SSTATUS_FS_DIRTY:
+ default:
+ ;
+ /* fallthru */
+ }
+
+ __asm volatile("frcsr %0" : "=r"(fcsr));
+
+ fp->fp_fcsr = fcsr;
+ #define STFx(x) \
+ __asm volatile ("fsd f" __STRING(x) ", %1(%0)": :"r"(ptr), "i"(x * 8))
+
+ STFx(0);
+ STFx(1);
+ STFx(2);
+ STFx(3);
+ STFx(4);
+ STFx(5);
+ STFx(6);
+ STFx(7);
+ STFx(8);
+ STFx(9);
+ STFx(10);
+ STFx(11);
+ STFx(12);
+ STFx(13);
+ STFx(14);
+ STFx(15);
+ STFx(16);
+ STFx(17);
+ STFx(18);
+ STFx(19);
+ STFx(20);
+ STFx(21);
+ STFx(22);
+ STFx(23);
+ STFx(24);
+ STFx(25);
+ STFx(26);
+ STFx(27);
+ STFx(28);
+ STFx(29);
+ STFx(30);
+ STFx(31);
+
+ /*
+ * pcb->pcb_fpcpu and ci->ci_fpuproc are still valid
+ * until some other fpu context steals either the cpu
+ * context or another cpu steals the fpu context.
+ */
+
+ p->p_addr->u_pcb.pcb_tf->tf_sstatus &= ~SSTATUS_FS_MASK;
+ void fpu_enable_disable();
+}
+
+void
+fpu_load(struct proc *p)
+{
+ struct cpu_info *ci = curcpu();
+ struct pcb *pcb = &p->p_addr->u_pcb;
+
+ struct fpreg *fp = &p->p_addr->u_pcb.pcb_fpstate;
+ register void *ptr = fp->fp_f;
+
+ /*
+ * Verify that context is not already loaded
+ */
+ if (pcb->pcb_fpcpu == ci && ci->ci_fpuproc == p) {
+ return;
+ }
+ //printf("FPU load requested %p %p \n", ci, p);
+
+ if ((pcb->pcb_flags & PCB_FPU) == 0) {
+ fpu_clear(fp);
+ pcb->pcb_flags |= PCB_FPU;
+ }
+ fpu_enable_clean();
+
+ __asm volatile("fscsr %0" : : "r"(fp->fp_fcsr));
+ #define RDFx(x) \
+ __asm volatile ("fld f" __STRING(x) ", %1(%0)": :"r"(ptr), "i"(x * 8))
+
+ RDFx(0);
+ RDFx(1);
+ RDFx(2);
+ RDFx(3);
+ RDFx(4);
+ RDFx(5);
+ RDFx(6);
+ RDFx(7);
+ RDFx(8);
+ RDFx(9);
+ RDFx(10);
+ RDFx(11);
+ RDFx(12);
+ RDFx(13);
+ RDFx(14);
+ RDFx(15);
+ RDFx(16);
+ RDFx(17);
+ RDFx(18);
+ RDFx(19);
+ RDFx(20);
+ RDFx(21);
+ RDFx(22);
+ RDFx(23);
+ RDFx(24);
+ RDFx(25);
+ RDFx(26);
+ RDFx(27);
+ RDFx(28);
+ RDFx(29);
+ RDFx(30);
+ RDFx(31);
+
+ /*
+ * pcb->pcb_fpcpu and ci->ci_fpuproc are activated here
+ * to indicate that the fpu context is correctly loaded on
+ * this cpu. XXX block interupts for these saves ?
+ */
+ pcb->pcb_fpcpu = ci;
+ ci->ci_fpuproc = p;
+
+ void fpu_enable_disable();
+}
--- /dev/null
+# Copyright (c) 2020 Brian Bamsch <bbamsch@google.com>
+# All rights reserved.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
# Input for genassym.sh: generates assym.h, which exposes C structure
# member offsets and selected constants to the assembly sources
# (locore.S and friends).

include <sys/param.h>
include <sys/proc.h>
include <sys/systm.h>
include <sys/mbuf.h>
include <sys/resourcevar.h>
include <sys/device.h>
include <sys/user.h>
include <sys/signal.h>
include <sys/mbuf.h>
include <sys/socketvar.h>
include <netinet/in.h>
include <netinet/ip.h>

include <machine/frame.h>
include <machine/pcb.h>
include <machine/cpu.h>
include <machine/param.h>
include <machine/bootconfig.h>

# Trapframe offsets used by the trap entry/exit code.
struct trapframe
member tf_ra
member tf_sp
member tf_gp
member tf_tp
member tf_t
member tf_s
member tf_a
member tf_sepc
member tf_sstatus
member tf_stval
member tf_scause

# Switchframe offsets used by cpu_switchto.
struct switchframe
member sf_s
member sf_ra

export IPL_NONE

struct proc
member p_stat
member p_cpu
member p_addr
member p_astpending p_md.md_astpending

export SONPROC

# Signal frame offsets used by sigcode.
struct sigframe
member sf_signum
member sf_sc
member sf_si

struct pcb
member pcb_flags
member pcb_tf
member pcb_sp
member pcb_onfault
member pcb_fpstate
member pcb_fpcpu

struct cpu_info
member ci_dev
member ci_next
member ci_schedstate
member ci_cpuid
member ci_self
member ci_curproc
member ci_curpm
member ci_randseed
member ci_curpcb
member ci_idle_pcb
member ci_ipending
member ci_idepth
ifdef DIAGNOSTIC
member ci_mutex_level
endif
member ci_want_resched
ifdef MULTIPROCESSOR
member ci_srp_hazards
member ci_flags
member ci_ddb_paused
endif
ifdef GPROF
member ci_gmon
endif

# Boot parameter block filled in by locore.S and read by initriscv().
struct riscv_bootparams
member kern_l1pt
member kern_delta
member kern_stack
member dtbp_virt
member dtbp_phys
+
+
--- /dev/null
+/*
+ * Copyright (c) 2011 Dale Rahn <drahn@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/timetc.h>
+#include <sys/malloc.h>
+
+#include <dev/clock_subr.h>
+#include <machine/cpu.h>
+#include <machine/intr.h>
+#include <machine/frame.h>
+
+#include <dev/ofw/openfirm.h>
+
uint32_t riscv_intr_get_parent(int);

void *riscv_intr_prereg_establish_fdt(void *, int *, int, int (*)(void *),
    void *, char *);
void riscv_intr_prereg_disestablish_fdt(void *);

int riscv_dflt_splraise(int);
int riscv_dflt_spllower(int);
void riscv_dflt_splx(int);
void riscv_dflt_setipl(int);

void riscv_dflt_intr(void *);
void riscv_cpu_intr(void *);

/* Map a soft-interrupt number to its bit in ci_ipending/riscv_smask. */
#define SI_TO_IRQBIT(x) (1 << (x))
/* Per-IPL mask of soft interrupts that may run at that level. */
uint32_t riscv_smask[NIPL];

/* Active spl implementation; starts with the defaults above and is
 * replaced via riscv_set_intr_func() when a driver attaches. */
struct riscv_intr_func riscv_intr_func = {
	riscv_dflt_splraise,
	riscv_dflt_spllower,
	riscv_dflt_splx,
	riscv_dflt_setipl
};

/* Root interrupt dispatch; replaced via riscv_set_intr_handler(). */
void (*riscv_intr_dispatch)(void *) = riscv_dflt_intr;
+
+void
+riscv_cpu_intr(void *frame)
+{
+ struct cpu_info *ci = curcpu();
+
+ ci->ci_idepth++;
+ riscv_intr_dispatch(frame);
+ ci->ci_idepth--;
+}
+
/*
 * Default root dispatch: taking an interrupt before a real handler
 * has been installed via riscv_set_intr_handler() is fatal.
 */
void
riscv_dflt_intr(void *frame)
{
	panic("riscv_dflt_intr() called");
}
+
+/*
+ * Find the interrupt parent by walking up the tree.
+ */
+uint32_t
+riscv_intr_get_parent(int node)
+{
+ uint32_t phandle = 0;
+
+ while (node && !phandle) {
+ phandle = OF_getpropint(node, "interrupt-parent", 0);
+ node = OF_parent(node);
+ }
+
+ return phandle;
+}
+
+/*
+ * Interrupt pre-registration.
+ *
+ * To allow device drivers to establish interrupt handlers before all
+ * relevant interrupt controllers have been attached, we support
+ * pre-registration of interrupt handlers. For each node in the
+ * device tree that has an "interrupt-controller" property, we
+ * register a dummy interrupt controller that simply stashes away all
+ * relevant details of the interrupt handler being established.
+ * Later, when the real interrupt controller registers itself, we
 * establish those interrupt handlers based on that information.
+ */
+
#define MAX_INTERRUPT_CELLS 4

/* A handler established before its real controller attached. */
struct intr_prereg {
	LIST_ENTRY(intr_prereg) ip_list;	/* on prereg_interrupts */
	uint32_t ip_phandle;		/* phandle of the parent controller */
	uint32_t ip_cell[MAX_INTERRUPT_CELLS];	/* saved interrupt specifier */

	int ip_level;			/* requested interrupt level */
	int (*ip_func)(void *);		/* handler function */
	void *ip_arg;			/* handler argument */
	char *ip_name;			/* name passed to establish */

	struct interrupt_controller *ip_ic;	/* set once claimed */
	void *ip_ih;			/* real cookie once established */
};

/* Handlers waiting for their interrupt controller to attach. */
LIST_HEAD(, intr_prereg) prereg_interrupts =
	LIST_HEAD_INITIALIZER(prereg_interrupts);
+
+void *
+riscv_intr_prereg_establish_fdt(void *cookie, int *cell, int level,
+ int (*func)(void *), void *arg, char *name)
+{
+ struct interrupt_controller *ic = cookie;
+ struct intr_prereg *ip;
+ int i;
+
+ ip = malloc(sizeof(struct intr_prereg), M_DEVBUF, M_ZERO | M_WAITOK);
+ ip->ip_phandle = ic->ic_phandle;
+ for (i = 0; i < ic->ic_cells; i++)
+ ip->ip_cell[i] = cell[i];
+ ip->ip_level = level;
+ ip->ip_func = func;
+ ip->ip_arg = arg;
+ ip->ip_name = name;
+ LIST_INSERT_HEAD(&prereg_interrupts, ip, ip_list);
+
+ return ip;
+}
+
+void
+riscv_intr_prereg_disestablish_fdt(void *cookie)
+{
+ struct intr_prereg *ip = cookie;
+ struct interrupt_controller *ic = ip->ip_ic;
+
+ if (ip->ip_ic != NULL && ip->ip_ih != NULL)
+ ic->ic_disestablish(ip->ip_ih);
+
+ if (ip->ip_ic != NULL)
+ LIST_REMOVE(ip, ip_list);
+
+ free(ip, M_DEVBUF, sizeof(*ip));
+}
+
+void
+riscv_intr_init_fdt_recurse(int node)
+{
+ struct interrupt_controller *ic;
+
+ if (OF_getproplen(node, "interrupt-controller") >= 0) {
+ ic = malloc(sizeof(struct interrupt_controller),
+ M_DEVBUF, M_ZERO | M_WAITOK);
+ ic->ic_node = node;
+ ic->ic_cookie = ic;
+ ic->ic_establish = riscv_intr_prereg_establish_fdt;
+ ic->ic_disestablish = riscv_intr_prereg_disestablish_fdt;
+ riscv_intr_register_fdt(ic);
+ }
+
+ for (node = OF_child(node); node; node = OF_peer(node))
+ riscv_intr_init_fdt_recurse(node);
+}
+
/* Kick off interrupt pre-registration from the device tree root. */
void
riscv_intr_init_fdt(void)
{
	int root = OF_peer(0);

	if (root != 0)
		riscv_intr_init_fdt_recurse(root);
}
+
+LIST_HEAD(, interrupt_controller) interrupt_controllers =
+ LIST_HEAD_INITIALIZER(interrupt_controllers);
+
+void
+riscv_intr_register_fdt(struct interrupt_controller *ic)
+{
+ struct intr_prereg *ip, *tip;
+
+ ic->ic_cells = OF_getpropint(ic->ic_node, "#interrupt-cells", 0);
+ ic->ic_phandle = OF_getpropint(ic->ic_node, "phandle", 0);
+ if (ic->ic_phandle == 0)
+ return;
+ KASSERT(ic->ic_cells <= MAX_INTERRUPT_CELLS);
+
+ LIST_INSERT_HEAD(&interrupt_controllers, ic, ic_list);
+
+ /* Establish pre-registered interrupt handlers. */
+ LIST_FOREACH_SAFE(ip, &prereg_interrupts, ip_list, tip) {
+ if (ip->ip_phandle != ic->ic_phandle)
+ continue;
+
+ ip->ip_ic = ic;
+ if (ic->ic_establish)/* riscv_cpu_intc sets this to NULL */
+ {
+ ip->ip_ih = ic->ic_establish(ic->ic_cookie, ip->ip_cell,
+ ip->ip_level, ip->ip_func, ip->ip_arg, ip->ip_name);
+ if (ip->ip_ih == NULL)
+ printf("can't establish interrupt %s\n", ip->ip_name);
+ }
+
+ LIST_REMOVE(ip, ip_list);
+ }
+}
+
/* Cookie handed back to callers: controller plus its own cookie. */
struct riscv_intr_handle {
	struct interrupt_controller *ih_ic;
	void *ih_ih;
};

/* Establish a handler for a node's first (index 0) interrupt. */
void *
riscv_intr_establish_fdt(int node, int level, int (*func)(void *),
    void *cookie, char *name)
{
	return riscv_intr_establish_fdt_idx(node, 0, level, func, cookie,
	    name);
}
+
/*
 * Establish a handler for a node's idx'th interrupt, using either
 * the "interrupts-extended" property (each entry carries its own
 * controller phandle) or the old "interrupts" property (single
 * parent found by walking up the tree).
 * Returns an opaque riscv_intr_handle, or NULL on failure.
 */
void *
riscv_intr_establish_fdt_idx(int node, int idx, int level, int (*func)(void *),
    void *cookie, char *name)
{
	struct interrupt_controller *ic;
	int i, len, ncells, extended = 1;
	uint32_t *cell, *cells, phandle;
	struct riscv_intr_handle *ih;
	void *val = NULL;

	len = OF_getproplen(node, "interrupts-extended");
	if (len <= 0) {
		len = OF_getproplen(node, "interrupts");
		extended = 0;
	}
	if (len <= 0 || (len % sizeof(uint32_t) != 0))
		return NULL;

	/* Old style. */
	if (!extended) {
		/* One parent controller for all entries. */
		phandle = riscv_intr_get_parent(node);
		LIST_FOREACH(ic, &interrupt_controllers, ic_list) {
			if (ic->ic_phandle == phandle)
				break;
		}

		if (ic == NULL)
			return NULL;
	}

	cell = cells = malloc(len, M_TEMP, M_WAITOK);
	if (extended)
		OF_getpropintarray(node, "interrupts-extended", cells, len);
	else
		OF_getpropintarray(node, "interrupts", cells, len);
	ncells = len / sizeof(uint32_t);

	/* Walk entries until the idx'th one, then try to establish it. */
	for (i = 0; i <= idx && ncells > 0; i++) {
		if (extended) {
			/* First cell of each entry is the controller. */
			phandle = cell[0];

			LIST_FOREACH(ic, &interrupt_controllers, ic_list) {
				if (ic->ic_phandle == phandle)
					break;
			}

			if (ic == NULL)
				break;

			cell++;
			ncells--;
		}

		if (i == idx && ncells >= ic->ic_cells && ic->ic_establish) {
			val = ic->ic_establish(ic->ic_cookie, cell, level,
			    func, cookie, name);
			break;
		}

		/* Skip this entry's specifier cells. */
		cell += ic->ic_cells;
		ncells -= ic->ic_cells;
	}

	free(cells, M_TEMP, len);

	if (val == NULL)
		return NULL;

	ih = malloc(sizeof(*ih), M_DEVBUF, M_WAITOK);
	ih->ih_ic = ic;
	ih->ih_ih = val;

	return ih;
}
+
+void
+riscv_intr_disestablish_fdt(void *cookie)
+{
+ struct riscv_intr_handle *ih = cookie;
+ struct interrupt_controller *ic = ih->ih_ic;
+
+ ic->ic_disestablish(ih->ih_ih);
+ free(ih, M_DEVBUF, sizeof(*ih));
+}
+
+void
+riscv_intr_enable(void *cookie)
+{
+ struct riscv_intr_handle *ih = cookie;
+ struct interrupt_controller *ic = ih->ih_ic;
+
+ KASSERT(ic->ic_enable != NULL);
+ ic->ic_enable(ih->ih_ih);
+}
+
+void
+riscv_intr_disable(void *cookie)
+{
+ struct riscv_intr_handle *ih = cookie;
+ struct interrupt_controller *ic = ih->ih_ic;
+
+ KASSERT(ic->ic_disable != NULL);
+ ic->ic_disable(ih->ih_ih);
+}
+
+void
+riscv_intr_route(void *cookie, int enable, struct cpu_info *ci)
+{
+ struct riscv_intr_handle *ih = cookie;
+ struct interrupt_controller *ic = ih->ih_ic;
+
+ if (ic->ic_route)
+ ic->ic_route(ih->ih_ih, enable, ci);
+}
+
+void
+riscv_intr_cpu_enable(void)
+{
+ struct interrupt_controller *ic;
+
+ LIST_FOREACH(ic, &interrupt_controllers, ic_list)
+ if (ic->ic_cpu_enable)
+ ic->ic_cpu_enable();
+}
+
+int
+riscv_dflt_splraise(int newcpl)
+{
+ struct cpu_info *ci = curcpu();
+ int oldcpl;
+
+ oldcpl = ci->ci_cpl;
+
+ if (newcpl < oldcpl)
+ newcpl = oldcpl;
+
+ ci->ci_cpl = newcpl;
+
+ return oldcpl;
+}
+
+int
+riscv_dflt_spllower(int newcpl)
+{
+ struct cpu_info *ci = curcpu();
+ int oldcpl;
+
+ oldcpl = ci->ci_cpl;
+
+ splx(newcpl);
+
+ return oldcpl;
+}
+
/*
 * Default splx: run soft interrupts that become unmasked at
 * 'newcpl', then record the new level.  Note the pending check is
 * done before ci_cpl is written; riscv_do_pending_intr() itself
 * leaves ci_cpl at newcpl when it returns.
 */
void
riscv_dflt_splx(int newcpl)
{
	struct cpu_info *ci = curcpu();

	if (ci->ci_ipending & riscv_smask[newcpl])
		riscv_do_pending_intr(newcpl);
	ci->ci_cpl = newcpl;
}
+
+void
+riscv_dflt_setipl(int newcpl)
+{
+ struct cpu_info *ci = curcpu();
+
+ ci->ci_cpl = newcpl;
+}
+
/*
 * Run all soft interrupts that are pending and unmasked at level
 * 'pcpl', highest priority first.  Each dispatch runs at its own
 * IPL with hardware interrupts re-enabled, then the pending set is
 * re-checked; finally ci_cpl is restored to pcpl.
 */
void
riscv_do_pending_intr(int pcpl)
{
	struct cpu_info *ci = curcpu();
	int sie;

	sie = disable_interrupts();

/* Clear the pending bit, raise IPL, and dispatch with ints on. */
#define DO_SOFTINT(si, ipl) \
	if ((ci->ci_ipending & riscv_smask[pcpl]) & \
	    SI_TO_IRQBIT(si)) { \
		ci->ci_ipending &= ~SI_TO_IRQBIT(si); \
		riscv_intr_func.setipl(ipl); \
		restore_interrupts(sie); \
		softintr_dispatch(si); \
		sie = disable_interrupts(); \
	}

	do {
		DO_SOFTINT(SIR_TTY, IPL_SOFTTTY);
		DO_SOFTINT(SIR_NET, IPL_SOFTNET);
		DO_SOFTINT(SIR_CLOCK, IPL_SOFTCLOCK);
		DO_SOFTINT(SIR_SOFT, IPL_SOFT);
	} while (ci->ci_ipending & riscv_smask[pcpl]);

	/* Don't use splx... we are here already! */
	riscv_intr_func.setipl(pcpl);
	restore_interrupts(sie);
}
+
+void riscv_set_intr_func(int (*raise)(int), int (*lower)(int),
+ void (*x)(int), void (*setipl)(int))
+{
+ riscv_intr_func.raise = raise;
+ riscv_intr_func.lower = lower;
+ riscv_intr_func.x = x;
+ riscv_intr_func.setipl = setipl;
+}
+
+void riscv_set_intr_handler(void (*intr_handle)(void *))
+{
+ riscv_intr_dispatch = intr_handle;
+}
+
+void
+riscv_init_smask(void)
+{
+ static int inited = 0;
+ int i;
+
+ if (inited)
+ return;
+ inited = 1;
+
+ for (i = IPL_NONE; i <= IPL_HIGH; i++) {
+ riscv_smask[i] = 0;
+ if (i < IPL_SOFT)
+ riscv_smask[i] |= SI_TO_IRQBIT(SIR_SOFT);
+ if (i < IPL_SOFTCLOCK)
+ riscv_smask[i] |= SI_TO_IRQBIT(SIR_CLOCK);
+ if (i < IPL_SOFTNET)
+ riscv_smask[i] |= SI_TO_IRQBIT(SIR_NET);
+ if (i < IPL_SOFTTTY)
+ riscv_smask[i] |= SI_TO_IRQBIT(SIR_TTY);
+ }
+}
+
/* provide functions for asm */
#undef splraise
#undef spllower
#undef splx

/* C-linkage spl entry point; forwards to the registered MD hook. */
int
splraise(int ipl)
{
	return riscv_intr_func.raise(ipl);
}
+
int _spllower(int ipl); /* XXX - called from asm? */
/* Both wrappers forward to the registered lower() hook. */
int
_spllower(int ipl)
{
	return riscv_intr_func.lower(ipl);
}
int
spllower(int ipl)
{
	return riscv_intr_func.lower(ipl);
}
+
/* C-linkage splx; forwards to the registered MD hook. */
void
splx(int ipl)
{
	riscv_intr_func.x(ipl);
}
+
+
#ifdef DIAGNOSTIC
/*
 * Back end for splassert(9): verify the current IPL is at least
 * 'wantipl', and that IPL_NONE assertions are not made from
 * interrupt context.
 */
void
riscv_splassert_check(int wantipl, const char *func)
{
	int oldipl = curcpu()->ci_cpl;

	if (oldipl < wantipl) {
		splassert_fail(wantipl, oldipl, func);
		/*
		 * If the splassert_ctl is set to not panic, raise the ipl
		 * in a feeble attempt to reduce damage.
		 */
		riscv_intr_func.setipl(wantipl);
	}

	if (wantipl == IPL_NONE && curcpu()->ci_idepth != 0) {
		splassert_fail(-1, curcpu()->ci_idepth, func);
	}
}
#endif
+
+/*
+ ********* timer interrupt relevant **************
+ */
+
void riscv_dflt_delay(u_int usecs);

/* MD clock hooks, filled in by the clock driver that attaches. */
struct {
	void (*delay)(u_int);		/* busy-wait N microseconds */
	void (*initclocks)(void);	/* start system clocks */
	void (*setstatclockrate)(int);	/* change statclock rate */
	void (*mpstartclock)(void);	/* start clock on a secondary cpu */
} riscv_clock_func = {
	riscv_dflt_delay,
	NULL,
	NULL,
	NULL
};
+
+void
+riscv_clock_register(void (*initclock)(void), void (*delay)(u_int),
+ void (*statclock)(int), void(*mpstartclock)(void))
+{
+ if (riscv_clock_func.initclocks)
+ return;
+
+ riscv_clock_func.initclocks = initclock;
+ riscv_clock_func.delay = delay;
+ riscv_clock_func.setstatclockrate = statclock;
+ riscv_clock_func.mpstartclock = mpstartclock;
+}
+
+void
+delay(u_int usec)
+{
+ riscv_clock_func.delay(usec);
+}
+
+void
+cpu_initclocks(void)
+{
+ if (riscv_clock_func.initclocks == NULL)
+ panic("initclocks function not initialized yet");
+
+ riscv_clock_func.initclocks();
+}
+
+void
+cpu_startclock(void)
+{
+ if (riscv_clock_func.mpstartclock == NULL)
+ panic("startclock function not initialized yet");
+
+ riscv_clock_func.mpstartclock();
+}
+
+void
+riscv_dflt_delay(u_int usecs)
+{
+ int j;
+ /* BAH - there is no good way to make this close */
+ /* but this isn't supposed to be used after the real clock attaches */
+ for (; usecs > 0; usecs--)
+ for (j = 100; j > 0; j--)
+ ;
+}
+
+void
+setstatclockrate(int new)
+{
+ if (riscv_clock_func.setstatclockrate == NULL) {
+ panic("riscv_clock_func.setstatclockrate not intialized");
+ }
+ riscv_clock_func.setstatclockrate(new);
+}
+
/*
 * Wait for handler(s) of an established interrupt to drain.
 * NOTE(review): 'ih' is unused — sched_barrier(NULL) is presumably
 * a sufficient (if coarse) barrier here; confirm against the
 * sched_barrier() contract.
 */
void
intr_barrier(void *ih)
{
	sched_barrier(NULL);
}
+
+/*
+ * IPI implementation
+ */
+
/* IPI hook; the default panics until an IPI driver registers one. */
void riscv_no_send_ipi(struct cpu_info *ci, int id);
void (*intr_send_ipi_func)(struct cpu_info *, int) = riscv_no_send_ipi;
+
/* Send IPI 'id' to cpu 'ci' via the registered hook. */
void
riscv_send_ipi(struct cpu_info *ci, int id)
{
	intr_send_ipi_func(ci, id);
}
+
/* Fatal default: no IPI mechanism has registered itself yet. */
void
riscv_no_send_ipi(struct cpu_info *ci, int id)
{
	panic("riscv_send_ipi() called: no ipi function");
}
--- /dev/null
+/* $OpenBSD: locore.S,v 1.1 2021/04/23 02:42:17 drahn Exp $ */
+/*-
+ * Copyright (c) 2012-2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: head/sys/arm64/arm64/locore.S 282867 2015-05-13 18:57:03Z zbb $
+ */
+
+#include <assym.h>
+#include <sys/syscall.h>
+#include <machine/asm.h>
+#include <machine/param.h>
+#include <machine/vmparam.h>
+#include <machine/trap.h>
+#include <machine/riscvreg.h>
+#include <machine/pte.h>
+
+
	.globl kernbase
	.set kernbase, KERNBASE

	/* Trap entries */
	.text

	/*
	 * Kernel entry from the bootloader, MMU off, running at the
	 * physical load address on every hart.  Register contract
	 * (from bbl, see below): a0 = hart id, a1 = DTB phys addr.
	 */
	/* Reset vector */
	.text
	.globl _start_kern_bootstrap
_start_kern_bootstrap:

	/* Set the global pointer */
.option push
.option norelax
	lla gp, __global_pointer$
.option pop

	/* Get the physical address kernel loaded to */
	lla t0, virt_map //virt_map is a phy addr where its own va is stored.
	ld t1, 0(t0)
	sub t1, t1, t0 //t1 = t1 - t0 = va - pa, va<-->pa offset
	li t2, KERNBASE //KERNBASE is virtual addr
	sub s9, t2, t1 //s9 = physmem base of kernel

	//registers passed by bbl.
	/*
	 * a0 = hart id
	 * a1 = dtbp
	 */

	bnez a1, 1f
	// no dtb, we assume we were booted via efiboot
	// NOTE(review): a0/a2 use below depends on the efiboot register
	// contract (esym in a0, dtb in a2?) — confirm against efiboot.
	la t0, esym
	add a3, a0, t1
	sd a3, 0(t0)
	li a0, 0
	mv a1, a2

1:
	/* Pick a hart to run the boot process. */
	lla t0, hart_lottery
	li t1, 1
	//atomic memory operation, read-modify-write:
	//only the first hart can read 0 and modify it to 1,
	//all other harts will read 1.
	amoadd.w t2, t1, (t0)

	/*
	 * We must jump to mpentry in the non-BSP case because the offset is
	 * too large to fit in a 12-bit branch immediate.
	 */
	beqz t2, 1f
	j mpentry
+
1: //only one hart(which just won the lottery) runs the main boot procedure.
	/*
	 * Page tables
	 *
	 * Four mappings are built below (Sv39, 3-level):
	 *  step0: 1 GiB identity gigapage covering the load address
	 *         (needed only until paging is on; unmapped at 'va').
	 *  step1: L1 entry pointing the KERNBASE region at pagetable_l2.
	 *  step2: 512 x 2 MiB L2 superpages mapping the kernel.
	 *  step3/4: early devmap L1 entry + one 2 MiB page for the DTB.
	 */

	/* step0) Identity map kernel @ 0x80000000 */
	/* Build PTE for 1 GiB identity-mapped gigapage */
	lla s1, pagetable_l1 //phy addr
	mv s2, s9 //phy addr
	li t0, 0xffffffffc0000000
	and s2, s2, t0
	srli s2, s2, PAGE_SHIFT //12, 4K page
	slli t5, s2, PTE_PPN0_S //10bit Phys Mem Attribute
	li t4, (PTE_KERN | PTE_X) //flag bit
	or t6, t4, t5 //t6 now is PTE for a 1 GiB gigapage

	/* Calculate VPN[2] for 1 GiB identity-mapped gigapage */
	mv a5, s9 //va -- identity mapped
	srli a5, a5, L1_SHIFT //30, remaining va[63:30]
	andi a5, a5, 0x1ff //only use va[38:30] as VPN[2], =0x002

	/* Store L1 PTE entry */
	li a6, PTE_SIZE
	mulw a5, a5, a6
	add t0, s1, a5
	mv s8, t0 // Store address in L1 Page Table to unmap later
	sd t6, (t0)

	/* step1) Add L1 entry for kernel */

	//calc PTE based on pa
	lla s1, pagetable_l1 //phy addr
	lla s2, pagetable_l2
	srli s2, s2, PAGE_SHIFT //12, 4K page
	slli t5, s2, PTE_PPN0_S //10bit Phys Mem Attribute
	li t4, PTE_V //PTE valid
	or t6, t4, t5 //t6 now is the PTE for a level 2 page table

	//calc VPN[2] based on va
	li a5, KERNBASE //va
	srli a5, a5, L1_SHIFT //30
	andi a5, a5, 0x1ff //va[38:30] as VPN[2],==0x100

	/* Store L1 PTE entry */
	li a6, PTE_SIZE //8 Bytes
	mulw a5, a5, a6 //distance in unit of bytes
	add t0, s1, a5 //s1 is L1 table base pa
	sd t6, (t0) //PTE of a l2 page table is populated to l1 page table


	/* step2) Level 2 superpages (512 x 2MiB) */ //mega pages: two-level page table
	lla s1, pagetable_l2
	//calc PTE
	srli t4, s9, L2_SHIFT /* Div physmem base by 2 MiB */
	li t2, 512 /* Build totally 512 entries */
	add t3, t4, t2
	li t5, 0
2:
	li t0, (PTE_KERN | PTE_X)
	slli t2, t4, PTE_PPN1_S //19
	or t5, t0, t2 //PTE constructed
	sd t5, (s1) /* Store PTE entry to position */

	//iterating
	addi s1, s1, PTE_SIZE
	addi t4, t4, 1
	bltu t4, t3, 2b //512 entries


	/* step3) Create an L1 entry for early devmap */
	lla s1, pagetable_l1 //pa

	//calculate PTE based on pa: (PPN) + privilege/permission + ...
	lla s2, pagetable_l2_devmap /* Link to next level PN */ //pa
	srli s2, s2, PAGE_SHIFT //12, --> PPN
	slli t5, s2, PTE_PPN0_S //10, PMA
	li t4, PTE_V
	or t6, t4, t5 //PTE constructed

	//calculate VPN[2] index based on va
	li a5, (VM_MAX_KERNEL_ADDRESS - L2_SIZE)//devmap is at kernel mem top, va
	srli a5, a5, L1_SHIFT //30
	andi a5, a5, 0x1ff //index using va[38:30]

	/* Store single level1 PTE entry to position */
	li a6, PTE_SIZE
	mulw a5, a5, a6 //offset in Bytes
	add t0, s1, a5 //find the physical add to write
	sd t6, (t0)


	/* step4) Create ONE L2 superpage 2MB for DTB */
	lla s1, pagetable_l2_devmap

	//calc PTE based on pa
	mv s2, a1 //passed by bbl
	li t0, 0xffffffffffe00000
	and s2, s2, t0
	srli s2, s2, PAGE_SHIFT //12
	slli t2, s2, PTE_PPN0_S //10
	li t0, (PTE_KERN)
	or t0, t0, t2 //PTE constructed

	/* Store PTE entry to position */
	li a6, PTE_SIZE
	li a5, 510
	mulw a5, a5, a6
	add t1, s1, a5
	sd t0, (t1)

	/* Page tables END */


	/* Calculate virtual address of the first instruction after enable paging */
	lla s3, va //va is a physical addr!
	sub s3, s3, s9 //offset comparing to phymem base
	li t0, KERNBASE //virt addr
	add s3, s3, t0

	/* Set page tables base register */
	lla s2, pagetable_l1 //pa
	srli s2, s2, PAGE_SHIFT //12, --> PPN
	li t0, SATP_MODE_SV39 //satp[63:60] = 1000b, enable paging!
	or s2, s2, t0
	sfence.vma //Supervisor Fence for Virtual Memory, to flush TLB
	csrw satp, s2
	jr s3
+
	/*
	 * First code executed at a virtual address: tear down the
	 * identity map, set up traps/stack, zero BSS, build the
	 * riscv_bootparams block on the stack and enter C.
	 */
	.align 2
va:
	/* Set the global pointer again, this time with the virtual address. */
.option push
.option norelax
	lla gp, __global_pointer$
.option pop

	/* Unmap the identity mapped kernel gigapage */
	sd x0, (s8) // s8 is addr of pte for identity mapped kernel
	sfence.vma // Flush the TLB. Goodbye identity mapped kernel!


	/* Setup supervisor trap vector */
	la t0, cpu_trap_handler
	csrw stvec, t0

	/* Ensure sscratch is zero */
	li t0, 0
	csrw sscratch, t0

	/* Initialize stack pointer */
	la s3, initstack_end
	mv sp, s3

	/* Allocate space for thread0 PCB and riscv_bootparams */
	addi sp, sp, -(PCB_SIZEOF + RISCV_BOOTPARAMS_SIZEOF) & ~STACKALIGNBYTES

	/* Clear BSS */
	la s0, _C_LABEL(__bss_start)
	la s1, _C_LABEL(_end)
1:
	sd zero, 0(s0)
	addi s0, s0, 8
	bltu s0, s1, 1b

	/* Store boot hart id. */
	la t0, boot_hart //the hart we booted on.
	sw a0, 0(t0) //all above logic runs on this a0 hart.

	/* Fill riscv_bootparams */
	addi sp, sp, -RISCV_BOOTPARAMS_SIZEOF

	la t0, pagetable_l1
	sd t0, KERN_L1PT(sp)

	li t0, KERNBASE
	sub t0, s9, t0 //offset: PA - VA, used in pmap_*
	sd t0, KERN_DELTA(sp)

	la t0, initstack
	sd t0, KERN_STACK(sp)

	/* DTB virtual addr: devmap slot 510 base + offset within 2 MiB page */
	li t0, (VM_MAX_KERNEL_ADDRESS - 2 * L2_SIZE) // XXX Why 2?
	li t1, 0x1fffff
	and t1, a1, t1
	add t0, t0, t1
	sd t0, DTBP_VIRT(sp)
	sd a1, DTBP_PHYS(sp)

	mv a0, sp //stack setup, can call C now!
	call _C_LABEL(initriscv) // Off we go, defined in machdep.c
	call _C_LABEL(main) //defined in openbsd/kern/init_main.c
+
	/* Boot stack for the primary hart. */
	.data
	.align 4
initstack:
	.space (PAGE_SIZE * KSTACK_PAGES)
initstack_end:

	/*
	 * sigfill: a single trap instruction plus its size, presumably
	 * used as a fill pattern for signal trampoline areas — confirm
	 * against the MD sendsig code.
	 */
	.globl sigfill
sigfill:
	unimp
esigfill:
	.globl sigfillsiz
sigfillsiz:
	.data
	.quad esigfill - sigfill
+
	/*
	 * Userland signal trampoline: call sigreturn on the saved
	 * context; if that ever returns, exit the process.
	 */
	.text
ENTRY(sigcode)
	mv a0, sp
	addi a0, a0, SF_SC //actual saved context

1:
	li t0, SYS_sigreturn
	ecall //make a syscall from lower privilege to higher

	.globl _C_LABEL(sigcoderet)
_C_LABEL(sigcoderet):
	nop
	nop

	/* sigreturn failed, exit */
	li t0, SYS_exit
	ecall
	nop
	nop

	j 1b
END(sigcode)
	/* This may be copied to the stack, keep it 16-byte aligned */
	.align 3
	.globl _C_LABEL(esigcode)
_C_LABEL(esigcode):
+
+
	.data
	/* End-of-symbols pointer; may be overwritten at boot (see entry). */
	.global _C_LABEL(esym)
_C_LABEL(esym): .quad _C_LABEL(end)


	/* Bootstrap page tables; 4 KiB each, page-aligned (.align 12). */
	.align 12
pagetable_l1:
	.space PAGE_SIZE
	.globl pagetable_l2
pagetable_l2:
	.space PAGE_SIZE
pagetable_l2_devmap:
	.space PAGE_SIZE

	.align 3
	/* Holds its own link-time (virtual) address; used to compute
	 * the VA/PA delta at entry. */
virt_map:
	.quad virt_map
	/* Word atomically incremented to elect the boot hart. */
hart_lottery:
	.space 4

	.globl init_pt_va
init_pt_va:
	.quad pagetable_l2 /* XXX: Keep page tables VA */
+
#ifndef MULTIPROCESSOR
/* Non-MP kernels park secondary harts forever. */
ENTRY(mpentry)
1:
	wfi
	j 1b
END(mpentry)
#else
/*
 * mpentry(unsigned long)
 *
 * Called by a core / hart when it is being brought online.
 * XXX: [CMPE] This needs to be updated
 */
ENTRY(mpentry)
	/*
	 * Calculate the offset to __riscv_boot_ap
	 * for the current core, cpuid is in a0.
	 */
	li t1, 4 //t1 = 4, each core occupies a word
	mulw t1, t1, a0 //t1 = a0*4, offset in Bytes for #a0 core
	/* Get the pointer */
	lla t0, __riscv_boot_ap
	add t0, t0, t1 //t0 = starting addr for current core

1:
	/* Wait the kernel to be ready */
	lw t1, 0(t0) //when kernel is ready, 0(t0) should NOT equal 0
	beqz t1, 1b //see __riscv_boot_ap

	/* Setup stack pointer */ //now kernel is ready
	lla t0, secondary_stacks //pa, size: #core x #pages/kernel x pg_size
	li t1, (PAGE_SIZE * KSTACK_PAGES) // size of kernel stack for one core
	mulw t2, t1, a0 //offset for this hart
	add t0, t0, t2 //end of stack for this hart
	add t0, t0, t1 //start of stack for this hart
	sub t0, t0, s9 //s9 is phymem base, t0 is now relative addr
	li t1, KERNBASE //t1 is virtual addr
	add sp, t0, t1 //now sp is set to the right virtual address.

	/* Setup supervisor trap vector */
	lla t0, mpva //mpva is phymem addr of the handler array
	sub t0, t0, s9 //get the relative addr
	li t1, KERNBASE
	add t0, t0, t1 //get the virtual addr
	csrw stvec, t0 //set the CSR

	/* Set page tables base register */
	lla s2, pagetable_l1
	srli s2, s2, PAGE_SHIFT
	li t0, SATP_MODE_SV39
	or s2, s2, t0
	sfence.vma
	csrw satp, s2
	/*
	 * NOTE(review): no jump after enabling satp — presumably the
	 * next ifetch at the old physical PC faults and vectors via
	 * stvec to the virtual mpva below; confirm this is the
	 * intended mode-switch trick.
	 */

	.align 2
mpva:
	/* Set the global pointer again, this time with the virtual address. */
.option push
.option norelax
	lla gp, __global_pointer$
.option pop

	/* Setup supervisor trap vector */
	la t0, cpu_trap_handler
	csrw stvec, t0

	/* Ensure sscratch is zero */
	li t0, 0
	csrw sscratch, t0 //Scratch Register for Supervisor Mode Trap Handler

	call init_secondary
END(mpentry)
#endif
+
+
--- /dev/null
+/* $OpenBSD: locore0.S,2019/10/31 mengshi.li.mars@gmail.com */
+/*-
+ * Copyright (c) 2012-2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: head/sys/arm64/arm64/locore.S 282867 2015-05-13 18:57:03Z zbb $
+ */
+
+
+#include <sys/syscall.h>
+#include <machine/asm.h>
+#include <machine/param.h>
+#include <machine/trap.h>
+#include <machine/riscvreg.h>
+#include <machine/pte.h>
+
	.text
	.globl _start
_start:
	/*
	 * Kernel entry point: immediately tail-jump to
	 * _start_kern_bootstrap in locore.S.  lla yields a PC-relative
	 * address, so this works before translation is enabled.
	 */
	lla t0, _start_kern_bootstrap
	jr t0
--- /dev/null
+/*
+ * Copyright (c) 2014 Patrick Wildt <patrick@blueri.se>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/param.h>
+#include <sys/timetc.h>
+#include <sys/sched.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/sysctl.h>
+#include <sys/reboot.h>
+#include <sys/mount.h>
+#include <sys/exec.h>
+#include <sys/user.h>
+#include <sys/conf.h>
+#include <sys/kcore.h>
+#include <sys/core.h>
+#include <sys/msgbuf.h>
+#include <sys/buf.h>
+#include <sys/termios.h>
+#include <sys/sensors.h>
+#include <sys/syscallargs.h>
+#include <sys/stdarg.h>
+
+#include <net/if.h>
+#include <uvm/uvm.h>
+#include <dev/cons.h>
+#include <dev/clock_subr.h>
+#include <dev/ofw/fdt.h>
+#include <dev/ofw/openfirm.h>
+#include <machine/param.h>
+#include <machine/bootconfig.h>
+#include <machine/bus.h>
+#include <machine/riscv64var.h>
+#include <machine/sbi.h>
+
+#include <machine/db_machdep.h>
+#include <ddb/db_extern.h>
+
+#include <dev/acpi/efi.h>
+
+#include "softraid.h"
+#if NSOFTRAID > 0
+#include <dev/softraidvar.h>
+#endif
+
/* Boot command line and kernel image name collected from the FDT. */
char *boot_args = NULL;
char *boot_file = "";

/* MAC address handed over by the bootloader, if any. */
uint8_t *bootmac = NULL;

/* End of kernel symbol table, set up by early boot code. */
extern uint64_t esym;

/* Console device node and baud rate discovered in fdt_find_cons(). */
int stdout_node;
int stdout_speed;

/* Platform hooks for reboot / poweroff, filled in by drivers. */
void (*cpuresetfn)(void);
void (*powerdownfn)(void);

/* Nonzero until autoconf has finished; checked by boot(). */
int cold = 1;

struct vm_map *exec_map = NULL;
struct vm_map *phys_map = NULL;

/* Total physical memory in pages. */
int physmem;

//struct consdev *cn_tab;

/* Kernel message buffer: mapped address and backing physical pages. */
caddr_t msgbufaddr;
paddr_t msgbufphys;

/* U-area of proc0, placed on the boot stack by initriscv(). */
struct user *proc0paddr;

struct uvm_constraint_range dma_constraint = { 0x0, (paddr_t)-1 };
struct uvm_constraint_range *uvm_md_constraints[] = { NULL };

/* the following is used externally (sysctl_hw) */
char machine[] = MACHINE; /* from <machine/param.h> */
extern todr_chip_handle_t todr_handle;

int safepri = 0;

uint32_t boot_hart; /* The hart we booted on. */
struct cpu_info cpu_info_primary;
struct cpu_info *cpu_info[MAXCPUS] = { &cpu_info_primary };
+
/*
 * Minimal decimal string-to-int conversion for early boot, where the
 * full libkern facilities may not be available.  Any number of leading
 * '-' characters toggles the sign; conversion stops at the first
 * non-digit character.
 */
static int
atoi(const char *s)
{
	int val = 0;
	int neg = 0;

	for (; *s == '-'; s++)
		neg = !neg;

	for (; *s >= '0' && *s <= '9'; s++)
		val = (val * 10) + (*s - '0');

	return (neg ? -val : val);
}
+
+
+void *
+fdt_find_cons(const char *name)
+{
+ char *alias = "serial0";
+ char buf[128];
+ char *stdout = NULL;
+ char *p;
+ void *node;
+
+ /* First check if "stdout-path" is set. */
+ node = fdt_find_node("/chosen");
+ if (node) {
+ if (fdt_node_property(node, "stdout-path", &stdout) > 0) {
+ if (strchr(stdout, ':') != NULL) {
+ strlcpy(buf, stdout, sizeof(buf));
+ if ((p = strchr(buf, ':')) != NULL) {
+ *p++ = '\0';
+ stdout_speed = atoi(p);
+ }
+ stdout = buf;
+ }
+ if (stdout[0] != '/') {
+ /* It's an alias. */
+ alias = stdout;
+ stdout = NULL;
+ }
+ }
+ }
+
+ /* Perform alias lookup if necessary. */
+ if (stdout == NULL) {
+ node = fdt_find_node("/aliases");
+ if (node)
+ fdt_node_property(node, alias, &stdout);
+ }
+
+ /* Lookup the physical address of the interface. */
+ if (stdout) {
+ node = fdt_find_node(stdout);
+ if (node && fdt_is_compatible(node, name)) {
+ stdout_node = OF_finddevice(stdout);
+ return (node);
+ }
+ }
+
+ return (NULL);
+}
+
+#if 0 // CMPE: not supporting following uarts
+extern void amluart_init_cons(void);
+extern void imxuart_init_cons(void);
+extern void mvuart_init_cons(void);
+extern void pluart_init_cons(void);
+extern void simplefb_init_cons(bus_space_tag_t);
+#endif
+
+extern void com_fdt_init_cons(void);
+
/*
 * Attach the system console.  Safe to call more than once; only the
 * first call does any work (the console may be needed very early and
 * consinit() is called again later in autoconf).
 *
 * The previous version carried a dead `#if 0' block that listed the
 * unported console drivers (amluart, imxuart, mvuart, pluart,
 * simplefb) and redundantly included a second com_fdt_init_cons()
 * call; only the ns16550-compatible FDT console is supported so far.
 */
void
consinit(void)
{
	static int consinit_called = 0;

	if (consinit_called != 0)
		return;
	consinit_called = 1;

	com_fdt_init_cons();
}
+
+
+//XXX TODO: need to populate console for qemu
+//maybe no longer needed, as already have cn_tab ??
+struct consdev constab[] = {
+ { NULL }
+};
+
/* Hook run before entering the idle loop; nothing to do on riscv64. */
void
cpu_idle_enter()
{
}

/*
 * One iteration of the idle loop: enable interrupts, then stall the
 * hart with WFI until the next interrupt arrives.
 */
void
cpu_idle_cycle()
{
	// Enable interrupts
	enable_interrupts();
	// XXX Data Sync Barrier? (Maybe SFENCE???)
	__asm volatile("wfi");
}

/* Hook run after leaving the idle loop; nothing to do on riscv64. */
void
cpu_idle_leave()
{
}
+
+
/* Initial trapframe for proc0.  XXX not really used. */
struct trapframe proc0tf;

/*
 * Machine-dependent startup: finish pmap setup, map and initialize the
 * message buffer, create the exec and physio submaps, and set up the
 * buffer cache.  Called from main() once UVM is up.
 */
void
cpu_startup()
{
	u_int loop;
	paddr_t minaddr;
	paddr_t maxaddr;

	proc0.p_addr = proc0paddr;

	/*
	 * Give pmap a chance to set up a few more things now the vm
	 * is initialised
	 */
	pmap_postinit();

	/*
	 * Initialize error message buffer (at end of core).
	 */

	/* msgbufphys was setup during the secondary boot strap */
	for (loop = 0; loop < atop(MSGBUFSIZE); ++loop)
		pmap_kenter_pa((vaddr_t)msgbufaddr + loop * PAGE_SIZE,
		    msgbufphys + loop * PAGE_SIZE, PROT_READ | PROT_WRITE);
	pmap_update(pmap_kernel());
	initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));

	/*
	 * Identify ourselves for the msgbuf (everything printed earlier will
	 * not be buffered).
	 */
	printf("%s", version);

	printf("real mem = %lu (%luMB)\n", ptoa(physmem),
	    ptoa(physmem)/1024/1024);

	/*
	 * Allocate a submap for exec arguments. This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	minaddr = vm_map_min(kernel_map);
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);


	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, FALSE, NULL);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free),
	    ptoa(uvmexp.free)/1024/1024);

	/* Hook proc0's pcb up to its (unused) initial trapframe. */
	curpcb = &proc0.p_addr->u_pcb;
	curpcb->pcb_flags = 0;
	curpcb->pcb_tf = &proc0tf;

	/* Honor boot -c (interactive device configuration) if built in. */
	if (boothowto & RB_CONFIG) {
#ifdef BOOT_CONFIG
		user_config();
#else
		printf("kernel does not support -c; continuing..\n");
#endif
	}
}
+
+int
+cpu_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen, struct proc *p)
+{
+ /* all sysctl names at this level are terminal */
+ if (namelen != 1)
+ return (ENOTDIR); /* overloaded */
+
+ switch (name[0]) {
+ // none supported currently
+ default:
+ return (EOPNOTSUPP);
+ }
+ /* NOTREACHED */
+}
+
+int waittime = -1;
+
+__dead void
+boot(int howto)
+{
+ if ((howto & RB_RESET) != 0)
+ goto doreset;
+
+ if (cold) {
+ if ((howto & RB_USERREQ) == 0)
+ howto |= RB_HALT;
+ goto haltsys;
+ }
+
+ boothowto = howto;
+ if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
+ waittime = 0;
+ vfs_shutdown(curproc);
+
+ if ((howto & RB_TIMEBAD) == 0) {
+ resettodr();
+ } else {
+ printf("WARNING: not updating battery clock\n");
+ }
+ }
+ if_downall();
+
+ uvm_shutdown();
+ splhigh();
+ cold = 1;
+
+ if ((howto & RB_DUMP) != 0)
+ //dumpsys();//XXX no dump so far. CMPE295
+
+haltsys:
+ config_suspend_all(DVACT_POWERDOWN);
+
+ if ((howto & RB_HALT) != 0) {
+ if ((howto & RB_POWERDOWN) != 0) {
+ printf("\nAttempting to power down...\n");
+ delay(500000);
+ if (powerdownfn)
+ (*powerdownfn)();
+ }
+
+ printf("\n");
+ printf("The operating system has halted.\n");
+ printf("Please press any key to reboot.\n\n");
+ cngetc();
+ }
+
+doreset:
+ printf("rebooting...\n");
+ delay(500000);
+ if (cpuresetfn)
+ (*cpuresetfn)();
+ printf("reboot failed; spinning\n");
+ for (;;)
+ continue;
+ /* NOTREACHED */
+}
+
/*
 * Set up the trapframe for a process entering a freshly exec'd image:
 * clear all registers, point sp at the (aligned) argument stack and
 * sepc/ra at the image entry point.  Copied from ARM64, with some
 * registers removed.  XXX
 */
void
setregs(struct proc *p, struct exec_package *pack, u_long stack,
    register_t *retval)
{
	struct trapframe *tf;

	/* If we were using the FPU, forget about it. */
#if 0	// XXX ignore fp for now
	if (p->p_addr->u_pcb.pcb_fpcpu != NULL)
		vfp_discard(p);
#endif
	p->p_addr->u_pcb.pcb_flags &= ~PCB_FPU;

	tf = p->p_addr->u_pcb.pcb_tf;

	/* Start from a clean register state. */
	memset (tf,0, sizeof(*tf));
	tf->tf_a[0] = stack;	// XXX Inherited from FreeBSD. Why?
	tf->tf_sp = STACKALIGN(stack);
	tf->tf_ra = pack->ep_entry;
	tf->tf_sepc = pack->ep_entry;

	/* Second return register (a1) of the "syscall" is zero. */
	retval[1] = 0;
}
+
+void
+need_resched(struct cpu_info *ci)
+{
+ ci->ci_want_resched = 1;
+
+ /* There's a risk we'll be called before the idle threads start */
+ if (ci->ci_curproc) {
+ aston(ci->ci_curproc);
+ cpu_kick(ci);
+ }
+}
+
+
/// XXX ?
/*
 * Size of memory segments, before any memory is stolen.
 * Used by cpu_dumpsize() to size the crash-dump header.
 */
phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX];
int mem_cluster_cnt;
+/// XXX ?
+/*
+ * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers.
+ */
+int
+cpu_dumpsize(void)
+{
+ int size;
+
+ size = ALIGN(sizeof(kcore_seg_t)) +
+ ALIGN(mem_cluster_cnt * sizeof(phys_ram_seg_t));
+ if (roundup(size, dbtob(1)) != dbtob(1))
+ return (-1);
+
+ return (1);
+}
+
int64_t dcache_line_size;	/* The minimum D cache line size */
int64_t icache_line_size;	/* The minimum I cache line size */
int64_t idcache_line_size;	/* The minimum cache line size */

/*
 * Record the CPU cache line sizes.  RISC-V offers no architected way
 * to probe them, so (following FreeBSD) everything is left at zero.
 * XXX TODO CMPE
 */
void
cache_setup(void)
{
	dcache_line_size = icache_line_size = idcache_line_size = 0;
}
+
+u_long
+cpu_dump_mempagecnt()
+{
+ return 0;
+}
+
//Copied from ARM64
/*
 * These variables are needed by /sbin/savecore
 */
u_long	dumpmag = 0x8fca0101;	/* magic number */
int 	dumpsize = 0;		/* pages */
long	dumplo = 0; 		/* blocks */

/*
 * This is called by main to set dumplo and dumpsize.
 * Dumps always skip the first PAGE_SIZE of disk space
 * in case there might be a disk label stored there.
 * If there is extra space, put dump at the end to
 * reduce the chance that swapping trashes it.
 */
void
dumpconf(void)
{
	int nblks, dumpblks;	/* size of dump area */

	/* No dump device, or one whose size we cannot query: bail. */
	if (dumpdev == NODEV ||
	    (nblks = (bdevsw[major(dumpdev)].d_psize)(dumpdev)) == 0)
		return;
	if (nblks <= ctod(1))
		return;

	dumpblks = cpu_dumpsize();
	if (dumpblks < 0)
		return;
	/* Header plus all dumpable memory, in disk blocks. */
	dumpblks += ctod(cpu_dump_mempagecnt());

	/* If dump won't fit (incl. room for possible label), punt. */
	if (dumpblks > (nblks - ctod(1)))
		return;

	/* Put dump at end of partition */
	dumplo = nblks - dumpblks;

	/* dumpsize is in page units, and doesn't include headers. */
	dumpsize = cpu_dump_mempagecnt();
}
+
+//copied from arm64/sys_machdep.h
+int
+sys_sysarch(struct proc *p, void *v, register_t *retval)
+{
+ struct sys_sysarch_args /* {
+ syscallarg(int) op;
+ syscallarg(void *) parms;
+ } */ *uap = v;
+ int error = 0;
+
+ switch(SCARG(uap, op)) {
+ default:
+ error = EINVAL;
+ break;
+ }
+
+ return (error);
+}
+
/* UEFI memory-map description handed over by the bootloader (unused yet). */
uint64_t mmap_start;
uint32_t mmap_size;
uint32_t mmap_desc_size;
uint32_t mmap_desc_ver;

void	collect_kernel_args(char *);
void	process_kernel_args(void);
+
+void
+initriscv(struct riscv_bootparams *rbp)
+{
+ vaddr_t vstart, vend;
+ struct cpu_info *pcpup;
+ long kvo = rbp->kern_delta; //should be PA - VA
+ paddr_t memstart, memend;
+
+ void *config = (void *) rbp->dtbp_virt;
+ void *fdt = NULL;
+
+ int (*map_func_save)(bus_space_tag_t, bus_addr_t, bus_size_t, int,
+ bus_space_handle_t *);
+
+ // NOTE that 1GB of ram is mapped in by default in
+ // the bootstrap memory config, so nothing is necessary
+ // until pmap_bootstrap_finalize is called??
+
+ //NOTE: FDT is already mapped (rbp->dtbp_virt => rbp->dtbp_phys)
+ // Initialize the Flattened Device Tree
+ if (!fdt_init(config) || fdt_get_size(config) == 0)
+ panic("initriscv: no FDT");
+
+ size_t fdt_size = fdt_get_size(config);
+ paddr_t fdt_start = (paddr_t) rbp->dtbp_phys;
+ paddr_t fdt_end = fdt_start + fdt_size;
+ struct fdt_reg reg;
+ void *node;
+
+ node = fdt_find_node("/chosen");
+ if (node != NULL) {
+ char *prop;
+ int len;
+ // static uint8_t lladdr[6]; //not yet used
+
+ len = fdt_node_property(node, "bootargs", &prop);
+ if (len > 0)
+ collect_kernel_args(prop);
+
+#if 0 //CMPE: yet not using these properties
+
+ len = fdt_node_property(node, "openbsd,bootduid", &prop);
+ if (len == sizeof(bootduid))
+ memcpy(bootduid, prop, sizeof(bootduid));
+
+ len = fdt_node_property(node, "openbsd,bootmac", &prop);
+ if (len == sizeof(lladdr)) {
+ memcpy(lladdr, prop, sizeof(lladdr));
+ bootmac = lladdr;
+ }
+
+ len = fdt_node_property(node, "openbsd,sr-bootuuid", &prop);
+#if NSOFTRAID > 0
+ if (len == sizeof(sr_bootuuid))
+ memcpy(&sr_bootuuid, prop, sizeof(sr_bootuuid));
+#endif
+ if (len > 0)
+ explicit_bzero(prop, len);
+
+ len = fdt_node_property(node, "openbsd,sr-bootkey", &prop);
+#if NSOFTRAID > 0
+ if (len == sizeof(sr_bootkey))
+ memcpy(&sr_bootkey, prop, sizeof(sr_bootkey));
+#endif
+ if (len > 0)
+ explicit_bzero(prop, len);
+
+ len = fdt_node_property(node, "openbsd,uefi-mmap-start", &prop);
+ if (len == sizeof(mmap_start))
+ mmap_start = bemtoh64((uint64_t *)prop);
+ len = fdt_node_property(node, "openbsd,uefi-mmap-size", &prop);
+ if (len == sizeof(mmap_size))
+ mmap_size = bemtoh32((uint32_t *)prop);
+ len = fdt_node_property(node, "openbsd,uefi-mmap-desc-size", &prop);
+ if (len == sizeof(mmap_desc_size))
+ mmap_desc_size = bemtoh32((uint32_t *)prop);
+ len = fdt_node_property(node, "openbsd,uefi-mmap-desc-ver", &prop);
+ if (len == sizeof(mmap_desc_ver))
+ mmap_desc_ver = bemtoh32((uint32_t *)prop);
+ len = fdt_node_property(node, "openbsd,uefi-system-table", &prop);
+ if (len == sizeof(system_table))
+ system_table = bemtoh64((uint64_t *)prop);
+#endif
+ }
+
+ /* Set the pcpu data, this is needed by pmap_bootstrap */
+ // smp
+ pcpup = &cpu_info_primary;
+
+ /*
+ * backup the pcpu pointer in tp to
+ * restore kernel context when entering the kernel from userland.
+ */
+ __asm __volatile("mv tp, %0" :: "r"(pcpup));
+
+ sbi_init();
+ cache_setup();//dummy for now
+
+ process_kernel_args();
+
+ void _start(void);
+ long kernbase = (long)&_start & ~(PAGE_SIZE-1); // page aligned
+
+#if 0 // Below we set memstart / memend based on entire physical address
+ // range based on information sourced from FDT.
+ /* The bootloader has loaded us into a 64MB block. */
+ memstart = KERNBASE + kvo; //va + (pa - va) ==> pa
+ memend = memstart + 64 * 1024 * 1024; //XXX CMPE: size also 64M??
+#endif
+
+ node = fdt_find_node("/memory");
+ if (node == NULL)
+ panic("%s: no memory specified", __func__);
+
+ paddr_t start, end;
+ int i;
+
+ // Assume that the kernel was loaded at valid physical memory location
+ // Scan the FDT to identify the full physical address range for machine
+ // XXX Save physical memory segments to later allocate to UVM?
+ memstart = memend = kernbase + kvo;
+ for (i = 0; i < VM_PHYSSEG_MAX; i++) {
+ if (fdt_get_reg(node, i, ®))
+ break;
+ if (reg.size == 0)
+ continue;
+
+ start = reg.addr;
+ end = reg.addr + reg.size;
+
+ if (start < memstart)
+ memstart = start;
+ if (end > memend)
+ memend = end;
+ }
+
+ // XXX At this point, OpenBSD/arm64 would have set memstart / memend
+ // to the range mapped by the bootloader (KERNBASE - KERNBASE + 64MiB).
+ // Instead, we have mapped memstart / memend to the full physical
+ // address range. What implications might this have?
+
+ /* Bootstrap enough of pmap to enter the kernel proper. */
+ vstart = pmap_bootstrap(kvo, rbp->kern_l1pt,
+ kernbase, esym, fdt_start, fdt_end, memstart, memend);
+
+ // XX correctly sized?
+ proc0paddr = (struct user *)rbp->kern_stack;
+
+ msgbufaddr = (caddr_t)vstart;
+ msgbufphys = pmap_steal_avail(round_page(MSGBUFSIZE), PAGE_SIZE, NULL);
+ // XXX should map this msgbuffphys to kernel pmap??
+
+ vstart += round_page(MSGBUFSIZE);
+
+ zero_page = vstart;
+ vstart += MAXCPUS * PAGE_SIZE;
+ copy_src_page = vstart;
+ vstart += MAXCPUS * PAGE_SIZE;
+ copy_dst_page = vstart;
+ vstart += MAXCPUS * PAGE_SIZE;
+
+ /* Relocate the FDT to safe memory. */
+ if (fdt_size != 0) {
+ uint32_t csize, size = round_page(fdt_size);
+ paddr_t pa;
+ vaddr_t va;
+
+ pa = pmap_steal_avail(size, PAGE_SIZE, NULL);
+ memcpy((void *) PHYS_TO_DMAP(pa),
+ (void *) PHYS_TO_DMAP(fdt_start), size);
+ for (va = vstart, csize = size; csize > 0;
+ csize -= PAGE_SIZE, va += PAGE_SIZE, pa += PAGE_SIZE)
+ pmap_kenter_cache(va, pa, PROT_READ, PMAP_CACHE_WB);
+
+ fdt = (void *)vstart;
+ vstart += size;
+ }
+
+ /*
+ * Managed KVM space is what we have claimed up to end of
+ * mapped kernel buffers.
+ */
+ {
+ // export back to pmap
+ extern vaddr_t virtual_avail, virtual_end;
+ virtual_avail = vstart;
+ vend = VM_MAX_KERNEL_ADDRESS; // XXX
+ virtual_end = vend;
+ }
+
+ /* Now we can reinit the FDT, using the virtual address. */
+ if (fdt)
+ fdt_init(fdt);
+
+ int pmap_bootstrap_bs_map(bus_space_tag_t t, bus_addr_t bpa,
+ bus_size_t size, int flags, bus_space_handle_t *bshp);
+
+ map_func_save = riscv64_bs_tag._space_map;
+ riscv64_bs_tag._space_map = pmap_bootstrap_bs_map;
+
+ // cninit
+ consinit();
+
+#ifdef DEBUG_AUTOCONF
+ fdt_print_tree();
+#endif
+
+ riscv64_bs_tag._space_map = map_func_save;
+
+ /* XXX */
+ pmap_avail_fixup();
+
+ uvmexp.pagesize = PAGE_SIZE;
+ uvm_setpagesize();
+
+ /* Make what's left of the initial 64MB block available to UVM. */
+ pmap_physload_avail();
+
+#if 0
+ /* Make all other physical memory available to UVM. */
+ if (mmap && mmap_desc_ver == EFI_MEMORY_DESCRIPTOR_VERSION) {
+ EFI_MEMORY_DESCRIPTOR *desc = mmap;
+ int i;
+
+ /*
+ * Load all memory marked as EfiConventionalMemory.
+ * Don't bother with blocks smaller than 64KB. The
+ * initial 64MB memory block should be marked as
+ * EfiLoaderData so it won't be added again here.
+ */
+ for (i = 0; i < mmap_size / mmap_desc_size; i++) {
+ printf("type 0x%x pa 0x%llx va 0x%llx pages 0x%llx attr 0x%llx\n",
+ desc->Type, desc->PhysicalStart,
+ desc->VirtualStart, desc->NumberOfPages,
+ desc->Attribute);
+ if (desc->Type == EfiConventionalMemory &&
+ desc->NumberOfPages >= 16) {
+ uvm_page_physload(atop(desc->PhysicalStart),
+ atop(desc->PhysicalStart) +
+ desc->NumberOfPages,
+ atop(desc->PhysicalStart),
+ atop(desc->PhysicalStart) +
+ desc->NumberOfPages, 0);
+ physmem += desc->NumberOfPages;
+ }
+ desc = NextMemoryDescriptor(desc, mmap_desc_size);
+ }
+ } else {
+ paddr_t start, end;
+ int i;
+
+ node = fdt_find_node("/memory");
+ if (node == NULL)
+ panic("%s: no memory specified", __func__);
+
+ for (i = 0; i < VM_PHYSSEG_MAX; i++) {
+ if (fdt_get_reg(node, i, ®))
+ break;
+ if (reg.size == 0)
+ continue;
+
+ start = reg.addr;
+ end = MIN(reg.addr + reg.size, (paddr_t)-PAGE_SIZE);
+
+ /*
+ * The intial 64MB block is not excluded, so we need
+ * to make sure we don't add it here.
+ */
+ if (start < memend && end > memstart) {
+ if (start < memstart) {
+ uvm_page_physload(atop(start),
+ atop(memstart), atop(start),
+ atop(memstart), 0);
+ physmem += atop(memstart - start);
+ }
+ if (end > memend) {
+ uvm_page_physload(atop(memend),
+ atop(end), atop(memend),
+ atop(end), 0);
+ physmem += atop(end - memend);
+ }
+ } else {
+ uvm_page_physload(atop(start), atop(end),
+ atop(start), atop(end), 0);
+ physmem += atop(end - start);
+ }
+ }
+ }
+#endif
+ /*
+ * Make sure that we have enough KVA to initialize UVM. In
+ * particular, we need enough KVA to be able to allocate the
+ * vm_page structures.
+ */
+ pmap_growkernel(VM_MIN_KERNEL_ADDRESS + 1024 * 1024 * 1024 +
+ physmem * sizeof(struct vm_page));
+#ifdef DDB
+ db_machine_init();
+
+ /* Firmware doesn't load symbols. */
+ ddb_init();
+
+ if (boothowto & RB_KDB)
+ db_enter();
+#endif
+ softintr_init();
+ splraise(IPL_IPI);
+}
+
/* Local, bounded copy of the FDT "bootargs" string. */
char bootargs[256];

/*
 * Save the boot arguments for later parsing by process_kernel_args();
 * the FDT property they came from may not survive relocation.
 */
void
collect_kernel_args(char *args)
{
	/* Make a local copy of the bootargs */
	strlcpy(bootargs, args, sizeof(bootargs));
}
+
/*
 * Parse the saved boot arguments: "<bootfile> [args... -<flags>]".
 * Sets boot_file, boot_args and translates the single-letter flags
 * after '-' into RB_* bits in boothowto.
 */
void
process_kernel_args(void)
{
	char *cp = bootargs;

	/* Empty command line: default autoboot. */
	if (cp[0] == '\0') {
		boothowto = RB_AUTOBOOT;
		return;
	}

	boothowto = 0;
	boot_file = bootargs;

	/* Skip the kernel image filename */
	while (*cp != ' ' && *cp != 0)
		++cp;

	/* Terminate the filename and step past the separator. */
	if (*cp != 0)
		*cp++ = 0;

	while (*cp == ' ')
		++cp;

	boot_args = cp;

	printf("bootfile: %s\n", boot_file);
	printf("bootargs: %s\n", boot_args);

	/* Setup pointer to boot flags */
	while (*cp != '-')
		if (*cp++ == '\0')
			return;		/* no flags present */

	/* Translate each flag character after '-' into an RB_* bit. */
	for (;*++cp;) {
		int fl;

		fl = 0;
		switch(*cp) {
		case 'a':
			fl |= RB_ASKNAME;
			break;
		case 'c':
			fl |= RB_CONFIG;
			break;
		case 'd':
			fl |= RB_KDB;
			break;
		case 's':
			fl |= RB_SINGLE;
			break;
		default:
			printf("unknown option `%c'\n", *cp);
			break;
		}
		boothowto |= fl;
	}
}
+
+/*
+ * allow bootstrap to steal KVA after machdep has given it back to pmap.
+ * XXX - need a mechanism to prevent this from being used too early or late.
+ */
+int
+pmap_bootstrap_bs_map(bus_space_tag_t t, bus_addr_t bpa, bus_size_t size,
+ int flags, bus_space_handle_t *bshp)
+{
+ u_long startpa, pa, endpa;
+ vaddr_t va;
+
+ extern vaddr_t virtual_avail, virtual_end;
+
+ va = virtual_avail; // steal memory from virtual avail.
+
+ if (va == 0)
+ panic("pmap_bootstrap_bs_map, no virtual avail");
+
+ startpa = trunc_page(bpa);
+ endpa = round_page((bpa + size));
+
+ *bshp = (bus_space_handle_t)(va + (bpa - startpa));
+
+ for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE)
+ pmap_kenter_cache(va, pa, PROT_READ | PROT_WRITE,
+ PMAP_CACHE_DEV);
+
+ virtual_avail = va;
+
+ return 0;
+}
+
+
--- /dev/null
+/*
+ * Copyright (c) 1982, 1986, 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 1988 University of Utah.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Memory special file
+ */
+
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/buf.h>
+#include <sys/filio.h>
+#include <sys/systm.h>
+#include <sys/uio.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+#include <sys/fcntl.h>
+#include <sys/rwlock.h>
+
+#include <machine/cpu.h>
+#include <machine/conf.h>
+
+#include <uvm/uvm_extern.h>
+
/* Lazily-allocated page of zeroes backing /dev/zero reads. */
caddr_t zeropage;

/* open counter for aperture */
// XXX What is aperture? (X server direct-hardware-access device)
#ifdef APERTURE
static int ap_open_count = 0;
extern int allowaperture;
#endif

/* Serializes use of the shared vmmap window in mmrw(). */
static struct rwlock physlock = RWLOCK_INITIALIZER("mmrw");
+
+int
+mmopen(dev_t dev, int flag, int mode, struct proc *p)
+{
+ extern int allowkmem;
+
+ switch (minor(dev)) {
+ case 0:
+ case 1:
+ if (securelevel <= 0 || allowkmem)
+ break;
+ return (EPERM);
+ case 2:
+ case 12:
+ break;
+#ifdef APERTURE
+ case 4:
+ if (suser(p) != 0 || !allowaperture)
+ return (EPERM);
+
+ /* authorize only one simultaneous open() unless
+ * allowaperture=3 */
+ if (ap_open_count > 0 && allowaperture < 3)
+ return (EPERM);
+ ap_open_count++;
+ break;
+#endif
+ default:
+ return (ENXIO);
+ }
+ return (0);
+}
+
/*
 * close(2) on the memory devices.  Only the aperture device keeps
 * state: closing it releases the single-open slot.
 */
int
mmclose(dev_t dev, int flag, int mode, struct proc *p)
{
#ifdef APERTURE
	if (minor(dev) == 4)
		ap_open_count = 0;
#endif
	return (0);
}
+
/*
 * read(2)/write(2) on the memory devices.
 *
 * minor 0 (/dev/mem): physical memory, accessed one page at a time
 * through the shared `vmmap' window (hence the physlock exclusion).
 * minor 1 (/dev/kmem): kernel virtual memory, checked with
 * uvm_kernacc() first.  minor 2 (/dev/null): reads return EOF, writes
 * are discarded.  minor 12 (/dev/zero): reads supply zeroes from a
 * lazily allocated page, writes are discarded.
 */
int
mmrw(dev_t dev, struct uio *uio, int flags)
{
	vaddr_t o, v;
	size_t c;
	struct iovec *iov;
	int error = 0;
	vm_prot_t prot;
	extern caddr_t vmmap;

	if (minor(dev) == 0) {
		/* lock against other uses of shared vmmap */
		error = rw_enter(&physlock, RW_WRITE | RW_INTR);
		if (error)
			return (error);
	}
	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			/* Skip exhausted iovecs. */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {

		case 0:
			/* /dev/mem: map the target page into vmmap. */
			v = uio->uio_offset;
			prot = uio->uio_rw == UIO_READ ? PROT_READ :
			    PROT_WRITE;
			pmap_enter(pmap_kernel(), (vaddr_t)vmmap,
			    trunc_page(v), prot, prot|PMAP_WIRED);
			pmap_update(pmap_kernel());
			/* Copy at most the remainder of this page. */
			o = uio->uio_offset & PGOFSET;
			c = ulmin(uio->uio_resid, PAGE_SIZE - o);
			error = uiomove((caddr_t)vmmap + o, c, uio);
			pmap_remove(pmap_kernel(), (vaddr_t)vmmap,
			    (vaddr_t)vmmap + PAGE_SIZE);
			pmap_update(pmap_kernel());
			break;

		case 1:
			/* /dev/kmem: verify access, then copy directly. */
			v = uio->uio_offset;
			c = ulmin(iov->iov_len, MAXPHYS);
			if (!uvm_kernacc((caddr_t)v, c,
			    uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
				return (EFAULT);
			error = uiomove((caddr_t)v, c, uio);
			break;

		case 2:
			/* /dev/null: sink writes, EOF on read. */
			if (uio->uio_rw == UIO_WRITE)
				uio->uio_resid = 0;
			return (0);

		case 12:
			/* /dev/zero: sink writes, supply zeroes on read. */
			if (uio->uio_rw == UIO_WRITE) {
				uio->uio_resid = 0;
				return (0);
			}
			if (zeropage == NULL)
				zeropage = malloc(PAGE_SIZE, M_TEMP,
				    M_WAITOK | M_ZERO);
			c = ulmin(iov->iov_len, PAGE_SIZE);
			error = uiomove(zeropage, c, uio);
			break;

		default:
			return (ENXIO);
		}
	}
	if (minor(dev) == 0) {
		rw_exit(&physlock);
	}
	return (error);
}
+
+paddr_t
+mmmmap(dev_t dev, off_t off, int prot)
+{
+ struct proc *p = curproc; /* XXX */
+
+ /*
+ * /dev/mem is the only one that makes sense through this
+ * interface. For /dev/kmem any physaddr we return here
+ * could be transient and hence incorrect or invalid at
+ * a later time. /dev/null just doesn't make any sense
+ * and /dev/zero is a hack that is handled via the default
+ * pager in mmap().
+ */
+ if (minor(dev) != 0)
+ return (-1);
+
+ /* minor device 0 is physical memory */
+
+ if ((paddr_t)off >= ptoa((paddr_t)physmem) &&
+ suser(p) != 0)
+ return -1;
+ return off;
+}
+
+int
+mmioctl(dev_t dev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ switch (cmd) {
+ case FIONBIO:
+ case FIOASYNC:
+ /* handled by fd layer */
+ return 0;
+ }
+
+ return (EOPNOTSUPP);
+}
--- /dev/null
+/*
+ * Copyright (c) 2020 Brian Bamsch <bbamsch@google.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <machine/asm.h>
+#include <machine/param.h>
+
+#include "assym.h"
+
ENTRY(pagezero)
	/*
	 * pagezero(void *page)
	 *
	 * Zero one page starting at the page-aligned address in a0,
	 * eight doublewords (64 bytes) per loop iteration.
	 */
	RETGUARD_SETUP(pagezero, t6)
	li	a1, PAGE_SIZE
	add	a1, a0, a1		// a1 = one past the end of the page

1:	sd	x0, 0(a0)
	sd	x0, 8(a0)
	sd	x0, 16(a0)
	sd	x0, 24(a0)
	sd	x0, 32(a0)
	sd	x0, 40(a0)
	sd	x0, 48(a0)
	sd	x0, 56(a0)
	addi	a0, a0, 64
	blt	a0, a1, 1b

	RETGUARD_CHECK(pagezero, t6)
	ret
END(pagezero)
--- /dev/null
+/*
+ * Copyright (c) 2019-2020 Brian Bamsch <bbamsch@google.com>
+ * Copyright (c) 2008-2009,2014-2016 Dale Rahn <drahn@dalerahn.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/param.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+#include <sys/user.h>
+#include <sys/systm.h>
+#include <sys/pool.h>
+#include <sys/atomic.h>
+
+#include <uvm/uvm.h>
+
+#include "machine/vmparam.h"
+#include "machine/pmap.h"
+#include "machine/cpufunc.h"
+#include "machine/pcb.h"
+#include "machine/riscvreg.h"
+
+#include <machine/db_machdep.h>
+#include <ddb/db_extern.h>
+#include <ddb/db_output.h>
+
+void pmap_set_satp(struct proc *);
+void pmap_free_asid(pmap_t);
+
+/* TLB maintenance wrappers around the sfence.vma primitives. */
+#define cpu_tlb_flush_all() sfence_vma()
+#define cpu_tlb_flush_page_all(va) sfence_vma_page(va)
+#define cpu_tlb_flush_asid_all(asid) sfence_vma_asid(asid)
+#define cpu_tlb_flush_page_asid(va, asid) sfence_vma_page_asid(va, asid)
+
+/* We run userland code with ASIDs that have the low bit set. */
+#define ASID_USER 1
+
+static inline void
+tlb_flush(pmap_t pm, vaddr_t va)
+{
+	if (pm != pmap_kernel()) {
+		/*
+		 * User pmap: the page may be reachable under both the
+		 * supervisor ASID and its user twin (low bit set), so
+		 * invalidate the VA under both ASIDs.
+		 */
+		cpu_tlb_flush_page_asid(va, SATP_ASID(pm->pm_satp));
+		cpu_tlb_flush_page_asid(va, SATP_ASID(pm->pm_satp) | ASID_USER);
+	} else {
+		/* Kernel VA: flush this page across every ASID. */
+		cpu_tlb_flush_page_all(va);
+	}
+}
+
+struct pmap kernel_pmap_;
+struct pmap pmap_tramp;
+
+LIST_HEAD(pted_pv_head, pte_desc);
+
+/*
+ * Per-mapping bookkeeping: one pte_desc exists for every virtual page
+ * mapped by a pmap, and links the mapping onto the owning page's PV list.
+ */
+struct pte_desc {
+	LIST_ENTRY(pte_desc) pted_pv_list;	/* entry on the page's PV list */
+	pt_entry_t pted_pte;			/* cached PTE bits for this mapping */
+	pmap_t pted_pmap;			/* owning pmap */
+	vaddr_t pted_va;			/* VA plus PTED_VA_* flag bits */
+};
+
+/* VP routines */
+int pmap_vp_enter(pmap_t pm, vaddr_t va, struct pte_desc *pted, int flags);
+struct pte_desc *pmap_vp_remove(pmap_t pm, vaddr_t va);
+void pmap_vp_destroy(pmap_t pm);
+void pmap_vp_destroy_l2_l3(pmap_t pm, struct pmapvp1 *vp1);
+struct pte_desc *pmap_vp_lookup(pmap_t pm, vaddr_t va, pt_entry_t **);
+
+/* PV routines */
+void pmap_enter_pv(struct pte_desc *pted, struct vm_page *);
+void pmap_remove_pv(struct pte_desc *pted);
+
+void _pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, int flags,
+    int cache);
+
+static __inline void pmap_set_mode(pmap_t);
+static __inline void pmap_set_ppn(pmap_t, paddr_t);
+void pmap_allocate_asid(pmap_t);
+
+# if 0 // XXX 4 Level Page Table
+struct pmapvp0 {
+	pt_entry_t l0[VP_IDX0_CNT];
+	struct pmapvp1 *vp[VP_IDX0_CNT];
+};
+#endif
+
+/*
+ * Software shadow of each page-table level: the lN[] array holds the
+ * hardware PTEs, the parallel vp[] array holds kernel-virtual pointers
+ * to the next level down (or, at L3, to the pte_desc).
+ */
+struct pmapvp1 {
+	pt_entry_t l1[VP_IDX1_CNT];
+	struct pmapvp2 *vp[VP_IDX1_CNT];
+};
+
+struct pmapvp2 {
+	pt_entry_t l2[VP_IDX2_CNT];
+	struct pmapvp3 *vp[VP_IDX2_CNT];
+};
+
+struct pmapvp3 {
+	pt_entry_t l3[VP_IDX3_CNT];
+	struct pte_desc *vp[VP_IDX3_CNT];
+};
+/* All levels must be interchangeable for the single vp pool below. */
+CTASSERT(sizeof(struct pmapvp1) == sizeof(struct pmapvp2));
+CTASSERT(sizeof(struct pmapvp1) == sizeof(struct pmapvp3));
+
+/* Allocator for VP pool. */
+void *pmap_vp_page_alloc(struct pool *, int, int *);
+void pmap_vp_page_free(struct pool *, void *);
+
+struct pool_allocator pmap_vp_allocator = {
+	pmap_vp_page_alloc, pmap_vp_page_free, sizeof(struct pmapvp1)
+};
+
+
+void pmap_remove_pted(pmap_t pm, struct pte_desc *pted);
+void pmap_kremove_pg(vaddr_t va);
+#if 0 // XXX Not necessary without 4-Level PT
+void pmap_set_l1(struct pmap *pm, uint64_t va, struct pmapvp1 *l1_va, paddr_t l1_pa);
+#endif
+void pmap_set_l2(struct pmap *pm, uint64_t va, struct pmapvp2 *l2_va, paddr_t l2_pa);
+void pmap_set_l3(struct pmap *pm, uint64_t va, struct pmapvp3 *l3_va, paddr_t l3_pa);
+
+
+/* XXX */
+void
+pmap_fill_pte(pmap_t pm, vaddr_t va, paddr_t pa, struct pte_desc *pted,
+    vm_prot_t prot, int flags, int cache);
+void pmap_pte_insert(struct pte_desc *pted);
+void pmap_pte_remove(struct pte_desc *pted, int);
+void pmap_pte_update(struct pte_desc *pted, pt_entry_t *pl3);
+void pmap_kenter_cache(vaddr_t va, paddr_t pa, vm_prot_t prot, int cacheable);
+void pmap_pinit(pmap_t pm);
+void pmap_release(pmap_t pm);
+paddr_t pmap_steal_avail(size_t size, int align, void **kva);
+void pmap_remove_avail(paddr_t base, paddr_t end);
+vaddr_t pmap_map_stolen(vaddr_t);
+void pmap_physload_avail(void);
+extern caddr_t msgbufaddr;
+
+/* Per-CPU scratch VA windows used by pmap_zero_page()/pmap_copy_page(). */
+vaddr_t vmmap;
+vaddr_t zero_page;
+vaddr_t copy_src_page;
+vaddr_t copy_dst_page;
+
+/* XXX - panic on pool get failures? */
+struct pool pmap_pmap_pool;
+struct pool pmap_pted_pool;
+struct pool pmap_vp_pool;
+
+/* list of L1 tables */
+
+int pmap_initialized = 0;
+
+/* Physical memory range, used by the bootstrap-time "avail" allocator. */
+struct mem_region {
+	vaddr_t start;
+	vsize_t size;
+};
+
+struct mem_region pmap_avail_regions[10];
+struct mem_region pmap_allocated_regions[10];
+struct mem_region *pmap_avail = &pmap_avail_regions[0];
+struct mem_region *pmap_allocated = &pmap_allocated_regions[0];
+int pmap_cnt_avail, pmap_cnt_allocated;
+uint64_t pmap_avail_kvo;
+
+static inline void
+pmap_lock(struct pmap *pmap)
+{
+	/* The kernel pmap is never locked through this interface. */
+	if (pmap == pmap_kernel())
+		return;
+	mtx_enter(&pmap->pm_mtx);
+}
+
+static inline void
+pmap_unlock(struct pmap *pmap)
+{
+	/* Matches pmap_lock(): the kernel pmap takes no mutex. */
+	if (pmap == pmap_kernel())
+		return;
+	mtx_leave(&pmap->pm_mtx);
+}
+
+/* virtual to physical helpers */
+/* Extract the per-level page-table index from a virtual address. */
+static inline int
+VP_IDX0(vaddr_t va)
+{
+	return (va >> VP_IDX0_POS) & VP_IDX0_MASK;
+}
+
+static inline int
+VP_IDX1(vaddr_t va)
+{
+	return (va >> VP_IDX1_POS) & VP_IDX1_MASK;
+}
+
+static inline int
+VP_IDX2(vaddr_t va)
+{
+	return (va >> VP_IDX2_POS) & VP_IDX2_MASK;
+}
+
+static inline int
+VP_IDX3(vaddr_t va)
+{
+	return (va >> VP_IDX3_POS) & VP_IDX3_MASK;
+}
+
+// For RISC-V Machines, write without read permission is not a valid
+// combination of permission bits. These cases are mapped to R+W instead.
+// PROT_NONE grants read permissions because r = 0 | w = 0 | x = 0 is
+// reserved for non-leaf page table entries.
+const pt_entry_t ap_bits_user[8] = {
+	[PROT_NONE]				= PTE_U|PTE_A|PTE_R,
+	[PROT_READ]				= PTE_U|PTE_A|PTE_R,
+	[PROT_WRITE]				= PTE_U|PTE_A|PTE_R|PTE_W,
+	[PROT_WRITE|PROT_READ]			= PTE_U|PTE_A|PTE_R|PTE_W,
+	[PROT_EXEC]				= PTE_U|PTE_A|PTE_X|PTE_R,
+	[PROT_EXEC|PROT_READ]			= PTE_U|PTE_A|PTE_X|PTE_R,
+	[PROT_EXEC|PROT_WRITE]			= PTE_U|PTE_A|PTE_X|PTE_R|PTE_W,
+	[PROT_EXEC|PROT_WRITE|PROT_READ]	= PTE_U|PTE_A|PTE_X|PTE_R|PTE_W,
+};
+
+// Kernel mappings use the same encoding, minus the user (PTE_U) bit.
+const pt_entry_t ap_bits_kern[8] = {
+	[PROT_NONE]				= PTE_A|PTE_R,
+	[PROT_READ]				= PTE_A|PTE_R,
+	[PROT_WRITE]				= PTE_A|PTE_R|PTE_W,
+	[PROT_WRITE|PROT_READ]			= PTE_A|PTE_R|PTE_W,
+	[PROT_EXEC]				= PTE_A|PTE_X|PTE_R,
+	[PROT_EXEC|PROT_READ]			= PTE_A|PTE_X|PTE_R,
+	[PROT_EXEC|PROT_WRITE]			= PTE_A|PTE_X|PTE_R|PTE_W,
+	[PROT_EXEC|PROT_WRITE|PROT_READ]	= PTE_A|PTE_X|PTE_R|PTE_W,
+};
+
+/*
+ * Walk the software VP tables and return the pte_desc for a VA, or
+ * NULL if no entry exists.  pmap_kernel() entries were statically
+ * preallocated at bootstrap and are never removed from the VP table,
+ * so kernel lookups need no allocation and avoid races.  When
+ * pl3entry is non-NULL it receives a pointer to the L3 PTE slot.
+ */
+struct pte_desc *
+pmap_vp_lookup(pmap_t pm, vaddr_t va, pt_entry_t **pl3entry)
+{
+	struct pmapvp1 *vp1 = pm->pm_vp.l1;
+	struct pmapvp2 *vp2;
+	struct pmapvp3 *vp3;
+
+	if (vp1 == NULL)
+		return NULL;
+
+	vp2 = vp1->vp[VP_IDX1(va)];
+	if (vp2 == NULL)
+		return NULL;
+
+	vp3 = vp2->vp[VP_IDX2(va)];
+	if (vp3 == NULL)
+		return NULL;
+
+	if (pl3entry != NULL)
+		*pl3entry = &vp3->l3[VP_IDX3(va)];
+
+	return vp3->vp[VP_IDX3(va)];
+}
+
+/*
+ * Unlink and return the pte_desc at the given address; NULL if there
+ * is no entry.  Assumes the top-level table exists.
+ */
+struct pte_desc *
+pmap_vp_remove(pmap_t pm, vaddr_t va)
+{
+	struct pmapvp2 *vp2;
+	struct pmapvp3 *vp3;
+	struct pte_desc *pted;
+
+	vp2 = pm->pm_vp.l1->vp[VP_IDX1(va)];
+	if (vp2 == NULL)
+		return NULL;
+
+	vp3 = vp2->vp[VP_IDX2(va)];
+	if (vp3 == NULL)
+		return NULL;
+
+	pted = vp3->vp[VP_IDX3(va)];
+	vp3->vp[VP_IDX3(va)] = NULL;
+	return pted;
+}
+
+
+/*
+ * Create a V -> P mapping for the given pmap and virtual address
+ * with reference to the pte descriptor that is used to map the page.
+ * This code should track allocations of vp table allocations
+ * so they can be freed efficiently.
+ *
+ * Returns 0 on success, ENOMEM if an intermediate table could not be
+ * allocated and PMAP_CANFAIL was set (otherwise it panics).
+ *
+ * XXX it may be possible to save some bits of count in the
+ * upper address bits of the pa or the pte entry.
+ * However that does make populating the other bits more tricky.
+ * each level has 512 entries, so that mean 9 bits to store
+ * stash 3 bits each in the first 3 entries?
+ */
+int
+pmap_vp_enter(pmap_t pm, vaddr_t va, struct pte_desc *pted, int flags)
+{
+	struct pmapvp1 *vp1;
+	struct pmapvp2 *vp2;
+	struct pmapvp3 *vp3;
+
+	vp1 = pm->pm_vp.l1;
+
+	/* Allocate the intermediate L2 table on demand. */
+	vp2 = vp1->vp[VP_IDX1(va)];
+	if (vp2 == NULL) {
+		vp2 = pool_get(&pmap_vp_pool, PR_NOWAIT | PR_ZERO);
+		if (vp2 == NULL) {
+			if ((flags & PMAP_CANFAIL) == 0)
+				panic("%s: unable to allocate L2", __func__);
+			return ENOMEM;
+		}
+		pmap_set_l2(pm, va, vp2, 0);
+	}
+
+	/* Likewise for the L3 table. */
+	vp3 = vp2->vp[VP_IDX2(va)];
+	if (vp3 == NULL) {
+		vp3 = pool_get(&pmap_vp_pool, PR_NOWAIT | PR_ZERO);
+		if (vp3 == NULL) {
+			if ((flags & PMAP_CANFAIL) == 0)
+				panic("%s: unable to allocate L3", __func__);
+			return ENOMEM;
+		}
+		pmap_set_l3(pm, va, vp3, 0);
+	}
+
+	vp3->vp[VP_IDX3(va)] = pted;
+	return 0;
+}
+
+void *
+pmap_vp_page_alloc(struct pool *pp, int flags, int *slowdown)
+{
+	struct kmem_dyn_mode kd = KMEM_DYN_INITIALIZER;
+
+	/* Translate the pool wait semantics into km_alloc() terms. */
+	kd.kd_slowdown = slowdown;
+	kd.kd_trylock = ISSET(flags, PR_NOWAIT);
+	kd.kd_waitok = ISSET(flags, PR_WAITOK);
+
+	return km_alloc(pp->pr_pgsize, &kv_any, &kp_dirty, &kd);
+}
+
+void
+pmap_vp_page_free(struct pool *pp, void *v)
+{
+	/* Release a VP-table page obtained from pmap_vp_page_alloc(). */
+	km_free(v, pp->pr_pgsize, &kv_any, &kp_dirty);
+}
+
+u_int32_t PTED_MANAGED(struct pte_desc *pted);
+u_int32_t PTED_WIRED(struct pte_desc *pted);
+u_int32_t PTED_VALID(struct pte_desc *pted);
+
+/* Nonzero if the mapping belongs to a managed page (on a PV list). */
+u_int32_t
+PTED_MANAGED(struct pte_desc *pted)
+{
+	return (pted->pted_va & PTED_VA_MANAGED_M);
+}
+
+/* Nonzero if the mapping is wired (counted in pm_stats.wired_count). */
+u_int32_t
+PTED_WIRED(struct pte_desc *pted)
+{
+	return (pted->pted_va & PTED_VA_WIRED_M);
+}
+
+/* Nonzero if the descriptor currently holds a mapping. */
+u_int32_t
+PTED_VALID(struct pte_desc *pted)
+{
+	return (pted->pted_pte != 0);
+}
+
+/*
+ * PV entries -
+ * manipulate the physical to virtual translations for the entire system.
+ *
+ * QUESTION: should all mapped memory be stored in PV tables? Or
+ * is it alright to only store "ram" memory. Currently device mappings
+ * are not stored.
+ * It makes sense to pre-allocate mappings for all of "ram" memory, since
+ * it is likely that it will be mapped at some point, but would it also
+ * make sense to use a tree/table like is use for pmap to store device
+ * mappings?
+ * Further notes: It seems that the PV table is only used for pmap_protect
+ * and other paging related operations. Given this, it is not necessary
+ * to store any pmap_kernel() entries in PV tables and does not make
+ * sense to store device mappings in PV either.
+ *
+ * Note: unlike other powerpc pmap designs, the array is only an array
+ * of pointers. Since the same structure is used for holding information
+ * in the VP table, the PV table, and for kernel mappings, the wired entries.
+ * Allocate one data structure to hold all of the info, instead of replicating
+ * it multiple times.
+ *
+ * One issue of making this a single data structure is that two pointers are
+ * wasted for every page which does not map ram (device mappings), this
+ * should be a low percentage of mapped pages in the system, so should not
+ * have too noticable unnecessary ram consumption.
+ */
+
+void
+pmap_enter_pv(struct pte_desc *pted, struct vm_page *pg)
+{
+	/*
+	 * Link the mapping onto the page's PV list and mark it managed.
+	 *
+	 * XXX does this test mean that some pages try to be managed,
+	 * but this is called too soon?
+	 */
+	if (__predict_false(!pmap_initialized))
+		return;
+
+	mtx_enter(&pg->mdpage.pv_mtx);
+	LIST_INSERT_HEAD(&(pg->mdpage.pv_list), pted, pted_pv_list);
+	pted->pted_va |= PTED_VA_MANAGED_M;
+	mtx_leave(&pg->mdpage.pv_mtx);
+}
+
+void
+pmap_remove_pv(struct pte_desc *pted)
+{
+	/*
+	 * Callers only invoke this for PTED_MANAGED mappings, so the
+	 * page lookup is presumed to succeed — TODO confirm no caller
+	 * reaches here with an unmanaged pted.
+	 */
+	struct vm_page *pg = PHYS_TO_VM_PAGE(pted->pted_pte & PTE_RPGN);
+
+	mtx_enter(&pg->mdpage.pv_mtx);
+	LIST_REMOVE(pted, pted_pv_list);
+	mtx_leave(&pg->mdpage.pv_mtx);
+}
+
+int
+pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
+{
+	struct pte_desc *pted;
+	struct vm_page *pg;
+	int error;
+	int cache = PMAP_CACHE_WB;
+	int need_sync = 0;
+
+	/* Hint bits in the PA select the cache mode for the mapping. */
+	if (pa & PMAP_NOCACHE)
+		cache = PMAP_CACHE_CI;
+	if (pa & PMAP_DEVICE)
+		cache = PMAP_CACHE_DEV;
+	pg = PHYS_TO_VM_PAGE(pa);
+
+	pmap_lock(pm);
+	pted = pmap_vp_lookup(pm, va, NULL);
+	if (pted && PTED_VALID(pted)) {
+		pmap_remove_pted(pm, pted);
+		/* we lost our pted if it was user */
+		if (pm != pmap_kernel())
+			pted = pmap_vp_lookup(pm, va, NULL);
+	}
+
+	/*
+	 * NOTE(review): incremented before the allocations below can
+	 * fail, so resident_count is not rolled back on the ENOMEM
+	 * paths — confirm against other pmap implementations.
+	 */
+	pm->pm_stats.resident_count++;
+
+	/* Do not have pted for this, get one and put it in VP */
+	if (pted == NULL) {
+		pted = pool_get(&pmap_pted_pool, PR_NOWAIT | PR_ZERO);
+		if (pted == NULL) {
+			if ((flags & PMAP_CANFAIL) == 0)
+				panic("%s: failed to allocate pted", __func__);
+			error = ENOMEM;
+			goto out;
+		}
+		if (pmap_vp_enter(pm, va, pted, flags)) {
+			if ((flags & PMAP_CANFAIL) == 0)
+				panic("%s: failed to allocate L2/L3", __func__);
+			error = ENOMEM;
+			pool_put(&pmap_pted_pool, pted);
+			goto out;
+		}
+	}
+
+	/*
+	 * If it should be enabled _right now_, we can skip doing ref/mod
+	 * emulation. Any access includes reference, modified only by write.
+	 */
+	if (pg != NULL &&
+	    ((flags & PROT_MASK) || (pg->pg_flags & PG_PMAP_REF))) {
+		atomic_setbits_int(&pg->pg_flags, PG_PMAP_REF);
+		if ((prot & PROT_WRITE) && (flags & PROT_WRITE)) {
+			atomic_setbits_int(&pg->pg_flags, PG_PMAP_MOD);
+			atomic_clearbits_int(&pg->pg_flags, PG_PMAP_EXE);
+		}
+	}
+
+	pmap_fill_pte(pm, va, pa, pted, prot, flags, cache);
+
+	if (pg != NULL) {
+		pmap_enter_pv(pted, pg); /* only managed mem */
+	}
+
+	/*
+	 * Insert into table, if this mapping said it needed to be mapped
+	 * now.
+	 */
+	if (flags & (PROT_READ|PROT_WRITE|PROT_EXEC|PMAP_WIRED)) {
+		pmap_pte_insert(pted);
+	}
+
+	tlb_flush(pm, va & ~PAGE_MASK);
+
+	/* First executable mapping of the page needs an icache sync. */
+	if (pg != NULL && (flags & PROT_EXEC)) {
+		need_sync = ((pg->pg_flags & PG_PMAP_EXE) == 0);
+		atomic_setbits_int(&pg->pg_flags, PG_PMAP_EXE);
+	}
+
+	if (need_sync && (pm == pmap_kernel() || (curproc &&
+	    curproc->p_vmspace->vm_map.pmap == pm)))
+		cpu_icache_sync_range(va & ~PAGE_MASK, PAGE_SIZE);
+
+	error = 0;
+out:
+	pmap_unlock(pm);
+	return error;
+}
+
+
+/*
+ * Remove all mappings in the VA range [sva, eva).
+ */
+void
+pmap_remove(pmap_t pm, vaddr_t sva, vaddr_t eva)
+{
+	struct pte_desc *pted;
+	vaddr_t addr;
+
+	pmap_lock(pm);
+	for (addr = sva; addr < eva; addr += PAGE_SIZE) {
+		pted = pmap_vp_lookup(pm, addr, NULL);
+		if (pted == NULL)
+			continue;
+
+		/* Drop the wiring before tearing the mapping down. */
+		if (pted->pted_va & PTED_VA_WIRED_M) {
+			pm->pm_stats.wired_count--;
+			pted->pted_va &= ~PTED_VA_WIRED_M;
+		}
+
+		if (PTED_VALID(pted))
+			pmap_remove_pted(pm, pted);
+	}
+	pmap_unlock(pm);
+}
+
+/*
+ * remove a single mapping, notice that this code is O(1)
+ */
+void
+pmap_remove_pted(pmap_t pm, struct pte_desc *pted)
+{
+	pm->pm_stats.resident_count--;
+
+	/* Account for the wiring going away with the mapping. */
+	if (pted->pted_va & PTED_VA_WIRED_M) {
+		pm->pm_stats.wired_count--;
+		pted->pted_va &= ~PTED_VA_WIRED_M;
+	}
+
+	/* User pteds are also unlinked from the VP table (second arg). */
+	pmap_pte_remove(pted, pm != pmap_kernel());
+
+	tlb_flush(pm, pted->pted_va & ~PAGE_MASK);
+
+	if (pted->pted_va & PTED_VA_EXEC_M) {
+		pted->pted_va &= ~PTED_VA_EXEC_M;
+	}
+
+	if (PTED_MANAGED(pted))
+		pmap_remove_pv(pted);
+
+	/* Invalidate the descriptor... */
+	pted->pted_pte = 0;
+	pted->pted_va = 0;
+
+	/* ...and free it, unless it is a preallocated kernel pted. */
+	if (pm != pmap_kernel())
+		pool_put(&pmap_pted_pool, pted);
+}
+
+
+/*
+ * Populate a kernel mapping for the given page.
+ * kernel mappings have a larger set of prerequisites than normal mappings.
+ *
+ * 1. no memory should be allocated to create a kernel mapping.
+ * 2. a vp mapping should already exist, even if invalid. (see 1)
+ * 3. all vp tree mappings should already exist (see 1)
+ *
+ */
+void
+_pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, int flags, int cache)
+{
+	pmap_t pm = pmap_kernel();
+	struct pte_desc *pted;
+
+	pted = pmap_vp_lookup(pm, va, NULL);
+
+	/* Do not have pted for this, get one and put it in VP */
+	if (pted == NULL) {
+		panic("pted not preallocated in pmap_kernel() va %lx pa %lx\n",
+		    va, pa);
+	}
+
+	/* Replace any existing mapping in place. */
+	if (pted && PTED_VALID(pted))
+		pmap_kremove_pg(va); /* pted is reused */
+
+	pm->pm_stats.resident_count++;
+
+	flags |= PMAP_WIRED; /* kernel mappings are always wired. */
+	/* Calculate PTE */
+	pmap_fill_pte(pm, va, pa, pted, prot, flags, cache);
+
+	/*
+	 * Insert into table
+	 * We were told to map the page, probably called from vm_fault,
+	 * so map the page!
+	 */
+	pmap_pte_insert(pted);
+
+	tlb_flush(pm, va & ~PAGE_MASK);
+	/* Uncached/device mappings: push any stale cached data out. */
+	if (cache == PMAP_CACHE_CI || cache == PMAP_CACHE_DEV)
+		cpu_idcache_wbinv_range(va & ~PAGE_MASK, PAGE_SIZE);
+}
+
+void
+pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
+{
+	int cache = (pa & PMAP_NOCACHE) ? PMAP_CACHE_CI : PMAP_CACHE_WB;
+
+	/* Kernel entries are mapped immediately: prot doubles as flags. */
+	_pmap_kenter_pa(va, pa, prot, prot, cache);
+}
+
+void
+pmap_kenter_cache(vaddr_t va, paddr_t pa, vm_prot_t prot, int cacheable)
+{
+	/* Like pmap_kenter_pa(), but with an explicit cache mode. */
+	_pmap_kenter_pa(va, pa, prot, prot, cacheable);
+}
+
+/*
+ * remove kernel (pmap_kernel()) mapping, one page
+ */
+void
+pmap_kremove_pg(vaddr_t va)
+{
+	pmap_t pm = pmap_kernel();
+	struct pte_desc *pted;
+	int s;
+
+	pted = pmap_vp_lookup(pm, va, NULL);
+	if (pted == NULL)
+		return;
+
+	if (!PTED_VALID(pted))
+		return; /* not mapped */
+
+	/* Block interrupts while the mapping is being torn down. */
+	s = splvm();
+
+	pm->pm_stats.resident_count--;
+
+	/*
+	 * Table needs to be locked here as well as pmap, and pv list.
+	 * so that we know the mapping information is either valid,
+	 * or that the mapping is not present in the hash table.
+	 */
+	pmap_pte_remove(pted, 0);
+
+	tlb_flush(pm, pted->pted_va & ~PAGE_MASK);
+
+	if (pted->pted_va & PTED_VA_EXEC_M)
+		pted->pted_va &= ~PTED_VA_EXEC_M;
+
+	if (PTED_MANAGED(pted))
+		pmap_remove_pv(pted);
+
+	if (pted->pted_va & PTED_VA_WIRED_M)
+		pm->pm_stats.wired_count--;
+
+	/* invalidate pted; the descriptor itself stays preallocated. */
+	pted->pted_pte = 0;
+	pted->pted_va = 0;
+
+	splx(s);
+}
+
+/*
+ * Remove len bytes worth of kernel (pmap_kernel()) mappings starting
+ * at va; len is truncated to a whole number of pages.
+ */
+void
+pmap_kremove(vaddr_t va, vsize_t len)
+{
+	vsize_t npages = len >> PAGE_SHIFT;
+
+	while (npages-- > 0) {
+		pmap_kremove_pg(va);
+		va += PAGE_SIZE;
+	}
+}
+
+void
+pmap_fill_pte(pmap_t pm, vaddr_t va, paddr_t pa, struct pte_desc *pted,
+    vm_prot_t prot, int flags, int cache)
+{
+	pted->pted_va = va;
+	pted->pted_pmap = pm;
+
+	/* The switch only validates the cache mode; no per-mode work yet. */
+	switch (cache) {
+	case PMAP_CACHE_WB:
+		break;
+	case PMAP_CACHE_WT:
+		break;
+	case PMAP_CACHE_CI:
+		break;
+	case PMAP_CACHE_DEV:
+		break;
+	default:
+		panic("pmap_fill_pte:invalid cache mode");
+	}
+	/* Stash cache mode and protection in the low bits of pted_va. */
+	pted->pted_va |= cache;
+
+	pted->pted_va |= prot & (PROT_READ|PROT_WRITE|PROT_EXEC);
+
+	if (flags & PMAP_WIRED) {
+		pted->pted_va |= PTED_VA_WIRED_M;
+		pm->pm_stats.wired_count++;
+	}
+
+	/* pted_pte = physical page number plus the requested access bits. */
+	pted->pted_pte = pa & PTE_RPGN;
+	pted->pted_pte |= flags & (PROT_READ|PROT_WRITE|PROT_EXEC);
+}
+
+/*
+ * Garbage collects the physical map system for pages which are
+ * no longer used. Success need not be guaranteed -- that is, there
+ * may well be pages which are not referenced, but others may be collected
+ * Called by the pageout daemon when pages are scarce.
+ */
+void
+pmap_collect(pmap_t pm)
+{
+	/* Intentionally a no-op for now; see the questions below. */
+	/* This could return unused v->p table layers which
+	 * are empty.
+	 * could malicious programs allocate memory and eat
+	 * these wired pages? These are allocated via pool.
+	 * Are there pool functions which could be called
+	 * to lower the pool usage here?
+	 */
+}
+
+/*
+ * Fill the given physical page with zeros, via this CPU's private
+ * zeroing window.
+ */
+void
+pmap_zero_page(struct vm_page *pg)
+{
+	vaddr_t zva = zero_page + cpu_number() * PAGE_SIZE;
+
+	pmap_kenter_pa(zva, VM_PAGE_TO_PHYS(pg), PROT_READ|PROT_WRITE);
+	pagezero(zva);
+	pmap_kremove_pg(zva);
+}
+
+/*
+ * Copy the contents of srcpg into dstpg, via this CPU's private
+ * source/destination copy windows.
+ */
+void
+pmap_copy_page(struct vm_page *srcpg, struct vm_page *dstpg)
+{
+	vaddr_t sva = copy_src_page + cpu_number() * PAGE_SIZE;
+	vaddr_t dva = copy_dst_page + cpu_number() * PAGE_SIZE;
+
+	pmap_kenter_pa(sva, VM_PAGE_TO_PHYS(srcpg), PROT_READ);
+	pmap_kenter_pa(dva, VM_PAGE_TO_PHYS(dstpg), PROT_READ|PROT_WRITE);
+	memcpy((void *)dva, (void *)sva, PAGE_SIZE);
+	pmap_kremove_pg(sva);
+	pmap_kremove_pg(dva);
+}
+
+void
+pmap_pinit(pmap_t pm)
+{
+	struct pmapvp1 *vp1, *kvp1;
+	vaddr_t l1va;
+	uint64_t l1pa;
+
+	/* Allocate a full L1 table. */
+	while (pm->pm_vp.l1 == NULL) {
+		pm->pm_vp.l1 = pool_get(&pmap_vp_pool,
+		    PR_WAITOK | PR_ZERO);
+	}
+
+	vp1 = pm->pm_vp.l1; /* top level is l1 */
+	l1va = (vaddr_t)vp1->l1;
+
+	// Fill Kernel Entries: share the kernel's L1 slots so kernel VA
+	// is visible in every pmap.
+	kvp1 = pmap_kernel()->pm_vp.l1;
+	memcpy(&vp1->l1[L1_KERN_BASE], &kvp1->l1[L1_KERN_BASE],
+	    L1_KERN_ENTRIES * sizeof(pt_entry_t));
+	memcpy(&vp1->vp[L1_KERN_BASE], &kvp1->vp[L1_KERN_BASE],
+	    L1_KERN_ENTRIES * sizeof(struct pmapvp2 *));
+
+	// Fill DMAP PTEs, shared the same way.
+	memcpy(&vp1->l1[L1_DMAP_BASE], &kvp1->l1[L1_DMAP_BASE],
+	    L1_DMAP_ENTRIES * sizeof(pt_entry_t));
+	memcpy(&vp1->vp[L1_DMAP_BASE], &kvp1->vp[L1_DMAP_BASE],
+	    L1_DMAP_ENTRIES * sizeof(struct pmapvp2 *));
+
+	/* Point satp's PPN at the new root table and pick an ASID. */
+	pmap_extract(pmap_kernel(), l1va, (paddr_t *)&l1pa);
+	pmap_set_ppn(pm, l1pa);
+	pmap_set_mode(pm);
+	pmap_allocate_asid(pm);
+	pmap_reference(pm);
+}
+
+int pmap_vp_poolcache = 0; /* force vp poolcache to allocate late */
+
+/*
+ * Create and return a physical map.
+ */
+pmap_t
+pmap_create(void)
+{
+	pmap_t pmap;
+
+	pmap = pool_get(&pmap_pmap_pool, PR_WAITOK | PR_ZERO);
+
+	mtx_init(&pmap->pm_mtx, IPL_VM);
+
+	pmap_pinit(pmap);
+	/* On first use, prime the VP pool so later NOWAIT gets succeed. */
+	if (pmap_vp_poolcache == 0) {
+		pool_setlowat(&pmap_vp_pool, 20);
+		pmap_vp_poolcache = 20;
+	}
+	return (pmap);
+}
+
+/*
+ * Add a reference to a given pmap.
+ */
+void
+pmap_reference(pmap_t pm)
+{
+	atomic_inc_int(&pm->pm_refs);
+}
+
+/*
+ * Drop a reference on the given pmap; the final reference retires it
+ * from service.  Should only be called once the map contains no valid
+ * mappings.
+ */
+void
+pmap_destroy(pmap_t pm)
+{
+	if (atomic_dec_int_nv(&pm->pm_refs) > 0)
+		return;
+
+	/* Last reference: free the pmap's resources and the pmap itself. */
+	pmap_release(pm);
+	pmap_free_asid(pm);
+	pool_put(&pmap_pmap_pool, pm);
+}
+
+/*
+ * Release any resources held by the given physical map.
+ * Called when a pmap initialized by pmap_pinit is being released.
+ */
+void
+pmap_release(pmap_t pm)
+{
+	/* Free the VP tables and their pte_descs. */
+	pmap_vp_destroy(pm);
+}
+
+void
+pmap_vp_destroy_l2_l3(pmap_t pm, struct pmapvp1 *vp1)
+{
+	/* Empty stub: pmap_vp_destroy() frees the tables directly. */
+
+}
+
+void
+pmap_vp_destroy(pmap_t pm)
+{
+	struct pmapvp1 *vp1;
+	struct pmapvp2 *vp2;
+	struct pmapvp3 *vp3;
+	struct pte_desc *pted;
+	int j, k, l;
+
+	vp1 = pm->pm_vp.l1;
+	/*
+	 * there is not a separate supervisor and user page table root ?
+	 * remove only user page tables
+	 */
+	/* Walk only the user portion of L1; kernel slots are shared. */
+	for (j = 0; j < L1_KERN_BASE; j++) {
+		vp2 = vp1->vp[j];
+		if (vp2 == NULL)
+			continue;
+		vp1->vp[j] = NULL;
+
+		for (k = 0; k < VP_IDX2_CNT; k++) {
+			vp3 = vp2->vp[k];
+			if (vp3 == NULL)
+				continue;
+			vp2->vp[k] = NULL;
+
+			/* Free leaf pte_descs before their L3 table. */
+			for (l = 0; l < VP_IDX3_CNT; l++) {
+				pted = vp3->vp[l];
+				if (pted == NULL)
+					continue;
+				vp3->vp[l] = NULL;
+
+				pool_put(&pmap_pted_pool, pted);
+			}
+			pool_put(&pmap_vp_pool, vp3);
+		}
+		pool_put(&pmap_vp_pool, vp2);
+	}
+	pool_put(&pmap_vp_pool, pm->pm_vp.l1);
+	pm->pm_vp.l1 = NULL;
+	return;
+}
+
+/* Range of kernel virtual address space not yet handed out. */
+vaddr_t virtual_avail, virtual_end;
+int pmap_virtual_space_called;
+
+static inline pt_entry_t
+VP_Lx(paddr_t pa)
+{
+	/*
+	 * This function takes the pa address given and manipulates it
+	 * into the form that should be inserted into the VM table.
+	 */
+	// NOTE: We always assume the entry is valid. OpenBSD/arm64 uses
+	// the least significant bits to differentiate between PTD / PTE.
+	// In riscv64 Sv39 address translation mode PTD / PTE distinguished
+	// by the lack of PTE_R / PTE_X on an entry with PTE_V set. For both
+	// a PTD and PTE, the PTE_V bit is set.
+	return (((pa & PTE_RPGN) >> PAGE_SHIFT) << PTE_PPN0_S) | PTE_V;
+}
+
+/*
+ * In pmap_bootstrap() we allocate the page tables for the first GB
+ * of the kernel address space.
+ */
+vaddr_t pmap_maxkvaddr = VM_MIN_KERNEL_ADDRESS + 1024 * 1024 * 1024;
+
+/*
+ * Allocator for growing the kernel page tables. We use a dedicated
+ * submap to make sure we have the space to map them as we are called
+ * when address space is tight!
+ */
+
+struct vm_map *pmap_kvp_map;
+
+/* Non-waiting allocation mode backed by the dedicated submap above. */
+const struct kmem_va_mode kv_kvp = {
+	.kv_map = &pmap_kvp_map,
+	.kv_wait = 0
+};
+
+void *
+pmap_kvp_alloc(void)
+{
+	void *kvp;
+
+	/*
+	 * Before UVM is up, steal physical pages directly and map them
+	 * by hand; afterwards go through km_alloc() on the kvp submap.
+	 */
+	if (!uvm.page_init_done && !pmap_virtual_space_called) {
+		paddr_t pa[2];
+		vaddr_t va;
+
+		/* A pmapvp is two pages (see CTASSERT above). */
+		if (!uvm_page_physget(&pa[0]) || !uvm_page_physget(&pa[1]))
+			panic("%s: out of memory", __func__);
+
+		va = virtual_avail;
+		virtual_avail += 2 * PAGE_SIZE;
+		KASSERT(virtual_avail <= pmap_maxkvaddr);
+		kvp = (void *)va;
+
+		pmap_kenter_pa(va, pa[0], PROT_READ|PROT_WRITE);
+		pmap_kenter_pa(va + PAGE_SIZE, pa[1], PROT_READ|PROT_WRITE);
+		pagezero(va);
+		pagezero(va + PAGE_SIZE);
+	} else {
+		kvp = km_alloc(sizeof(struct pmapvp1), &kv_kvp, &kp_zero,
+		    &kd_nowait);
+	}
+
+	return kvp;
+}
+
+struct pte_desc *
+pmap_kpted_alloc(void)
+{
+	/* Bump allocator: carve pte_descs out of one page at a time. */
+	static struct pte_desc *pted;
+	static int npted;
+
+	if (npted == 0) {
+		/* Page exhausted; grab a fresh one (stolen pre-UVM). */
+		if (!uvm.page_init_done && !pmap_virtual_space_called) {
+			paddr_t pa;
+			vaddr_t va;
+
+			if (!uvm_page_physget(&pa))
+				panic("%s: out of memory", __func__);
+
+			va = virtual_avail;
+			virtual_avail += PAGE_SIZE;
+			KASSERT(virtual_avail <= pmap_maxkvaddr);
+			pted = (struct pte_desc *)va;
+
+			pmap_kenter_pa(va, pa, PROT_READ|PROT_WRITE);
+			pagezero(va);
+		} else {
+			pted = km_alloc(PAGE_SIZE, &kv_kvp, &kp_zero,
+			    &kd_nowait);
+			if (pted == NULL)
+				return NULL;
+		}
+
+		npted = PAGE_SIZE / sizeof(struct pte_desc);
+	}
+
+	npted--;
+	return pted++;
+}
+
+vaddr_t
+pmap_growkernel(vaddr_t maxkvaddr)
+{
+	// XXX pmap_growkernel must add kernel L1 pages to existing pmaps
+	struct pmapvp1 *vp1 = pmap_kernel()->pm_vp.l1;
+	struct pmapvp2 *vp2;
+	struct pmapvp3 *vp3;
+	struct pte_desc *pted;
+	paddr_t pa;
+	int lb_idx2, ub_idx2;
+	int i, j, k;
+	int s;
+
+	if (maxkvaddr <= pmap_maxkvaddr)
+		return pmap_maxkvaddr;
+
+	/*
+	 * Not strictly necessary, but we use an interrupt-safe map
+	 * and uvm asserts that we're at IPL_VM.
+	 */
+	s = splvm();
+
+	/* Populate L2/L3 tables and pte_descs for every new L1 slot. */
+	for (i = VP_IDX1(pmap_maxkvaddr); i <= VP_IDX1(maxkvaddr - 1); i++) {
+		vp2 = vp1->vp[i];
+		if (vp2 == NULL) {
+			vp2 = pmap_kvp_alloc();
+			if (vp2 == NULL)
+				goto fail;
+			pmap_extract(pmap_kernel(), (vaddr_t)vp2, &pa);
+			vp1->vp[i] = vp2;
+			vp1->l1[i] = VP_Lx(pa);
+		}
+
+		/* Clamp the L2 range on the first and last L1 slot. */
+		if (i == VP_IDX1(pmap_maxkvaddr)) {
+			lb_idx2 = VP_IDX2(pmap_maxkvaddr);
+		} else {
+			lb_idx2 = 0;
+		}
+
+		if (i == VP_IDX1(maxkvaddr - 1)) {
+			ub_idx2 = VP_IDX2(maxkvaddr - 1);
+		} else {
+			ub_idx2 = VP_IDX2_CNT - 1;
+		}
+
+		for (j = lb_idx2; j <= ub_idx2; j++) {
+			vp3 = vp2->vp[j];
+			if (vp3 == NULL) {
+				vp3 = pmap_kvp_alloc();
+				if (vp3 == NULL)
+					goto fail;
+				pmap_extract(pmap_kernel(), (vaddr_t)vp3, &pa);
+				vp2->vp[j] = vp3;
+				vp2->l2[j] = VP_Lx(pa);
+			}
+
+			/* Preallocate a pted for every page in the range. */
+			for (k = 0; k <= VP_IDX3_CNT - 1; k++) {
+				if (vp3->vp[k] == NULL) {
+					pted = pmap_kpted_alloc();
+					if (pted == NULL)
+						goto fail;
+					vp3->vp[k] = pted;
+					pmap_maxkvaddr += PAGE_SIZE;
+				}
+			}
+		}
+	}
+	KASSERT(pmap_maxkvaddr >= maxkvaddr);
+
+fail:
+	splx(s);
+
+	/* On failure this reports how far the kernel map actually grew. */
+	return pmap_maxkvaddr;
+}
+
+void pmap_setup_avail(uint64_t ram_start, uint64_t ram_end, uint64_t kvo);
+
+/*
+ * Initialize pmap setup.
+ * ALL of the code which deals with avail needs rewritten as an actual
+ * memory allocation.
+ */
+CTASSERT(sizeof(struct pmapvp1) == 2 * PAGE_SIZE);
+
+int mappings_allocated = 0;
+int pted_allocated = 0;
+
+extern char __text_start[], _etext[];
+extern char __rodata_start[], _erodata[];
+
+/* Bounds of the direct map, set up by pmap_bootstrap_dmap(). */
+paddr_t dmap_phys_base;
+paddr_t dmap_phys_max;
+vaddr_t dmap_virt_max;
+
+static void
+pmap_bootstrap_dmap(vaddr_t kern_l1, paddr_t min_pa, paddr_t max_pa)
+{
+	vaddr_t va;
+	paddr_t pa;
+	pt_entry_t *l1;
+	u_int l1_slot;
+	pt_entry_t entry;
+	pn_t pn;
+
+	pa = dmap_phys_base = min_pa & ~L1_OFFSET; // 1 GiB Align
+	va = DMAP_MIN_ADDRESS;
+	l1 = (pt_entry_t *)kern_l1;
+	l1_slot = VP_IDX1(DMAP_MIN_ADDRESS);
+
+	/* Map [min_pa, max_pa) at DMAP_MIN_ADDRESS using L1 gigapages. */
+	for (; va < DMAP_MAX_ADDRESS && pa < max_pa;
+	    pa += L1_SIZE, va += L1_SIZE, l1_slot++) {
+		KASSERT(l1_slot < Ln_ENTRIES);
+
+		/* gigapages */
+		pn = (pa / PAGE_SIZE);
+		entry = PTE_KERN;
+		entry |= (pn << PTE_PPN0_S);
+		atomic_store_64(&l1[l1_slot], entry);
+	}
+
+	/* set the upper limit of the dmap region */
+	dmap_phys_max = pa;
+	dmap_virt_max = va;
+
+	sfence_vma();
+}
+
+vaddr_t
+pmap_bootstrap(long kvo, vaddr_t l1pt, vaddr_t kernelstart, vaddr_t kernelend,
+    paddr_t fdt_start, paddr_t fdt_end, paddr_t ram_start, paddr_t ram_end)
+{
+	void *va;
+	paddr_t pa, pt1pa;
+	struct pmapvp1 *vp1;
+	struct pmapvp2 *vp2;
+	struct pmapvp3 *vp3;
+	struct pte_desc *pted;
+	vaddr_t vstart;
+	int i, j, k;
+	int lb_idx2, ub_idx2;
+
+	pmap_setup_avail(ram_start, ram_end, kvo);
+
+	/*
+	 * in theory we could start with just the memory in the
+	 * kernel, however this could 'allocate' the bootloader and
+	 * bootstrap vm table, which we may need to preserve until
+	 * later.
+	 */
+	printf("removing %lx-%lx\n", ram_start, kernelstart+kvo);
+	pmap_remove_avail(ram_start, kernelstart+kvo);
+
+	printf("removing %lx-%lx\n", kernelstart+kvo, kernelend+kvo);
+	pmap_remove_avail(kernelstart+kvo, kernelend+kvo);
+
+	// Remove the FDT physical address range as well
+	// NOTE(review): the printf reports fdt_start+kvo/fdt_end+kvo but
+	// the range actually removed is fdt_start..fdt_end — confirm
+	// which is intended and make the two agree.
+	printf("removing %lx-%lx\n", fdt_start+kvo, fdt_end+kvo);
+	pmap_remove_avail(fdt_start, fdt_end);
+
+	/*
+	 * KERNEL IS ASSUMED TO BE 39 bits (or less), start from L1,
+	 * not L0 ALSO kernel mappings may not cover enough ram to
+	 * bootstrap so all accesses initializing tables must be done
+	 * via physical pointers
+	 */
+
+	// Map the entire Physical Address Space to Direct Mapped Region
+	pmap_bootstrap_dmap(l1pt, ram_start, ram_end);
+
+	/* Two root tables back to back: kernel pmap and trampoline pmap. */
+	pt1pa = pmap_steal_avail(2 * sizeof(struct pmapvp1), Lx_TABLE_ALIGN,
+	    &va);
+	vp1 = (struct pmapvp1 *) PHYS_TO_DMAP(pt1pa);
+	pmap_kernel()->pm_vp.l1 = (struct pmapvp1 *)va;
+	pmap_kernel()->pm_privileged = 1;
+	pmap_kernel()->pm_satp = SATP_MODE_SV39 | /* ASID = 0 */
+	    ((PPN(pt1pa) & SATP_PPN_MASK) << SATP_PPN_SHIFT);
+
+	// XXX Trampoline
+	pmap_tramp.pm_vp.l1 = (struct pmapvp1 *)va + 1;
+	pmap_tramp.pm_privileged = 1;
+	pmap_tramp.pm_satp = SATP_MODE_SV39; /* ASID = 0 */
+	/* pmap_tramp ppn initialized in pmap_postinit */
+
+	/* allocate memory (in unit of pages) for l2 and l3 page table */
+	for (i = VP_IDX1(VM_MIN_KERNEL_ADDRESS);
+	    i <= VP_IDX1(pmap_maxkvaddr - 1);
+	    i++) {
+		mappings_allocated++;
+		pa = pmap_steal_avail(sizeof(struct pmapvp2), Lx_TABLE_ALIGN,
+		    &va);
+		vp2 = (struct pmapvp2 *) PHYS_TO_DMAP(pa);
+		vp1->vp[i] = va;
+		vp1->l1[i] = VP_Lx(pa);
+
+		if (i == VP_IDX1(VM_MIN_KERNEL_ADDRESS)) {
+			lb_idx2 = VP_IDX2(VM_MIN_KERNEL_ADDRESS);
+		} else {
+			lb_idx2 = 0;
+		}
+		if (i == VP_IDX1(pmap_maxkvaddr - 1)) {
+			ub_idx2 = VP_IDX2(pmap_maxkvaddr - 1);
+		} else {
+			ub_idx2 = VP_IDX2_CNT - 1;
+		}
+		for (j = lb_idx2; j <= ub_idx2; j++) {
+			mappings_allocated++;
+			pa = pmap_steal_avail(sizeof(struct pmapvp3),
+			    Lx_TABLE_ALIGN, &va);
+			vp3 = (struct pmapvp3 *) PHYS_TO_DMAP(pa);
+			vp2->vp[j] = va;
+			vp2->l2[j] = VP_Lx(pa);
+		}
+	}
+	/* allocate memory for pte_desc */
+	for (i = VP_IDX1(VM_MIN_KERNEL_ADDRESS);
+	    i <= VP_IDX1(pmap_maxkvaddr - 1);
+	    i++) {
+		vp2 = (void *) PHYS_TO_DMAP((long)vp1->vp[i] + kvo);
+
+		if (i == VP_IDX1(VM_MIN_KERNEL_ADDRESS)) {
+			lb_idx2 = VP_IDX2(VM_MIN_KERNEL_ADDRESS);
+		} else {
+			lb_idx2 = 0;
+		}
+		if (i == VP_IDX1(pmap_maxkvaddr - 1)) {
+			ub_idx2 = VP_IDX2(pmap_maxkvaddr - 1);
+		} else {
+			ub_idx2 = VP_IDX2_CNT - 1;
+		}
+		for (j = lb_idx2; j <= ub_idx2; j++) {
+			vp3 = (void *) PHYS_TO_DMAP((long)vp2->vp[j] + kvo);
+
+			for (k = 0; k <= VP_IDX3_CNT - 1; k++) {
+				pted_allocated++;
+				pa = pmap_steal_avail(sizeof(struct pte_desc),
+				    4, &va);
+				pted = va;
+				vp3->vp[k] = pted;
+			}
+		}
+	}
+
+	/* now that we have mapping-space for everything, lets map it */
+	/* all of these mappings are ram -> kernel va */
+
+#if 0 // XXX This block does not appear to do anything useful?
+	/*
+	 * enable mappings for existing 'allocated' mapping in the bootstrap
+	 * page tables
+	 */
+	extern pt_entry_t *pagetable_l2;
+	extern char _end[];
+	vp2 = (void *) PHYS_TO_DMAP((long)&pagetable_l2 + kvo);
+	struct mem_region *mp;
+	ssize_t size;
+	for (mp = pmap_allocated; mp->size != 0; mp++) {
+		/* bounds may be kinda messed up */
+		for (pa = mp->start, size = mp->size & ~(PAGE_SIZE-1);
+		    size > 0;
+		    pa+= L2_SIZE, size -= L2_SIZE)
+		{
+			paddr_t mappa = pa & ~(L2_SIZE-1);
+			vaddr_t mapva = mappa - kvo;
+			int prot = PROT_READ | PROT_WRITE;
+
+			if (mapva < (vaddr_t)_end)
+				continue;
+
+			if (mapva >= (vaddr_t)__text_start &&
+			    mapva < (vaddr_t)_etext)
+				prot = PROT_READ | PROT_EXEC;
+			else if (mapva >= (vaddr_t)__rodata_start &&
+			    mapva < (vaddr_t)_erodata)
+				prot = PROT_READ;
+
+			// XXX What does ATTR_nG in arm64 mean?
+			vp2->l2[VP_IDX2(mapva)] = VP_Lx(mappa) |
+			    ap_bits_kern[prot];
+		}
+	}
+#endif
+
+	pmap_avail_fixup();
+
+	/*
+	 * At this point we are still running on the bootstrap page
+	 * tables however all memory for the final page tables is
+	 * 'allocated' and should now be mapped. This means we are
+	 * able to use the virtual addressing to populate the final
+	 * mappings into the new mapping tables.
+	 */
+	vstart = pmap_map_stolen(kernelstart);
+
+	// Include the Direct Map in Kernel PMAP
+	// as gigapages, only populated the pmapvp1->l1 field,
+	// pmap->va field is not used
+	pmap_bootstrap_dmap((vaddr_t) pmap_kernel()->pm_vp.l1, ram_start, ram_end);
+
+	//switching to new page table
+	uint64_t satp = pmap_kernel()->pm_satp;
+	__asm __volatile("csrw satp, %0" :: "r" (satp) : "memory");
+
+	printf("all mapped\n");
+
+	curcpu()->ci_curpm = pmap_kernel();
+
+	/* Reserve one page of the new KVA as the vmmap scratch window. */
+	vmmap = vstart;
+	vstart += PAGE_SIZE;
+
+	return vstart;
+}
+
+#if 0 // XXX Not necessary without 4-Level PT
+/*
+ * Install the L1 table (l1_va/l1_pa) into the level-0 slot covering va.
+ * Only a 4-level (Sv48) page table has a level 0, so this is disabled
+ * while the port runs Sv39.
+ */
+void
+pmap_set_l1(struct pmap *pm, uint64_t va, struct pmapvp1 *l1_va, paddr_t l1_pa)
+{
+	pt_entry_t pg_entry;
+	int idx0;
+
+	if (l1_pa == 0) {
+		/*
+		 * if this is called from pmap_vp_enter, this is a
+		 * normally mapped page, call pmap_extract to get pa
+		 */
+		pmap_extract(pmap_kernel(), (vaddr_t)l1_va, &l1_pa);
+	}
+
+	/* Hardware requires page-table pages to be table-aligned. */
+	if (l1_pa & (Lx_TABLE_ALIGN-1))
+		panic("misaligned L2 table\n");
+
+	pg_entry = VP_Lx(l1_pa);
+
+	/* Record both the software pointer and the hardware entry. */
+	idx0 = VP_IDX0(va);
+	pm->pm_vp.l0->vp[idx0] = l1_va;
+	pm->pm_vp.l0->l0[idx0] = pg_entry;
+}
+#endif
+
+/*
+ * Install the L2 table (l2_va/l2_pa) into the L1 slot that covers va.
+ * If l2_pa is zero the table came from pmapped memory and its physical
+ * address is recovered with pmap_extract().
+ */
+void
+pmap_set_l2(struct pmap *pm, uint64_t va, struct pmapvp2 *l2_va, paddr_t l2_pa)
+{
+	pt_entry_t pg_entry;
+	struct pmapvp1 *vp1;
+	int idx1;
+
+	if (l2_pa == 0) {
+		/*
+		 * if this is called from pmap_vp_enter, this is a
+		 * normally mapped page, call pmap_extract to get pa
+		 */
+		pmap_extract(pmap_kernel(), (vaddr_t)l2_va, &l2_pa);
+	}
+
+	/* Hardware requires page-table pages to be table-aligned. */
+	if (l2_pa & (Lx_TABLE_ALIGN-1))
+		panic("misaligned L2 table\n");
+
+	pg_entry = VP_Lx(l2_pa);
+
+	/* No level 0 with Sv39: the L1 index is the top-level index. */
+	idx1 = VP_IDX1(va);
+	vp1 = pm->pm_vp.l1;
+	vp1->vp[idx1] = l2_va;
+	vp1->l1[idx1] = pg_entry;
+}
+
+/*
+ * Install the L3 table (l3_va/l3_pa) into the L2 slot that covers va.
+ * If l3_pa is zero the table came from pmapped memory and its physical
+ * address is recovered with pmap_extract().
+ */
+void
+pmap_set_l3(struct pmap *pm, uint64_t va, struct pmapvp3 *l3_va, paddr_t l3_pa)
+{
+	pt_entry_t pg_entry;
+	struct pmapvp1 *vp1;
+	struct pmapvp2 *vp2;
+	int idx1, idx2;
+
+	if (l3_pa == 0) {
+		/*
+		 * if this is called from pmap_vp_enter, this is a
+		 * normally mapped page, call pmap_extract to get pa
+		 */
+		pmap_extract(pmap_kernel(), (vaddr_t)l3_va, &l3_pa);
+	}
+
+	/* Hardware requires page-table pages to be table-aligned. */
+	if (l3_pa & (Lx_TABLE_ALIGN-1))
+		panic("misaligned L3 table\n");
+
+	pg_entry = VP_Lx(l3_pa);
+
+	idx1 = VP_IDX1(va);
+	idx2 = VP_IDX2(va);
+	vp1 = pm->pm_vp.l1;
+	vp2 = vp1->vp[idx1];
+	vp2->vp[idx2] = l3_va;
+	vp2->l2[idx2] = pg_entry;
+}
+
+/*
+ * activate a pmap entry
+ */
+void
+pmap_activate(struct proc *p)
+{
+	pmap_t pm = p->p_vmspace->vm_map.pmap;
+	int sie;
+
+	/*
+	 * Only switch satp when p is actually running on this CPU and
+	 * its pmap differs from the one currently loaded; done with
+	 * interrupts blocked so ci_curpm and satp stay consistent.
+	 */
+	sie = disable_interrupts();
+	if (p == curproc && pm != curcpu()->ci_curpm)
+		pmap_set_satp(p);
+	restore_interrupts(sie);
+}
+
+/*
+ * deactivate a pmap entry
+ * Nothing to do: the address-space switch is performed wholesale by
+ * pmap_activate() on the next process.
+ */
+void
+pmap_deactivate(struct proc *p)
+{
+}
+
+/*
+ * Get the physical page address for the given pmap/virtual address.
+ * Returns FALSE when the VA has no valid mapping; on success, stores
+ * the PA (page frame plus in-page offset) through pa when non-NULL.
+ */
+boolean_t
+pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pa)
+{
+	struct pte_desc *pted = pmap_vp_lookup(pm, va, NULL);
+
+	/* No descriptor, or an empty PTE, means the VA is unmapped. */
+	if (pted == NULL || pted->pted_pte == 0)
+		return FALSE;
+
+	if (pa != NULL)
+		*pa = (pted->pted_pte & PTE_RPGN) | (va & PAGE_MASK);
+
+	return TRUE;
+}
+
+/*
+ * Downgrade the mapping of va in pm: always strip write permission,
+ * and also strip exec permission when prot does not include PROT_EXEC.
+ * Flushes the TLB entry afterwards.
+ */
+void
+pmap_page_ro(pmap_t pm, vaddr_t va, vm_prot_t prot)
+{
+	pt_entry_t *pl3;
+	struct pte_desc *pted;
+
+	/* Every VA needs a pted, even unmanaged ones. */
+	pted = pmap_vp_lookup(pm, va, &pl3);
+	if (pted == NULL || !PTED_VALID(pted))
+		return;
+
+	pted->pted_va &= ~PROT_WRITE;
+	pted->pted_pte &= ~PROT_WRITE;
+	if ((prot & PROT_EXEC) == 0) {
+		pted->pted_va &= ~PROT_EXEC;
+		pted->pted_pte &= ~PROT_EXEC;
+	}
+	pmap_pte_update(pted, pl3);
+
+	tlb_flush(pm, pted->pted_va & ~PAGE_MASK);
+}
+
+/*
+ * Lower the protection on the specified physical page.
+ *
+ * There are only two cases, either the protection is going to 0,
+ * or it is going to read-only.
+ */
+void
+pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
+{
+	struct pte_desc *pted;
+	struct pmap *pm;
+
+	if (prot != PROT_NONE) {
+		/* Read-only case: downgrade every mapping in place. */
+		mtx_enter(&pg->mdpage.pv_mtx);
+		LIST_FOREACH(pted, &(pg->mdpage.pv_list), pted_pv_list) {
+			pmap_page_ro(pted->pted_pmap, pted->pted_va, prot);
+		}
+		mtx_leave(&pg->mdpage.pv_mtx);
+		/*
+		 * Done -- without this return the function fell through
+		 * into the removal loop below and unmapped the page even
+		 * for a read-only downgrade.
+		 */
+		return;
+	}
+
+	/* PROT_NONE case: remove every mapping of the page. */
+	mtx_enter(&pg->mdpage.pv_mtx);
+	while ((pted = LIST_FIRST(&(pg->mdpage.pv_list))) != NULL) {
+		pmap_reference(pted->pted_pmap);
+		pm = pted->pted_pmap;
+		mtx_leave(&pg->mdpage.pv_mtx);
+
+		pmap_lock(pm);
+
+		/*
+		 * We dropped the pvlist lock before grabbing the pmap
+		 * lock to avoid lock ordering problems. This means
+		 * we have to check the pvlist again since somebody
+		 * else might have modified it. All we care about is
+		 * that the pvlist entry matches the pmap we just
+		 * locked. If it doesn't, unlock the pmap and try
+		 * again.
+		 */
+		mtx_enter(&pg->mdpage.pv_mtx);
+		pted = LIST_FIRST(&(pg->mdpage.pv_list));
+		if (pted == NULL || pted->pted_pmap != pm) {
+			mtx_leave(&pg->mdpage.pv_mtx);
+			pmap_unlock(pm);
+			pmap_destroy(pm);
+			mtx_enter(&pg->mdpage.pv_mtx);
+			continue;
+		}
+		mtx_leave(&pg->mdpage.pv_mtx);
+
+		pmap_remove_pted(pm, pted);
+		pmap_unlock(pm);
+		pmap_destroy(pm);
+
+		mtx_enter(&pg->mdpage.pv_mtx);
+	}
+	/* page is being reclaimed, sync icache next use */
+	atomic_clearbits_int(&pg->pg_flags, PG_PMAP_EXE);
+	mtx_leave(&pg->mdpage.pv_mtx);
+}
+
+/*
+ * Change protection on the range [sva, eva) in pm.  Dropping all
+ * access is a removal; anything else is a per-page downgrade.
+ */
+void
+pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
+{
+	vaddr_t va;
+
+	if ((prot & (PROT_READ | PROT_EXEC)) == 0) {
+		pmap_remove(pm, sva, eva);
+		return;
+	}
+
+	pmap_lock(pm);
+	for (va = sva; va < eva; va += PAGE_SIZE)
+		pmap_page_ro(pm, va, prot);
+	pmap_unlock(pm);
+}
+
+/*
+ * Set up the allocation pools used by the pmap layer (pmaps, pte
+ * descriptors, and page-table pages) and mark the pmap initialized.
+ */
+void
+pmap_init(void)
+{
+	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, IPL_NONE, 0,
+	    "pmap", NULL);
+	pool_setlowat(&pmap_pmap_pool, 2);
+	pool_init(&pmap_pted_pool, sizeof(struct pte_desc), 0, IPL_VM, 0,
+	    "pted", NULL);
+	pool_setlowat(&pmap_pted_pool, 20);
+	/* vp tables are page-sized and page-aligned; custom allocator. */
+	pool_init(&pmap_vp_pool, sizeof(struct pmapvp1), PAGE_SIZE, IPL_VM, 0,
+	    "vp", &pmap_vp_allocator);
+	pool_setlowat(&pmap_vp_pool, 20);
+
+	pmap_initialized = 1;
+}
+
+/*
+ * Synchronize the instruction cache for [va, va+len) in process pr,
+ * e.g. after breakpoint insertion by a debugger.
+ */
+void
+pmap_proc_iflush(struct process *pr, vaddr_t va, vsize_t len)
+{
+	struct pmap *pm = vm_map_pmap(&pr->ps_vmspace->vm_map);
+	vaddr_t kva = zero_page + cpu_number() * PAGE_SIZE;
+	paddr_t pa;
+	vsize_t clen;
+	vsize_t off;
+
+	/*
+	 * If we're called for the current process, we can simply
+	 * flush the data cache to the point of unification and
+	 * invalidate the instruction cache.
+	 */
+	if (pr == curproc->p_p) {
+		cpu_icache_sync_range(va, len);
+		return;
+	}
+
+	/*
+	 * Flush and invalidate through an aliased mapping. This
+	 * assumes the instruction cache is PIPT. That is only true
+	 * for some of the hardware we run on.
+	 */
+	while (len > 0) {
+		/* add one to always round up to the next page */
+		clen = round_page(va + 1) - va;
+		if (clen > len)
+			clen = len;
+
+		off = va - trunc_page(va);
+		if (pmap_extract(pm, trunc_page(va), &pa)) {
+			/* Borrow this CPU's zero-page slot as the alias. */
+			pmap_kenter_pa(kva, pa, PROT_READ|PROT_WRITE);
+			cpu_icache_sync_range(kva + off, clen);
+			pmap_kremove_pg(kva);
+		}
+
+		len -= clen;
+		va += clen;
+	}
+}
+
+/*
+ * Write the hardware PTE for an existing pte descriptor.  The vp
+ * hierarchy for the VA must already be populated.
+ */
+void
+pmap_pte_insert(struct pte_desc *pted)
+{
+	/* put entry into table */
+	/* need to deal with ref/change here */
+	pmap_t pm = pted->pted_pmap;
+	pt_entry_t *pl3;
+
+	if (pmap_vp_lookup(pm, pted->pted_va, &pl3) == NULL) {
+		panic("%s: have a pted, but missing a vp"
+		    " for %lx va pmap %p", __func__, pted->pted_va, pm);
+	}
+
+	pmap_pte_update(pted, pl3);
+}
+
+/*
+ * Construct the hardware L3 PTE for pted and store it through pl3.
+ * Access bits are selected by privilege: kernel pmaps use the kern
+ * table, user pmaps the user table, both indexed by the PROT_* bits
+ * currently enabled in pted_pte (mod/ref emulation may have cleared
+ * some).  No TLB maintenance is done here; callers flush as needed.
+ */
+void
+pmap_pte_update(struct pte_desc *pted, uint64_t *pl3)
+{
+	pt_entry_t pte, access_bits;
+	pmap_t pm = pted->pted_pmap;
+#if 0 // XXX Attributes specific to arm64? Does riscv64 have equivalent?
+	uint64_t attr = ATTR_nG;
+
+	/* see mair in locore.S */
+	switch (pted->pted_va & PMAP_CACHE_BITS) {
+	case PMAP_CACHE_WB:
+		/* inner and outer writeback */
+		attr |= ATTR_IDX(PTE_ATTR_WB);
+		attr |= ATTR_SH(SH_INNER);
+		break;
+	case PMAP_CACHE_WT:
+		/* inner and outer writethrough */
+		attr |= ATTR_IDX(PTE_ATTR_WT);
+		attr |= ATTR_SH(SH_INNER);
+		break;
+	case PMAP_CACHE_CI:
+		attr |= ATTR_IDX(PTE_ATTR_CI);
+		attr |= ATTR_SH(SH_INNER);
+		break;
+	case PMAP_CACHE_DEV:
+		attr |= ATTR_IDX(PTE_ATTR_DEV);
+		attr |= ATTR_SH(SH_INNER);
+		break;
+	default:
+		panic("pmap_pte_insert: invalid cache mode");
+	}
+#endif
+
+	if (pm->pm_privileged)
+		access_bits = ap_bits_kern[pted->pted_pte & PROT_MASK];
+	else
+		access_bits = ap_bits_user[pted->pted_pte & PROT_MASK];
+
+	pte = VP_Lx(pted->pted_pte) | access_bits | PTE_V;
+	*pl3 = pte;
+}
+
+/*
+ * Clear the hardware L3 PTE for pted; when remove_pted is set, also
+ * unlink the descriptor from the vp table.  Flushes the TLB entry.
+ */
+void
+pmap_pte_remove(struct pte_desc *pted, int remove_pted)
+{
+	/* put entry into table */
+	/* need to deal with ref/change here */
+	struct pmapvp1 *vp1;
+	struct pmapvp2 *vp2;
+	struct pmapvp3 *vp3;
+	pmap_t pm = pted->pted_pmap;
+
+	vp1 = pm->pm_vp.l1;
+	/* One check per level; the original tested the L2 slot twice. */
+	vp2 = vp1->vp[VP_IDX1(pted->pted_va)];
+	if (vp2 == NULL) {
+		panic("have a pted, but missing the l2 for %lx va pmap %p",
+		    pted->pted_va, pm);
+	}
+	vp3 = vp2->vp[VP_IDX2(pted->pted_va)];
+	if (vp3 == NULL) {
+		panic("have a pted, but missing the l3 for %lx va pmap %p",
+		    pted->pted_va, pm);
+	}
+	vp3->l3[VP_IDX3(pted->pted_va)] = 0;
+	if (remove_pted)
+		vp3->vp[VP_IDX3(pted->pted_va)] = NULL;
+
+	tlb_flush(pm, pted->pted_va);
+}
+
+/*
+ * This function exists to do software referenced/modified emulation.
+ * Its purpose is to tell the caller that a fault was generated either
+ * for this emulation, or to tell the caller that it's a legit fault.
+ * Returns 1 when the fault was fixed up here, 0 when UVM must handle it.
+ */
+int
+pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, int user)
+{
+	struct pte_desc *pted;
+	struct vm_page *pg;
+	paddr_t pa;
+	pt_entry_t *pl3 = NULL;
+	int need_sync = 0;
+	int retcode = 0;
+
+	pmap_lock(pm);
+
+	/* Every VA needs a pted, even unmanaged ones. */
+	pted = pmap_vp_lookup(pm, va, &pl3);
+	if (!pted || !PTED_VALID(pted))
+		goto done;
+
+	/* There has to be a PA for the VA, get it. */
+	pa = (pted->pted_pte & PTE_RPGN);
+
+	/* If it's unmanaged, it must not fault. */
+	pg = PHYS_TO_VM_PAGE(pa);
+	if (pg == NULL)
+		goto done;
+
+	/*
+	 * Check based on fault type for mod/ref emulation.
+	 * if L3 entry is zero, it is not a possible fixup
+	 */
+	if (*pl3 == 0)
+		goto done;
+
+	/*
+	 * Check the fault types to find out if we were doing
+	 * any mod/ref emulation and fixup the PTE if we were.
+	 */
+	if ((ftype & PROT_WRITE) && /* fault caused by a write */
+	    !(pted->pted_pte & PROT_WRITE) && /* and write is disabled now */
+	    (pted->pted_va & PROT_WRITE)) { /* but is supposedly allowed */
+
+		/*
+		 * Page modified emulation. A write always includes
+		 * a reference. This means that we can enable read and
+		 * exec as well, akin to the page reference emulation.
+		 */
+		atomic_setbits_int(&pg->pg_flags, PG_PMAP_MOD|PG_PMAP_REF);
+		atomic_clearbits_int(&pg->pg_flags, PG_PMAP_EXE);
+
+		/* Thus, enable read, write and exec. */
+		pted->pted_pte |=
+		    (pted->pted_va & (PROT_READ|PROT_WRITE|PROT_EXEC));
+	} else if ((ftype & PROT_EXEC) && /* fault caused by an exec */
+	    !(pted->pted_pte & PROT_EXEC) && /* and exec is disabled now */
+	    (pted->pted_va & PROT_EXEC)) { /* but is supposedly allowed */
+
+		/*
+		 * Exec always includes a reference. Since we now know
+		 * the page has been accessed, we can enable read as well
+		 * if UVM allows it.
+		 */
+		atomic_setbits_int(&pg->pg_flags, PG_PMAP_REF);
+
+		/* Thus, enable read and exec. */
+		pted->pted_pte |= (pted->pted_va & (PROT_READ|PROT_EXEC));
+	} else if ((ftype & PROT_READ) && /* fault caused by a read */
+	    !(pted->pted_pte & PROT_READ) && /* and read is disabled now */
+	    (pted->pted_va & PROT_READ)) { /* but is supposedly allowed */
+
+		/*
+		 * Page referenced emulation. Since we now know the page
+		 * has been accessed, we can enable exec as well if UVM
+		 * allows it.
+		 */
+		atomic_setbits_int(&pg->pg_flags, PG_PMAP_REF);
+
+		/* Thus, enable read and exec. */
+		pted->pted_pte |= (pted->pted_va & (PROT_READ|PROT_EXEC));
+	} else {
+		/* didn't catch it, so probably broken */
+		goto done;
+	}
+
+	/* We actually made a change, so flush it and sync. */
+	pmap_pte_update(pted, pl3);
+
+	/* Flush tlb. */
+	tlb_flush(pm, va & ~PAGE_MASK);
+
+	/*
+	 * If this is a page that can be executed, make sure to invalidate
+	 * the instruction cache if the page has been modified or not used
+	 * yet.
+	 */
+	if (pted->pted_va & PROT_EXEC) {
+		need_sync = ((pg->pg_flags & PG_PMAP_EXE) == 0);
+		atomic_setbits_int(&pg->pg_flags, PG_PMAP_EXE);
+		if (need_sync)
+			cpu_icache_sync_range(va & ~PAGE_MASK, PAGE_SIZE);
+	}
+
+	retcode = 1;
+done:
+	pmap_unlock(pm);
+	return retcode;
+}
+
+/*
+ * Late pmap initialization: clear the trampoline pmap's L1 table and
+ * reserve KVA for future kernel page-table growth.
+ */
+void
+pmap_postinit(void)
+{
+#if 0 // XXX Trampoline Vectors
+	extern char trampoline_vectors[];
+	paddr_t pa;
+#endif
+	vaddr_t minaddr, maxaddr;
+	u_long npteds, npages;
+
+	memset(pmap_tramp.pm_vp.l1, 0, sizeof(struct pmapvp1));
+#if 0 // XXX Trampoline Vectors
+	pmap_extract(pmap_kernel(), (vaddr_t)trampoline_vectors, &pa);
+	pmap_enter(&pmap_tramp, (vaddr_t)trampoline_vectors, pa,
+	    PROT_READ | PROT_EXEC, PROT_READ | PROT_EXEC | PMAP_WIRED);
+#endif
+
+	/*
+	 * Reserve enough virtual address space to grow the kernel
+	 * page tables. We need a descriptor for each page as well as
+	 * an extra page for level 1/2/3 page tables for management.
+	 * To simplify the code, we always allocate full tables at
+	 * level 3, so take that into account.
+	 */
+	npteds = (VM_MAX_KERNEL_ADDRESS - pmap_maxkvaddr + 1) / PAGE_SIZE;
+	npteds = roundup(npteds, VP_IDX3_CNT);
+	npages = howmany(npteds, PAGE_SIZE / (sizeof(struct pte_desc)));
+	npages += 2 * howmany(npteds, VP_IDX3_CNT);
+	npages += 2 * howmany(npteds, VP_IDX3_CNT * VP_IDX2_CNT);
+	npages += 2 * howmany(npteds, VP_IDX3_CNT * VP_IDX2_CNT * VP_IDX1_CNT);
+
+	/*
+	 * Use an interrupt safe map such that we don't recurse into
+	 * uvm_map() to allocate map entries.
+	 */
+	minaddr = vm_map_min(kernel_map);
+	pmap_kvp_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
+	    npages * PAGE_SIZE, VM_MAP_INTRSAFE, FALSE, NULL);
+}
+
+/*
+ * Flush deferred pmap work -- nothing is deferred in this pmap, so
+ * this is a no-op.
+ */
+void
+pmap_update(pmap_t pm)
+{
+}
+
+/* Report the software-emulated "referenced" bit for the page. */
+int
+pmap_is_referenced(struct vm_page *pg)
+{
+	return (pg->pg_flags & PG_PMAP_REF) ? 1 : 0;
+}
+
+/* Report the software-emulated "modified" bit for the page. */
+int
+pmap_is_modified(struct vm_page *pg)
+{
+	return (pg->pg_flags & PG_PMAP_MOD) ? 1 : 0;
+}
+
+/*
+ * Clear the page's "modified" bit.  Write access is revoked on every
+ * mapping so the next store re-faults and re-arms modified emulation.
+ */
+int
+pmap_clear_modify(struct vm_page *pg)
+{
+	struct pte_desc *pted;
+	pt_entry_t *pl3 = NULL;
+
+	atomic_clearbits_int(&pg->pg_flags, PG_PMAP_MOD);
+
+	mtx_enter(&pg->mdpage.pv_mtx);
+	LIST_FOREACH(pted, &(pg->mdpage.pv_list), pted_pv_list) {
+		if (pmap_vp_lookup(pted->pted_pmap, pted->pted_va & ~PAGE_MASK, &pl3) == NULL)
+			panic("failed to look up pte\n");
+		/* Drop the hardware write bit and the soft permission. */
+		*pl3 &= ~PTE_W;
+		pted->pted_pte &= ~PROT_WRITE;
+
+		tlb_flush(pted->pted_pmap, pted->pted_va & ~PAGE_MASK);
+	}
+	mtx_leave(&pg->mdpage.pv_mtx);
+
+	return 0;
+}
+
+/*
+ * When this turns off read permissions it also disables write permissions
+ * so that mod is correctly tracked after clear_ref; FAULT_READ; FAULT_WRITE;
+ */
+int
+pmap_clear_reference(struct vm_page *pg)
+{
+	struct pte_desc *pted;
+
+	atomic_clearbits_int(&pg->pg_flags, PG_PMAP_REF);
+
+	mtx_enter(&pg->mdpage.pv_mtx);
+	LIST_FOREACH(pted, &(pg->mdpage.pv_list), pted_pv_list) {
+		/* Strip all access so the next touch faults and re-refs. */
+		pted->pted_pte &= ~PROT_MASK;
+		pmap_pte_insert(pted);
+		tlb_flush(pted->pted_pmap, pted->pted_va & ~PAGE_MASK);
+	}
+	mtx_leave(&pg->mdpage.pv_mtx);
+
+	return 0;
+}
+
+/*
+ * Optional pmap hook to copy mappings between address spaces; this
+ * implementation relies on fault-in instead.
+ */
+void
+pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr,
+    vsize_t len, vaddr_t src_addr)
+{
+	/* NOOP */
+}
+
+/* Clear the wired attribute on the mapping of va, if it is wired. */
+void
+pmap_unwire(pmap_t pm, vaddr_t va)
+{
+	struct pte_desc *pted = pmap_vp_lookup(pm, va, NULL);
+
+	if (pted == NULL)
+		return;
+	if (pted->pted_va & PTED_VA_WIRED_M) {
+		pm->pm_stats.wired_count--;
+		pted->pted_va &= ~PTED_VA_WIRED_M;
+	}
+}
+
+/*
+ * Mark unusable address-space holes in the vmspace; riscv64 has no
+ * such holes in the user VA range, so nothing to do.
+ */
+void
+pmap_remove_holes(struct vmspace *vm)
+{
+	/* NOOP */
+}
+
+/*
+ * Report the managed kernel virtual address range to UVM.
+ */
+void
+pmap_virtual_space(vaddr_t *start, vaddr_t *end)
+{
+	*start = virtual_avail;
+	*end = virtual_end;
+
+	/* Prevent further KVA stealing. */
+	pmap_virtual_space_called = 1;
+}
+
+/*
+ * Seed the available-memory list with the single RAM region and
+ * record the PA->VA offset used during bootstrap.
+ */
+void
+pmap_setup_avail(uint64_t ram_start, uint64_t ram_end, uint64_t kvo)
+{
+	/* This makes several assumptions
+	 * 1) kernel will be located 'low' in memory
+	 * 2) memory will not start at VM_MIN_KERNEL_ADDRESS
+	 * 3) several MB of memory starting just after the kernel will
+	 *    be premapped at the kernel address in the bootstrap mappings
+	 * 4) kvo will be the 64 bit number to add to the ram address to
+	 *    obtain the kernel virtual mapping of the ram. KVO == PA -> VA
+	 * 5) it is generally assumed that these translations will occur with
+	 *    large granularity, at minimum the translation will be page
+	 *    aligned, if not 'section' or greater.
+	 */
+
+	pmap_avail_kvo = kvo;
+	pmap_avail[0].start = ram_start;
+	pmap_avail[0].size = ram_end-ram_start;
+
+	/* XXX - multiple sections */
+	physmem = atop(pmap_avail[0].size);
+
+	pmap_cnt_avail = 1;
+
+	pmap_avail_fixup();
+}
+
+/*
+ * Trim every region in pmap_avail to whole-page boundaries by carving
+ * off any unaligned head or tail.  pmap_remove_avail() may reshuffle
+ * the array, so rescan from the start after each trim.
+ */
+void
+pmap_avail_fixup(void)
+{
+	struct mem_region *mp;
+	vaddr_t head, tail;
+
+	mp = pmap_avail;
+	while (mp->size != 0) {
+		head = round_page(mp->start);
+		tail = mp->start + mp->size;
+		if (mp->start != head) {
+			/* Unaligned head: drop it and restart the scan. */
+			pmap_remove_avail(mp->start, head);
+			mp = pmap_avail;
+		} else if (tail != trunc_page(tail)) {
+			/* Unaligned tail: drop it and restart the scan. */
+			pmap_remove_avail(trunc_page(tail), tail);
+			mp = pmap_avail;
+		} else {
+			mp++;
+		}
+	}
+}
+
+/* remove a given region from avail memory */
+void
+pmap_remove_avail(paddr_t base, paddr_t end)
+{
+	struct mem_region *mp;
+	int i;
+	long mpend;
+
+	/* remove given region from available */
+	for (mp = pmap_avail; mp->size; mp++) {
+		/*
+		 * Check if this region holds all of the region
+		 */
+		mpend = mp->start + mp->size;
+		if (base > mpend) {
+			continue;
+		}
+		if (base <= mp->start) {
+			if (end <= mp->start)
+				break; /* region not present -??? */
+
+			if (end >= mpend) {
+				/* covers whole region */
+				/* shorten */
+				for (i = mp - pmap_avail;
+				    i < pmap_cnt_avail;
+				    i++) {
+					pmap_avail[i] = pmap_avail[i+1];
+				}
+				pmap_cnt_avail--;
+				pmap_avail[pmap_cnt_avail].size = 0;
+			} else {
+				/* overlaps the head: advance the start */
+				mp->start = end;
+				mp->size = mpend - end;
+			}
+		} else {
+			/* start after the beginning */
+			if (end >= mpend) {
+				/* just truncate */
+				mp->size = base - mp->start;
+			} else {
+				/* split */
+				for (i = pmap_cnt_avail;
+				    i > (mp - pmap_avail);
+				    i--) {
+					pmap_avail[i] = pmap_avail[i - 1];
+				}
+				pmap_cnt_avail++;
+				mp->size = base - mp->start;
+				mp++;
+				mp->start = end;
+				mp->size = mpend - end;
+			}
+		}
+	}
+	/* Record [base, end) in the allocated list, merging neighbors. */
+	for (mp = pmap_allocated; mp->size != 0; mp++) {
+		if (base < mp->start) {
+			if (end == mp->start) {
+				/* abuts the next region: extend it down */
+				mp->start = base;
+				mp->size += end - base;
+				break;
+			}
+			/* lengthen */
+			for (i = pmap_cnt_allocated; i > (mp - pmap_allocated);
+			    i--) {
+				pmap_allocated[i] = pmap_allocated[i - 1];
+			}
+			pmap_cnt_allocated++;
+			mp->start = base;
+			mp->size = end - base;
+			return;
+		}
+		if (base == (mp->start + mp->size)) {
+			/* abuts the previous region: extend it up */
+			mp->size += end - base;
+			return;
+		}
+	}
+	if (mp->size == 0) {
+		/* no neighbor found: append a new allocated region */
+		mp->start = base;
+		mp->size = end - base;
+		pmap_cnt_allocated++;
+	}
+}
+
+/* XXX - this zeros pages via their physical address */
+/*
+ * Steal size bytes of physical memory, aligned to align, from the
+ * available-region list; zero it through the direct map and optionally
+ * return its pre-KVO kernel VA via kva.  Panics when nothing fits.
+ */
+paddr_t
+pmap_steal_avail(size_t size, int align, void **kva)
+{
+	struct mem_region *mp;
+	long start;
+	long remsize;
+
+	for (mp = pmap_avail; mp->size; mp++) {
+		if (mp->size > size) {
+			start = (mp->start + (align -1)) & ~(align -1);
+			remsize = mp->size - (start - mp->start);
+			/*
+			 * Alignment may leave too little of the region
+			 * (remsize can even go negative); only steal when
+			 * the remainder still holds the full allocation.
+			 * Was "remsize >= 0", flagged buggy in the import.
+			 */
+			if (remsize >= (long)size) {
+				pmap_remove_avail(start, start+size);
+				if (kva != NULL){
+					*kva = (void *)(start - pmap_avail_kvo);
+				}
+				// XXX We clear the page based on its Direct
+				// Mapped address for now. Physical Addresses
+				// are not available because we have unmapped
+				// our identity mapped kernel. Should consider
+				// if we need to keep the identity mapping
+				// during pmap bootstrapping.
+				vaddr_t start_dmap = PHYS_TO_DMAP(start);
+				bzero((void*)(start_dmap), size);
+				return start;
+			}
+		}
+	}
+	panic("unable to allocate region with size %lx align %x",
+	    size, align);
+}
+
+/*
+ * Enter kernel mappings for every region stolen during bootstrap,
+ * choosing protection from the kernel image's section boundaries.
+ * Returns the first free VA past what was mapped.
+ * NOTE(review): if pmap_allocated were empty or every VA fell outside
+ * the kernel range, 'va' would be used uninitialized below -- the
+ * bootstrap path presumably guarantees at least one mapping; confirm.
+ */
+vaddr_t
+pmap_map_stolen(vaddr_t kernel_start)
+{
+	struct mem_region *mp;
+	paddr_t pa;
+	vaddr_t va;
+	uint64_t e;
+
+	for (mp = pmap_allocated; mp->size; mp++) {
+		for (e = 0; e < mp->size; e += PAGE_SIZE) {
+			int prot = PROT_READ | PROT_WRITE;
+
+			pa = mp->start + e;
+			va = pa - pmap_avail_kvo;
+
+			if (va < VM_MIN_KERNEL_ADDRESS ||
+			    va >= VM_MAX_KERNEL_ADDRESS)
+				continue;
+
+			/* text is r-x, rodata is r--, the rest is rw- */
+			if (va >= (vaddr_t)__text_start &&
+			    va < (vaddr_t)_etext)
+				prot = PROT_READ | PROT_EXEC;
+			else if (va >= (vaddr_t)__rodata_start &&
+			    va < (vaddr_t)_erodata)
+				prot = PROT_READ;
+
+			pmap_kenter_cache(va, pa, prot, PMAP_CACHE_WB);
+		}
+	}
+
+	return va + PAGE_SIZE;
+}
+
+/*
+ * Hand every remaining available region to UVM via uvm_page_physload(),
+ * skipping sub-page scraps and trimming to page boundaries.
+ */
+void
+pmap_physload_avail(void)
+{
+	struct mem_region *mp;
+	uint64_t start, end;
+
+	for (mp = pmap_avail; mp->size; mp++) {
+		if (mp->size < PAGE_SIZE) {
+			printf(" skipped - too small\n");
+			continue;
+		}
+		start = mp->start;
+		if (start & PAGE_MASK) {
+			/*
+			 * NOTE(review): looks intended to round start up
+			 * to the next page via PMAP_PA_MASK -- verify the
+			 * mask semantics; pmap_avail_fixup() should have
+			 * page-aligned these regions already.
+			 */
+			start = PAGE_SIZE + (start & PMAP_PA_MASK);
+		}
+		end = mp->start + mp->size;
+		if (end & PAGE_MASK) {
+			/* truncate end down to a page boundary */
+			end = (end & PMAP_PA_MASK);
+		}
+		uvm_page_physload(atop(start), atop(end),
+		    atop(start), atop(end), 0);
+
+	}
+}
+
+/*
+ * Debug helper: walk the vp hierarchy for va and print each level's
+ * software pointer and hardware entry.  Kernel VAs (top bit set) use
+ * the kernel pmap, others the current process's pmap.
+ */
+void
+pmap_show_mapping(uint64_t va)
+{
+	struct pmapvp1 *vp1;
+	struct pmapvp2 *vp2;
+	struct pmapvp3 *vp3;
+	struct pte_desc *pted;
+	struct pmap *pm;
+	uint64_t satp;
+
+	printf("showing mapping of %llx\n", va);
+
+	if (va & 1ULL << 63)
+		pm = pmap_kernel();
+	else
+		pm = curproc->p_vmspace->vm_map.pmap;
+
+	vp1 = pm->pm_vp.l1;
+
+	__asm volatile ("csrr %0, satp" : "=r" (satp));
+	printf("  satp  %llx %llx\n", satp, SATP_PPN(pm->pm_satp) << PAGE_SHIFT);
+	printf("  vp1 = %p\n", vp1);
+
+	vp2 = vp1->vp[VP_IDX1(va)];
+	printf("  vp2 = %p lp2 = %llx idx1 off %x\n",
+	    vp2, vp1->l1[VP_IDX1(va)], VP_IDX1(va)*8);
+	if (vp2 == NULL)
+		return;
+
+	vp3 = vp2->vp[VP_IDX2(va)];
+	printf("  vp3 = %p lp3 = %llx idx2 off %x\n",
+	    vp3, vp2->l2[VP_IDX2(va)], VP_IDX2(va)*8);
+	if (vp3 == NULL)
+		return;
+
+	pted = vp3->vp[VP_IDX3(va)];
+	printf("  pted = %p lp3 = %llx idx3 off  %x\n",
+	    pted, vp3->l3[VP_IDX3(va)], VP_IDX3(va)*8);
+}
+
+/* Record the root page-table PPN of pm in its satp value. */
+static __inline void
+pmap_set_ppn(pmap_t pm, paddr_t pa)
+{
+	pm->pm_satp |= SATP_FORMAT_PPN(PPN(pa));
+}
+
+/*
+ * Select the translation mode in pm's satp value.
+ * Always using Sv39.  XXX Support 4-level PT.
+ */
+static __inline void
+pmap_set_mode(pmap_t pm)
+{
+	pm->pm_satp |= SATP_MODE_SV39;
+}
+
+/*
+ * We allocate ASIDs in pairs. The first ASID is used to run the
+ * kernel and has both userland and the full kernel mapped. The
+ * second ASID is used for running userland and has only the
+ * trampoline page mapped in addition to userland.
+ */
+
+#define NUM_ASID (1 << 16)
+/* Allocation bitmap: one bit per ASID, claimed two (a pair) at a time. */
+uint32_t pmap_asid[NUM_ASID / 32];
+
+/*
+ * Pick a random free even-numbered ASID pair and record it in
+ * pm->pm_satp.  Lock-free: the pair is claimed with a compare-and-swap
+ * on its bitmap word, retrying on contention.
+ */
+void
+pmap_allocate_asid(pmap_t pm)
+{
+	uint32_t bits;
+	int asid, bit;
+
+	for (;;) {
+		do {
+			/* & (NUM_ASID - 2) forces an even (pair-base) asid */
+			asid = arc4random() & (NUM_ASID - 2);
+			bit = (asid & (32 - 1));
+			bits = pmap_asid[asid / 32];
+		} while (asid == 0 || (bits & (3U << bit)));
+
+		if (atomic_cas_uint(&pmap_asid[asid / 32], bits,
+		    bits | (3U << bit)) == bits)
+			break;
+	}
+	pm->pm_satp |= SATP_FORMAT_ASID(asid);
+}
+
+/*
+ * Release pm's ASID pair: flush any TLB entries tagged with either
+ * ASID of the pair, then clear both bits in the allocation bitmap
+ * with a compare-and-swap loop.
+ */
+void
+pmap_free_asid(pmap_t pm)
+{
+	uint32_t bits;
+	int asid, bit;
+
+	/* must not free the ASID that is live on this CPU */
+	KASSERT(pm != curcpu()->ci_curpm);
+	asid = SATP_ASID(pm->pm_satp);
+	cpu_tlb_flush_asid_all(asid);
+	cpu_tlb_flush_asid_all(asid | ASID_USER);
+
+	bit = (asid & (32 - 1));
+	for (;;) {
+		bits = pmap_asid[asid / 32];
+		if (atomic_cas_uint(&pmap_asid[asid / 32], bits,
+		    bits & ~(3U << bit)) == bits)
+			break;
+	}
+}
+
+/*
+ * Load p's pmap into the satp CSR, fence stale translations, and
+ * record the pmap as current on this CPU.
+ */
+void
+pmap_set_satp(struct proc *p)
+{
+	struct cpu_info *ci = curcpu();
+	pmap_t pm = p->p_vmspace->vm_map.pmap;
+
+	load_satp(pm->pm_satp);
+	/* flush translations cached under the previous satp */
+	__asm __volatile("sfence.vma");
+	ci->ci_curpm = pm;
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Patrick Wildt <patrick@blueri.se>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file may seem a bit stylized, but that is so that it's easier to port.
+ * Functions to be implemented here are:
+ *
+ * process_read_regs(proc, regs)
+ * Get the current user-visible register set from the process
+ * and copy it into the regs structure (<machine/reg.h>).
+ * The process is stopped at the time read_regs is called.
+ *
+ * process_write_regs(proc, regs)
+ * Update the current register set from the passed in regs
+ * structure. Take care to avoid clobbering special CPU
+ * registers or privileged bits in the PSL.
+ * The process is stopped at the time write_regs is called.
+ *
+ * process_sstep(proc, sstep)
+ * Arrange for the process to trap or not trap depending on sstep
+ * after executing a single instruction.
+ *
+ * process_set_pc(proc)
+ * Set the process's program counter.
+ */
+
+#include <sys/param.h>
+
+#include <sys/proc.h>
+#include <sys/ptrace.h>
+#include <sys/systm.h>
+#include <sys/user.h>
+
+#include <machine/pcb.h>
+#include <machine/reg.h>
+#include <machine/riscvreg.h>
+
+/*
+ * Copy the user-visible register state out of the process's trapframe
+ * into regs.  (The "®s" in the import was a mis-encoding of "&regs".)
+ */
+int
+process_read_regs(struct proc *p, struct reg *regs)
+{
+	struct trapframe *tf = p->p_addr->u_pcb.pcb_tf;
+
+	memcpy(&regs->r_t[0], &tf->tf_t[0], sizeof(regs->r_t));
+	memcpy(&regs->r_s[0], &tf->tf_s[0], sizeof(regs->r_s));
+	memcpy(&regs->r_a[0], &tf->tf_a[0], sizeof(regs->r_a));
+	regs->r_ra = tf->tf_ra;
+	regs->r_sp = tf->tf_sp;
+	regs->r_gp = tf->tf_gp;
+	regs->r_tp = tf->tf_tp;//following Freebsd
+	//regs->r_tp = (uint64_t)p->p_addr->u_pcb.pcb_tcb;//XXX why?
+	//XXX freebsd adds the following two fields so we just follow.
+	regs->r_sepc = tf->tf_sepc;
+	regs->r_sstatus = tf->tf_sstatus;
+
+	return(0);
+}
+
+#if 0
+/*
+ * Copy out the FPU state when the pcb holds one; otherwise report
+ * zeroed registers.  Disabled until FPU context switching lands.
+ */
+int
+process_read_fpregs(struct proc *p, struct fpreg *regs)
+{
+	if (p->p_addr->u_pcb.pcb_flags & PCB_FPU)
+		memcpy(regs, &p->p_addr->u_pcb.pcb_fpstate, sizeof(*regs));
+	else
+		memset(regs, 0, sizeof(*regs));
+
+	return(0);
+}
+#endif
+#ifdef PTRACE
+
+/*
+ * Install regs into the process's trapframe; used by ptrace to update
+ * a stopped process.  (The "®s" in the import was a mis-encoding of
+ * "&regs".)
+ */
+int
+process_write_regs(struct proc *p, struct reg *regs)
+{
+	struct trapframe *tf = p->p_addr->u_pcb.pcb_tf;
+
+	memcpy(&tf->tf_t[0], &regs->r_t[0], sizeof(tf->tf_t));
+	memcpy(&tf->tf_s[0], &regs->r_s[0], sizeof(tf->tf_s));
+	memcpy(&tf->tf_a[0], &regs->r_a[0], sizeof(tf->tf_a));
+	tf->tf_ra = regs->r_ra;
+	tf->tf_sp = regs->r_sp;
+	tf->tf_gp = regs->r_gp;
+	tf->tf_tp = regs->r_tp; //XXX
+	tf->tf_sepc = regs->r_sepc;
+	//p->p_addr->u_pcb.pcb_tcb = (void *)regs->r_tp;//XXX why? freebsd just copied r_tp to tf_tp
+	//XXX should we add r_sepc and sstatus also?
+	return(0);
+}
+
+#if 0
+/*
+ * Install user-supplied FPU state into the pcb and mark it valid.
+ * Disabled until FPU context switching lands.
+ */
+int
+process_write_fpregs(struct proc *p, struct fpreg *regs)
+{
+	p->p_addr->u_pcb.pcb_flags |= PCB_FPU;
+	memcpy(&p->p_addr->u_pcb.pcb_fpstate, regs,
+	    sizeof(p->p_addr->u_pcb.pcb_fpstate));
+	return(0);
+}
+#endif
+
+/*
+ * Arrange for the process to single-step (or stop single-stepping).
+ * Not implemented on riscv64 yet; the disabled sketch is arm64-style
+ * (PSR_SS has no direct riscv64 equivalent).
+ */
+int
+process_sstep(struct proc *p, int sstep)
+{
+#if 0
+	//XXX TODO
+	struct trapframe *tf = p->p_addr->u_pcb.pcb_tf;
+
+	if (sstep) {
+		p->p_addr->u_pcb.pcb_flags |= PCB_SINGLESTEP;
+		tf->tf_spsr |= PSR_SS;
+	} else {
+		p->p_addr->u_pcb.pcb_flags &= ~(PCB_SINGLESTEP);
+		tf->tf_spsr &= ~PSR_SS;
+	}
+	return 0;
+#endif
+	return (EOPNOTSUPP);
+}
+
+/* Set the process's program counter: it resumes at addr via sepc. */
+int
+process_set_pc(struct proc *p, caddr_t addr)
+{
+	p->p_addr->u_pcb.pcb_tf->tf_sepc = (uint64_t)addr;
+	return (0);
+}
+
+#endif /* PTRACE */
--- /dev/null
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2019 Mitchell Horne <mhorne@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/types.h>
+
+#if 0
+#include <machine/md_var.h>
+#endif
+#include <machine/sbi.h>
+
+/* SBI Implementation-Specific Definitions */
+/* OpenSBI packs its version as (major << 16) | minor. */
+#define OPENSBI_VERSION_MAJOR_OFFSET 16
+#define OPENSBI_VERSION_MINOR_MASK 0xFFFF
+
+/* Cached SBI identity, filled in once by sbi_init(). */
+u_long sbi_spec_version;
+u_long sbi_impl_id;
+u_long sbi_impl_version;
+
+/* SBI base extension: query the implemented specification version. */
+static struct sbi_ret
+sbi_get_spec_version(void)
+{
+	return (SBI_CALL0(SBI_EXT_ID_BASE, SBI_BASE_GET_SPEC_VERSION));
+}
+
+/* SBI base extension: query the implementation identifier. */
+static struct sbi_ret
+sbi_get_impl_id(void)
+{
+	return (SBI_CALL0(SBI_EXT_ID_BASE, SBI_BASE_GET_IMPL_ID));
+}
+
+/* SBI base extension: query the implementation version. */
+static struct sbi_ret
+sbi_get_impl_version(void)
+{
+	return (SBI_CALL0(SBI_EXT_ID_BASE, SBI_BASE_GET_IMPL_VERSION));
+}
+
+/* SBI base extension: query the machine vendor ID (mvendorid CSR). */
+static struct sbi_ret
+sbi_get_mvendorid(void)
+{
+	return (SBI_CALL0(SBI_EXT_ID_BASE, SBI_BASE_GET_MVENDORID));
+}
+
+
+/* SBI base extension: query the machine architecture ID (marchid CSR). */
+static struct sbi_ret
+sbi_get_marchid(void)
+{
+	return (SBI_CALL0(SBI_EXT_ID_BASE, SBI_BASE_GET_MARCHID));
+}
+
+/* SBI base extension: query the machine implementation ID (mimpid CSR). */
+static struct sbi_ret
+sbi_get_mimpid(void)
+{
+	return (SBI_CALL0(SBI_EXT_ID_BASE, SBI_BASE_GET_MIMPID));
+}
+
+/*
+ * Print the SBI implementation name/version and the specification
+ * version cached by sbi_init().
+ */
+void
+sbi_print_version(void)
+{
+	u_int major;
+	u_int minor;
+
+	/* For legacy SBI implementations. */
+	if (sbi_spec_version == 0) {
+		printf("SBI: Unknown (Legacy) Implementation\n");
+		printf("SBI Specification Version: 0.1\n");
+		return;
+	}
+
+	switch (sbi_impl_id) {
+	case (SBI_IMPL_ID_BBL):
+		/* was misspelled "Berkely" */
+		printf("SBI: Berkeley Boot Loader %lu\n", sbi_impl_version);
+		break;
+	case (SBI_IMPL_ID_OPENSBI):
+		major = sbi_impl_version >> OPENSBI_VERSION_MAJOR_OFFSET;
+		minor = sbi_impl_version & OPENSBI_VERSION_MINOR_MASK;
+		printf("SBI: OpenSBI v%u.%u\n", major, minor);
+		break;
+	default:
+		printf("SBI: Unrecognized Implementation: %lu\n", sbi_impl_id);
+		break;
+	}
+
+	major = (sbi_spec_version & SBI_SPEC_VERS_MAJOR_MASK) >>
+	    SBI_SPEC_VERS_MAJOR_OFFSET;
+	minor = (sbi_spec_version & SBI_SPEC_VERS_MINOR_MASK);
+	printf("SBI Specification Version: %u.%u\n", major, minor);
+}
+
/*
 * Probe the SBI firmware: record the specification version and the
 * implementation id/version, then verify that every legacy extension
 * this port depends on is actually provided by the SEE.
 */
void
sbi_init(void)
{
	struct sbi_ret sret;

	/*
	 * Get the spec version. For legacy SBI implementations this will
	 * return an error, otherwise it is guaranteed to succeed.
	 */
	sret = sbi_get_spec_version();
	if (sret.error != 0) {
		/* We are running a legacy SBI implementation. */
		sbi_spec_version = 0;
		return;
	}

	/* Set the SBI implementation info. */
	sbi_spec_version = sret.value;
	sbi_impl_id = sbi_get_impl_id().value;
	sbi_impl_version = sbi_get_impl_version().value;

	// XXX Move somewhere accessible -- md_var.h?
	/* Queried but only held in locals here; not exported yet. */
	register_t mvendorid;
	register_t marchid;
	register_t mimpid;
	/* Set the hardware implementation info. */
	mvendorid = sbi_get_mvendorid().value;
	marchid = sbi_get_marchid().value;
	mimpid = sbi_get_mimpid().value;

	/*
	 * Probe for legacy extensions. Currently we rely on all of them
	 * to be implemented, but this is not guaranteed by the spec.
	 */
	KASSERTMSG(sbi_probe_extension(SBI_SET_TIMER) != 0,
	    "SBI doesn't implement sbi_set_timer()");
	KASSERTMSG(sbi_probe_extension(SBI_CONSOLE_PUTCHAR) != 0,
	    "SBI doesn't implement sbi_console_putchar()");
	KASSERTMSG(sbi_probe_extension(SBI_CONSOLE_GETCHAR) != 0,
	    "SBI doesn't implement sbi_console_getchar()");
	KASSERTMSG(sbi_probe_extension(SBI_CLEAR_IPI) != 0,
	    "SBI doesn't implement sbi_clear_ipi()");
	KASSERTMSG(sbi_probe_extension(SBI_SEND_IPI) != 0,
	    "SBI doesn't implement sbi_send_ipi()");
	KASSERTMSG(sbi_probe_extension(SBI_REMOTE_FENCE_I) != 0,
	    "SBI doesn't implement sbi_remote_fence_i()");
	KASSERTMSG(sbi_probe_extension(SBI_REMOTE_SFENCE_VMA) != 0,
	    "SBI doesn't implement sbi_remote_sfence_vma()");
	KASSERTMSG(sbi_probe_extension(SBI_REMOTE_SFENCE_VMA_ASID) != 0,
	    "SBI doesn't implement sbi_remote_sfence_vma_asid()");
	KASSERTMSG(sbi_probe_extension(SBI_SHUTDOWN) != 0,
	    "SBI doesn't implement sbi_shutdown()");
}
--- /dev/null
+/*
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz and Don Ahn.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+/*
+ * Copyright (c) 2001 Opsycon AB (www.opsycon.se)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/param.h>
+
+#include <sys/mount.h> /* XXX only needed by syscallargs.h */
+#include <sys/proc.h>
+#include <sys/signal.h>
+#include <sys/signalvar.h>
+#include <sys/syscallargs.h>
+#include <sys/systm.h>
+#include <sys/user.h>
+
+#include <machine/riscvreg.h>
+#include <machine/cpu.h>
+#include <machine/frame.h>
+#include <machine/pcb.h>
+
+#include <uvm/uvm_extern.h>
+
/* Return the saved userland trapframe of process p (from its PCB). */
static __inline struct trapframe *
process_frame(struct proc *p)
{
	return p->p_addr->u_pcb.pcb_tf;
}
+
+void dumpframe (char *msg, struct trapframe *tf, void *p)
+{
+ int i;
+ printf("%s\n",msg);
+ printf("pc %lx ra %lx sp %lx tp %lx\n", tf->tf_sepc, tf->tf_ra, tf->tf_sp, tf->tf_tp);
+ for(i = 0; i < 7; i++)
+ printf("%st%d %lx", (i==0)?"":", ", i, tf->tf_t[i]);
+ printf("\n");
+ for(i = 0; i < 12; i++)
+ printf("%ss%d %lx", (i==0)?"":", ", i, tf->tf_s[i]);
+ printf("\n");
+ for(i = 0; i < 8; i++)
+ printf("%sa%d %lx", (i==0)?"":", ", i, tf->tf_a[i]);
+ printf("\n");
+ if (p != NULL)
+ printf("fp %p\n", p);
+}
+
+/*
+ * Send an interrupt to process.
+ *
+ * Stack is set up to allow sigcode to call routine, followed by
+ * syscall to sigreturn routine below. After sigreturn resets the
+ * signal mask, the stack, and the frame pointer, it returns to the
+ * user specified pc.
+ */
+int
+sendsig(sig_t catcher, int sig, sigset_t mask, const siginfo_t *ksip)
+{
+ struct proc *p = curproc;
+ struct trapframe *tf;
+ struct sigframe *fp, frame;
+ struct sigacts *psp = p->p_p->ps_sigacts;
+ siginfo_t *sip = NULL;
+ int i;
+
+ tf = process_frame(p);
+
+ /* Allocate space for the signal handler context. */
+ if ((p->p_sigstk.ss_flags & SS_DISABLE) == 0 &&
+ !sigonstack(tf->tf_sp) && (psp->ps_sigonstack & sigmask(sig)))
+ fp = (struct sigframe *)
+ trunc_page((vaddr_t)p->p_sigstk.ss_sp + p->p_sigstk.ss_size);
+ else
+ fp = (struct sigframe *)tf->tf_sp;
+
+ /* make room on the stack */
+ fp--;
+
+ /* make the stack aligned */
+ fp = (struct sigframe *)STACKALIGN(fp);
+
+ /* Build stack frame for signal trampoline. */
+ bzero(&frame, sizeof(frame));
+ frame.sf_signum = sig;
+
+ //dumpframe ("before", tf, fp);
+
+ /* Save register context. */
+ for (i=0; i < 7; i++)
+ frame.sf_sc.sc_t[i] = tf->tf_t[i];
+ for (i=0; i < 12; i++)
+ frame.sf_sc.sc_s[i] = tf->tf_s[i];
+ for (i=0; i < 8; i++)
+ frame.sf_sc.sc_a[i] = tf->tf_a[i];
+ frame.sf_sc.sc_ra = tf->tf_ra;
+ frame.sf_sc.sc_sp = tf->tf_sp;
+ frame.sf_sc.sc_tp = tf->tf_tp;
+ frame.sf_sc.sc_sepc = tf->tf_sepc;
+
+ /* Save signal mask. */
+ frame.sf_sc.sc_mask = mask;
+
+ /* XXX Save floating point context */
+ /* XXX! */
+
+ if (psp->ps_siginfo & sigmask(sig)) {
+ sip = &fp->sf_si;
+ frame.sf_si = *ksip;
+ }
+
+ frame.sf_sc.sc_cookie = (long)&fp->sf_sc ^ p->p_p->ps_sigcookie;
+ if (copyout(&frame, fp, sizeof(frame)) != 0) {
+ /*
+ * Process has trashed its stack; give it an illegal
+ * instruction to halt it in its tracks.
+ */
+ /* NOTREACHED */
+ return 1;
+ }
+
+
+ /*
+ * Build context to run handler in. We invoke the handler
+ * directly, only returning via the trampoline.
+ */
+ tf->tf_a[0] = sig;
+ tf->tf_a[1] = (register_t)sip;
+ tf->tf_a[2] = (register_t)&fp->sf_sc;
+ tf->tf_ra = p->p_p->ps_sigcode;
+ tf->tf_sp = (register_t)fp;
+
+ tf->tf_sepc = (register_t)catcher;
+
+ return 0;
+}
+
+/*
+ * System call to cleanup state after a signal
+ * has been taken. Reset signal mask and
+ * stack state from context left by sendsig (above).
+ * Return to previous pc and psl as specified by
+ * context left by sendsig. Check carefully to
+ * make sure that the user has not modified the
+ * psr to gain improper privileges or to cause
+ * a machine fault.
+ */
+
+int
+sys_sigreturn(struct proc *p, void *v, register_t *retval)
+{
+
+ struct sys_sigreturn_args /* {
+ syscallarg(struct sigcontext *) sigcntxp;
+ } */ *uap = v;
+ struct sigcontext ksc, *scp = SCARG(uap, sigcntxp);
+ struct trapframe *tf;
+ int i;
+
+ if (PROC_PC(p) != p->p_p->ps_sigcoderet) {
+ sigexit(p, SIGILL);
+ return (EPERM);
+ }
+
+ if (copyin(scp, &ksc, sizeof(*scp)) != 0)
+ return (EFAULT);
+
+ if (ksc.sc_cookie != ((long)scp ^ p->p_p->ps_sigcookie)) {
+ sigexit(p, SIGILL);
+ return (EFAULT);
+ }
+
+ /* Prevent reuse of the sigcontext cookie */
+ ksc.sc_cookie = 0;
+ (void)copyout(&ksc.sc_cookie, (caddr_t)scp +
+ offsetof(struct sigcontext, sc_cookie), sizeof (ksc.sc_cookie));
+
+ /*
+ * Make sure the processor mode has not been tampered with and
+ * interrupts have not been disabled.
+ */
+#if 0
+ /* XXX include sanity check */
+ if ((ksc.sc_spsr & PSR_M_MASK) != PSR_M_EL0t ||
+ (ksc.sc_spsr & (PSR_I | PSR_F)) != 0)
+ return (EINVAL);
+#endif
+
+ /* XXX Restore floating point context */
+
+ /* Restore register context. */
+ tf = process_frame(p);
+ for (i=0; i < 7; i++)
+ tf->tf_t[i] = ksc.sc_t[i];
+ for (i=0; i < 12; i++)
+ tf->tf_s[i] = ksc.sc_s[i];
+ for (i=0; i < 8; i++)
+ tf->tf_a[i] = ksc.sc_a[i];
+ tf->tf_ra = ksc.sc_ra;
+ tf->tf_sp = ksc.sc_sp;
+ tf->tf_tp = ksc.sc_tp;
+ tf->tf_sepc = ksc.sc_sepc;
+
+ //dumpframe ("after", tf, 0);
+
+ /* Restore signal mask. */
+ p->p_sigmask = ksc.sc_mask & ~sigcantmask;
+
+ return (EJUSTRETURN);
+}
--- /dev/null
+/*-
+ * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Generic soft interrupt implementation
+ */
+
+#include <sys/param.h>
+#include <sys/malloc.h>
+
+#include <machine/intr.h>
+
+#include <uvm/uvm_extern.h>
+
/* Per-level soft interrupt state, indexed by SI_* level. */
struct soft_intr soft_intrs[SI_NSOFTINTR];

/* Map an SI_* level index to the SIR_* bit used to post it. */
const int soft_intr_to_ssir[SI_NSOFTINTR] = {
	SIR_SOFT,
	SIR_CLOCK,
	SIR_NET,
	SIR_TTY,
};

/* MP: wrapper that takes the kernel lock around a non-MPSAFE handler. */
void softintr_biglock_wrap(void *);
+
+/*
+ * softintr_init:
+ *
+ * Initialize the software interrupt system.
+ */
+void
+softintr_init(void)
+{
+ struct soft_intr *si;
+ int i;
+
+ for (i = 0; i < SI_NSOFTINTR; i++) {
+ si = &soft_intrs[i];
+ TAILQ_INIT(&si->softintr_q);
+ mtx_init(&si->softintr_lock, IPL_HIGH);
+ si->softintr_ssir = soft_intr_to_ssir[i];
+ }
+}
+
+/*
+ * softintr_dispatch:
+ *
+ * Process pending software interrupts.
+ */
+void
+softintr_dispatch(int which)
+{
+ struct soft_intr *si = &soft_intrs[which];
+ struct soft_intrhand *sih;
+ void *arg;
+ void (*fn)(void *);
+
+ for (;;) {
+ mtx_enter(&si->softintr_lock);
+ sih = TAILQ_FIRST(&si->softintr_q);
+ if (sih == NULL) {
+ mtx_leave(&si->softintr_lock);
+ break;
+ }
+ TAILQ_REMOVE(&si->softintr_q, sih, sih_q);
+ sih->sih_pending = 0;
+
+ uvmexp.softs++;
+ arg = sih->sih_arg;
+ fn = sih->sih_fn;
+ mtx_leave(&si->softintr_lock);
+
+ (*fn)(arg);
+ }
+}
+
#ifdef MULTIPROCESSOR
/*
 * Trampoline for handlers registered without SOFTINTR_ESTABLISH_MPSAFE:
 * run the real handler with the kernel lock held.
 */
void
softintr_biglock_wrap(void *arg)
{
	struct soft_intrhand *sih = arg;

	KERNEL_LOCK();
	sih->sih_fnwrap(sih->sih_argwrap);
	KERNEL_UNLOCK();
}
#endif
+
+/*
+ * softintr_establish: [interface]
+ *
+ * Register a software interrupt handler.
+ */
+void *
+softintr_establish_flags(int ipl, void (*func)(void *), void *arg, int flags)
+{
+ struct soft_intr *si;
+ struct soft_intrhand *sih;
+ int which;
+
+ switch (ipl) {
+ case IPL_SOFTCLOCK:
+ which = SIR_CLOCK;
+ break;
+
+ case IPL_SOFTNET:
+ which = SIR_NET;
+ break;
+
+ case IPL_TTY:
+ case IPL_SOFTTTY:
+ which = SIR_TTY;
+ break;
+
+ default:
+ panic("softintr_establish");
+ }
+
+ si = &soft_intrs[which];
+
+ sih = malloc(sizeof(*sih), M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (__predict_true(sih != NULL)) {
+ sih->sih_intrhead = si;
+#ifdef MULTIPROCESSOR
+ if (flags & SOFTINTR_ESTABLISH_MPSAFE) {
+#endif
+ sih->sih_fn = func;
+ sih->sih_arg = arg;
+#ifdef MULTIPROCESSOR
+ } else {
+ sih->sih_fnwrap = func;
+ sih->sih_argwrap = arg;
+ sih->sih_fn = softintr_biglock_wrap;
+ sih->sih_arg = sih;
+ }
+#endif
+ }
+ return (sih);
+}
+
+/*
+ * softintr_disestablish: [interface]
+ *
+ * Unregister a software interrupt handler.
+ */
+void
+softintr_disestablish(void *arg)
+{
+ struct soft_intrhand *sih = arg;
+ struct soft_intr *si = sih->sih_intrhead;
+
+ mtx_enter(&si->softintr_lock);
+ if (sih->sih_pending) {
+ TAILQ_REMOVE(&si->softintr_q, sih, sih_q);
+ sih->sih_pending = 0;
+ }
+ mtx_leave(&si->softintr_lock);
+
+ free(sih, M_DEVBUF, 0);
+}
+
/*
 * Post soft interrupt `intrq': set its bit in this CPU's pending mask.
 * NOTE(review): the read-modify-write on ci_ipending is not atomic by
 * itself; the comment below says callers serialize with a mutex.
 */
void
softintr(int intrq)
{
	// protected by mutex in caller
	curcpu()->ci_ipending |= (1 << intrq);
}
--- /dev/null
+/*-
+ * Copyright (c) 2015-2020 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * Portions of this software were developed by SRI International and the
+ * University of Cambridge Computer Laboratory under DARPA/AFRL contract
+ * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Portions of this software were developed by the University of Cambridge
+ * Computer Laboratory as part of the CTSRD Project, with support from the
+ * UK Higher Education Innovation Fund (HEIF).
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "assym.h"
+#include <machine/asm.h>
+#include <machine/setjmp.h>
+
/*
 * Kernel setjmp: save the callee-saved machine state into the buffer
 * at a0 (layout: sp, then s0-s11, then ra) and return 0.  Caller-saved
 * registers are not preserved, per the calling convention.
 */
ENTRY(setjmp)
	/* Store the stack pointer */
	sd sp, 0(a0)
	addi a0, a0, 8

	/* Store the general purpose registers and ra */
	sd s0, (0 * 8)(a0)
	sd s1, (1 * 8)(a0)
	sd s2, (2 * 8)(a0)
	sd s3, (3 * 8)(a0)
	sd s4, (4 * 8)(a0)
	sd s5, (5 * 8)(a0)
	sd s6, (6 * 8)(a0)
	sd s7, (7 * 8)(a0)
	sd s8, (8 * 8)(a0)
	sd s9, (9 * 8)(a0)
	sd s10, (10 * 8)(a0)
	sd s11, (11 * 8)(a0)
	sd ra, (12 * 8)(a0)

	/* Return value */
	li a0, 0
	ret
+
/*
 * Kernel longjmp: restore the state saved by setjmp from the buffer at
 * a0, then "return" a1 from the original setjmp call site (via the
 * restored ra).  NOTE(review): unlike userland longjmp(3), a1 == 0 is
 * not coerced to 1 -- callers must pass a nonzero value.
 */
ENTRY(longjmp)
	/* Restore the stack pointer */
	ld sp, 0(a0)
	addi a0, a0, 8

	/* Restore the general purpose registers and ra */
	ld s0, (0 * 8)(a0)
	ld s1, (1 * 8)(a0)
	ld s2, (2 * 8)(a0)
	ld s3, (3 * 8)(a0)
	ld s4, (4 * 8)(a0)
	ld s5, (5 * 8)(a0)
	ld s6, (6 * 8)(a0)
	ld s7, (7 * 8)(a0)
	ld s8, (8 * 8)(a0)
	ld s9, (9 * 8)(a0)
	ld s10, (10 * 8)(a0)
	ld s11, (11 * 8)(a0)
	ld ra, (12 * 8)(a0)

	/* Load the return value */
	mv a0, a1
	ret
--- /dev/null
+/*
+ * Copyright (c) 2020 Brian Bamsch <bbamsch@google.com>
+ * Copyright (c) 2015 Dale Rahn <drahn@dalerahn.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/signalvar.h>
+#include <sys/user.h>
+#include <sys/vnode.h>
+#include <sys/signal.h>
+#include <sys/syscall.h>
+#include <sys/syscall_mi.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <machine/syscall.h>
+
+#define MAXARGS 8
+
/* Return the saved userland trapframe of process p (from its PCB). */
static __inline struct trapframe *
process_frame(struct proc *p)
{
	return p->p_addr->u_pcb.pcb_tf;
}
+
/*
 * System call handler, invoked from do_trap_user() for EXCP_USER_ECALL.
 * The syscall number is taken from t0, arguments from a0-a7 (overflow
 * arguments are copied in from the user stack).  Results are returned
 * in a0/a1 with t0 acting as the success (0) / error (1) flag.
 */
void
svc_handler(trapframe_t *frame)
{
	struct proc *p = curproc;
	const struct sysent *callp;
	int code, error;
	u_int nap = 8, nargs;
	register_t *ap, *args, copyargs[MAXARGS], rval[2];

	uvmexp.syscalls++;

#if 0 // XXX Save FPU State
	/* Before enabling interrupts, save FPU state */
	vfp_save();
#endif

#if 0 // XXX Re-enable interrupts
	/* Re-enable interrupts if they were enabled previously */
	if (__predict_true((frame->tf_spsr & I_bit) == 0))
		enable_interrupts();
#endif

	ap = &frame->tf_a[0];	// Pointer to first arg
	code = frame->tf_t[0];	// Syscall code
	callp = p->p_p->ps_emul->e_sysent;

	/* Both indirect forms carry the real code in the first argument. */
	switch (code) {
	case SYS_syscall:
		code = *ap++;
		nap--;
		break;
	case SYS___syscall:
		code = *ap++;
		nap--;
		break;
	}

	/* Out-of-range codes are routed to the nosys entry. */
	if (code < 0 || code >= p->p_p->ps_emul->e_nsysent) {
		callp += p->p_p->ps_emul->e_nosys;
	} else {
		callp += code;
	}
	nargs = callp->sy_argsize / sizeof(register_t);
	if (nargs <= nap) {
		args = ap;
	} else {
		/* More args than registers: rest live on the user stack. */
		KASSERT(nargs <= MAXARGS);
		memcpy(copyargs, ap, nap * sizeof(register_t));
		if ((error = copyin((void *)frame->tf_sp, copyargs + nap,
		    (nargs - nap) * sizeof(register_t))))
			goto bad;
		args = copyargs;
	}

	rval[0] = 0;
	rval[1] = frame->tf_a[1];

	error = mi_syscall(p, code, callp, args, rval);

	switch (error) {
	case 0:
		frame->tf_a[0] = rval[0];
		frame->tf_a[1] = rval[1];
		frame->tf_t[0] = 0;		/* syscall succeeded */
		break;

	case ERESTART:
		/* Back up over the 4-byte ecall so it is re-executed. */
		frame->tf_sepc -= 4;		/* prev instruction */
		break;

	case EJUSTRETURN:
		break;

	default:
	bad:
		frame->tf_a[0] = error;
		frame->tf_t[0] = 1;		/* syscall error */
		break;
	}

	mi_syscall_return(p, code, error, rval);
}
+
+void
+child_return(arg)
+ void *arg;
+{
+ struct proc *p = arg;
+ struct trapframe *frame = process_frame(p);;
+
+ frame->tf_a[0] = 0;
+ frame->tf_a[1] = 1;
+ // XXX How to signal error?
+ frame->tf_t[0] = 0;
+
+ KERNEL_UNLOCK();
+
+ mi_child_return(p);
+}
--- /dev/null
+/*
+ * Copyright (c) 2020 Mengshi Li <mengshi.li.mars@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "assym.h"
+#include <machine/asm.h>
+#include <machine/trap.h>
+#include <machine/riscvreg.h>
+
/*
 * save_registers el: carve a struct trapframe out of the current stack
 * and save the full register state into it.  el == 0 means the trap
 * came from userspace (sp is already the kernel stack, courtesy of the
 * sscratch swap in cpu_trap_handler); el == 1 means supervisor mode.
 */
.macro save_registers el
	addi sp, sp, -(TRAPFRAME_SIZEOF)

	sd ra, (TF_RA)(sp)

.if \el == 0 /* We came from userspace. */
	sd gp, (TF_GP)(sp)
.option push
.option norelax
	/* Load the kernel's global pointer */
	la gp, __global_pointer$
.option pop

	/* Load our pcpu */
	/* The cpu_info pointer lives in the slot just above the trapframe;
	 * restore_registers stashes tp there on the way back to user. */
	sd tp, (TF_TP)(sp)
	ld tp, (TRAPFRAME_SIZEOF)(sp)
.endif

	sd t0, (TF_T + 0 * 8)(sp)
	sd t1, (TF_T + 1 * 8)(sp)
	sd t2, (TF_T + 2 * 8)(sp)
	sd t3, (TF_T + 3 * 8)(sp)
	sd t4, (TF_T + 4 * 8)(sp)
	sd t5, (TF_T + 5 * 8)(sp)
	sd t6, (TF_T + 6 * 8)(sp)

	sd s0, (TF_S + 0 * 8)(sp)
	sd s1, (TF_S + 1 * 8)(sp)
	sd s2, (TF_S + 2 * 8)(sp)
	sd s3, (TF_S + 3 * 8)(sp)
	sd s4, (TF_S + 4 * 8)(sp)
	sd s5, (TF_S + 5 * 8)(sp)
	sd s6, (TF_S + 6 * 8)(sp)
	sd s7, (TF_S + 7 * 8)(sp)
	sd s8, (TF_S + 8 * 8)(sp)
	sd s9, (TF_S + 9 * 8)(sp)
	sd s10, (TF_S + 10 * 8)(sp)
	sd s11, (TF_S + 11 * 8)(sp)

	sd a0, (TF_A + 0 * 8)(sp)
	sd a1, (TF_A + 1 * 8)(sp)
	sd a2, (TF_A + 2 * 8)(sp)
	sd a3, (TF_A + 3 * 8)(sp)
	sd a4, (TF_A + 4 * 8)(sp)
	sd a5, (TF_A + 5 * 8)(sp)
	sd a6, (TF_A + 6 * 8)(sp)
	sd a7, (TF_A + 7 * 8)(sp)

.if \el == 1
	/* Store kernel sp */
	/* i.e. sp as it was before this macro pushed the trapframe. */
	li t1, TRAPFRAME_SIZEOF
	add t0, sp, t1
	sd t0, (TF_SP)(sp)
.else
	/* Store user sp */
	/* cpu_trap_handler left the user sp in sscratch. */
	csrr t0, sscratch
	sd t0, (TF_SP)(sp)
.endif
	/* sscratch = 0 marks "in kernel" for the trap vector's mode test. */
	li t0, 0
	csrw sscratch, t0
	csrr t0, sepc
	sd t0, (TF_SEPC)(sp)
	csrr t0, sstatus
	sd t0, (TF_SSTATUS)(sp)
	csrr t0, stval
	sd t0, (TF_STVAL)(sp)
	csrr t0, scause
	sd t0, (TF_SCAUSE)(sp)
.endm
+
/*
 * restore_registers el: undo save_registers -- reload the register
 * state from the trapframe at sp and pop it.  el == 0 returns to
 * userspace, el == 1 to supervisor mode.
 */
.macro restore_registers el
	ld t0, (TF_SSTATUS)(sp)
.if \el == 0
	/* Ensure user interrupts will be enabled on eret */
	li t1, SSTATUS_SPIE
	or t0, t0, t1
.else
	/*
	 * Disable interrupts for supervisor mode exceptions.
	 * For user mode exceptions we have already done this
	 * in do_ast.
	 */
	li t1, ~SSTATUS_SIE
	and t0, t0, t1
.endif
	csrw sstatus, t0

	ld t0, (TF_SEPC)(sp)
	csrw sepc, t0

.if \el == 0
	/* We go to userspace. Load user sp */
	/* sscratch again holds the user sp for the final swap in the
	 * trap return path. */
	ld t0, (TF_SP)(sp)
	csrw sscratch, t0

	/* Store our pcpu */
	/* Stash cpu_info above the frame for the next save_registers 0. */
	sd tp, (TRAPFRAME_SIZEOF)(sp)
	ld tp, (TF_TP)(sp)

	/* And restore the user's global pointer */
	ld gp, (TF_GP)(sp)
.endif

	ld ra, (TF_RA)(sp)

	ld t0, (TF_T + 0 * 8)(sp)
	ld t1, (TF_T + 1 * 8)(sp)
	ld t2, (TF_T + 2 * 8)(sp)
	ld t3, (TF_T + 3 * 8)(sp)
	ld t4, (TF_T + 4 * 8)(sp)
	ld t5, (TF_T + 5 * 8)(sp)
	ld t6, (TF_T + 6 * 8)(sp)

	ld s0, (TF_S + 0 * 8)(sp)
	ld s1, (TF_S + 1 * 8)(sp)
	ld s2, (TF_S + 2 * 8)(sp)
	ld s3, (TF_S + 3 * 8)(sp)
	ld s4, (TF_S + 4 * 8)(sp)
	ld s5, (TF_S + 5 * 8)(sp)
	ld s6, (TF_S + 6 * 8)(sp)
	ld s7, (TF_S + 7 * 8)(sp)
	ld s8, (TF_S + 8 * 8)(sp)
	ld s9, (TF_S + 9 * 8)(sp)
	ld s10, (TF_S + 10 * 8)(sp)
	ld s11, (TF_S + 11 * 8)(sp)

	ld a0, (TF_A + 0 * 8)(sp)
	ld a1, (TF_A + 1 * 8)(sp)
	ld a2, (TF_A + 2 * 8)(sp)
	ld a3, (TF_A + 3 * 8)(sp)
	ld a4, (TF_A + 4 * 8)(sp)
	ld a5, (TF_A + 5 * 8)(sp)
	ld a6, (TF_A + 6 * 8)(sp)
	ld a7, (TF_A + 7 * 8)(sp)

	addi sp, sp, (TRAPFRAME_SIZEOF)
.endm
+
/*
 * do_ast: before returning to userspace, check curproc's astpending
 * flag with interrupts disabled.  When set, clear it, restore the
 * saved sstatus (re-enabling interrupts), call ast() with the
 * trapframe, and loop to re-check.  Falls through at 2: with
 * interrupts still disabled (restore_registers relies on this).
 * Clobbers a0-a2, a4, t0.
 */
.macro do_ast
	/* Disable interrupts */
	csrr a4, sstatus
1:
	csrci sstatus, (SSTATUS_SIE)

	/* Check for astpending */
	ld a1, CI_CURPROC(tp)
	beqz a1, 2f
	lw a2, P_ASTPENDING(a1)
	beqz a2, 2f

	sw x0, P_ASTPENDING(a1)

	/* Restore interrupts */
	csrw sstatus, a4

	/* handle the ast */
	mv a0, sp
	la t0, _C_LABEL(ast)
	jalr t0
	j 1b
2:
.endm
+
/*
 * Common trap entry point.  Protocol: sscratch holds the kernel stack
 * pointer while the CPU runs in user mode and 0 while in the kernel
 * (save_registers zeroes it).  After the swap, sp == 0 therefore means
 * the trap was taken from supervisor mode.
 */
ENTRY(cpu_trap_handler)
	csrrw sp, sscratch, sp
	beqz sp, 1f
	/* User mode detected */
	j cpu_trap_handler_user
1:
	/* Supervisor mode detected */
	/* Undo the swap: restore kernel sp, leave 0 in sscratch. */
	csrrw sp, sscratch, sp
	j cpu_trap_handler_supervisor
END(cpu_trap_handler)
+
/* Trap taken from supervisor mode: save state, call the C handler,
 * restore and sret.  No AST processing on kernel-mode traps. */
ENTRY(cpu_trap_handler_supervisor)
	save_registers 1
	mv a0, sp
	call _C_LABEL(do_trap_supervisor)
	restore_registers 1
	sret
END(cpu_trap_handler_supervisor)
+
/* Trap taken from user mode: save state, call the C handler, process
 * ASTs, then restore and return.  The final csrrw swaps the user sp
 * (left in sscratch by restore_registers 0) back into sp while parking
 * the kernel sp in sscratch for the next trap. */
ENTRY(cpu_trap_handler_user)
	save_registers 0
	mv a0, sp
	call _C_LABEL(do_trap_user)
	do_ast
	restore_registers 0
	csrrw sp, sscratch, sp
	sret
END(cpu_trap_handler_user)
+
/* Return-to-user path that skips the trap handler proper: process
 * ASTs, restore user state, swap stacks and sret. */
ENTRY(syscall_return)
	do_ast
	restore_registers 0
	csrrw sp, sscratch, sp
	sret
END(syscall_return)
--- /dev/null
+/*
+ * Copyright (c) 2020 Shivam Waghela <shivamwaghela@gmail.com>
+ * Copyright (c) 2020 Brian Bamsch <bbamsch@google.com>
+ * Copyright (c) 2020 Mengshi Li <mengshi.li.mars@gmail.com>
+ * Copyright (c) 2015 Dale Rahn <drahn@dalerahn.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/user.h>
+#include <sys/signalvar.h>
+#include <sys/siginfo.h>
+#include <sys/syscall.h>
+#include <sys/syscall_mi.h>
+
+#include <machine/riscvreg.h>
+#include <machine/syscall.h>
+#include <machine/db_machdep.h>
+
+/* Called from trap.S */
+void do_trap_supervisor(struct trapframe *);
+void do_trap_user(struct trapframe *);
+
+static void data_abort(struct trapframe *, int);
+
+static void
+dump_regs(struct trapframe *frame)
+{
+ int n;
+ int i;
+
+ n = (sizeof(frame->tf_t) / sizeof(frame->tf_t[0]));
+ for (i = 0; i < n; i++)
+ printf("t[%d] == 0x%016lx\n", i, frame->tf_t[i]);
+
+ n = (sizeof(frame->tf_s) / sizeof(frame->tf_s[0]));
+ for (i = 0; i < n; i++)
+ printf("s[%d] == 0x%016lx\n", i, frame->tf_s[i]);
+
+ n = (sizeof(frame->tf_a) / sizeof(frame->tf_a[0]));
+ for (i = 0; i < n; i++)
+ printf("a[%d] == 0x%016lx\n", i, frame->tf_a[i]);
+
+ printf("sepc == 0x%016lx\n", frame->tf_sepc);
+ printf("sstatus == 0x%016lx\n", frame->tf_sstatus);
+}
+
/*
 * Handle a trap taken from supervisor mode (called from trap.S with
 * the saved trapframe).  Kernel faults are sent to data_abort();
 * anything else unexpected panics.
 */
void
do_trap_supervisor(struct trapframe *frame)
{
	uint64_t exception;

	/* Ensure we came from supervisor mode, interrupts disabled */
	KASSERTMSG((csr_read(sstatus) & (SSTATUS_SPP | SSTATUS_SIE)) ==
	    SSTATUS_SPP, "Came from S mode with interrupts enabled");

	if (frame->tf_scause & EXCP_INTR) {
		/* Interrupt */
		riscv_cpu_intr(frame);
		return;
	}

	exception = (frame->tf_scause & EXCP_MASK);
	switch(exception) {
	case EXCP_FAULT_LOAD:
	case EXCP_FAULT_STORE:
	case EXCP_FAULT_FETCH:
	case EXCP_STORE_PAGE_FAULT:
	case EXCP_LOAD_PAGE_FAULT:
		data_abort(frame, 0);
		break;
	case EXCP_BREAKPOINT:
#ifdef DDB
		// kdb_trap(exception, 0, frame);
		db_trapper(frame->tf_sepc,0/*XXX*/, frame, exception);
#else
		dump_regs(frame);
		panic("No debugger in kernel.\n");
#endif
		break;
	case EXCP_ILLEGAL_INSTRUCTION:
		dump_regs(frame);
		panic("Illegal instruction at 0x%016lx\n", frame->tf_sepc);
		break;
	default:
		dump_regs(frame);
		panic("Unknown kernel exception %llx trap value %lx\n",
		    exception, frame->tf_stval);
	}
}
+
+
+void
+do_trap_user(struct trapframe *frame)
+{
+ uint64_t exception;
+ union sigval sv;
+ struct proc *p;
+ struct pcb *pcb;
+ uint64_t stval;
+
+ p = curcpu()->ci_curproc;
+ p->p_addr->u_pcb.pcb_tf = frame;
+ pcb = curcpu()->ci_curpcb;
+
+ /* Ensure we came from usermode, interrupts disabled */
+ KASSERTMSG((csr_read(sstatus) & (SSTATUS_SPP | SSTATUS_SIE)) == 0,
+ "Came from U mode with interrupts enabled");
+
+ /* Save fpu context before (possibly) calling interrupt handler.
+ * Could end up context switching in interrupt handler.
+ */
+ fpu_save(p, frame);
+
+ exception = (frame->tf_scause & EXCP_MASK);
+ if (frame->tf_scause & EXCP_INTR) {
+ /* Interrupt */
+ riscv_cpu_intr(frame);
+ frame->tf_sstatus &= ~SSTATUS_FS_MASK;
+ if (pcb->pcb_fpcpu == curcpu() && curcpu()->ci_fpuproc == p) {
+ frame->tf_sstatus |= SSTATUS_FS_CLEAN;
+ }
+ return;
+ }
+
+ enable_interrupts(); //XXX allow preemption?
+
+#if 0 // XXX Debug logging
+ printf( "do_trap_user: curproc: %p, sepc: %lx, ra: %lx frame: %p\n",
+ curcpu()->ci_curproc, frame->tf_sepc, frame->tf_ra, frame);
+#endif
+
+ switch(exception) {
+ case EXCP_FAULT_LOAD:
+ case EXCP_FAULT_STORE:
+ case EXCP_FAULT_FETCH:
+ case EXCP_STORE_PAGE_FAULT:
+ case EXCP_LOAD_PAGE_FAULT:
+ case EXCP_INST_PAGE_FAULT:
+ data_abort(frame, 1);
+ break;
+ case EXCP_USER_ECALL:
+ frame->tf_sepc += 4; /* Next instruction */
+ svc_handler(frame);
+ break;
+ case EXCP_ILLEGAL_INSTRUCTION:
+
+ if ((frame->tf_sstatus & SSTATUS_FS_MASK) ==
+ SSTATUS_FS_OFF) {
+ if(fpu_valid_opcode(frame->tf_stval)) {
+
+ /* XXX do this here or should it be in the
+ * trap handler in the restore path?
+ */
+ fpu_load(p);
+
+ frame->tf_sstatus &= ~SSTATUS_FS_MASK;
+ break;
+ }
+ }
+ printf("ILL at %lx scause %lx stval %lx\n", frame->tf_sepc, frame->tf_scause, frame->tf_stval);
+ sv.sival_int = stval;
+ KERNEL_LOCK();
+ trapsignal(p, SIGILL, 0, ILL_ILLTRP, sv);
+ KERNEL_UNLOCK();
+ userret(p);
+ break;
+ case EXCP_BREAKPOINT:
+ printf("BREAKPOINT\n");
+ sv.sival_int = stval;
+ KERNEL_LOCK();
+ trapsignal(p, SIGTRAP, 0, TRAP_BRKPT, sv);
+ KERNEL_UNLOCK();
+ userret(p);
+ break;
+ default:
+ dump_regs(frame);
+ panic("Unknown userland exception %llx, trap value %lx\n",
+ exception, frame->tf_stval);
+ }
+ disable_interrupts(); /* XXX - ??? */
+ /* now that we will not context switch again,
+ * see if we should enable FPU
+ */
+ frame->tf_sstatus &= ~SSTATUS_FS_MASK;
+ if (pcb->pcb_fpcpu == curcpu() && curcpu()->ci_fpuproc == p) {
+ frame->tf_sstatus |= SSTATUS_FS_CLEAN;
+ //printf ("FPU enabled userland %p %p\n",
+ // pcb->pcb_fpcpu, curcpu()->ci_fpuproc);
+ }
+}
+
+static void
+data_abort(struct trapframe *frame, int usermode)
+{
+ struct vm_map *map;
+ uint64_t stval;
+ union sigval sv;
+ struct pcb *pcb;
+ vm_prot_t ftype;
+ vaddr_t va;
+ struct proc *p;
+ int error, sig, code, access_type;
+
+ pcb = curcpu()->ci_curpcb;
+ p = curcpu()->ci_curproc;
+ stval = frame->tf_stval;
+
+ va = trunc_page(stval);
+
+ //if (va >= VM_MAXUSER_ADDRESS)
+ // curcpu()->ci_flush_bp();
+
+ if ((frame->tf_scause == EXCP_FAULT_STORE) ||
+ (frame->tf_scause == EXCP_STORE_PAGE_FAULT)) {
+ access_type = PROT_WRITE;
+ } else if (frame->tf_scause == EXCP_INST_PAGE_FAULT) {
+ access_type = PROT_EXEC;
+ } else {
+ access_type = PROT_READ;
+ }
+
+ ftype = VM_FAULT_INVALID; // should check for failed permissions.
+
+ if (usermode)
+ map = &p->p_vmspace->vm_map;
+ else if (stval >= VM_MAX_USER_ADDRESS)
+ map = kernel_map;
+ else {
+ if (pcb->pcb_onfault == 0)
+ goto fatal;
+ map = &p->p_vmspace->vm_map;
+ }
+
+ if (pmap_fault_fixup(map->pmap, va, ftype, usermode))
+ goto done;
+
+ KERNEL_LOCK();
+ error = uvm_fault(map, va, ftype, access_type);
+ KERNEL_UNLOCK();
+
+ if (error != 0) {
+ if (usermode) {
+ if (error == ENOMEM) {
+ sig = SIGKILL;
+ code = 0;
+ } else if (error == EIO) {
+ sig = SIGBUS;
+ code = BUS_OBJERR;
+ } else if (error == EACCES) {
+ sig = SIGSEGV;
+ code = SEGV_ACCERR;
+ } else {
+ sig = SIGSEGV;
+ code = SEGV_MAPERR;
+ }
+ sv.sival_int = stval;
+ KERNEL_LOCK();
+ //printf("signalling %d at pc 0%lx ra 0x%lx %llx\n", code, frame->tf_sepc, frame->tf_ra, stval);
+ trapsignal(p, sig, 0, code, sv);
+ KERNEL_UNLOCK();
+ } else {
+ if (curcpu()->ci_idepth == 0 && pcb->pcb_onfault != 0) {
+ frame->tf_a[0] = error;
+ frame->tf_sepc = (register_t)pcb->pcb_onfault;
+ return;
+ }
+ goto fatal;
+ }
+ }
+
+done:
+ if (usermode)
+ userret(p);
+ return;
+
+fatal:
+ dump_regs(frame);
+ panic("Fatal page fault at %#lx: %#08x", frame->tf_sepc, sv.sival_int);
+}
+
--- /dev/null
+/*-
+ * Copyright (c) 1995 Charles M. Hannum. All rights reserved.
+ * Copyright (c) 1982, 1986 The Regents of the University of California.
+ * Copyright (c) 1989, 1990 William Jolitz
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department, and William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/vnode.h>
+#include <sys/buf.h>
+#include <sys/user.h>
+#include <sys/exec.h>
+#include <sys/ptrace.h>
+#include <sys/signalvar.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <machine/cpu.h>
+#include <machine/reg.h>
+
+/*
+ * Finish a fork operation, with process p2 nearly set up.
+ * Copy and update the kernel stack and pcb, making the child
+ * ready to run, and marking it so that it can return differently
+ * than the parent. Returns 1 in the child process, 0 in the parent.
+ */
+void
+cpu_fork(struct proc *p1, struct proc *p2, void *stack, void *tcb,
+ void (*func)(void *), void *arg)
+{
+ struct pcb *pcb = (struct pcb *)&p2->p_addr->u_pcb;
+ struct trapframe *tf;
+ struct switchframe *sf;
+
+ // Does any flushing need to be done if process was running?
+
+ /* Copy the pcb. */
+ *pcb = p1->p_addr->u_pcb;
+ pcb->pcb_fpcpu = NULL;
+
+ pmap_activate(p2);
+
+ tf = (struct trapframe *)((u_long)p2->p_addr
+ + USPACE
+ - sizeof(struct trapframe)
+ - 0x10);
+
+ tf = (struct trapframe *)STACKALIGN(tf);
+ pcb->pcb_tf = tf;
+ *tf = *p1->p_addr->u_pcb.pcb_tf;
+
+ if (stack != NULL)
+ tf->tf_sp = STACKALIGN(stack);
+ if (tcb != NULL)
+ tf->tf_tp = (register_t)tcb;
+
+ /* Arguments for child */
+ tf->tf_a[0] = 0;
+ tf->tf_a[1] = 0;
+ tf->tf_sstatus |= (SSTATUS_SPIE); /* Enable interrupts. */
+ tf->tf_sstatus &= ~(SSTATUS_SPP); /* Enter user mode. */
+
+ sf = (struct switchframe *)tf - 1;
+ sf->sf_s[0] = (uint64_t)func;
+ sf->sf_s[1] = (uint64_t)arg;
+ sf->sf_ra = (u_int64_t)&proc_trampoline;
+ pcb->pcb_sp = (uint64_t)sf;
+}
+
+/*
+ * cpu_exit is called as the last action during exit.
+ *
+ * We clean up a little and then call sched_exit() with the old proc as an
+ * argument.
+ */
+void
+cpu_exit(struct proc *p)
+{
+ /* If we were using the FPU, forget about it. */
+ if (p->p_addr->u_pcb.pcb_fpcpu != NULL)
+ fpu_discard(p); // XXX Discard FP
+
+ pmap_deactivate(p);
+ sched_exit(p);
+}
+
/*
 * km_alloc() allocation mode used by vmapbuf()/vunmapbuf(): carve VA
 * from phys_map; kv_wait=1 presumably means block until space is
 * available rather than fail — confirm against km_alloc(9).
 */
struct kmem_va_mode kv_physwait = {
	.kv_map = &phys_map,
	.kv_wait = 1,
};
+
+/*
+ * Map a user I/O request into kernel virtual address space.
+ * Note: the pages are already locked by uvm_vslock(), so we
+ * do not need to pass an access_type to pmap_enter().
+ */
+void
+vmapbuf(struct buf *bp, vsize_t len)
+{
+ vaddr_t faddr, taddr, off;
+ paddr_t fpa;
+
+ if ((bp->b_flags & B_PHYS) == 0)
+ panic("vmapbuf");
+ faddr = trunc_page((vaddr_t)(bp->b_saveaddr = bp->b_data));
+ off = (vaddr_t)bp->b_data - faddr;
+ len = round_page(off + len);
+ taddr = (vaddr_t)km_alloc(len, &kv_physwait, &kp_none, &kd_waitok);
+ bp->b_data = (caddr_t)(taddr + off);
+ /*
+ * The region is locked, so we expect that pmap_pte() will return
+ * non-NULL.
+ * XXX: unwise to expect this in a multithreaded environment.
+ * anything can happen to a pmap between the time we lock a
+ * region, release the pmap lock, and then relock it for
+ * the pmap_extract().
+ *
+ * no need to flush TLB since we expect nothing to be mapped
+ * where we we just allocated (TLB will be flushed when our
+ * mapping is removed).
+ */
+ while (len) {
+ (void) pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
+ faddr, &fpa);
+ pmap_kenter_pa(taddr, fpa, PROT_READ | PROT_WRITE);
+ faddr += PAGE_SIZE;
+ taddr += PAGE_SIZE;
+ len -= PAGE_SIZE;
+ }
+ pmap_update(pmap_kernel());
+}
+
+/*
+ * Unmap a previously-mapped user I/O request.
+ */
+void
+vunmapbuf(struct buf *bp, vsize_t len)
+{
+ vaddr_t addr, off;
+
+ if ((bp->b_flags & B_PHYS) == 0)
+ panic("vunmapbuf");
+ addr = trunc_page((vaddr_t)bp->b_data);
+ off = (vaddr_t)bp->b_data - addr;
+ len = round_page(off + len);
+ pmap_kremove(addr, len);
+ pmap_update(pmap_kernel());
+ km_free((void *)addr, len, &kv_physwait, &kp_none);
+ bp->b_data = bp->b_saveaddr;
+ bp->b_saveaddr = 0;
+}