-/* $OpenBSD: SYS.h,v 1.5 2020/02/18 12:19:11 kettenis Exp $ */
+/* $OpenBSD: SYS.h,v 1.6 2022/12/08 01:25:43 guenther Exp $ */
/* $NetBSD: SYS.h,v 1.8 2003/08/07 16:42:02 agc Exp $ */
/*-
#include <sys/syscall.h>
#define SYSENTRY(x) \
- .weak _C_LABEL(x); \
- _C_LABEL(x) = _C_LABEL(_thread_sys_ ## x); \
+ .weak x; \
+ x = _thread_sys_ ## x; \
ENTRY(_thread_sys_ ## x)
#define SYSENTRY_HIDDEN(x) \
ENTRY(_thread_sys_ ## x)
dsb nsh; \
isb
-#define CERROR _C_LABEL(__cerror)
+#define CERROR __cerror
#define _SYSCALL_NOERROR(x,y) \
SYSENTRY(x); \
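
Background on the substitution running through this diff: on ELF targets _C_LABEL() and _ASM_LABEL() have long been identity macros, so dropping the wrappers changes none of the emitted symbols; they only mattered on a.out, where C-visible symbols carried a leading underscore. Roughly the historical shape of the <machine/asm.h> definitions (a sketch; per-arch wording varied):

    #ifdef __ELF__
    #define _C_LABEL(x)     x        /* ELF: C symbols are undecorated */
    #else
    #define _C_LABEL(x)     _ ## x   /* a.out: prepend an underscore */
    #endif
    #define _ASM_LABEL(x)   x        /* assembler-only symbols, both formats */

With only ELF platforms left, SYSENTRY(read) expands to ".weak read; read = _thread_sys_read; ENTRY(_thread_sys_read)" both before and after this change, so the replacement is purely mechanical.
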
-/* $OpenBSD: brk.S,v 1.6 2022/05/24 22:34:02 guenther Exp $ */
+/* $OpenBSD: brk.S,v 1.7 2022/12/08 01:25:43 guenther Exp $ */
/* $NetBSD: brk.S,v 1.7 2003/12/26 11:23:44 martin Exp $ */
/*-
#include "SYS.h"
- .globl _C_LABEL(_end)
+ .globl _end
.globl __curbrk
.data
.align 3
.type __minbrk,#object
__minbrk:
- .quad _C_LABEL(_end)
+ .quad _end
END(__minbrk)
/*
-/* $OpenBSD: sbrk.S,v 1.5 2022/05/24 22:34:02 guenther Exp $ */
+/* $OpenBSD: sbrk.S,v 1.6 2022/12/08 01:25:43 guenther Exp $ */
/* $NetBSD: sbrk.S,v 1.7 2003/08/07 16:42:05 agc Exp $ */
/*-
#include "SYS.h"
- .globl _C_LABEL(_end)
+ .globl _end
.data
.align 3
.type __curbrk,#object
.hidden __curbrk
__curbrk:
- .quad _C_LABEL(_end)
+ .quad _end
END(__curbrk)
/*
-/* $OpenBSD: SYS.h,v 1.18 2020/03/13 09:31:24 deraadt Exp $ */
+/* $OpenBSD: SYS.h,v 1.19 2022/12/08 01:25:43 guenther Exp $ */
/* $NetBSD: SYS.h,v 1.8 2003/08/07 16:42:02 agc Exp $ */
/*-
#define SYSENTRY(x) \
- .weak _C_LABEL(x); \
- _C_LABEL(x) = _C_LABEL(_thread_sys_ ## x); \
+ .weak x; \
+ x = _thread_sys_ ## x; \
ENTRY(_thread_sys_ ## x)
#define SYSENTRY_HIDDEN(x) \
ENTRY(_thread_sys_ ## x)
dsb nsh; \
isb
-#define CERROR _C_LABEL(__cerror)
+#define CERROR __cerror
#define _SYSCALL_NOERROR(x,y) \
SYSENTRY(x); \
-/* $OpenBSD: brk.S,v 1.12 2022/05/24 17:15:23 guenther Exp $ */
+/* $OpenBSD: brk.S,v 1.13 2022/12/08 01:25:43 guenther Exp $ */
/* $NetBSD: brk.S,v 1.7 2003/12/26 11:23:44 martin Exp $ */
/*-
#include "SYS.h"
- .globl _C_LABEL(_end)
+ .globl _end
.globl __curbrk
.data
.align 2
.type __minbrk,#object
__minbrk:
- .word _C_LABEL(_end)
+ .word _end
END(__minbrk)
/*
.align 2
#ifdef __PIC__
.Lgot:
- .word _C_LABEL(_GLOBAL_OFFSET_TABLE_) - (.L1+8)
+ .word _GLOBAL_OFFSET_TABLE_ - (.L1+8)
#endif
.Lminbrk:
.word PIC_SYM(__minbrk, GOT)
-/* $OpenBSD: sbrk.S,v 1.12 2022/05/24 17:15:23 guenther Exp $ */
+/* $OpenBSD: sbrk.S,v 1.13 2022/12/08 01:25:43 guenther Exp $ */
/* $NetBSD: sbrk.S,v 1.7 2003/08/07 16:42:05 agc Exp $ */
/*-
#include "SYS.h"
- .globl _C_LABEL(_end)
+ .globl _end
.data
.align 2
.type __curbrk,#object
.hidden __curbrk
__curbrk:
- .word _C_LABEL(_end)
+ .word _end
END(__curbrk)
/*
.align 2
#ifdef __PIC__
.Lgot:
- .word _C_LABEL(_GLOBAL_OFFSET_TABLE_) - (.L1+8)
+ .word _GLOBAL_OFFSET_TABLE_ - (.L1+8)
#endif
.Lcurbrk:
.word PIC_SYM(__curbrk, GOT)
-/* $OpenBSD: brk.S,v 1.9 2021/11/27 15:12:19 visa Exp $ */
+/* $OpenBSD: brk.S,v 1.10 2022/12/08 01:25:43 guenther Exp $ */
/*-
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
.data
__minbrk:
- PTR_VAL _C_LABEL(_end)
+ PTR_VAL _end
.size __minbrk, . - __minbrk
.type __minbrk,@object
.text
-/* $OpenBSD: sbrk.S,v 1.9 2021/11/27 15:12:19 visa Exp $ */
+/* $OpenBSD: sbrk.S,v 1.10 2022/12/08 01:25:43 guenther Exp $ */
/*-
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
.hidden __curbrk
.data
-__curbrk: PTR_VAL _C_LABEL(_end)
+__curbrk: PTR_VAL _end
.size __curbrk, . - __curbrk
.type __curbrk,@object
.text
-/* $OpenBSD: tfork_thread.S,v 1.4 2020/10/20 15:26:59 visa Exp $ */
+/* $OpenBSD: tfork_thread.S,v 1.5 2022/12/08 01:25:43 guenther Exp $ */
/*
* Copyright (c) 2005, Miodrag Vallat
LEAF(__tfork_thread, FRAMESZ)
/* a0 = param, a1 = psize, a2 = func, a3 = arg */
PTR_SUBU sp, FRAMESZ
- SETUP_GP64(GPOFF, _C_LABEL(__tfork_thread))
+ SETUP_GP64(GPOFF, __tfork_thread)
.set reorder
move t1, a3 /* arg */
-/* $OpenBSD: brk.S,v 1.17 2022/06/10 01:56:02 guenther Exp $ */
+/* $OpenBSD: brk.S,v 1.18 2022/12/08 01:25:43 guenther Exp $ */
/*
* Copyright (c) 1996 Dale Rahn
#include "SYS.h"
.extern __curbrk
- .extern _C_LABEL(_end)
+ .extern _end
ENTRY_NB(brk)
/* check >= _end, if not make the call for _end */
#ifndef __PIC__
- addis 5,0,_C_LABEL(_end)@h
- ori 5,5,_C_LABEL(_end)@l /* # 5 = &_end */
+ addis 5,0,_end@h
+ ori 5,5,_end@l /* # 5 = &_end */
#else
mflr 10
bcl 20, 31, 1f
addis 9, 9, _GLOBAL_OFFSET_TABLE_-1b@ha
addi 9, 9, _GLOBAL_OFFSET_TABLE_-1b@l
mtlr 10
- lwz 5,_C_LABEL(_end)@got(9)
+ lwz 5,_end@got(9)
#endif
cmplw 3,5
bge+ .L_brk_call
-/* $OpenBSD: sbrk.S,v 1.16 2022/06/10 01:56:02 guenther Exp $ */
+/* $OpenBSD: sbrk.S,v 1.17 2022/12/08 01:25:43 guenther Exp $ */
/*
* Copyright (c) 1996 Dale Rahn
.globl __curbrk
.hidden __curbrk
__curbrk:
- .long _C_LABEL(_end)
+ .long _end
END(__curbrk)
.type __curbrk,@object
-/* $OpenBSD: ffs.S,v 1.8 2018/01/18 23:42:13 deraadt Exp $ */
+/* $OpenBSD: ffs.S,v 1.9 2022/12/08 01:25:43 guenther Exp $ */
/*
* Copyright (c) 1992, 1993
*/
ENTRY(ffs)
#ifdef __PIC__
- PICCY_SET(_C_LABEL(__ffstab), %o2, %o3)
+ PICCY_SET(__ffstab, %o2, %o3)
#else
- set _C_LABEL(__ffstab), %o2
+ set __ffstab, %o2
#endif
andcc %o0, 0xff, %o1 ! get low byte
be,a 1f ! try again if 0
END(ffs)
.protected ffs
-_C_LABEL(__ffstab):
+__ffstab:
.byte -24,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1 /* 00-0f */
.byte 5,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1 /* 10-1f */
.byte 6,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1 /* 20-2f */
-/* $OpenBSD: brk.S,v 1.9 2022/01/01 23:47:14 guenther Exp $ */
+/* $OpenBSD: brk.S,v 1.10 2022/12/08 01:25:44 guenther Exp $ */
/* $NetBSD: brk.S,v 1.9 2000/07/25 20:15:40 mycroft Exp $ */
/*
#include "SYS.h"
.globl __curbrk
- .globl _C_LABEL(_end)
+ .globl _end
.data
.align 8
__minbrk:
- .xword _C_LABEL(_end) /* lower brk limit; also for gmon code */
+ .xword _end /* lower brk limit; also for gmon code */
END(__minbrk)
OTYPE(__minbrk)
.text
-/* $OpenBSD: sbrk.S,v 1.7 2022/01/01 23:47:14 guenther Exp $ */
+/* $OpenBSD: sbrk.S,v 1.8 2022/12/08 01:25:44 guenther Exp $ */
/* $NetBSD: sbrk.S,v 1.7 2000/07/25 15:14:46 mycroft Exp $ */
/*
.globl __curbrk
.hidden __curbrk
- .globl _C_LABEL(_end)
+ .globl _end
.data
.align 8
__curbrk:
- .xword _C_LABEL(_end)
+ .xword _end
END(__curbrk)
OTYPE(__curbrk)
.text
-/* $OpenBSD: srt0.S,v 1.3 2012/10/12 15:00:32 jsing Exp $ */
+/* $OpenBSD: srt0.S,v 1.4 2022/12/08 01:25:44 guenther Exp $ */
/*
* Copyright (c) 1997 Michael Shalayeff
#define BOOTSTACK 0xfffc
- .globl _C_LABEL(end)
- .globl _C_LABEL(edata)
- .globl _C_LABEL(boot)
- .globl _C_LABEL(_rtt)
- .globl _C_LABEL(bios_bootdev)
- .globl _ASM_LABEL(pmm_init)
+ .globl end
+ .globl edata
+ .globl boot
+ .globl _rtt
+ .globl bios_bootdev
+ .globl pmm_init
.globl Gdtr
.text
mov %ax,%gs
movl $BOOTSTACK,%esp
pushl %edx
- movl %edx, _C_LABEL(bios_bootdev)
+ movl %edx, bios_bootdev
/* Now do it all */
#ifdef DEBUG
#endif
/* zero .bss */
xorl %eax, %eax
- movl $_C_LABEL(end), %ecx
- subl $_C_LABEL(edata),%ecx
- movl $_C_LABEL(edata), %edi
+ movl $end, %ecx
+ subl $edata,%ecx
+ movl $edata, %edi
cld
rep; stosb
- call _ASM_LABEL(pmm_init)
- call _C_LABEL(boot)
+ call pmm_init
+ call boot
- jmp _C_LABEL(_rtt)
+ jmp _rtt
-/* $OpenBSD: srt0.S,v 1.3 2012/10/29 13:54:56 jsing Exp $ */
+/* $OpenBSD: srt0.S,v 1.4 2022/12/08 01:25:44 guenther Exp $ */
/*
* Copyright (c) 1997 Michael Shalayeff
#define BOOTSTACK 0xfffc
- .globl _C_LABEL(end)
- .globl _C_LABEL(edata)
- .globl _C_LABEL(boot)
- .globl _C_LABEL(_rtt)
- .globl _C_LABEL(bios_bootdev)
- .globl _ASM_LABEL(pmm_init)
+ .globl end
+ .globl edata
+ .globl boot
+ .globl _rtt
+ .globl bios_bootdev
+ .globl pmm_init
.globl Gdtr
.text
movw $(CDBOOTADDR >> 4), %ax /* Reloc from %ds = 0x7c0. */
movw $(LINKADDR >> 4), %bx /* Reloc to %es = 0x4012. */
- movl $_C_LABEL(end), %edx
- subl $_C_LABEL(_start), %edx /* How big are we? */
+ movl $end, %edx
+ subl $_start, %edx /* How big are we? */
/*
* Relocate in blocks that are a maximum of 32KB in size, incrementing
/* Zero .bss */
xorl %eax, %eax
- movl $_C_LABEL(end), %ecx
- subl $_C_LABEL(edata), %ecx
- movl $_C_LABEL(edata), %edi
+ movl $end, %ecx
+ subl $edata, %ecx
+ movl $edata, %edi
cld
rep; stosb
/* Set up an interrupt descriptor table for protected mode. */
- call _ASM_LABEL(pmm_init)
+ call pmm_init
/* Set our program name ("CDBOOT", not "BOOT"). */
movl $cd_progname, %eax
/* Put the boot device number into the globals that need it */
popl %eax /* Get this back from the stack */
pushl %eax /* boot() takes this as a parameter */
- movl %eax, _C_LABEL(bios_bootdev)
- movl %eax, _C_LABEL(bios_cddev)
+ movl %eax, bios_bootdev
+ movl %eax, bios_cddev
/*
* Now call "main()".
movl $0xb8004, %ebx
movl $0x07410741, (%ebx)
#endif
- call _C_LABEL(boot)
+ call boot
/* boot() should not return. If it does, reset computer. */
- jmp _C_LABEL(_rtt)
+ jmp _rtt
ENTRY(debugchar)
pushl %ebx
-/* $OpenBSD: run_i386.S,v 1.2 2022/01/02 05:49:50 jsg Exp $ */
+/* $OpenBSD: run_i386.S,v 1.3 2022/12/08 01:25:44 guenther Exp $ */
/*
* Copyright (c) 2015 YASUOKA Masahiko <yasuoka@yasuoka.net>
#define CODE_SEGMENT 0x10
#define DATA_SEGMENT 0x18
- .globl _C_LABEL(run_i386_size)
-_C_LABEL(run_i386_size):
- .long run_i386_end - _C_LABEL(run_i386_start)
+ .globl run_i386_size
+run_i386_size:
+ .long run_i386_end - run_i386_start
.align 4
.text
- .globl _C_LABEL(run_i386_start)
-_C_LABEL(run_i386_start):
+ .globl run_i386_start
+run_i386_start:
start:
/*
* run_i386(_start) is to call the loaded kernel's start() with
-/* $OpenBSD: run_i386.S,v 1.2 2022/01/02 05:49:50 jsg Exp $ */
+/* $OpenBSD: run_i386.S,v 1.3 2022/12/08 01:25:44 guenther Exp $ */
/*
* Copyright (c) 2015 YASUOKA Masahiko <yasuoka@yasuoka.net>
#define CODE_SEGMENT 0x10
#define DATA_SEGMENT 0x18
- .globl _C_LABEL(run_i386_size)
-_C_LABEL(run_i386_size):
- .long run_i386_end - _C_LABEL(run_i386_start)
+ .globl run_i386_size
+run_i386_size:
+ .long run_i386_end - run_i386_start
.align 4
.text
- .globl _C_LABEL(run_i386_start)
-_C_LABEL(run_i386_start):
+ .globl run_i386_start
+run_i386_start:
start:
/*
* run_i386(_start) is to call the loaded kernel's start() with
-/* $OpenBSD: run_i386.S,v 1.2 2022/01/02 05:49:50 jsg Exp $ */
+/* $OpenBSD: run_i386.S,v 1.3 2022/12/08 01:25:44 guenther Exp $ */
/*
* Copyright (c) 2015 YASUOKA Masahiko <yasuoka@yasuoka.net>
#define CODE_SEGMENT 0x10
#define DATA_SEGMENT 0x18
- .globl _C_LABEL(run_i386_size)
-_C_LABEL(run_i386_size):
- .long run_i386_end - _C_LABEL(run_i386_start)
+ .globl run_i386_size
+run_i386_size:
+ .long run_i386_end - run_i386_start
.align 4
.text
- .globl _C_LABEL(run_i386_start)
-_C_LABEL(run_i386_start):
+ .globl run_i386_start
+run_i386_start:
start:
/*
* run_i386(_start) is to call the loaded kernel's start() with
-/* $OpenBSD: gidt.S,v 1.12 2019/11/09 17:58:46 deraadt Exp $ */
+/* $OpenBSD: gidt.S,v 1.13 2022/12/08 01:25:44 guenther Exp $ */
/*
* Copyright (c) 1997 Michael Shalayeff
lidt Idtr;
- .globl _C_LABEL(BIOS_regs)
+ .globl BIOS_regs
.text
.code32
- .globl _ASM_LABEL(pmm_init)
- .globl _C_LABEL(_rtt)
+ .globl pmm_init
+ .globl _rtt
ENTRY(_rtt)
#ifdef SOFTRAID
- call _C_LABEL(sr_clear_keys)
+ call sr_clear_keys
#endif
#ifdef GIDT_DEBUG
movl $0xb8000, %ebx
mov %al, intno
/* Load BIOS registers prior to switching to real mode. */
- movl _C_LABEL(BIOS_regs)+BIOSR_ES, %eax
+ movl BIOS_regs+BIOSR_ES, %eax
mov %eax, 7f
- movl _C_LABEL(BIOS_regs)+BIOSR_DS, %eax
+ movl BIOS_regs+BIOSR_DS, %eax
mov %eax, 6f
prot2real
# movl $Leax, %eax
.byte 0xb8
4: .long 0x90909090
- movl %eax, _C_LABEL(BIOS_regs)+BIOSR_BX
+ movl %eax, BIOS_regs+BIOSR_BX
# movl $Leax, %eax
.byte 0xb8
3: .long 0x90909090
- movl %eax, _C_LABEL(BIOS_regs)+BIOSR_ES
+ movl %eax, BIOS_regs+BIOSR_ES
# movl $Leax, %eax
.byte 0xb8
movb %bh , 0xe*4(%esp)
/* save registers into save area */
- movl %eax, _C_LABEL(BIOS_regs)+BIOSR_AX
- movl %ecx, _C_LABEL(BIOS_regs)+BIOSR_CX
- movl %edx, _C_LABEL(BIOS_regs)+BIOSR_DX
- movl %ebp, _C_LABEL(BIOS_regs)+BIOSR_BP
- movl %esi, _C_LABEL(BIOS_regs)+BIOSR_SI
- movl %edi, _C_LABEL(BIOS_regs)+BIOSR_DI
+ movl %eax, BIOS_regs+BIOSR_AX
+ movl %ecx, BIOS_regs+BIOSR_CX
+ movl %edx, BIOS_regs+BIOSR_DX
+ movl %ebp, BIOS_regs+BIOSR_BP
+ movl %esi, BIOS_regs+BIOSR_SI
+ movl %edi, BIOS_regs+BIOSR_DI
/* clear NT flag in eflags */
pushf
-/* $OpenBSD: pxe_call.S,v 1.4 2006/01/02 00:26:29 tom Exp $ */
+/* $OpenBSD: pxe_call.S,v 1.5 2022/12/08 01:25:44 guenther Exp $ */
/* $NetBSD: pxe_call.S,v 1.2 2002/03/27 17:24:22 kanaoka Exp $ */
/*
/* For simplicity, just move all 32 bits. */
movl 8(%ebp), %ebx
- pushw _C_LABEL(pxe_command_buf_seg)
- pushw _C_LABEL(pxe_command_buf_off)
+ pushw pxe_command_buf_seg
+ pushw pxe_command_buf_off
pushw %bx
call prot_to_real /* Enter real mode */
sti
/* The encoding is: 0x9a offlo offhi seglo seghi */
lcall $0, $0xffff
- .globl _C_LABEL(bangpxe_off)
-_C_LABEL(bangpxe_off) = . - 4
- .globl _C_LABEL(bangpxe_seg)
-_C_LABEL(bangpxe_seg) = . - 2
+ .globl bangpxe_off
+bangpxe_off = . - 4
+ .globl bangpxe_seg
+bangpxe_seg = . - 2
cli
call real_to_prot /* Leave real mode */
* prot_to_real() will set %es to BOOTSEG, so we just need to set
* %(e)di up here. Remember to relocate it!
*/
- movl $_C_LABEL(pxe_command_buf), %edi
+ movl $pxe_command_buf, %edi
subl $LINKADDR, %edi
call prot_to_real /* Enter real mode */
/* The encoding is: 0x9a offlo offhi seglo seghi */
lcall $0, $0xffff
- .globl _C_LABEL(pxenv_off)
-_C_LABEL(pxenv_off) = . - 4
- .globl _C_LABEL(pxenv_seg)
-_C_LABEL(pxenv_seg) = . - 2
+ .globl pxenv_off
+pxenv_off = . - 4
+ .globl pxenv_seg
+pxenv_seg = . - 2
call real_to_prot /* Leave real mode */
.code32
.text
.code32
- .global _C_LABEL(launch_amd64_kernel_long)
+ .global launch_amd64_kernel_long
/*
* void launch_amd64_kernel_long(caddr_t base, caddr_t pml4,
* caddr_t rsp, uint64_t entry, int boothowto, int bootdev,
* int bootapiver, uint64_t end, int extmem, int cnvmem,
* int ac, uint64_t av);
*/
-_C_LABEL(launch_amd64_kernel_long):
+launch_amd64_kernel_long:
asm_start:
xchg %bx, %bx
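
For reference, the interface documented in the comment above, written as the C declaration a 32-bit boot loader caller would use (types as given there; the stub evidently enters long mode on the supplied PML4 and stack before jumping to entry, and the trailing arguments are the usual /boot-to-kernel handoff values):

    void    launch_amd64_kernel_long(caddr_t base, caddr_t pml4, caddr_t rsp,
                uint64_t entry, int boothowto, int bootdev, int bootapiver,
                uint64_t end, int extmem, int cnvmem, int ac, uint64_t av);
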
-/* $OpenBSD: srt0.S,v 1.3 2012/10/29 14:18:11 jsing Exp $ */
+/* $OpenBSD: srt0.S,v 1.4 2022/12/08 01:25:44 guenther Exp $ */
/*
* Copyright (c) 1997 Michael Shalayeff
#define BOOTSTACK 0xfffc
- .globl _C_LABEL(end)
- .globl _C_LABEL(edata)
- .globl _C_LABEL(boot)
- .globl _C_LABEL(_rtt)
- .globl _C_LABEL(bios_bootdev)
- .globl _ASM_LABEL(pmm_init)
+ .globl end
+ .globl edata
+ .globl boot
+ .globl _rtt
+ .globl bios_bootdev
+ .globl pmm_init
.globl Gdtr
.text
movw $(PXEBOOTADDR >> 4), %ax /* Reloc from %ds = 0x7c0. */
movw $(LINKADDR >> 4), %bx /* Reloc to %es = 0x4012. */
- movl $_C_LABEL(end), %edx
- subl $_C_LABEL(_start), %edx /* How big are we? */
+ movl $end, %edx
+ subl $_start, %edx /* How big are we? */
/*
* Relocate in blocks that are a maximum of 32KB in size, incrementing
#endif
xorl %edx, %edx
- movl %edx, _C_LABEL(bios_bootdev)
+ movl %edx, bios_bootdev
pushl %edx /* boot() takes this as a parameter */
#ifdef DEBUG
/* Zero .bss */
xorl %eax, %eax
- movl $_C_LABEL(end), %ecx
- subl $_C_LABEL(edata), %ecx
- movl $_C_LABEL(edata), %edi
+ movl $end, %ecx
+ subl $edata, %ecx
+ movl $edata, %edi
cld
rep; stosb
/* Set up an interrupt descriptor table for protected mode. */
- call _ASM_LABEL(pmm_init)
+ call pmm_init
/* Set our program name ("PXEBOOT", not "BOOT"). */
movl $pxe_progname, %eax
movl $0xb8004, %ebx
movl $0x07410741, (%ebx)
#endif
- call _C_LABEL(boot)
+ call boot
/* boot() should not return. If it does, reset computer. */
- jmp _C_LABEL(_rtt)
+ jmp _rtt
ENTRY(debugchar)
pushl %ebx
-/* $OpenBSD: bcopyinout.S,v 1.9 2018/08/06 18:39:13 kettenis Exp $ */
+/* $OpenBSD: bcopyinout.S,v 1.10 2022/12/08 01:25:44 guenther Exp $ */
/* $NetBSD: bcopyinout.S,v 1.13 2003/10/31 16:54:05 scw Exp $ */
/*
#ifdef MULTIPROCESSOR
.Lcpu_info:
- .word _C_LABEL(cpu_info)
+ .word cpu_info
#else
.Lcpu_info_primary:
- .word _C_LABEL(cpu_info_primary)
+ .word cpu_info_primary
#endif
#define SAVE_REGS stmfd sp!, {r4-r11}
-/* $OpenBSD: bus_space_notimpl.S,v 1.3 2016/09/21 11:33:05 kettenis Exp $ */
+/* $OpenBSD: bus_space_notimpl.S,v 1.4 2022/12/08 01:25:44 guenther Exp $ */
/* $NetBSD: bus_space_notimpl.S,v 1.2 2001/09/10 02:20:19 reinoud Exp $ */
/*
#define NAME(func) __C(BUS_SPACE,__C(_bs_,func))
#define LNAME(func) __C(L,NAME(func))
-#define __L(x) _C_LABEL(x)
+#define __L(x) x
#define GLOBAL(func) .global __L(NAME(func))
#define LABEL(func) __L(NAME(func)):
#define LLABEL(func) LNAME(func):
adr r0, bs_notimpl_message
mov r1, r4
mov r2, sp
- b _C_LABEL(panic)
+ b panic
-/* $OpenBSD: copystr.S,v 1.9 2018/08/06 18:39:13 kettenis Exp $ */
+/* $OpenBSD: copystr.S,v 1.10 2022/12/08 01:25:44 guenther Exp $ */
/* $NetBSD: copystr.S,v 1.8 2002/10/13 14:54:48 bjh21 Exp $ */
/*
.align 2
#ifdef MULTIPROCESSOR
.Lcpu_info:
- .word _C_LABEL(cpu_info)
+ .word cpu_info
#else
.Lcpu_info_primary:
- .word _C_LABEL(cpu_info_primary)
+ .word cpu_info_primary
#endif
/*
mov r1, r0
adr r0, Lcopystrpcbfaulttext
bic sp, sp, #7 /* align stack to 8 bytes */
- b _C_LABEL(panic)
+ b panic
Lcopystrpcbfaulttext:
.asciz "No valid PCB during copyinoutstr() addr1=%08x addr2=%08x\n"
-/* $OpenBSD: cpufunc_asm_armv7.S,v 1.18 2020/01/06 19:12:39 kettenis Exp $ */
+/* $OpenBSD: cpufunc_asm_armv7.S,v 1.19 2022/12/08 01:25:44 guenther Exp $ */
/*
* Copyright (c) 2008 Dale Rahn <drahn@openbsd.org>
*
* operations.
*/
.Larmv7_dcache_line_size:
- .word _C_LABEL(arm_dcache_min_line_size)
+ .word arm_dcache_min_line_size
.Larmv7_icache_line_size:
- .word _C_LABEL(arm_icache_min_line_size)
+ .word arm_icache_min_line_size
.Larmv7_idcache_line_size:
- .word _C_LABEL(arm_idcache_min_line_size)
+ .word arm_idcache_min_line_size
s_max .req r0
i_max .req r1
/* XXX The following macros should probably be moved to asm.h */
#define _DATA_OBJECT(x) .globl x; .type x,_ASM_TYPE_OBJECT; x:
-#define C_OBJECT(x) _DATA_OBJECT(_C_LABEL(x))
+#define C_OBJECT(x) _DATA_OBJECT(x)
.align 2
C_OBJECT(armv7_dcache_sets_max)
-/* $OpenBSD: cpuswitch7.S,v 1.16 2020/03/11 21:04:58 deraadt Exp $ */
+/* $OpenBSD: cpuswitch7.S,v 1.17 2022/12/08 01:25:44 guenther Exp $ */
/* $NetBSD: cpuswitch.S,v 1.41 2003/11/15 08:44:18 scw Exp $ */
/*
.text
.Lcpufuncs:
- .word _C_LABEL(cpufuncs)
+ .word cpufuncs
.Lcpu_do_powersave:
- .word _C_LABEL(cpu_do_powersave)
+ .word cpu_do_powersave
/*
* Idle loop, exercised while waiting for a process to wake up.
ENTRY(proc_trampoline)
#ifdef MULTIPROCESSOR
- bl _C_LABEL(proc_trampoline_mp)
+ bl proc_trampoline_mp
#endif
mov r0, #(IPL_NONE)
- bl _C_LABEL(_spllower)
+ bl _spllower
mov r0, r5
mov r1, sp
-/* $OpenBSD: exception.S,v 1.10 2020/03/11 21:04:58 deraadt Exp $ */
+/* $OpenBSD: exception.S,v 1.11 2022/12/08 01:25:44 guenther Exp $ */
/* $NetBSD: exception.S,v 1.13 2003/10/31 16:30:15 scw Exp $ */
/*
and r0, r0, #(PSR_MODE) /* Returning to USR mode? */ ;\
cmp r0, #(PSR_USR32_MODE) ;\
bne 1f ;\
- bl _C_LABEL(vfp_enable) ;\
+ bl vfp_enable ;\
1:
AST_LOCALS
ASENTRY_NP(reset_entry)
adr r0, Lreset_panicmsg
mov r1, lr
- bl _C_LABEL(panic)
+ bl panic
/* NOTREACHED */
Lreset_panicmsg:
.asciz "Reset vector called, LR = 0x%08x"
PUSHFRAME
mov r0, sp /* Pass the frame to any function */
- bl _C_LABEL(swi_handler) /* It's a SWI ! */
+ bl swi_handler /* It's a SWI ! */
DO_AST
PULLFRAME
ldr pc, [r1]
Lprefetch_abort_handler_address:
- .word _C_LABEL(prefetch_abort_handler_address)
+ .word prefetch_abort_handler_address
.data
- .global _C_LABEL(prefetch_abort_handler_address)
+ .global prefetch_abort_handler_address
-_C_LABEL(prefetch_abort_handler_address):
+prefetch_abort_handler_address:
.word abortprefetch
.text
abortprefetch:
adr r0, abortprefetchmsg
- b _C_LABEL(panic)
+ b panic
abortprefetchmsg:
.asciz "abortprefetch"
ldr pc, [r1]
Ldata_abort_handler_address:
- .word _C_LABEL(data_abort_handler_address)
+ .word data_abort_handler_address
.data
- .global _C_LABEL(data_abort_handler_address)
-_C_LABEL(data_abort_handler_address):
+ .global data_abort_handler_address
+data_abort_handler_address:
.word abortdata
.text
abortdata:
adr r0, abortdatamsg
- b _C_LABEL(panic)
+ b panic
abortdatamsg:
.asciz "abortdata"
mrs r2, spsr
mov r3, lr
adr r0, Laddress_exception_msg
- bl _C_LABEL(printf) /* XXX CLOBBERS LR!! */
+ bl printf /* XXX CLOBBERS LR!! */
b data_abort_entry
Laddress_exception_msg:
.asciz "Address Exception CPSR=0x%08x SPSR=0x%08x LR=0x%08x\n"
mov r0, sp
adr lr, exception_exit
- b _C_LABEL(undefinedinstruction)
+ b undefinedinstruction
.data
.align 2
.word 0
.word 0
- .global _C_LABEL(undefined_handler_address)
-_C_LABEL(undefined_handler_address):
- .word _C_LABEL(undefinedinstruction_bounce)
+ .global undefined_handler_address
+undefined_handler_address:
+ .word undefinedinstruction_bounce
-/* $OpenBSD: fiq_subr.S,v 1.4 2015/01/18 14:55:02 jsg Exp $ */
+/* $OpenBSD: fiq_subr.S,v 1.5 2022/12/08 01:25:44 guenther Exp $ */
/* $NetBSD: fiq_subr.S,v 1.3 2002/04/12 18:50:31 thorpej Exp $ */
/*
* Null handler copied down to the FIQ vector when the last
* FIQ handler is removed.
*/
- .global _C_LABEL(fiq_nullhandler), _C_LABEL(fiq_nullhandler_end)
-_C_LABEL(fiq_nullhandler):
+ .global fiq_nullhandler, fiq_nullhandler_end
+fiq_nullhandler:
subs pc, lr, #4
-_C_LABEL(fiq_nullhandler_end):
+fiq_nullhandler_end:
-/* $OpenBSD: in_cksum_arm.S,v 1.8 2020/02/28 11:38:56 jsg Exp $ */
+/* $OpenBSD: in_cksum_arm.S,v 1.9 2022/12/08 01:25:44 guenther Exp $ */
/* $NetBSD: in_cksum_arm.S,v 1.3 2003/11/26 10:31:53 rearnsha Exp $ */
/*
eor r11, r10, r0
add r10, r10, r1
adds r2, r1, #0x00
- blne _ASM_LABEL(L_cksumdata)
+ blne L_cksumdata
tst r11, #0x01
movne r2, r2, ror #8
adds r8, r8, r2
.Lin4_cksum_whoops:
adr r0, .Lin4_cksum_whoops_str
- bl _C_LABEL(panic)
+ bl panic
.Lin4_cksum_whoops_str:
.asciz "in4_cksum: out of mbufs\n"
.align 5
-/* $OpenBSD: irq_dispatch.S,v 1.16 2020/03/11 21:04:58 deraadt Exp $ */
+/* $OpenBSD: irq_dispatch.S,v 1.17 2022/12/08 01:25:44 guenther Exp $ */
/* $NetBSD: irq_dispatch.S,v 1.5 2003/10/30 08:57:24 scw Exp $ */
/*
.text
.align 2
.Lcpu_info_primary:
- .word _C_LABEL(cpu_info_primary)
+ .word cpu_info_primary
#define STOREVFP \
- bl _C_LABEL(vfp_save)
+ bl vfp_save
AST_LOCALS
.bss
.align 2
- .global _C_LABEL(astpending)
-_C_LABEL(astpending):
+ .global astpending
+astpending:
.word 0
-/* $OpenBSD: locore.S,v 1.22 2020/05/18 17:24:03 deraadt Exp $ */
+/* $OpenBSD: locore.S,v 1.23 2022/12/08 01:25:44 guenther Exp $ */
/* $NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $ */
/*
mov r1, r5
mov r2, r6
mov r3, r7
- bl _C_LABEL(initarm) /* Off we go */
+ bl initarm /* Off we go */
/* init arm will return the new stack pointer. */
mov sp, r0
stmfd sp!, {fp, ip, lr, pc}
sub fp, ip, #4
- bl _C_LABEL(main) /* call main()! */
+ bl main /* call main()! */
adr r0, .Lmainreturned
- b _C_LABEL(panic)
+ b panic
/* NOTREACHED */
.Lstart:
.word _edata
.word _end
- .word _C_LABEL(cpu_info_primary)
+ .word cpu_info_primary
.word svcstk + INIT_ARM_STACK_SIZE
.Lmainreturned:
/* OFW based systems will use OF_boot() */
.Lcpufuncs:
- .word _C_LABEL(cpufuncs)
+ .word cpufuncs
ENTRY_NP(cpu_reset)
mrs r2, cpsr
* This variable is provided by the hardware specific code
*/
.Lcpu_reset_address:
- .word _C_LABEL(cpu_reset_address)
+ .word cpu_reset_address
#endif /* OFW */
.data
.align 2
- .global _C_LABEL(esym)
-_C_LABEL(esym): .word _C_LABEL(end)
+ .global esym
+esym: .word end
ENTRY_NP(abort)
- b _C_LABEL(abort)
+ b abort
-/* $OpenBSD: sigcode.S,v 1.11 2021/06/28 18:21:08 kettenis Exp $ */
+/* $OpenBSD: sigcode.S,v 1.12 2022/12/08 01:25:44 guenther Exp $ */
/* $NetBSD: sigcode.S,v 1.6 2003/10/05 19:44:58 matt Exp $ */
/*
swi 0
dsb nsh
isb
- .globl _C_LABEL(sigcoderet)
-_C_LABEL(sigcoderet):
+ .globl sigcoderet
+sigcoderet:
/* Well if that failed we better exit quick ! */
mov r12, #SYS_exit
swi 0
dsb nsh
isb
- .global _C_LABEL(esigcode)
-_C_LABEL(esigcode):
+ .global esigcode
+esigcode:
.globl sigfill
sigfill:
-/* $OpenBSD: vectors.S,v 1.3 2018/08/06 18:39:13 kettenis Exp $ */
+/* $OpenBSD: vectors.S,v 1.4 2022/12/08 01:25:44 guenther Exp $ */
/* $NetBSD: vectors.S,v 1.4 2002/08/17 16:36:32 thorpej Exp $ */
/*
.text
.align 2
- .global _C_LABEL(page0), _C_LABEL(page0_data), _C_LABEL(page0_end)
- .global _C_LABEL(fiqvector)
+ .global page0, page0_data, page0_end
+ .global fiqvector
-_C_LABEL(page0):
+page0:
ldr pc, .Lreset_target
ldr pc, .Lundefined_target
ldr pc, .Lswi_target
ldr pc, .Lfiq_target
#else
.Lfiqvector:
- .set _C_LABEL(fiqvector), . - _C_LABEL(page0)
+ .set fiqvector, . - page0
subs pc, lr, #4
.org .Lfiqvector + 0x100
#endif
-_C_LABEL(page0_data):
+page0_data:
.Lreset_target:
.word reset_entry
#ifdef __ARM_FIQ_INDIRECT
.Lfiq_target:
- .word _C_LABEL(fiqvector)
+ .word fiqvector
#else
.word 0 /* pad it out */
#endif
-_C_LABEL(page0_end):
+page0_end:
#ifdef __ARM_FIQ_INDIRECT
.data
.align 2
-_C_LABEL(fiqvector):
+fiqvector:
subs pc, lr, #4
- .org _C_LABEL(fiqvector) + 0x100
+ .org fiqvector + 0x100
#endif
-/* $OpenBSD: asm.h,v 1.12 2022/08/30 16:26:29 miod Exp $ */
+/* $OpenBSD: asm.h,v 1.13 2022/12/08 01:25:44 guenther Exp $ */
/* $NetBSD: asm.h,v 1.4 2001/07/16 05:43:32 matt Exp $ */
/*
# define _PROF_PROLOGUE
#endif
-#define ENTRY(y) _ENTRY(_C_LABEL(y)); _PROF_PROLOGUE
-#define ENTRY_NP(y) _ENTRY(_C_LABEL(y))
+#define ENTRY(y) _ENTRY(y); _PROF_PROLOGUE
+#define ENTRY_NP(y) _ENTRY(y)
#define ENTRY_NB(y) _ENTRY_NB(y); _PROF_PROLOGUE
-#define ASENTRY(y) _ENTRY(_ASM_LABEL(y)); _PROF_PROLOGUE
-#define ASENTRY_NP(y) _ENTRY(_ASM_LABEL(y))
+#define ASENTRY(y) _ENTRY(y); _PROF_PROLOGUE
+#define ASENTRY_NP(y) _ENTRY(y)
#define END(y) .size y, . - y
#if defined(__PIC__)
-/* $OpenBSD: frame.h,v 1.13 2018/06/30 15:23:37 deraadt Exp $ */
+/* $OpenBSD: frame.h,v 1.14 2022/12/08 01:25:44 guenther Exp $ */
/* $NetBSD: frame.h,v 1.9 2003/12/01 08:48:33 scw Exp $ */
/*
#define AST_LOCALS \
.Laflt_astpending: ;\
- .word _C_LABEL(astpending)
+ .word astpending
#define DO_AST \
ldr r0, [sp] /* Get the SPSR from stack */ ;\
msr cpsr_c, r4 /* Restore interrupts */ ;\
mov r0, sp ;\
adr lr, 1b ;\
- b _C_LABEL(ast) /* ast(frame) */ ;\
+ b ast /* ast(frame) */ ;\
2:
/*
-/* $OpenBSD: cpuswitch.S,v 1.5 2022/01/01 18:52:36 kettenis Exp $ */
+/* $OpenBSD: cpuswitch.S,v 1.6 2022/12/08 01:25:44 guenther Exp $ */
/*
* Copyright (c) 2015 Dale Rahn <drahn@dalerahn.com>
*
ENTRY(proc_trampoline)
#ifdef MULTIPROCESSOR
- bl _C_LABEL(proc_trampoline_mp)
+ bl proc_trampoline_mp
#endif
// call it or just set the variable?
mov x0, IPL_NONE
-/* $OpenBSD: exception.S,v 1.13 2022/01/01 18:52:36 kettenis Exp $ */
+/* $OpenBSD: exception.S,v 1.14 2022/12/08 01:25:44 guenther Exp $ */
/*-
* Copyright (c) 2014 Andrew Turner
* All rights reserved.
/* handle the ast */
mov x0, sp
- bl _C_LABEL(ast)
+ bl ast
b 1b
2:
.endm
-/* $OpenBSD: locore.S,v 1.40 2022/07/13 09:28:18 kettenis Exp $ */
+/* $OpenBSD: locore.S,v 1.41 2022/12/08 01:25:44 guenther Exp $ */
/*-
* Copyright (c) 2012-2014 Andrew Turner
* All rights reserved.
b abort
.data
- .global _C_LABEL(esym)
-_C_LABEL(esym): .xword _C_LABEL(end)
+ .global esym
+esym: .xword end
data_align_pad:
.space 32
initstack_end:
.text
- .globl _C_LABEL(sigcode)
- .type _C_LABEL(sigcode),@function
-_C_LABEL(sigcode):
+ .globl sigcode
+ .type sigcode,@function
+sigcode:
sub sp, sp, #17 * 32
mov x3, sp
stp q0, q1, [x3], #32
svc 0
dsb nsh
isb
- .globl _C_LABEL(sigcoderet)
-_C_LABEL(sigcoderet):
+ .globl sigcoderet
+sigcoderet:
/* sigreturn failed, exit */
mov x8, #SYS_exit
dsb nsh
isb
END(sigcode)
- .global _C_LABEL(esigcode)
-_C_LABEL(esigcode):
+ .global esigcode
+esigcode:
.globl sigfill
sigfill:
-/* $OpenBSD: locore0.S,v 1.7 2022/03/18 01:49:47 jsg Exp $ */
+/* $OpenBSD: locore0.S,v 1.8 2022/12/08 01:25:44 guenther Exp $ */
/*-
* Copyright (c) 2012-2014 Andrew Turner
* All rights reserved.
mov fp, #0
/* Branch to C code */
bl initarm
- bl _C_LABEL(main)
+ bl main
/* We should not get here */
brk 0
-/* $OpenBSD: asm.h,v 1.10 2022/08/30 16:26:29 miod Exp $ */
+/* $OpenBSD: asm.h,v 1.11 2022/12/08 01:25:44 guenther Exp $ */
/* $NetBSD: asm.h,v 1.4 2001/07/16 05:43:32 matt Exp $ */
/*
# define RETGUARD_SYMBOL(x)
#endif
-#define ENTRY(y) _ENTRY(_C_LABEL(y)); _PROF_PROLOGUE
-#define ENTRY_NP(y) _ENTRY(_C_LABEL(y))
+#define ENTRY(y) _ENTRY(y); _PROF_PROLOGUE
+#define ENTRY_NP(y) _ENTRY(y)
#define ENTRY_NB(y) _ENTRY_NB(y); _PROF_PROLOGUE
-#define ASENTRY(y) _ENTRY(_ASM_LABEL(y)); _PROF_PROLOGUE
-#define ASENTRY_NP(y) _ENTRY(_ASM_LABEL(y))
+#define ASENTRY(y) _ENTRY(y); _PROF_PROLOGUE
+#define ASENTRY_NP(y) _ENTRY(y)
#define END(y) .size y, . - y
#define EENTRY(sym) .globl sym; sym:
#define EEND(sym)
-/* $OpenBSD: locore0.S,v 1.7 2022/01/02 23:29:12 jsg Exp $ */
+/* $OpenBSD: locore0.S,v 1.8 2022/12/08 01:25:44 guenther Exp $ */
/* $NetBSD: lubbock_start.S,v 1.1 2003/06/18 10:51:15 bsh Exp $ */
/*
*/
.text
- .global _C_LABEL(bootstrap_start)
-_C_LABEL(bootstrap_start):
+ .global bootstrap_start
+bootstrap_start:
/* Save U-Boot arguments */
mov r6, r0
mov r7, r1
-/* $OpenBSD: start.S,v 1.3 2018/03/31 18:07:14 patrick Exp $ */
+/* $OpenBSD: start.S,v 1.4 2022/12/08 01:25:44 guenther Exp $ */
/*-
* Copyright (c) 2014, 2015 Andrew Turner
* All rights reserved.
ldr r1, .Ldynamic
add r1, r1, r5
- bl _C_LABEL(self_reloc)
+ bl self_reloc
/* Zero the BSS, _reloc fixed the values for us */
ldr r0, .Lbss
2:
pop {r0, r1}
- bl _C_LABEL(efi_main)
+ bl efi_main
1: b 1b
#define addr32
#endif
-#define _ACPI_TRMP_LABEL(a) a = . - _C_LABEL(acpi_real_mode_resume) + ACPI_TRAMPOLINE
-#define _ACPI_TRMP_OFFSET(a) a = . - _C_LABEL(acpi_real_mode_resume)
-#define _ACPI_TRMP_DATA_LABEL(a) a = . - _C_LABEL(acpi_tramp_data_start) + \
+#define _ACPI_TRMP_LABEL(a) a = . - acpi_real_mode_resume + ACPI_TRAMPOLINE
+#define _ACPI_TRMP_OFFSET(a) a = . - acpi_real_mode_resume
+#define _ACPI_TRMP_DATA_LABEL(a) a = . - acpi_tramp_data_start + \
ACPI_TRAMP_DATA
-#define _ACPI_TRMP_DATA_OFFSET(a) a = . - _C_LABEL(acpi_tramp_data_start)
+#define _ACPI_TRMP_DATA_OFFSET(a) a = . - acpi_tramp_data_start
#define _ACPI_RM_CODE_SEG (ACPI_TRAMPOLINE >> 4)
#define _ACPI_RM_DATA_SEG (ACPI_TRAMP_DATA >> 4)
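
The _ACPI_TRMP_* macros above bias label values because this wakeup code is assembled into the kernel image but executed from low-memory copies: code labels must resolve relative to ACPI_TRAMPOLINE and data labels relative to ACPI_TRAMP_DATA, matching where the kernel copies them. A minimal sketch of that copy step, assuming the trampoline pages are mapped at those addresses (the helper name and details are assumptions, not kernel source):

    #include <string.h>

    /* ACPI_TRAMPOLINE / ACPI_TRAMP_DATA come from the machine headers. */
    extern char acpi_real_mode_resume[], acpi_resume_end[];
    extern char acpi_tramp_data_start[], acpi_tramp_data_end[];

    /* Sketch: install the resume stub where its labels say it will run. */
    static void
    acpi_install_trampoline(void)
    {
            memcpy((void *)ACPI_TRAMPOLINE, acpi_real_mode_resume,
                acpi_resume_end - acpi_real_mode_resume);
            memcpy((void *)ACPI_TRAMP_DATA, acpi_tramp_data_start,
                acpi_tramp_data_end - acpi_tramp_data_start);
    }
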
.text
.code16
.align 4, 0xcc
- .global _C_LABEL(acpi_real_mode_resume)
- .global _C_LABEL(acpi_protected_mode_resume)
- .global _C_LABEL(acpi_resume_end)
- .global _C_LABEL(acpi_tramp_data_start)
- .global _C_LABEL(acpi_tramp_data_end)
-_C_LABEL(acpi_real_mode_resume):
+ .global acpi_real_mode_resume
+ .global acpi_protected_mode_resume
+ .global acpi_resume_end
+ .global acpi_tramp_data_start
+ .global acpi_tramp_data_end
+acpi_real_mode_resume:
_ACPI_TRMP_OFFSET(.Lacpi_s3_vector_real)
nop
cli
.code32
.align 16, 0xcc
_ACPI_TRMP_LABEL(.Lacpi_protected_mode_trampoline)
-_C_LABEL(acpi_protected_mode_resume):
+acpi_protected_mode_resume:
nop
/*
/*
* End of resume code (code copied to ACPI_TRAMPOLINE)
*/
-_C_LABEL(acpi_resume_end):
+acpi_resume_end:
/*
* Initial copy of this data gets placed in .rodata, kernel makes
* RW copy of it in the tramp data page.
*/
.section .rodata
-_C_LABEL(acpi_tramp_data_start):
+acpi_tramp_data_start:
_ACPI_TRMP_DATA_OFFSET(.Ltmp_gdt)
.word .Ltmp_gdt_end - .Ltmp_gdtable
.long .Ltmp_gdtable
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_tr)
.short 0xcccc
-_C_LABEL(acpi_tramp_data_end):
+acpi_tramp_data_end:
/*
* acpi_savecpu saves the processor's registers and flags
-/* $OpenBSD: apicvec.s,v 1.35 2018/06/18 23:15:05 bluhm Exp $ */
+/* $OpenBSD: apicvec.s,v 1.36 2022/12/08 01:25:44 guenther Exp $ */
/* $NetBSD: apicvec.s,v 1.1.2.2 2000/02/21 21:54:01 sommerfeld Exp $ */
/*-
#include <machine/i82093reg.h>
#include <machine/i82489reg.h>
- .globl _C_LABEL(apic_stray)
+ .globl apic_stray
#ifdef MULTIPROCESSOR
IDTVEC(intripi)
subl $8,%esp /* space for tf_{err,trapno} */
INTRENTRY(ipi)
pushl CPL
- movl _C_LABEL(lapic_ppr),%eax
+ movl lapic_ppr,%eax
movl %eax,CPL
ioapic_asm_ack()
sti /* safe to take interrupts.. */
- call _C_LABEL(i386_ipi_handler)
+ call i386_ipi_handler
cli
popl CPL
#ifdef DIAGNOSTIC
subl $8,%esp /* space for tf_{err,trapno} */
INTRENTRY(ltimer)
pushl CPL
- movl _C_LABEL(lapic_ppr),%eax
+ movl lapic_ppr,%eax
movl %eax,CPL
ioapic_asm_ack()
sti
incl CPUVAR(IDEPTH)
movl %esp,%eax
pushl %eax
- call _C_LABEL(lapic_clockintr)
+ call lapic_clockintr
addl $4,%esp
decl CPUVAR(IDEPTH)
- jmp _C_LABEL(Xdoreti)
+ jmp Xdoreti
KIDTVEC(intrsoftclock)
subl $8,%esp /* space for tf_{err,trapno} */
sti
incl CPUVAR(IDEPTH)
pushl $I386_SOFTINTR_SOFTCLOCK
- call _C_LABEL(softintr_dispatch)
+ call softintr_dispatch
addl $4,%esp
decl CPUVAR(IDEPTH)
- jmp _C_LABEL(Xdoreti)
+ jmp Xdoreti
KIDTVEC(intrsoftnet)
subl $8,%esp /* space for tf_{err,trapno} */
sti
incl CPUVAR(IDEPTH)
pushl $I386_SOFTINTR_SOFTNET
- call _C_LABEL(softintr_dispatch)
+ call softintr_dispatch
addl $4,%esp
decl CPUVAR(IDEPTH)
- jmp _C_LABEL(Xdoreti)
+ jmp Xdoreti
#undef DONETISR
KIDTVEC(intrsofttty)
sti
incl CPUVAR(IDEPTH)
pushl $I386_SOFTINTR_SOFTTTY
- call _C_LABEL(softintr_dispatch)
+ call softintr_dispatch
addl $4,%esp
decl CPUVAR(IDEPTH)
- jmp _C_LABEL(Xdoreti)
+ jmp Xdoreti
#if NIOAPIC > 0
subl $8,%esp /* space for tf_{err,trapno} */ ;\
INTRENTRY(intr_##name##num) ;\
pushl CPL ;\
- movl _C_LABEL(lapic_ppr),%eax ;\
+ movl lapic_ppr,%eax ;\
orl $num,%eax ;\
- movl _C_LABEL(apic_maxlevel)(,%eax,4),%ebx ;\
+ movl apic_maxlevel(,%eax,4),%ebx ;\
movl %ebx,CPL ;\
mask(num) /* mask it in hardware */ ;\
early_ack(num) /* and allow other intrs */ ;\
- incl _C_LABEL(uvmexp)+V_INTR /* statistical info */ ;\
+ incl uvmexp+V_INTR /* statistical info */ ;\
sti ;\
- movl _C_LABEL(apic_intrhand)(,%eax,4),%ebx /* chain head */ ;\
+ movl apic_intrhand(,%eax,4),%ebx /* chain head */ ;\
testl %ebx,%ebx ;\
- jz _C_LABEL(Xstray_##name##num) ;\
+ jz Xstray_##name##num ;\
APIC_STRAY_INIT /* nobody claimed it yet */ ;\
7: incl CPUVAR(IDEPTH) ;\
movl %esp, %eax /* save frame pointer in eax */ ;\
pushl %ebx /* arg 2: ih structure */ ;\
pushl %eax /* arg 1: frame pointer */ ;\
- call _C_LABEL(intr_handler) /* call it */ ;\
+ call intr_handler /* call it */ ;\
addl $8, %esp /* toss args */ ;\
APIC_STRAY_INTEGRATE /* maybe he claimed it */ ;\
orl %eax,%eax /* should it be counted? */ ;\
jz 4f ;\
addl $1,IH_COUNT(%ebx) /* count the intrs */ ;\
adcl $0,IH_COUNT+4(%ebx) ;\
- cmpl $0,_C_LABEL(intr_shared_edge) ;\
+ cmpl $0,intr_shared_edge ;\
jne 4f /* if no shared edges ... */ ;\
orl %eax,%eax /* ... 1 means stop trying */ ;\
js 4f ;\
8: \
unmask(num) /* unmask it in hardware */ ;\
late_ack(num) ;\
- jmp _C_LABEL(Xdoreti) ;\
-_C_LABEL(Xstray_##name##num): \
+ jmp Xdoreti ;\
+Xstray_##name##num: \
pushl $num ;\
- call _C_LABEL(apic_stray) ;\
+ call apic_stray ;\
addl $4,%esp ;\
jmp 8b ;\
orl %eax,%esi
#define APIC_STRAY_TEST(name,num) \
testl %esi,%esi ;\
- jz _C_LABEL(Xstray_##name##num)
+ jz Xstray_##name##num
#else /* !DEBUG */
#define APIC_STRAY_INIT
#define APIC_STRAY_INTEGRATE
APICINTR(ioapic,14, voidop, ioapic_asm_ack, voidop, voidop, voidop)
APICINTR(ioapic,15, voidop, ioapic_asm_ack, voidop, voidop, voidop)
- .globl _C_LABEL(Xintr_ioapic0),_C_LABEL(Xintr_ioapic1)
- .globl _C_LABEL(Xintr_ioapic2),_C_LABEL(Xintr_ioapic3)
- .globl _C_LABEL(Xintr_ioapic4),_C_LABEL(Xintr_ioapic5)
- .globl _C_LABEL(Xintr_ioapic6),_C_LABEL(Xintr_ioapic7)
- .globl _C_LABEL(Xintr_ioapic8),_C_LABEL(Xintr_ioapic9)
- .globl _C_LABEL(Xintr_ioapic10),_C_LABEL(Xintr_ioapic11)
- .globl _C_LABEL(Xintr_ioapic12),_C_LABEL(Xintr_ioapic13)
- .globl _C_LABEL(Xintr_ioapic14),_C_LABEL(Xintr_ioapic15)
- .globl _C_LABEL(apichandler)
-
-_C_LABEL(apichandler):
- .long _C_LABEL(Xintr_ioapic0),_C_LABEL(Xintr_ioapic1)
- .long _C_LABEL(Xintr_ioapic2),_C_LABEL(Xintr_ioapic3)
- .long _C_LABEL(Xintr_ioapic4),_C_LABEL(Xintr_ioapic5)
- .long _C_LABEL(Xintr_ioapic6),_C_LABEL(Xintr_ioapic7)
- .long _C_LABEL(Xintr_ioapic8),_C_LABEL(Xintr_ioapic9)
- .long _C_LABEL(Xintr_ioapic10),_C_LABEL(Xintr_ioapic11)
- .long _C_LABEL(Xintr_ioapic12),_C_LABEL(Xintr_ioapic13)
- .long _C_LABEL(Xintr_ioapic14),_C_LABEL(Xintr_ioapic15)
+ .globl Xintr_ioapic0,Xintr_ioapic1
+ .globl Xintr_ioapic2,Xintr_ioapic3
+ .globl Xintr_ioapic4,Xintr_ioapic5
+ .globl Xintr_ioapic6,Xintr_ioapic7
+ .globl Xintr_ioapic8,Xintr_ioapic9
+ .globl Xintr_ioapic10,Xintr_ioapic11
+ .globl Xintr_ioapic12,Xintr_ioapic13
+ .globl Xintr_ioapic14,Xintr_ioapic15
+ .globl apichandler
+
+apichandler:
+ .long Xintr_ioapic0,Xintr_ioapic1
+ .long Xintr_ioapic2,Xintr_ioapic3
+ .long Xintr_ioapic4,Xintr_ioapic5
+ .long Xintr_ioapic6,Xintr_ioapic7
+ .long Xintr_ioapic8,Xintr_ioapic9
+ .long Xintr_ioapic10,Xintr_ioapic11
+ .long Xintr_ioapic12,Xintr_ioapic13
+ .long Xintr_ioapic14,Xintr_ioapic15
#endif
-/* $OpenBSD: apmcall.S,v 1.6 2013/11/28 19:30:46 brad Exp $ */
+/* $OpenBSD: apmcall.S,v 1.7 2022/12/08 01:25:44 guenther Exp $ */
/*
* Copyright (c) 2000,2001 Michael Shalayeff
#endif /* APM_DISABLE_INTERRUPTS */
.data
- .globl _C_LABEL(apm_ep)
- .globl _C_LABEL(apm_cli)
-_C_LABEL(apm_cli):
+ .globl apm_ep
+ .globl apm_cli
+apm_cli:
.long APM_DISABLE_INTERRUPTS
/*
* int apmcall(u_int f, int dev, struct apmregs *r)
pushfl
- cmpl $0, _C_LABEL(apm_cli)
+ cmpl $0, apm_cli
je 1f
cli
1:
movl 8(%ebp), %eax
clc
- lcall *%cs:(_C_LABEL(apm_ep))
+ lcall *%cs:(apm_ep)
pushl %eax
setc %al
movzbl %al, %esi
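
apmcall() above is the thunk that calls into the APM BIOS through the apm_ep far pointer and records the BIOS carry flag (the setc above) as its error indication; its prototype is given in the comment earlier in this hunk. A hedged caller sketch (the struct layout is a stand-in and the function code is the APM spec value, neither copied from apmvar.h):

    #include <sys/types.h>
    #include <string.h>

    struct apmregs { u_int ax, bx, cx, dx; };       /* stand-in layout */
    int     apmcall(u_int f, int dev, struct apmregs *r);

    static int
    apm_cpu_idle_sketch(void)
    {
            struct apmregs regs;

            memset(&regs, 0, sizeof(regs));
            /* 0x05 = APM "CPU idle" function in the APM 1.2 spec. */
            return apmcall(0x05, 0, &regs);
    }
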
-/* $OpenBSD: in_cksum.s,v 1.9 2017/06/29 17:17:28 deraadt Exp $ */
+/* $OpenBSD: in_cksum.s,v 1.10 2022/12/08 01:25:44 guenther Exp $ */
/* $NetBSD: in_cksum.S,v 1.2 2003/08/07 16:27:54 agc Exp $ */
/*-
.Lout_of_mbufs:
pushl $cksum_ood
- call _C_LABEL(printf)
+ call printf
leal 4(%esp), %esp
jmp .Lreturn
-/* $OpenBSD: locore.s,v 1.197 2022/08/22 09:33:40 jsg Exp $ */
+/* $OpenBSD: locore.s,v 1.198 2022/12/08 01:25:44 guenther Exp $ */
/* $NetBSD: locore.s,v 1.145 1996/05/03 19:41:19 christos Exp $ */
/*-
#define _ALIGN_TEXT ALIGN_TEXT
#include <machine/asm.h>
-#define CPL _C_LABEL(lapic_tpr)
+#define CPL lapic_tpr
#define GET_CURPCB(reg) \
movl CPUVAR(CURPCB), reg
* PTmap is recursive pagemap at top of virtual address space.
* Within PTmap, the page directory can be found (third indirection).
*/
- .globl _C_LABEL(PTmap), _C_LABEL(PTD)
- .set _C_LABEL(PTmap), (PDSLOT_PTE << PDSHIFT)
- .set _C_LABEL(PTD), (_C_LABEL(PTmap) + PDSLOT_PTE * NBPG)
+ .globl PTmap, PTD
+ .set PTmap, (PDSLOT_PTE << PDSHIFT)
+ .set PTD, (PTmap + PDSLOT_PTE * NBPG)
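
The two .set lines just above implement the recursive mapping the comment describes: page-directory slot PDSLOT_PTE points back at the page directory itself, so every PTE in the address space appears as one flat array starting at PTmap, and the directory itself appears one level deeper at PTD. A non-PAE sketch of the resulting address arithmetic (constant values are illustrative only):

    #include <stdint.h>

    #define PGSHIFT         12                      /* 4KB pages */
    #define NBPG            (1u << PGSHIFT)
    #define PDSHIFT         22                      /* VA bits mapped by one PDE */
    #define PDSLOT_PTE      0x33fu                  /* illustrative recursive slot */

    #define PTmap   ((uint32_t *)(PDSLOT_PTE << PDSHIFT))
    #define PTD     ((uint32_t *)((PDSLOT_PTE << PDSHIFT) + PDSLOT_PTE * NBPG))

    /* PTE and PDE covering a given virtual address. */
    #define vtopte(va)      (&PTmap[(uint32_t)(va) >> PGSHIFT])
    #define vtopde(va)      (&PTD[(uint32_t)(va) >> PDSHIFT])
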
/*
* Initialization
*/
.data
- .globl _C_LABEL(cpu_id), _C_LABEL(cpu_vendor)
- .globl _C_LABEL(cpu_brandstr)
- .globl _C_LABEL(cpuid_level)
- .globl _C_LABEL(cpu_miscinfo)
- .globl _C_LABEL(cpu_feature), _C_LABEL(cpu_ecxfeature)
- .globl _C_LABEL(ecpu_feature), _C_LABEL(ecpu_eaxfeature)
- .globl _C_LABEL(ecpu_ecxfeature)
- .globl _C_LABEL(cpu_cache_eax), _C_LABEL(cpu_cache_ebx)
- .globl _C_LABEL(cpu_cache_ecx), _C_LABEL(cpu_cache_edx)
- .globl _C_LABEL(cpu_perf_eax)
- .globl _C_LABEL(cpu_perf_ebx)
- .globl _C_LABEL(cpu_perf_edx)
- .globl _C_LABEL(cpu_apmi_edx)
- .globl _C_LABEL(cold), _C_LABEL(cnvmem), _C_LABEL(extmem)
- .globl _C_LABEL(cpu_pae)
- .globl _C_LABEL(esym)
- .globl _C_LABEL(ssym)
- .globl _C_LABEL(nkptp_max)
- .globl _C_LABEL(boothowto), _C_LABEL(bootdev), _C_LABEL(atdevbase)
- .globl _C_LABEL(proc0paddr), _C_LABEL(PTDpaddr), _C_LABEL(PTDsize)
- .globl _C_LABEL(gdt)
- .globl _C_LABEL(bootapiver), _C_LABEL(bootargc), _C_LABEL(bootargv)
- .globl _C_LABEL(lapic_tpr)
- .globl _C_LABEL(pg_g_kern)
- .globl _C_LABEL(cpu_meltdown)
+ .globl cpu_id, cpu_vendor
+ .globl cpu_brandstr
+ .globl cpuid_level
+ .globl cpu_miscinfo
+ .globl cpu_feature, cpu_ecxfeature
+ .globl ecpu_feature, ecpu_eaxfeature
+ .globl ecpu_ecxfeature
+ .globl cpu_cache_eax, cpu_cache_ebx
+ .globl cpu_cache_ecx, cpu_cache_edx
+ .globl cpu_perf_eax
+ .globl cpu_perf_ebx
+ .globl cpu_perf_edx
+ .globl cpu_apmi_edx
+ .globl cold, cnvmem, extmem
+ .globl cpu_pae
+ .globl esym
+ .globl ssym
+ .globl nkptp_max
+ .globl boothowto, bootdev, atdevbase
+ .globl proc0paddr, PTDpaddr, PTDsize
+ .globl gdt
+ .globl bootapiver, bootargc, bootargv
+ .globl lapic_tpr
+ .globl pg_g_kern
+ .globl cpu_meltdown
#if NLAPIC > 0
.align NBPG
- .globl _C_LABEL(local_apic), _C_LABEL(lapic_id)
-_C_LABEL(local_apic):
+ .globl local_apic, lapic_id
+local_apic:
.space LAPIC_ID
-_C_LABEL(lapic_id):
+lapic_id:
.long 0x00000000
.space LAPIC_TPRI-(LAPIC_ID+4)
-_C_LABEL(lapic_tpr):
+lapic_tpr:
.space LAPIC_PPRI-LAPIC_TPRI
-_C_LABEL(lapic_ppr):
+lapic_ppr:
.space LAPIC_ISR-LAPIC_PPRI
-_C_LABEL(lapic_isr):
+lapic_isr:
.space NBPG-LAPIC_ISR
#else
-_C_LABEL(lapic_tpr):
+lapic_tpr:
.long 0
#endif
-_C_LABEL(cpu_id): .long 0 # saved from 'cpuid' instruction
-_C_LABEL(cpu_pae): .long 0 # are we using PAE paging mode?
-_C_LABEL(cpu_miscinfo): .long 0 # misc info (apic/brand id) from 'cpuid'
-_C_LABEL(cpu_feature): .long 0 # feature flags from 'cpuid' instruction
-_C_LABEL(ecpu_feature): .long 0 # extended feature flags from 'cpuid'
-_C_LABEL(cpu_ecxfeature):.long 0 # ecx feature flags from 'cpuid'
-_C_LABEL(ecpu_eaxfeature): .long 0 # extended eax feature flags
-_C_LABEL(ecpu_ecxfeature): .long 0 # extended ecx feature flags
-_C_LABEL(cpuid_level): .long -1 # max. lvl accepted by 'cpuid' insn
-_C_LABEL(cpu_cache_eax):.long 0
-_C_LABEL(cpu_cache_ebx):.long 0
-_C_LABEL(cpu_cache_ecx):.long 0
-_C_LABEL(cpu_cache_edx):.long 0
-_C_LABEL(cpu_perf_eax): .long 0 # arch. perf. mon. flags from 'cpuid'
-_C_LABEL(cpu_perf_ebx): .long 0 # arch. perf. mon. flags from 'cpuid'
-_C_LABEL(cpu_perf_edx): .long 0 # arch. perf. mon. flags from 'cpuid'
-_C_LABEL(cpu_apmi_edx): .long 0 # adv. power management info. 'cpuid'
-_C_LABEL(cpu_vendor): .space 16 # vendor string returned by 'cpuid' instruction
-_C_LABEL(cpu_brandstr): .space 48 # brand string returned by 'cpuid'
-_C_LABEL(cold): .long 1 # cold till we are not
-_C_LABEL(ssym): .long 0 # ptr to start of syms
-_C_LABEL(esym): .long 0 # ptr to end of syms
-_C_LABEL(cnvmem): .long 0 # conventional memory size
-_C_LABEL(extmem): .long 0 # extended memory size
-_C_LABEL(atdevbase): .long 0 # location of start of iomem in virtual
-_C_LABEL(bootapiver): .long 0 # /boot API version
-_C_LABEL(bootargc): .long 0 # /boot argc
-_C_LABEL(bootargv): .long 0 # /boot argv
-_C_LABEL(bootdev): .long 0 # device we booted from
-_C_LABEL(proc0paddr): .long 0
-_C_LABEL(PTDpaddr): .long 0 # paddr of PTD, for libkvm
-_C_LABEL(PTDsize): .long NBPG # size of PTD, for libkvm
-_C_LABEL(pg_g_kern): .long 0 # 0x100 if global pages should be used
+cpu_id: .long 0 # saved from 'cpuid' instruction
+cpu_pae: .long 0 # are we using PAE paging mode?
+cpu_miscinfo: .long 0 # misc info (apic/brand id) from 'cpuid'
+cpu_feature: .long 0 # feature flags from 'cpuid' instruction
+ecpu_feature: .long 0 # extended feature flags from 'cpuid'
+cpu_ecxfeature: .long 0 # ecx feature flags from 'cpuid'
+ecpu_eaxfeature: .long 0 # extended eax feature flags
+ecpu_ecxfeature: .long 0 # extended ecx feature flags
+cpuid_level: .long -1 # max. lvl accepted by 'cpuid' insn
+cpu_cache_eax: .long 0
+cpu_cache_ebx: .long 0
+cpu_cache_ecx: .long 0
+cpu_cache_edx: .long 0
+cpu_perf_eax: .long 0 # arch. perf. mon. flags from 'cpuid'
+cpu_perf_ebx: .long 0 # arch. perf. mon. flags from 'cpuid'
+cpu_perf_edx: .long 0 # arch. perf. mon. flags from 'cpuid'
+cpu_apmi_edx: .long 0 # adv. power management info. 'cpuid'
+cpu_vendor: .space 16 # vendor string returned by 'cpuid' instruction
+cpu_brandstr: .space 48 # brand string returned by 'cpuid'
+cold: .long 1 # cold till we are not
+ssym: .long 0 # ptr to start of syms
+esym: .long 0 # ptr to end of syms
+cnvmem: .long 0 # conventional memory size
+extmem: .long 0 # extended memory size
+atdevbase: .long 0 # location of start of iomem in virtual
+bootapiver: .long 0 # /boot API version
+bootargc: .long 0 # /boot argc
+bootargv: .long 0 # /boot argv
+bootdev: .long 0 # device we booted from
+proc0paddr: .long 0
+PTDpaddr: .long 0 # paddr of PTD, for libkvm
+PTDsize: .long NBPG # size of PTD, for libkvm
+pg_g_kern: .long 0 # 0x100 if global pages should be used
# in kernel mappings, 0 otherwise (for
# insecure CPUs)
-_C_LABEL(cpu_meltdown): .long 0 # 1 if this CPU has Meltdown
+cpu_meltdown: .long 0 # 1 if this CPU has Meltdown
.text
NENTRY(proc_trampoline)
#ifdef MULTIPROCESSOR
- call _C_LABEL(proc_trampoline_mp)
+ call proc_trampoline_mp
#endif
movl $IPL_NONE,CPL
pushl %ebx
/* This must come before any use of the CODEPATCH macros */
.section .codepatch,"a"
.align 8
- .globl _C_LABEL(codepatch_begin)
-_C_LABEL(codepatch_begin):
+ .globl codepatch_begin
+codepatch_begin:
.previous
.section .codepatchend,"a"
- .globl _C_LABEL(codepatch_end)
-_C_LABEL(codepatch_end):
+ .globl codepatch_end
+codepatch_end:
.previous
/*****************************************************************************/
* Signal trampoline; copied to top of user stack.
*/
.section .rodata
- .globl _C_LABEL(sigcode)
-_C_LABEL(sigcode):
+ .globl sigcode
+sigcode:
call *SIGF_HANDLER(%esp)
leal SIGF_SC(%esp),%eax # scp (the call may have clobbered the
# copy at SIGF_SCP(%esp))
pushl %eax # junk to fake return address
movl $SYS_sigreturn,%eax
int $0x80 # enter kernel with args on stack
- .globl _C_LABEL(sigcoderet)
-_C_LABEL(sigcoderet):
+ .globl sigcoderet
+sigcoderet:
movl $SYS_exit,%eax
int $0x80 # exit if sigreturn fails
- .globl _C_LABEL(esigcode)
-_C_LABEL(esigcode):
+ .globl esigcode
+esigcode:
- .globl _C_LABEL(sigfill)
-_C_LABEL(sigfill):
+ .globl sigfill
+sigfill:
int3
-_C_LABEL(esigfill):
+esigfill:
.data
- .globl _C_LABEL(sigfillsiz)
-_C_LABEL(sigfillsiz):
- .long _C_LABEL(esigfill) - _C_LABEL(sigfill)
+ .globl sigfillsiz
+sigfillsiz:
+ .long esigfill - sigfill
.text
pushl %edi
GET_CURPCB(%eax) # load curpcb into eax and set on-fault
pushl PCB_ONFAULT(%eax)
- movl $_C_LABEL(copy_fault), PCB_ONFAULT(%eax)
+ movl $copy_fault, PCB_ONFAULT(%eax)
movl 16+FPADD(%esp),%esi
movl 20+FPADD(%esp),%edi
*/
movl %edi,%edx
addl %eax,%edx
- jc _C_LABEL(copy_fault)
+ jc copy_fault
cmpl $VM_MAXUSER_ADDRESS,%edx
- ja _C_LABEL(copy_fault)
+ ja copy_fault
GET_CURPCB(%edx)
- movl $_C_LABEL(copy_fault),PCB_ONFAULT(%edx)
+ movl $copy_fault,PCB_ONFAULT(%edx)
SMAP_STAC
/* bcopy(%esi, %edi, %eax); */
pushl %edi
GET_CURPCB(%eax)
pushl $0
- movl $_C_LABEL(copy_fault),PCB_ONFAULT(%eax)
+ movl $copy_fault,PCB_ONFAULT(%eax)
SMAP_STAC
movl 16+FPADD(%esp),%esi
*/
movl %esi,%edx
addl %eax,%edx
- jc _C_LABEL(copy_fault)
+ jc copy_fault
cmpl $VM_MAXUSER_ADDRESS,%edx
- ja _C_LABEL(copy_fault)
+ ja copy_fault
/* bcopy(%esi, %edi, %eax); */
movl %eax,%ecx
movl 20+FPADD(%esp),%edx # edx = maxlen
5: GET_CURPCB(%eax)
- movl $_C_LABEL(copystr_fault),PCB_ONFAULT(%eax)
+ movl $copystr_fault,PCB_ONFAULT(%eax)
SMAP_STAC
/*
* Get min(%edx, VM_MAXUSER_ADDRESS-%edi).
*/
movl $VM_MAXUSER_ADDRESS,%eax
subl %edi,%eax
- jbe _C_LABEL(copystr_fault) # die if CF == 1 || ZF == 1
+ jbe copystr_fault # die if CF == 1 || ZF == 1
# i.e. make sure that %edi
# is below VM_MAXUSER_ADDRESS
2: /* edx is zero -- return EFAULT or ENAMETOOLONG. */
cmpl $VM_MAXUSER_ADDRESS,%edi
- jae _C_LABEL(copystr_fault)
+ jae copystr_fault
movl $ENAMETOOLONG,%eax
jmp copystr_return
pushl %esi
pushl %edi
GET_CURPCB(%ecx)
- movl $_C_LABEL(copystr_fault),PCB_ONFAULT(%ecx)
+ movl $copystr_fault,PCB_ONFAULT(%ecx)
SMAP_STAC
movl 12+FPADD(%esp),%esi # %esi = from
*/
movl $VM_MAXUSER_ADDRESS,%eax
subl %esi,%eax
- jbe _C_LABEL(copystr_fault) # Error if CF == 1 || ZF == 1
+ jbe copystr_fault # Error if CF == 1 || ZF == 1
# i.e. make sure that %esi
# is below VM_MAXUSER_ADDRESS
cmpl %edx,%eax
2: /* edx is zero -- return EFAULT or ENAMETOOLONG. */
cmpl $VM_MAXUSER_ADDRESS,%esi
- jae _C_LABEL(copystr_fault)
+ jae copystr_fault
movl $ENAMETOOLONG,%eax
jmp copystr_return
*/
pushl %edi
pushl %esi
- call _C_LABEL(pmap_switch)
+ call pmap_switch
addl $8,%esp
/* Restore cr0 (including FPU state). */
ret
ENTRY(cpu_idle_enter)
- movl _C_LABEL(cpu_idle_enter_fcn),%eax
+ movl cpu_idle_enter_fcn,%eax
cmpl $0,%eax
je 1f
jmpl *%eax
ret
ENTRY(cpu_idle_cycle)
- movl _C_LABEL(cpu_idle_cycle_fcn),%eax
+ movl cpu_idle_cycle_fcn,%eax
cmpl $0,%eax
je 1f
call *%eax
ret
ENTRY(cpu_idle_leave)
- movl _C_LABEL(cpu_idle_leave_fcn),%eax
+ movl cpu_idle_leave_fcn,%eax
cmpl $0,%eax
je 1f
jmpl *%eax
* handler.
*/
-#define TRAP(a) pushl $(a) ; jmp _C_LABEL(alltraps)
+#define TRAP(a) pushl $(a) ; jmp alltraps
#define ZTRAP(a) pushl $0 ; TRAP(a)
IDTVEC(div)
INTRENTRY(dna)
sti
pushl CPUVAR(SELF)
- call *_C_LABEL(npxdna_func)
+ call *npxdna_func
addl $4,%esp
testl %eax,%eax
jz calltrap
pushl $T_PROTFLT
/* If iret faults, we'll get a trap at doreti_iret+3 with CPL == 0. */
pushl %eax
- leal _C_LABEL(doreti_iret+3),%eax
+ leal doreti_iret+3,%eax
cmpl %eax,12(%esp) /* over %eax, trapno and err to %eip */
popl %eax
jne 97f
testb $PGEX_U,TF_ERR(%esp)
jnz calltrap
movl %cr2,%eax
- subl _C_LABEL(idt),%eax
+ subl idt,%eax
cmpl $(6*8),%eax
jne calltrap
movb $T_PRIVINFLT,TF_TRAPNO(%esp)
sti
pushl CPL # if_ppl in intrframe
pushl %esp # push address of intrframe
- incl _C_LABEL(uvmexp)+V_TRAP
- call _C_LABEL(npxintr)
+ incl uvmexp+V_TRAP
+ call npxintr
addl $8,%esp # pop address and if_ppl
#ifdef DIAGNOSTIC
movl $0xfc,%esi
pushl %esp
subl $4, %esp
pushl %eax
- leal _C_LABEL(dt_prov_kprobe), %eax
+ leal dt_prov_kprobe, %eax
movl %eax, 4(%esp)
popl %eax
- call _C_LABEL(dt_prov_kprobe_hook)
+ call dt_prov_kprobe_hook
addl $8, %esp
cmpl $0, %eax
je .Lreal_trap
.Lreal_trap:
#endif /* !defined(GPROF) && defined(DDBPROF) */
pushl %esp
- call _C_LABEL(trap)
+ call trap
addl $4,%esp
.Lalltraps_check_asts:
5: CLEAR_ASTPENDING(%ecx)
sti
pushl %esp
- call _C_LABEL(ast)
+ call ast
addl $4,%esp
jmp .Lalltraps_check_asts
1:
INTRFASTEXIT
3: sti
pushl $spl_lowered
- call _C_LABEL(printf)
+ call printf
addl $4,%esp
#if defined(DDB) && 0
int $3
pushl %esi /* marker indicating where we came from */
pushl %edx /* EFLAGS are in %edx */
pushl $.Lnot_blocked
- call _C_LABEL(printf)
+ call printf
addl $12,%esp
#ifdef DDB
int $3
INTRENTRY(syscall)
sti
pushl %esp
- call _C_LABEL(syscall)
+ call syscall
addl $4,%esp
.Lsyscall_check_asts:
CLEAR_ASTPENDING(%ecx)
sti
pushl %esp
- call _C_LABEL(ast)
+ call ast
addl $4,%esp
jmp .Lsyscall_check_asts
1:
popl %fs
popl %eax
popl %ebp
- .globl _C_LABEL(doreti_iret)
-_C_LABEL(doreti_iret):
+ .globl doreti_iret
+doreti_iret:
/* we have an iretframe */
addl $IRF_EIP,%esp
iret
*/
ENTRY(cpu_paenable)
movl $-1, %eax
- testl $CPUID_PAE, _C_LABEL(cpu_feature)
+ testl $CPUID_PAE, cpu_feature
jz 1f
pushl %esi
subl $KERNBASE, %eax
movl %eax, %cr3 /* reload real PDPT */
movl $4*NBPG, %eax
- movl %eax, _C_LABEL(PTDsize)
+ movl %eax, PTDsize
xorl %eax, %eax
popl %edi
#endif
.section .rodata
- .globl _C_LABEL(_stac)
-_C_LABEL(_stac):
+ .globl _stac
+_stac:
stac
- .globl _C_LABEL(_clac)
-_C_LABEL(_clac):
+ .globl _clac
+_clac:
clac
-/* $OpenBSD: locore0.S,v 1.9 2022/08/22 08:53:55 jsg Exp $ */
+/* $OpenBSD: locore0.S,v 1.10 2022/12/08 01:25:44 guenther Exp $ */
/* $NetBSD: locore.s,v 1.145 1996/05/03 19:41:19 christos Exp $ */
/*-
.text
.globl start
- .globl _C_LABEL(kernel_text)
- _C_LABEL(kernel_text) = KERNTEXTOFF
+ .globl kernel_text
+ kernel_text = KERNTEXTOFF
start: movw $0x1234,0x472 # warm boot
/*
* (If we want to hold onto /boot, it's physical %esp up to _end.)
*/
movl 4(%esp),%eax
- movl %eax,RELOC(_C_LABEL(boothowto))
+ movl %eax,RELOC(boothowto)
movl 8(%esp),%eax
- movl %eax,RELOC(_C_LABEL(bootdev))
+ movl %eax,RELOC(bootdev)
movl 16(%esp),%eax
testl %eax,%eax
jz 1f
addl $KERNBASE,%eax
-1: movl %eax,RELOC(_C_LABEL(esym))
- movl $__kernel_bss_end, RELOC(_C_LABEL(ssym))
+1: movl %eax,RELOC(esym)
+ movl $__kernel_bss_end, RELOC(ssym)
movl 12(%esp),%eax
- movl %eax,RELOC(_C_LABEL(bootapiver))
+ movl %eax,RELOC(bootapiver)
movl 28(%esp), %eax
- movl %eax, RELOC(_C_LABEL(bootargc))
+ movl %eax, RELOC(bootargc)
movl 32(%esp), %eax
- movl %eax, RELOC(_C_LABEL(bootargv))
+ movl %eax, RELOC(bootargv)
/* First, reset the PSL. */
pushl $PSL_MBO
.Ltry586: /* Use the `cpuid' instruction. */
xorl %eax,%eax
cpuid
- movl %eax,RELOC(_C_LABEL(cpuid_level))
- movl %ebx,RELOC(_C_LABEL(cpu_vendor)) # store vendor string
- movl %edx,RELOC(_C_LABEL(cpu_vendor))+4
- movl %ecx,RELOC(_C_LABEL(cpu_vendor))+8
- movl $0, RELOC(_C_LABEL(cpu_vendor))+12
+ movl %eax,RELOC(cpuid_level)
+ movl %ebx,RELOC(cpu_vendor) # store vendor string
+ movl %edx,RELOC(cpu_vendor)+4
+ movl %ecx,RELOC(cpu_vendor)+8
+ movl $0, RELOC(cpu_vendor)+12
/*
 * Determine if CPU has meltdown. Certain Intel CPUs do not properly
 * respect page permissions on speculative loads ("Meltdown"); such CPUs
 * must use a sanitized page table lacking kernel mappings when executing
 * user processes, and may not use PG_G global PTEs for kernel VAs.
*/
- movl $0x1, RELOC(_C_LABEL(cpu_meltdown))
- movl $0x0, RELOC(_C_LABEL(pg_g_kern))
+ movl $0x1, RELOC(cpu_meltdown)
+ movl $0x0, RELOC(pg_g_kern)
cmpl $0x756e6547,%ebx # "Genu"
jne .Lcpu_secure
jz .Lcpu_check_finished
.Lcpu_secure:
- movl $0x0, RELOC(_C_LABEL(cpu_meltdown))
- movl $PG_G, RELOC(_C_LABEL(pg_g_kern))
+ movl $0x0, RELOC(cpu_meltdown)
+ movl $PG_G, RELOC(pg_g_kern)
.Lcpu_check_finished:
movl $1,%eax
xorl %ecx,%ecx
cpuid
- movl %eax,RELOC(_C_LABEL(cpu_id)) # store cpu_id and features
- movl %ebx,RELOC(_C_LABEL(cpu_miscinfo))
- movl %edx,RELOC(_C_LABEL(cpu_feature))
- movl %ecx,RELOC(_C_LABEL(cpu_ecxfeature))
+ movl %eax,RELOC(cpu_id) # store cpu_id and features
+ movl %ebx,RELOC(cpu_miscinfo)
+ movl %edx,RELOC(cpu_feature)
+ movl %ecx,RELOC(cpu_ecxfeature)
- movl RELOC(_C_LABEL(cpuid_level)),%eax
+ movl RELOC(cpuid_level),%eax
cmp $2,%eax
jl 1f
movl $2,%eax
cpuid
- movl %eax,RELOC(_C_LABEL(cpu_cache_eax))
- movl %ebx,RELOC(_C_LABEL(cpu_cache_ebx))
- movl %ecx,RELOC(_C_LABEL(cpu_cache_ecx))
- movl %edx,RELOC(_C_LABEL(cpu_cache_edx))
+ movl %eax,RELOC(cpu_cache_eax)
+ movl %ebx,RELOC(cpu_cache_ebx)
+ movl %ecx,RELOC(cpu_cache_ecx)
+ movl %edx,RELOC(cpu_cache_edx)
movl $0x0a,%eax
cpuid
- movl %eax,RELOC(_C_LABEL(cpu_perf_eax))
- movl %ebx,RELOC(_C_LABEL(cpu_perf_ebx))
- movl %edx,RELOC(_C_LABEL(cpu_perf_edx))
+ movl %eax,RELOC(cpu_perf_eax)
+ movl %ebx,RELOC(cpu_perf_ebx)
+ movl %edx,RELOC(cpu_perf_edx)
1:
/* Check if brand identification string is supported */
jbe 2f
movl $0x80000001,%eax
cpuid
- movl %eax,RELOC(_C_LABEL(ecpu_eaxfeature))
- movl %edx,RELOC(_C_LABEL(ecpu_feature))
- movl %ecx,RELOC(_C_LABEL(ecpu_ecxfeature))
+ movl %eax,RELOC(ecpu_eaxfeature)
+ movl %edx,RELOC(ecpu_feature)
+ movl %ecx,RELOC(ecpu_ecxfeature)
movl $0x80000002,%eax
cpuid
- movl %eax,RELOC(_C_LABEL(cpu_brandstr))
- movl %ebx,RELOC(_C_LABEL(cpu_brandstr))+4
- movl %ecx,RELOC(_C_LABEL(cpu_brandstr))+8
- movl %edx,RELOC(_C_LABEL(cpu_brandstr))+12
+ movl %eax,RELOC(cpu_brandstr)
+ movl %ebx,RELOC(cpu_brandstr)+4
+ movl %ecx,RELOC(cpu_brandstr)+8
+ movl %edx,RELOC(cpu_brandstr)+12
movl $0x80000003,%eax
cpuid
- movl %eax,RELOC(_C_LABEL(cpu_brandstr))+16
- movl %ebx,RELOC(_C_LABEL(cpu_brandstr))+20
- movl %ecx,RELOC(_C_LABEL(cpu_brandstr))+24
- movl %edx,RELOC(_C_LABEL(cpu_brandstr))+28
+ movl %eax,RELOC(cpu_brandstr)+16
+ movl %ebx,RELOC(cpu_brandstr)+20
+ movl %ecx,RELOC(cpu_brandstr)+24
+ movl %edx,RELOC(cpu_brandstr)+28
movl $0x80000004,%eax
cpuid
- movl %eax,RELOC(_C_LABEL(cpu_brandstr))+32
- movl %ebx,RELOC(_C_LABEL(cpu_brandstr))+36
- movl %ecx,RELOC(_C_LABEL(cpu_brandstr))+40
+ movl %eax,RELOC(cpu_brandstr)+32
+ movl %ebx,RELOC(cpu_brandstr)+36
+ movl %ecx,RELOC(cpu_brandstr)+40
andl $0x00ffffff,%edx /* Shouldn't be necessary */
- movl %edx,RELOC(_C_LABEL(cpu_brandstr))+44
+ movl %edx,RELOC(cpu_brandstr)+44
movl $0x80000007,%eax
cpuid
- movl %edx,RELOC(_C_LABEL(cpu_apmi_edx))
+ movl %edx,RELOC(cpu_apmi_edx)
2:
/*
#define PROC0STACK ((0) * NBPG)
#define PROC0PDIR (( UPAGES) * NBPG)
#define SYSMAP ((4+UPAGES) * NBPG)
-#define TABLESIZE ((4+UPAGES) * NBPG) /* + _C_LABEL(nkpde) * NBPG */
+#define TABLESIZE ((4+UPAGES) * NBPG) /* + nkpde * NBPG */
/* Find end of kernel image. */
- movl $RELOC(_C_LABEL(end)),%edi
+ movl $RELOC(end),%edi
#if (NKSYMS || defined(DDB))
/* Save the symbols (if loaded). */
- movl RELOC(_C_LABEL(esym)),%eax
+ movl RELOC(esym),%eax
testl %eax,%eax
jz 1f
subl $KERNBASE,%eax
* Calculate the size of the kernel page table directory, and
* how many entries it will have.
*/
- movl RELOC(_C_LABEL(nkpde)),%ecx # get nkpde
+ movl RELOC(nkpde),%ecx # get nkpde
cmpl $NKPTP_MIN,%ecx # larger than min?
jge 1f
movl $NKPTP_MIN,%ecx # set at min
jmp 2f
-1: cmpl RELOC(_C_LABEL(nkptp_max)),%ecx # larger than max?
+1: cmpl RELOC(nkptp_max),%ecx # larger than max?
jle 2f
- movl RELOC(_C_LABEL(nkptp_max)),%ecx
-2: movl %ecx,RELOC(_C_LABEL(nkpde)) # and store it back
+ movl RELOC(nkptp_max),%ecx
+2: movl %ecx,RELOC(nkpde) # and store it back
/* Clear memory for bootstrap tables. */
shll $PGSHIFT,%ecx
* Build initial page tables.
*/
/* Calculate end of text segment, rounded to a page. */
- leal (RELOC(_C_LABEL(etext))+PGOFSET),%edx
+ leal (RELOC(etext)+PGOFSET),%edx
andl $~PGOFSET,%edx
/* Skip over the first 2MB. */
/* Map the data, BSS, and bootstrap tables read-write. */
leal (PG_V|PG_KW)(%edx),%eax
- movl RELOC(_C_LABEL(nkpde)),%ecx
+ movl RELOC(nkpde),%ecx
shll $PGSHIFT,%ecx
addl $TABLESIZE,%ecx
addl %esi,%ecx # end of tables
/*
* Construct a page table directory.
*/
- movl RELOC(_C_LABEL(nkpde)),%ecx # count of pdes,
+ movl RELOC(nkpde),%ecx # count of pdes,
leal (PROC0PDIR+0*4)(%esi),%ebx # where temp maps!
leal (SYSMAP+PG_V|PG_KW|PG_U|PG_M)(%esi),%eax # pte for KPT in proc 0
fillkpt
* Map kernel PDEs: this is the real mapping used
* after the temp mapping outlives its usefulness.
*/
- movl RELOC(_C_LABEL(nkpde)),%ecx # count of pde s,
+ movl RELOC(nkpde),%ecx # count of pde s,
leal (PROC0PDIR+PDSLOT_KERN*4)(%esi),%ebx # map them high
leal (SYSMAP+PG_V|PG_KW|PG_U|PG_M)(%esi),%eax # pte for KPT in proc 0
fillkpt
/* Save phys. addr of PTD, for libkvm. */
leal (PROC0PDIR)(%esi),%eax # phys address of ptd in proc 0
- movl %eax,RELOC(_C_LABEL(PTDpaddr))
+ movl %eax,RELOC(PTDpaddr)
/* Load base of page directory and enable mapping. */
movl %eax,%cr3 # load ptd addr into mmu
begin:
/* Now running relocated at KERNBASE. Remove double mapping. */
- movl _C_LABEL(nkpde),%ecx # for this many pde s,
+ movl nkpde,%ecx # for this many pde s,
leal (PROC0PDIR+0*4)(%esi),%ebx # which is where temp maps!
addl $(KERNBASE), %ebx # now use relocated address
1: movl $0,(%ebx)
loop 1b
/* Relocate atdevbase. */
- movl _C_LABEL(nkpde),%edx
+ movl nkpde,%edx
shll $PGSHIFT,%edx
addl $(TABLESIZE+KERNBASE),%edx
addl %esi,%edx
- movl %edx,_C_LABEL(atdevbase)
+ movl %edx,atdevbase
/* Set up bootstrap stack. */
leal (PROC0STACK+KERNBASE)(%esi),%eax
- movl %eax,_C_LABEL(proc0paddr)
+ movl %eax,proc0paddr
leal (USPACE-FRAMESIZE)(%eax),%esp
leal (PROC0PDIR)(%esi),%ebx # phys address of ptd in proc 0
movl %ebx,PCB_CR3(%eax) # pcb->pcb_cr3
xorl %ebp,%ebp # mark end of frames
- movl _C_LABEL(nkpde),%eax
+ movl nkpde,%eax
shll $PGSHIFT,%eax
addl $TABLESIZE,%eax
addl %esi,%eax # skip past stack and page tables
pushl %eax
- call _C_LABEL(init386) # wire 386 chip for unix operation
+ call init386 # wire 386 chip for unix operation
addl $4,%esp
- call _C_LABEL(main)
+ call main
/* NOTREACHED */
-/* $OpenBSD: mptramp.s,v 1.26 2022/08/22 08:53:55 jsg Exp $ */
+/* $OpenBSD: mptramp.s,v 1.27 2022/12/08 01:25:44 guenther Exp $ */
/*-
* Copyright (c) 2000 The NetBSD Foundation, Inc.
#define GDTE(a,b) .byte 0xff,0xff,0x0,0x0,0x0,a,b,0x0
#define _RELOC(x) ((x) - KERNBASE)
-#define RELOC(x) _RELOC(_C_LABEL(x))
+#define RELOC(x) _RELOC(x)
-#define _TRMP_LABEL(a) a = . - _C_LABEL(cpu_spinup_trampoline) + MP_TRAMPOLINE
-#define _TRMP_OFFSET(a) a = . - _C_LABEL(cpu_spinup_trampoline)
-#define _TRMP_DATA_LABEL(a) a = . - _C_LABEL(mp_tramp_data_start) + \
- MP_TRAMP_DATA
-#define _TRMP_DATA_OFFSET(a) a = . - _C_LABEL(mp_tramp_data_start)
+#define _TRMP_LABEL(a) a = . - cpu_spinup_trampoline + MP_TRAMPOLINE
+#define _TRMP_OFFSET(a) a = . - cpu_spinup_trampoline
+#define _TRMP_DATA_LABEL(a) a = . - mp_tramp_data_start + MP_TRAMP_DATA
+#define _TRMP_DATA_OFFSET(a) a = . - mp_tramp_data_start
- .globl _C_LABEL(cpu_id),_C_LABEL(cpu_vendor)
- .globl _C_LABEL(cpuid_level),_C_LABEL(cpu_feature)
+ .globl cpu_id,cpu_vendor
+ .globl cpuid_level,cpu_feature
- .global _C_LABEL(cpu_spinup_trampoline)
- .global _C_LABEL(cpu_spinup_trampoline_end)
- .global _C_LABEL(cpu_hatch)
- .global _C_LABEL(mp_pdirpa)
- .global _C_LABEL(mp_tramp_data_start)
- .global _C_LABEL(mp_tramp_data_end)
- .global _C_LABEL(gdt), _C_LABEL(local_apic)
+ .global cpu_spinup_trampoline
+ .global cpu_spinup_trampoline_end
+ .global cpu_hatch
+ .global mp_pdirpa
+ .global mp_tramp_data_start
+ .global mp_tramp_data_end
+ .global gdt, local_apic
.text
.align 4, 0xcc
.code16
-_C_LABEL(cpu_spinup_trampoline):
+cpu_spinup_trampoline:
cli
movw $(MP_TRAMP_DATA >> 4), %ax
movw %ax, %ds
/* Load base of page directory and enable mapping. */
movl %ecx,%cr3 # load ptd addr into mmu
#ifndef SMALL_KERNEL
- testl $0x1, RELOC(_C_LABEL(cpu_pae))
+ testl $0x1, RELOC(cpu_pae)
jz nopae
movl %cr4,%eax
# ok, we're now running with paging enabled and sharing page tables with cpu0.
# figure out which processor we really are, what stack we should be on, etc.
- movl _C_LABEL(local_apic)+LAPIC_ID,%eax
+ movl local_apic+LAPIC_ID,%eax
shrl $LAPIC_ID_SHIFT,%eax
xorl %ebx,%ebx
1:
leal 0(,%ebx,4),%ecx
incl %ebx
- movl _C_LABEL(cpu_info)(%ecx),%ecx
+ movl cpu_info(%ecx),%ecx
movl CPU_INFO_APICID(%ecx),%edx
cmpl %eax,%edx
jne 1b
pushl $mp_cont
lret
-_C_LABEL(cpu_spinup_trampoline_end): #end of code copied to MP_TRAMPOLINE
+cpu_spinup_trampoline_end: #end of code copied to MP_TRAMPOLINE
mp_cont:
movl CPU_INFO_IDLE_PCB(%ecx),%esi
movl PCB_CR0(%esi),%eax
movl %eax,%cr0
pushl %ecx
- call _C_LABEL(cpu_hatch)
+ call cpu_hatch
/* NOTREACHED */
.section .rodata
-_C_LABEL(mp_tramp_data_start):
+mp_tramp_data_start:
_TRMP_DATA_LABEL(.Lgdt_table)
.word 0x0,0x0,0x0,0x0 # null GDTE
GDTE(0x9f,0xcf) # Kernel text
_TRMP_DATA_OFFSET(.Lgdt_desc)
.word 0x17 # limit 3 entries
.long .Lgdt_table # where is gdt
-_C_LABEL(mp_tramp_data_end):
+mp_tramp_data_end:
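
A note on the RELOC() macro simplified at the top of the mptramp.s hunk above: because _C_LABEL(x) is just x on ELF, the macro now reduces directly to the link-address adjustment. An illustrative expansion (not part of the diff), using the cpu_pae reference from the same hunk:

	/*
	 * RELOC(cpu_pae)  ->  _RELOC(cpu_pae)  ->  (cpu_pae - KERNBASE)
	 *
	 * i.e. the symbol's physical (pre-paging) address, which is what the
	 * trampoline needs while it still runs with the MMU off.
	 */
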
-/* $OpenBSD: vector.s,v 1.23 2018/06/18 23:15:05 bluhm Exp $ */
+/* $OpenBSD: vector.s,v 1.24 2022/12/08 01:25:44 guenther Exp $ */
/* $NetBSD: vector.s,v 1.32 1996/01/07 21:29:47 mycroft Exp $ */
/*
* segment registers.
*/
- .globl _C_LABEL(isa_strayintr)
+ .globl isa_strayintr
#define voidop(num)
INTRENTRY(intr_##name##num) ;\
mask(num) /* mask it in hardware */ ;\
early_ack(num) /* and allow other intrs */ ;\
- incl _C_LABEL(uvmexp)+V_INTR /* statistical info */ ;\
- movl _C_LABEL(iminlevel) + (num) * 4, %eax ;\
+ incl uvmexp+V_INTR /* statistical info */ ;\
+ movl iminlevel + (num) * 4, %eax ;\
movl CPL,%ebx ;\
cmpl %eax,%ebx ;\
- jae _C_LABEL(Xhold_##name##num)/* currently masked; hold it */;\
+ jae Xhold_##name##num/* currently masked; hold it */;\
pushl %ebx /* cpl to restore on exit */ ;\
1: ;\
- movl _C_LABEL(imaxlevel) + (num) * 4,%eax ;\
+ movl imaxlevel + (num) * 4,%eax ;\
movl %eax,CPL /* block enough for this irq */ ;\
sti /* safe to take intrs now */ ;\
- movl _C_LABEL(intrhand) + (num) * 4,%ebx /* head of chain */ ;\
+ movl intrhand + (num) * 4,%ebx /* head of chain */ ;\
testl %ebx,%ebx ;\
- jz _C_LABEL(Xstray_##name##num) /* no handlers; we're stray */ ;\
+ jz Xstray_##name##num /* no handlers; we're stray */ ;\
STRAY_INITIALIZE /* nobody claimed it yet */ ;\
incl CPUVAR(IDEPTH) ;\
7: movl %esp, %eax /* save frame pointer in eax */ ;\
pushl %ebx /* arg 2: ih structure */ ;\
pushl %eax /* arg 1: frame pointer */ ;\
- call _C_LABEL(intr_handler) /* call it */ ;\
+ call intr_handler /* call it */ ;\
addl $8, %esp /* toss args */ ;\
STRAY_INTEGRATE /* maybe he claimed it */ ;\
orl %eax,%eax /* should it be counted? */ ;\
jz 5f /* no, skip it */ ;\
addl $1,IH_COUNT(%ebx) /* count the intrs */ ;\
adcl $0,IH_COUNT+4(%ebx) ;\
- cmpl $0,_C_LABEL(intr_shared_edge) ;\
+ cmpl $0,intr_shared_edge ;\
jne 5f /* if no shared edges ... */ ;\
orl %eax,%eax /* ... 1 means stop trying */ ;\
jns 8f ;\
STRAY_TEST(name,num) /* see if it's a stray */ ;\
6: unmask(num) /* unmask it in hardware */ ;\
late_ack(num) ;\
- jmp _C_LABEL(Xdoreti) /* lower spl and do ASTs */ ;\
+ jmp Xdoreti /* lower spl and do ASTs */ ;\
KIDTVEC(stray_##name##num) ;\
pushl $num ;\
- call _C_LABEL(isa_strayintr) ;\
+ call isa_strayintr ;\
addl $4,%esp ;\
jmp 6b ;\
KIDTVEC(hold_##name##num) ;\
orl %eax,%esi
#define STRAY_TEST(name,num) \
testl %esi,%esi ;\
- jz _C_LABEL(Xstray_##name##num)
+ jz Xstray_##name##num
#else /* !DEBUG */
#define STRAY_INITIALIZE
#define STRAY_INTEGRATE
*/
/* interrupt service routine entry points */
IDTVEC(intr)
- .long _C_LABEL(Xintr_legacy0), _C_LABEL(Xintr_legacy1)
- .long _C_LABEL(Xintr_legacy2), _C_LABEL(Xintr_legacy3)
- .long _C_LABEL(Xintr_legacy4), _C_LABEL(Xintr_legacy5)
- .long _C_LABEL(Xintr_legacy6), _C_LABEL(Xintr_legacy7)
- .long _C_LABEL(Xintr_legacy8), _C_LABEL(Xintr_legacy9)
- .long _C_LABEL(Xintr_legacy10), _C_LABEL(Xintr_legacy11)
- .long _C_LABEL(Xintr_legacy12), _C_LABEL(Xintr_legacy13)
- .long _C_LABEL(Xintr_legacy14), _C_LABEL(Xintr_legacy15)
+ .long Xintr_legacy0, Xintr_legacy1
+ .long Xintr_legacy2, Xintr_legacy3
+ .long Xintr_legacy4, Xintr_legacy5
+ .long Xintr_legacy6, Xintr_legacy7
+ .long Xintr_legacy8, Xintr_legacy9
+ .long Xintr_legacy10, Xintr_legacy11
+ .long Xintr_legacy12, Xintr_legacy13
+ .long Xintr_legacy14, Xintr_legacy15
/*
* These tables are used by Xdoreti() and Xspllower().
*/
/* resume points for suspended interrupts */
IDTVEC(resume)
- .long _C_LABEL(Xresume_legacy0), _C_LABEL(Xresume_legacy1)
- .long _C_LABEL(Xresume_legacy2), _C_LABEL(Xresume_legacy3)
- .long _C_LABEL(Xresume_legacy4), _C_LABEL(Xresume_legacy5)
- .long _C_LABEL(Xresume_legacy6), _C_LABEL(Xresume_legacy7)
- .long _C_LABEL(Xresume_legacy8), _C_LABEL(Xresume_legacy9)
- .long _C_LABEL(Xresume_legacy10), _C_LABEL(Xresume_legacy11)
- .long _C_LABEL(Xresume_legacy12), _C_LABEL(Xresume_legacy13)
- .long _C_LABEL(Xresume_legacy14), _C_LABEL(Xresume_legacy15)
+ .long Xresume_legacy0, Xresume_legacy1
+ .long Xresume_legacy2, Xresume_legacy3
+ .long Xresume_legacy4, Xresume_legacy5
+ .long Xresume_legacy6, Xresume_legacy7
+ .long Xresume_legacy8, Xresume_legacy9
+ .long Xresume_legacy10, Xresume_legacy11
+ .long Xresume_legacy12, Xresume_legacy13
+ .long Xresume_legacy14, Xresume_legacy15
/* for soft interrupts */
.long 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- .long _C_LABEL(Xsofttty), _C_LABEL(Xsoftnet), _C_LABEL(Xsoftclock)
+ .long Xsofttty, Xsoftnet, Xsoftclock
.long 0, 0
/* fake interrupts to resume from splx() */
IDTVEC(recurse)
- .long _C_LABEL(Xrecurse_legacy0), _C_LABEL(Xrecurse_legacy1)
- .long _C_LABEL(Xrecurse_legacy2), _C_LABEL(Xrecurse_legacy3)
- .long _C_LABEL(Xrecurse_legacy4), _C_LABEL(Xrecurse_legacy5)
- .long _C_LABEL(Xrecurse_legacy6), _C_LABEL(Xrecurse_legacy7)
- .long _C_LABEL(Xrecurse_legacy8), _C_LABEL(Xrecurse_legacy9)
- .long _C_LABEL(Xrecurse_legacy10), _C_LABEL(Xrecurse_legacy11)
- .long _C_LABEL(Xrecurse_legacy12), _C_LABEL(Xrecurse_legacy13)
- .long _C_LABEL(Xrecurse_legacy14), _C_LABEL(Xrecurse_legacy15)
+ .long Xrecurse_legacy0, Xrecurse_legacy1
+ .long Xrecurse_legacy2, Xrecurse_legacy3
+ .long Xrecurse_legacy4, Xrecurse_legacy5
+ .long Xrecurse_legacy6, Xrecurse_legacy7
+ .long Xrecurse_legacy8, Xrecurse_legacy9
+ .long Xrecurse_legacy10, Xrecurse_legacy11
+ .long Xrecurse_legacy12, Xrecurse_legacy13
+ .long Xrecurse_legacy14, Xrecurse_legacy15
/* for soft interrupts */
.long 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- .long _C_LABEL(Xsofttty), _C_LABEL(Xsoftnet), _C_LABEL(Xsoftclock)
+ .long Xsofttty, Xsoftnet, Xsoftclock
.long 0, 0
-/* $OpenBSD: asm.h,v 1.18 2022/08/30 16:26:29 miod Exp $ */
+/* $OpenBSD: asm.h,v 1.19 2022/12/08 01:25:45 guenther Exp $ */
/* $NetBSD: asm.h,v 1.7 1994/10/27 04:15:56 cgd Exp $ */
/*-
call 666f; \
666: \
popl %ebx; \
- addl $_C_LABEL(_GLOBAL_OFFSET_TABLE_)+[.-666b], %ebx
+ addl $_GLOBAL_OFFSET_TABLE_+[.-666b], %ebx
#define PIC_EPILOGUE \
popl %ebx
#define PIC_PLT(x) x@PLT
#define _C_LABEL(name) name
#define _ASM_LABEL(x) x
-#define CVAROFF(x, y) _C_LABEL(x) + y
+#define CVAROFF(x, y) x + y
#ifdef __STDC__
# define __CONCAT(x,y) x ## y
# define _PROF_PROLOGUE
#endif
-#define ENTRY(y) _ENTRY(_C_LABEL(y)); _PROF_PROLOGUE
+#define ENTRY(y) _ENTRY(y); _PROF_PROLOGUE
#define ENTRY_NB(y) _ENTRY_NB(y); _PROF_PROLOGUE
-#define NENTRY(y) _ENTRY(_C_LABEL(y))
-#define ASENTRY(y) _ENTRY(_ASM_LABEL(y)); _PROF_PROLOGUE
-#define NASENTRY(y) _ENTRY(_ASM_LABEL(y))
+#define NENTRY(y) _ENTRY(y)
+#define ASENTRY(y) _ENTRY(y); _PROF_PROLOGUE
+#define NASENTRY(y) _ENTRY(y)
#define END(y) .size y, . - y
-#define ALTENTRY(name) .globl _C_LABEL(name); _C_LABEL(name):
+#define ALTENTRY(name) .globl name; name:
#ifdef _KERNEL
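
Since this i386 <machine/asm.h> defines _C_LABEL(name) as plain name, dropping the wrapper changes nothing in the emitted symbols; ENTRY()/END() still produce an ordinary global ELF function. A minimal usage sketch (illustrative only; the function name is hypothetical and not part of the diff):

#include <machine/asm.h>

	/* hypothetical leaf function: returns 0 */
ENTRY(sample_return_zero)
	xorl	%eax,%eax
	ret
END(sample_return_zero)
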
-/* $OpenBSD: i82093reg.h,v 1.5 2011/06/05 19:36:25 deraadt Exp $ */
+/* $OpenBSD: i82093reg.h,v 1.6 2022/12/08 01:25:45 guenther Exp $ */
/* $NetBSD: i82093reg.h,v 1.1.2.2 2000/02/21 18:54:07 sommerfeld Exp $ */
/*-
#ifdef _KERNEL
#define ioapic_asm_ack(num) \
- movl $0,_C_LABEL(local_apic) + LAPIC_EOI
+ movl $0,local_apic + LAPIC_EOI
#endif
-/* $OpenBSD: icu.s,v 1.35 2018/07/09 19:20:30 guenther Exp $ */
+/* $OpenBSD: icu.s,v 1.36 2022/12/08 01:25:45 guenther Exp $ */
/* $NetBSD: icu.s,v 1.45 1996/01/07 03:59:34 mycroft Exp $ */
/*-
*/
.data
- .globl _C_LABEL(imen)
-_C_LABEL(imen):
+ .globl imen
+imen:
.long 0xffff # interrupt mask enable (all off)
.text
movl $1f,%esi # address to resume loop at
1: movl %ebx,%eax # get cpl
shrl $4,%eax # find its mask.
- movl _C_LABEL(iunmask)(,%eax,4),%eax
+ movl iunmask(,%eax,4),%eax
cli
andl CPUVAR(IPENDING),%eax # any non-masked bits left?
jz 2f
bsfl %eax,%eax
btrl %eax,CPUVAR(IPENDING)
jnc 1b
- jmp *_C_LABEL(Xrecurse)(,%eax,4)
+ jmp *Xrecurse(,%eax,4)
2: movl %ebx,CPL
sti
popl %edi
movl $1f,%esi # address to resume loop at
1: movl %ebx,%eax
shrl $4,%eax
- movl _C_LABEL(iunmask)(,%eax,4),%eax
+ movl iunmask(,%eax,4),%eax
cli
andl CPUVAR(IPENDING),%eax
jz 2f
btrl %eax,CPUVAR(IPENDING)
jnc 1b # some intr cleared the in-memory bit
cli
- jmp *_C_LABEL(Xresume)(,%eax,4)
+ jmp *Xresume(,%eax,4)
2: /* Check for ASTs on exit to user mode. */
CHECK_ASTPENDING(%ecx)
movl %ebx,CPL
4: CLEAR_ASTPENDING(%ecx)
sti
pushl %esp
- call _C_LABEL(ast)
+ call ast
addl $4,%esp
cli
jmp 2b
movl %eax,CPL
sti
pushl $I386_SOFTINTR_SOFTTTY
- call _C_LABEL(softintr_dispatch)
+ call softintr_dispatch
addl $4,%esp
jmp *%esi
movl %eax,CPL
sti
pushl $I386_SOFTINTR_SOFTNET
- call _C_LABEL(softintr_dispatch)
+ call softintr_dispatch
addl $4,%esp
jmp *%esi
#undef DONETISR
movl %eax,CPL
sti
pushl $I386_SOFTINTR_SOFTCLOCK
- call _C_LABEL(softintr_dispatch)
+ call softintr_dispatch
addl $4,%esp
jmp *%esi
-/* $OpenBSD: srt0.S,v 1.16 2012/10/12 15:00:33 jsing Exp $ */
+/* $OpenBSD: srt0.S,v 1.17 2022/12/08 01:25:45 guenther Exp $ */
/*
* Copyright (c) 1997 Michael Shalayeff
#define BOOTSTACK 0xfffc
- .globl _C_LABEL(end)
- .globl _C_LABEL(edata)
- .globl _C_LABEL(boot)
- .globl _C_LABEL(_rtt)
- .globl _C_LABEL(bios_bootdev)
- .globl _ASM_LABEL(pmm_init)
+ .globl end
+ .globl edata
+ .globl boot
+ .globl _rtt
+ .globl bios_bootdev
+ .globl pmm_init
.globl Gdtr
.text
mov %ax,%gs
movl $BOOTSTACK,%esp
pushl %edx
- movl %edx, _C_LABEL(bios_bootdev)
+ movl %edx, bios_bootdev
/* Now do it all */
#ifdef DEBUG
#endif
/* zero .bss */
xorl %eax, %eax
- movl $_C_LABEL(end), %ecx
- subl $_C_LABEL(edata),%ecx
- movl $_C_LABEL(edata), %edi
+ movl $end, %ecx
+ subl $edata,%ecx
+ movl $edata, %edi
cld
rep; stosb
- call _ASM_LABEL(pmm_init)
- call _C_LABEL(boot)
+ call pmm_init
+ call boot
- jmp _C_LABEL(_rtt)
+ jmp _rtt
-/* $OpenBSD: srt0.S,v 1.3 2012/10/31 14:31:30 jsing Exp $ */
+/* $OpenBSD: srt0.S,v 1.4 2022/12/08 01:25:45 guenther Exp $ */
/*
* Copyright (c) 1997 Michael Shalayeff
#define BOOTSTACK 0xfffc
- .globl _C_LABEL(end)
- .globl _C_LABEL(edata)
- .globl _C_LABEL(boot)
- .globl _C_LABEL(_rtt)
- .globl _C_LABEL(bios_bootdev)
- .globl _ASM_LABEL(pmm_init)
+ .globl end
+ .globl edata
+ .globl boot
+ .globl _rtt
+ .globl bios_bootdev
+ .globl pmm_init
.globl Gdtr
.text
movw $(CDBOOTADDR >> 4), %ax /* Reloc from %ds = 0x7c0. */
movw $(LINKADDR >> 4), %bx /* Reloc to %es = 0x4012. */
- movl $_C_LABEL(end), %edx
- subl $_C_LABEL(_start), %edx /* How big are we? */
+ movl $end, %edx
+ subl $_start, %edx /* How big are we? */
/*
* Relocate in blocks that are a maximum of 32KB in size, incrementing
/* Zero .bss */
xorl %eax, %eax
- movl $_C_LABEL(end), %ecx
- subl $_C_LABEL(edata), %ecx
- movl $_C_LABEL(edata), %edi
+ movl $end, %ecx
+ subl $edata, %ecx
+ movl $edata, %edi
cld
rep; stosb
/* Set up an interrupt descriptor table for protected mode. */
- call _ASM_LABEL(pmm_init)
+ call pmm_init
/* Set our program name ("CDBOOT", not "BOOT"). */
movl $cd_progname, %eax
/* Put the boot device number into the globals that need it */
popl %eax /* Get this back from the stack */
pushl %eax /* boot() takes this as a parameter */
- movl %eax, _C_LABEL(bios_bootdev)
- movl %eax, _C_LABEL(bios_cddev)
+ movl %eax, bios_bootdev
+ movl %eax, bios_cddev
/*
* Now call "main()".
movl $0xb8004, %ebx
movl $0x07410741, (%ebx)
#endif
- call _C_LABEL(boot)
+ call boot
/* boot() should not return. If it does, reset computer. */
- jmp _C_LABEL(_rtt)
+ jmp _rtt
ENTRY(debugchar)
pushl %ebx
-/* $OpenBSD: debug_i386.S,v 1.12 2004/03/09 19:12:12 tom Exp $ */
+/* $OpenBSD: debug_i386.S,v 1.13 2022/12/08 01:25:45 guenther Exp $ */
/*
* Copyright (c) 1997 Michael Shalayeff
movb $0x17, %ah
movl %eax, (%edi)
#endif
- call _C_LABEL(check_regs)
+ call check_regs
#ifdef DEBUG_DEBUG
movl $0xb8290, %edi
movl $0x47394738, (%edi)
movl 0x0d*4(%esp), %eax /* trapno */
pushl %ecx
pushl %eax
- call _C_LABEL(dump_regs)
+ call dump_regs
popl %eax
popl %eax
-/* $OpenBSD: debug_md.h,v 1.7 2006/06/06 13:30:42 mickey Exp $ */
+/* $OpenBSD: debug_md.h,v 1.8 2022/12/08 01:25:45 guenther Exp $ */
/*
* Copyright (c) 1997 Michael Shalayeff
"reserved fault base"
#ifdef _LOCORE
- .globl _C_LABEL(reg)
+ .globl reg
#define DUMP_REGS int $2
#else
#define DUMP_REGS __asm("int $2")
-/* $OpenBSD: gidt.S,v 1.37 2019/11/09 17:58:48 deraadt Exp $ */
+/* $OpenBSD: gidt.S,v 1.38 2022/12/08 01:25:45 guenther Exp $ */
/*
* Copyright (c) 1997 Michael Shalayeff
lidt Idtr;
- .globl _C_LABEL(BIOS_regs)
+ .globl BIOS_regs
.text
.code32
- .globl _ASM_LABEL(pmm_init)
- .globl _C_LABEL(_rtt)
+ .globl pmm_init
+ .globl _rtt
ENTRY(_rtt)
#ifdef SOFTRAID
- call _C_LABEL(sr_clear_keys)
+ call sr_clear_keys
#endif
#ifdef GIDT_DEBUG
movl $0xb8000, %ebx
mov %al, intno
/* Load BIOS registers prior to switching to real mode. */
- movl _C_LABEL(BIOS_regs)+BIOSR_ES, %eax
+ movl BIOS_regs+BIOSR_ES, %eax
mov %eax, 7f
- movl _C_LABEL(BIOS_regs)+BIOSR_DS, %eax
+ movl BIOS_regs+BIOSR_DS, %eax
mov %eax, 6f
prot2real
# movl $Leax, %eax
.byte 0xb8
4: .long 0x90909090
- movl %eax, _C_LABEL(BIOS_regs)+BIOSR_BX
+ movl %eax, BIOS_regs+BIOSR_BX
# movl $Leax, %eax
.byte 0xb8
3: .long 0x90909090
- movl %eax, _C_LABEL(BIOS_regs)+BIOSR_ES
+ movl %eax, BIOS_regs+BIOSR_ES
# movl $Leax, %eax
.byte 0xb8
movb %bh , 0xe*4(%esp)
/* save registers into save area */
- movl %eax, _C_LABEL(BIOS_regs)+BIOSR_AX
- movl %ecx, _C_LABEL(BIOS_regs)+BIOSR_CX
- movl %edx, _C_LABEL(BIOS_regs)+BIOSR_DX
- movl %ebp, _C_LABEL(BIOS_regs)+BIOSR_BP
- movl %esi, _C_LABEL(BIOS_regs)+BIOSR_SI
- movl %edi, _C_LABEL(BIOS_regs)+BIOSR_DI
+ movl %eax, BIOS_regs+BIOSR_AX
+ movl %ecx, BIOS_regs+BIOSR_CX
+ movl %edx, BIOS_regs+BIOSR_DX
+ movl %ebp, BIOS_regs+BIOSR_BP
+ movl %esi, BIOS_regs+BIOSR_SI
+ movl %edi, BIOS_regs+BIOSR_DI
/* clear NT flag in eflags */
pushf
-/* $OpenBSD: pslid.S,v 1.2 2020/06/14 17:05:45 deraadt Exp $ */
+/* $OpenBSD: pslid.S,v 1.3 2022/12/08 01:25:45 guenther Exp $ */
/*
* Copyright (c) 1997 Michael Shalayeff
#include <machine/asm.h>
#include <machine/psl.h>
- .globl _C_LABEL(pslid)
+ .globl pslid
ENTRY(pslid)
// See if we have CPU identification.
pushfl
-/* $OpenBSD: pxe_call.S,v 1.4 2006/01/02 00:26:29 tom Exp $ */
+/* $OpenBSD: pxe_call.S,v 1.5 2022/12/08 01:25:45 guenther Exp $ */
/* $NetBSD: pxe_call.S,v 1.2 2002/03/27 17:24:22 kanaoka Exp $ */
/*
/* For simplicity, just move all 32 bits. */
movl 8(%ebp), %ebx
- pushw _C_LABEL(pxe_command_buf_seg)
- pushw _C_LABEL(pxe_command_buf_off)
+ pushw pxe_command_buf_seg
+ pushw pxe_command_buf_off
pushw %bx
call prot_to_real /* Enter real mode */
sti
/* The encoding is: 0x9a offlo offhi seglo seghi */
lcall $0, $0xffff
- .globl _C_LABEL(bangpxe_off)
-_C_LABEL(bangpxe_off) = . - 4
- .globl _C_LABEL(bangpxe_seg)
-_C_LABEL(bangpxe_seg) = . - 2
+ .globl bangpxe_off
+bangpxe_off = . - 4
+ .globl bangpxe_seg
+bangpxe_seg = . - 2
cli
call real_to_prot /* Leave real mode */
* prot_to_real() will set %es to BOOTSEG, so we just need to set
* %(e)di up here. Remember to relocate it!
*/
- movl $_C_LABEL(pxe_command_buf), %edi
+ movl $pxe_command_buf, %edi
subl $LINKADDR, %edi
call prot_to_real /* Enter real mode */
/* The encoding is: 0x9a offlo offhi seglo seghi */
lcall $0, $0xffff
- .globl _C_LABEL(pxenv_off)
-_C_LABEL(pxenv_off) = . - 4
- .globl _C_LABEL(pxenv_seg)
-_C_LABEL(pxenv_seg) = . - 2
+ .globl pxenv_off
+pxenv_off = . - 4
+ .globl pxenv_seg
+pxenv_seg = . - 2
call real_to_prot /* Leave real mode */
.code32
-/* $OpenBSD: srt0.S,v 1.3 2012/10/31 14:31:30 jsing Exp $ */
+/* $OpenBSD: srt0.S,v 1.4 2022/12/08 01:25:45 guenther Exp $ */
/*
* Copyright (c) 1997 Michael Shalayeff
#define BOOTSTACK 0xfffc
- .globl _C_LABEL(end)
- .globl _C_LABEL(edata)
- .globl _C_LABEL(boot)
- .globl _C_LABEL(_rtt)
- .globl _C_LABEL(bios_bootdev)
- .globl _ASM_LABEL(pmm_init)
+ .globl end
+ .globl edata
+ .globl boot
+ .globl _rtt
+ .globl bios_bootdev
+ .globl pmm_init
.globl Gdtr
.text
movw $(PXEBOOTADDR >> 4), %ax /* Reloc from %ds = 0x7c0. */
movw $(LINKADDR >> 4), %bx /* Reloc to %es = 0x4012. */
- movl $_C_LABEL(end), %edx
- subl $_C_LABEL(_start), %edx /* How big are we? */
+ movl $end, %edx
+ subl $_start, %edx /* How big are we? */
/*
* Relocate in blocks that are a maximum of 32KB in size, incrementing
#endif
xorl %edx, %edx
- movl %edx, _C_LABEL(bios_bootdev)
+ movl %edx, bios_bootdev
pushl %edx /* boot() takes this as a parameter */
#ifdef DEBUG
/* Zero .bss */
xorl %eax, %eax
- movl $_C_LABEL(end), %ecx
- subl $_C_LABEL(edata), %ecx
- movl $_C_LABEL(edata), %edi
+ movl $end, %ecx
+ subl $edata, %ecx
+ movl $edata, %edi
cld
rep; stosb
/* Set up an interrupt descriptor table for protected mode. */
- call _ASM_LABEL(pmm_init)
+ call pmm_init
/* Set our program name ("PXEBOOT", not "BOOT"). */
movl $pxe_progname, %eax
movl $0xb8004, %ebx
movl $0x07410741, (%ebx)
#endif
- call _C_LABEL(boot)
+ call boot
/* boot() should not return. If it does, reset computer. */
- jmp _C_LABEL(_rtt)
+ jmp _rtt
ENTRY(debugchar)
pushl %ebx
-/* $OpenBSD: locore.S,v 1.60 2022/10/25 15:15:38 guenther Exp $ */
+/* $OpenBSD: locore.S,v 1.61 2022/12/08 01:25:45 guenther Exp $ */
/* $NetBSD: locore.S,v 1.2 1996/10/16 19:33:09 ws Exp $ */
/*
.text
#ifdef MULTIPROCESSOR
-_ENTRY(_C_LABEL(cpu_spinup_trampoline))
- lis %r3,_C_LABEL(cpu_hatch_stack)@ha
- lwz %r1,_C_LABEL(cpu_hatch_stack)@l(%r3)
+_ENTRY(cpu_spinup_trampoline)
+ lis %r3,cpu_hatch_stack@ha
+ lwz %r1,cpu_hatch_stack@l(%r3)
- b _C_LABEL(cpu_hatch)
+ b cpu_hatch
/* NOTREACHED */
#endif
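
The powerpc hunks rewrite many _C_LABEL(sym)@ha/@l pairs to the bare symbol; the addressing idiom itself is unchanged. A minimal sketch of the pattern (illustrative only; some_global is a hypothetical symbol):

	lis	%r4, some_global@ha	/* upper 16 bits, adjusted for the sign of @l */
	lwz	%r4, some_global@l(%r4)	/* low 16 bits supplied as the load displacement */
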
* void cpu_switchto(struct proc *old, struct proc *new)
* Switch from "old" proc to "new".
*/
-_ENTRY(_C_LABEL(cpu_switchto_asm))
+_ENTRY(cpu_switchto_asm)
mflr %r0 /* save lr */
stw %r0,4(%r1)
stwu %r1,(-SFRAMELEN - 16)(%r1)
RETGUARD_CHECK(cpu_switchto_asm, %r9, %r0)
blr
-_ENTRY(_C_LABEL(cpu_idle_enter))
+_ENTRY(cpu_idle_enter)
RETGUARD_SETUP(cpu_idle_enter, %r11, %r12)
- lis %r4, _C_LABEL(ppc_cpuidle)@ha
- lwz %r4, _C_LABEL(ppc_cpuidle)@l(%r4)
+ lis %r4, ppc_cpuidle@ha
+ lwz %r4, ppc_cpuidle@l(%r4)
cmpwi %r4, 0
beq 1f
/* must disable external interrupts during idle queue checking */
RETGUARD_CHECK(cpu_idle_enter, %r11, %r12)
blr
-_ENTRY(_C_LABEL(cpu_idle_cycle))
+_ENTRY(cpu_idle_cycle)
RETGUARD_SETUP(cpu_idle_cycle, %r11, %r12)
- lis %r4, _C_LABEL(ppc_cpuidle)@ha
- lwz %r4, _C_LABEL(ppc_cpuidle)@l(%r4)
+ lis %r4, ppc_cpuidle@ha
+ lwz %r4, ppc_cpuidle@l(%r4)
cmpwi %r4, 0
beq idledone
RETGUARD_CHECK(cpu_idle_cycle, %r11, %r12)
blr
-_ENTRY(_C_LABEL(cpu_idle_leave))
+_ENTRY(cpu_idle_leave)
RETGUARD_SETUP(cpu_idle_leave, %r11, %r12)
- lis %r4, _C_LABEL(ppc_cpuidle)@ha
- lwz %r4, _C_LABEL(ppc_cpuidle)@l(%r4)
+ lis %r4, ppc_cpuidle@ha
+ lwz %r4, ppc_cpuidle@l(%r4)
cmpwi %r4, 0
beq 1f
/* enable interrupts disabled in cpu_idle_enter. */
* except ISI/DSI, ALI, and the interrupts
*/
.text
- .globl _C_LABEL(trapcode),_C_LABEL(trapsize)
- .type _C_LABEL(trapcode),@function
- .type _C_LABEL(trapsize),@object
-_C_LABEL(trapcode):
+ .globl trapcode,trapsize
+ .type trapcode,@function
+ .type trapsize,@object
+trapcode:
mtsprg 1,%r1 /* save SP */
nop32_1s:
mfmsr %r1
addi %r1,%r1,USPACE /* stack is top of user struct */
1:
bla s_trap
-_C_LABEL(trapsize) = .-_C_LABEL(trapcode)
+trapsize = .-trapcode
/*
* For ALI: has to save DSISR and DAR
*/
- .globl _C_LABEL(alitrap),_C_LABEL(alisize)
-_C_LABEL(alitrap):
+ .globl alitrap,alisize
+alitrap:
mtsprg 1,%r1 /* save SP */
nop32_2s:
mfmsr %r1
addi %r1,%r1,USPACE /* stack is top of user struct */
1:
bla s_trap
-_C_LABEL(alisize) = .-_C_LABEL(alitrap)
+alisize = .-alitrap
/*
* Similar to the above for DSI
* Has to handle BAT spills
* and standard pagetable spills
*/
- .globl _C_LABEL(dsitrap),_C_LABEL(dsisize)
- .type _C_LABEL(dsitrap),@function
- .type _C_LABEL(dsisize),@object
-_C_LABEL(dsitrap):
+ .globl dsitrap,dsisize
+ .type dsitrap,@function
+ .type dsisize,@object
+dsitrap:
mtsprg 1,%r1
GET_CPUINFO(%r1)
stmw %r28,CI_DISISAVE(%r1) /* free r28-r31 */
bc 12,17,1f /* branch if PSL_PR is set */
mfdar %r31 /* get fault address */
rlwinm %r31,%r31,7,25,28 /* get segment * 8 */
- addis %r31,%r31,_C_LABEL(battable)@ha
- lwz %r30,_C_LABEL(battable)@l(%r31) /* get batu */
+ addis %r31,%r31,battable@ha
+ lwz %r30,battable@l(%r31) /* get batu */
mtcr %r30
bc 4,30,1f /* branch if supervisor valid is false */
- lwz %r31,_C_LABEL(battable)+4@l(%r31) /* get batl */
+ lwz %r31,battable+4@l(%r31) /* get batl */
/* We randomly use the highest two bat registers here */
mftb %r28
andi. %r28,%r28,1
nopbat_1e:
mflr %r28 /* save LR */
bla s_dsitrap
-_C_LABEL(dsisize) = .-_C_LABEL(dsitrap)
+dsisize = .-dsitrap
/*
* Similar to the above for ISI
*/
- .globl _C_LABEL(isitrap),_C_LABEL(isisize)
- .type _C_LABEL(isitrap),@function
- .type _C_LABEL(isisize),@object
-_C_LABEL(isitrap):
+ .globl isitrap,isisize
+ .type isitrap,@function
+ .type isisize,@object
+isitrap:
mtsprg 1,%r1 /* save SP */
nop32_4s:
mfmsr %r1
mfsrr1 %r31 /* test kernel mode */
mfsprg %r1,1 /* restore SP */
bla s_isitrap
-_C_LABEL(isisize) = .-_C_LABEL(isitrap)
+isisize = .-isitrap
/*
* This one for the external interrupt handler.
*/
- .globl _C_LABEL(extint),_C_LABEL(extsize)
- .type _C_LABEL(extint),@function
- .type _C_LABEL(extsize),@object
-_C_LABEL(extint):
+ .globl extint,extsize
+ .type extint,@function
+ .type extsize,@object
+extint:
mtsprg 1,%r1 /* save SP */
nop32_5s:
mfmsr %r1
mfsprg %r1,1 /* yes, get old SP */
1:
ba extintr
-_C_LABEL(extsize) = .-_C_LABEL(extint)
+extsize = .-extint
/*
* And this one for the decrementer interrupt handler.
*/
- .globl _C_LABEL(decrint),_C_LABEL(decrsize)
- .type _C_LABEL(decrint),@function
- .type _C_LABEL(decrsize),@object
-_C_LABEL(decrint):
+ .globl decrint,decrsize
+ .type decrint,@function
+ .type decrsize,@object
+decrint:
mtsprg 1,%r1 /* save SP */
nop32_6s:
mfmsr %r1
mfsprg %r1,1 /* yes, get old SP */
1:
ba decrintr
-_C_LABEL(decrsize) = .-_C_LABEL(decrint)
+decrsize = .-decrint
/*
* Now the tlb software load for 603 processors:
#define tlbli .long 0x7c0007e4+0x800*
#define tlbld .long 0x7c0007a4+0x800*
- .globl _C_LABEL(tlbimiss),_C_LABEL(tlbimsize)
- .type _C_LABEL(tlbimiss),@function
- .type _C_LABEL(tlbimsize),@object
-_C_LABEL(tlbimiss):
+ .globl tlbimiss,tlbimsize
+ .type tlbimiss,@function
+ .type tlbimsize,@object
+tlbimiss:
mfspr %r2,HASH1 /* get first pointer */
li %r1,8
mfctr %r0 /* save counter */
mtmsr %r0 /* now with native gprs */
isync
ba EXC_ISI
-_C_LABEL(tlbimsize) = .-_C_LABEL(tlbimiss)
+tlbimsize = .-tlbimiss
- .globl _C_LABEL(tlbdlmiss),_C_LABEL(tlbdlmsize)
- .type _C_LABEL(tlbdlmiss),@function
- .type _C_LABEL(tlbdlmsize),@object
-_C_LABEL(tlbdlmiss):
+ .globl tlbdlmiss,tlbdlmsize
+ .type tlbdlmiss,@function
+ .type tlbdlmsize,@object
+tlbdlmiss:
mfspr %r2,HASH1 /* get first pointer */
li %r1,8
mfctr %r0 /* save counter */
mtmsr %r0 /* now with native gprs */
isync
ba EXC_DSI
-_C_LABEL(tlbdlmsize) = .-_C_LABEL(tlbdlmiss)
+tlbdlmsize = .-tlbdlmiss
- .globl _C_LABEL(tlbdsmiss),_C_LABEL(tlbdsmsize)
- .type _C_LABEL(tlbdsmiss),@function
- .type _C_LABEL(tlbdsmsize),@object
-_C_LABEL(tlbdsmiss):
+ .globl tlbdsmiss,tlbdsmsize
+ .type tlbdsmiss,@function
+ .type tlbdsmsize,@object
+tlbdsmiss:
mfspr %r2,HASH1 /* get first pointer */
li %r1,8
mfctr %r0 /* save counter */
mtmsr %r0 /* now with native gprs */
isync
ba EXC_DSI
-_C_LABEL(tlbdsmsize) = .-_C_LABEL(tlbdsmiss)
+tlbdsmsize = .-tlbdsmiss
#ifdef DDB
/*
* In case of DDB we want a separate trap catcher for it
*/
- .globl _C_LABEL(ddblow),_C_LABEL(ddbsize)
-_C_LABEL(ddblow):
+ .globl ddblow,ddbsize
+ddblow:
mtsprg 1,%r1 /* save SP */
nop32_7s:
mfmsr %r1
lwz %r30,CI_INTSTK(%r30) /* get interrupt stack */
addi %r1,%r30,(SPILLSTK+DDBSTK)
bla ddbtrap
-_C_LABEL(ddbsize) = .-_C_LABEL(ddblow)
+ddbsize = .-ddblow
#endif /* DDB */
/*
beq 1f; \
andi. sr2,sr2,~flag@l; \
stw sr2,CI_FLAGS(sr1); \
- lis rSRR0,_C_LABEL(idledone)@ha; \
- addi rSRR0,rSRR0,_C_LABEL(idledone)@l; \
+ lis rSRR0,idledone@ha; \
+ addi rSRR0,rSRR0,idledone@l; \
1:
/*
mfsrr1 %r30; \
stw %r30,savearea+28(%r31); \
/* load all kernel segment registers. */ \
- lis %r31,_C_LABEL(kernel_pmap_)@ha; \
- addi %r31,%r31,_C_LABEL(kernel_pmap_)@l; \
+ lis %r31,kernel_pmap_@ha; \
+ addi %r31,%r31,kernel_pmap_@l; \
lwz %r30,0(%r31); mtsr 0,%r30; \
lwz %r30,4(%r31); mtsr 1,%r30; \
lwz %r30,8(%r31); mtsr 2,%r30; \
/* Call C trap code: */
trapagain:
addi %r3,%r1,8
- bl _C_LABEL(trap)
+ bl trap
.globl trapexit
trapexit:
* Child comes here at the end of a fork.
* Mostly similar to the above.
*/
- .globl _C_LABEL(proc_trampoline)
- .type _C_LABEL(proc_trampoline),@function
-_C_LABEL(proc_trampoline):
+ .globl proc_trampoline
+ .type proc_trampoline,@function
+proc_trampoline:
#ifdef MULTIPROCESSOR
- bl _C_LABEL(proc_trampoline_mp)
+ bl proc_trampoline_mp
#endif
li %r3,0
- bl _C_LABEL(lcsplx)
+ bl lcsplx
mtlr %r31
mr %r3,%r30
blrl /* jump indirect to r31 */
li %r3,0
bne 1f
mr %r3,%r7
- bl _C_LABEL(pte_spill_r) /* try a spill */
+ bl pte_spill_r /* try a spill */
1:
cmpwi 0,%r3,0
mtctr %r31 /* restore CTR */
stw %r4,12(%r1); \
stw %r3,8(%r1); \
/* load all kernel segment registers. */ \
- lis 3,_C_LABEL(kernel_pmap_)@ha; \
- addi 3,3,_C_LABEL(kernel_pmap_)@l; \
+ lis 3,kernel_pmap_@ha; \
+ addi 3,3,kernel_pmap_@l; \
lwz %r5,0(%r3); mtsr 0,%r5; \
lwz %r5,4(%r3); mtsr 1,%r5; \
lwz %r5,8(%r3); mtsr 2,%r5; \
mtmsr %r5; \
isync
- .globl _C_LABEL(extint_call)
- .type _C_LABEL(extint_call),@function
+ .globl extint_call
+ .type extint_call,@function
extintr:
INTRENTER
-_C_LABEL(extint_call):
- bl _C_LABEL(extint_call) /* to be filled in later */
+extint_call:
+ bl extint_call /* to be filled in later */
intr_exit:
/* Disable interrupts (should already be disabled) and MMU here: */
mfmsr %r3
decrintr:
INTRENTER
addi %r3,%r1,8 /* intr frame */
- bl _C_LABEL(decr_intr)
+ bl decr_intr
b intr_exit
* or the (currently used) C code optimized, so it doesn't use any non-volatile
* registers.
*/
- .globl _C_LABEL(setfault)
- .type _C_LABEL(setfault),@function
-_C_LABEL(setfault):
+ .globl setfault
+ .type setfault,@function
+setfault:
mflr %r0
RETGUARD_SETUP_LATE(setfault, %r11, %r0)
mfcr %r12
* All other registers are unchanged.
*/
.section .rodata
- .globl _C_LABEL(sigcode),_C_LABEL(esigcode)
- .type _C_LABEL(sigcode),@function
- .type _C_LABEL(esigcode),@function
-_C_LABEL(sigcode):
+ .globl sigcode,esigcode
+ .type sigcode,@function
+ .type esigcode,@function
+sigcode:
addi %r1,%r1,-((16+FPSIG_SIZEOF+15)& ~0xf) /* reserved space for callee */
addi %r6,%r1,8
stfd %f0,0(%r6)
addi %r3,%r1,((16+FPSIG_SIZEOF+15)&~0xf)+SF_SC /* compute &sf_sc */
li %r0,SYS_sigreturn
sc /* sigreturn(scp) */
- .globl _C_LABEL(sigcoderet)
-_C_LABEL(sigcoderet):
+ .globl sigcoderet
+sigcoderet:
li %r0,SYS_exit
sc /* exit(errno) */
-_C_LABEL(esigcode):
+esigcode:
- .globl _C_LABEL(sigfill)
-_C_LABEL(sigfill):
+ .globl sigfill
+sigfill:
.long 0 # illegal
-_C_LABEL(esigfill):
+esigfill:
.align 4
- .globl _C_LABEL(sigfillsiz)
-_C_LABEL(sigfillsiz):
- .long _C_LABEL(esigfill) - _C_LABEL(sigfill)
+ .globl sigfillsiz
+sigfillsiz:
+ .long esigfill - sigfill
.text
/*
* Deliberate entry to ddbtrap
*/
- .globl _C_LABEL(ddb_trap)
-_C_LABEL(ddb_trap):
+ .globl ddb_trap
+ddb_trap:
mtsprg 1,%r1
mfmsr %r3
mtsrr1 %r3
FRAME_SETUP_FLAG(CI_DDBSAVE, 0)
/* Call C trap code: */
addi %r3,%r1,8
- bl _C_LABEL(db_trap_glue)
+ bl db_trap_glue
or. %r3,%r3,%r3
bne ddbleave
/* This wasn't for DDB, so switch to real trap: */
rfi4: rfi
#endif /* DDB */
- .globl _C_LABEL(rfi_inst)
-_C_LABEL(rfi_inst):
+ .globl rfi_inst
+rfi_inst:
rfi
- .globl _C_LABEL(rfid_inst)
-_C_LABEL(rfid_inst):
+ .globl rfid_inst
+rfid_inst:
rfid
- .globl _C_LABEL(nop_inst)
- _C_LABEL(nop_inst):
+ .globl nop_inst
+ nop_inst:
nop
- .globl _C_LABEL(rfi_start)
-_C_LABEL(rfi_start):
+ .globl rfi_start
+rfi_start:
.long rfi1, rfi1 + 4
.long rfi2, rfi2 + 4
.long rfi3, rfi3 + 4
.long 0, 0
- .globl _C_LABEL(nopbat_start)
-_C_LABEL(nopbat_start):
+ .globl nopbat_start
+nopbat_start:
.long nopbat_1s, nopbat_1e
.long 0, 0
- .globl _C_LABEL(nop32_start)
-_C_LABEL(nop32_start):
+ .globl nop32_start
+nop32_start:
.long nop32_1s, nop32_1e
.long nop32_2s, nop32_2e
.long nop32_3s, nop32_3e
-/* $OpenBSD: locore0.S,v 1.2 2019/09/03 14:37:22 deraadt Exp $ */
+/* $OpenBSD: locore0.S,v 1.3 2022/12/08 01:25:45 guenther Exp $ */
/* $NetBSD: locore.S,v 1.2 1996/10/16 19:33:09 ws Exp $ */
/*
/*
* Globals
*/
- .globl _C_LABEL(esym),_C_LABEL(proc0paddr)
- .type _C_LABEL(esym),@object
- .type _C_LABEL(proc0paddr),@object
+ .globl esym,proc0paddr
+ .type esym,@object
+ .type proc0paddr,@object
.data
-_C_LABEL(esym): .long 0 /* end of symbol table */
-_C_LABEL(proc0paddr): .long 0 /* proc0 p_addr */
+esym: .long 0 /* end of symbol table */
+proc0paddr: .long 0 /* proc0 p_addr */
.globl fwargsave
fwargsave:
/*
* Startup entry
*/
-_ENTRY(_C_LABEL(kernel_text))
+_ENTRY(kernel_text)
/* arguments to start
* r1 - stack provided by firmware/bootloader
* r3 - unused
lwz %r9, -4(%r9)
cmpwi %r9,0
beq 1f
- lis %r8,_C_LABEL(esym)@ha
- stw %r9,_C_LABEL(esym)@l(%r8)
+ lis %r8,esym@ha
+ stw %r9,esym@l(%r8)
mr %r8, %r9
1:
#endif
li %r9,PGOFSET
add %r8,%r8,%r9
andc %r8,%r8,%r9
- lis %r9,_C_LABEL(cpu_info)@ha
- addi %r9,%r9,_C_LABEL(cpu_info)@l
+ lis %r9,cpu_info@ha
+ addi %r9,%r9,cpu_info@l
mtsprg 0,%r9
addi %r8,%r8,INTSTK
stw %r8,CI_INTSTK(%r9)
li %r0,-1
stw %r0,CI_INTRDEPTH(%r9)
addi %r8,%r8,SPILLSTK+DDBSTK /* leave room for spillstk and ddbstk */
- lis %r9,_C_LABEL(proc0paddr)@ha
- stw %r8,_C_LABEL(proc0paddr)@l(%r9)
+ lis %r9,proc0paddr@ha
+ stw %r8,proc0paddr@l(%r9)
addi %r1,%r8,USPACE-FRAMELEN /* stackpointer for proc0 */
mr %r4,%r1 /* end of mem reserved for kernel */
li %r0,0
lis %r3,start@ha
addi %r3,%r3,start@l
mr %r5,%r6 /* args string */
- bl _C_LABEL(initppc)
- bl _C_LABEL(main)
- b _C_LABEL(OF_exit)
+ bl initppc
+ bl main
+ b OF_exit
-/* $OpenBSD: ofwreal.S,v 1.8 2022/01/02 23:31:10 jsg Exp $ */
+/* $OpenBSD: ofwreal.S,v 1.9 2022/12/08 01:25:45 guenther Exp $ */
/* $NetBSD: ofwreal.S,v 1.1 1996/09/30 16:34:51 ws Exp $ */
/*
#define SVSIZE (SRSIZE+SPRGSIZE+SDR1SIZE+MSRSIZE)
#define BATSIZE (16*4)
- .global _C_LABEL(fwcall)
-_C_LABEL(fwcall): .long 0
+ .global fwcall
+fwcall: .long 0
.lcomm fwsave,SVSIZE,8
.lcomm fwbatsave,BATSIZE,8
.lcomm clbatsave,BATSIZE,8
.lcomm ofsrsave,16*4,4 /* 16 words of 4 bytes to store OF segment registers */
.lcomm srsave,16*4,4 /* 16 words of 4 bytes to swap OF segment registers*/
- .globl _C_LABEL(ofmsr)
-_C_LABEL(ofmsr): .long 0 /* area to store msr for openfirmware*/
+ .globl ofmsr
+ofmsr: .long 0 /* area to store msr for openfirmware*/
.text
-_ENTRY(_C_LABEL(ofw_init))
+_ENTRY(ofw_init)
mflr %r31 /* save return address */
mr %r13,%r6 /* save args (only pointer used) */
stw %r5,fwcall@l(%r4)
mfmsr %r5
- lis %r4,_C_LABEL(ofmsr)@ha /* save msr from openfirmware */
- stw %r5,_C_LABEL(ofmsr)@l(%r4)
+ lis %r4,ofmsr@ha /* save msr from openfirmware */
+ stw %r5,ofmsr@l(%r4)
#if 0
lis %r0,(0x80001ffe)@ha
addi %r0,%r0,(0x80001ffe)@l
bl savemmu
/* save openfirmware address mappings */
- bl _C_LABEL(save_ofw_mapping)
+ bl save_ofw_mapping
#if 0
/* dont really need the bats from firmware saved, 0 to disable */
blr
-_ENTRY(_C_LABEL(fwentry))
+_ENTRY(fwentry)
mflr %r4
RETGUARD_SETUP_LATE(fwentry, %r11, %r4)
stwu %r1,-16(%r1)
blr
.lcomm firmstk,NBPG,16
-.comm _C_LABEL(OF_buf),NBPG
+.comm OF_buf,NBPG
/*
* OpenFirmware entry point
* Note: caller has to set the machine state register (msr)
* to be correct for OpenFirmware.
*/
-_ENTRY(_C_LABEL(openfirmware))
+_ENTRY(openfirmware)
mflr %r0
RETGUARD_SETUP_LATE(openfirmware, %r11, %r0)
stw %r0,4(%r1) /* save return address */
-/* $OpenBSD: context.S,v 1.63 2021/05/01 16:11:11 visa Exp $ */
+/* $OpenBSD: context.S,v 1.64 2022/12/08 01:25:45 guenther Exp $ */
/*
* Copyright (c) 2002-2003 Opsycon AB (www.opsycon.se / www.opsycon.com)
move zero, ra
#endif
#ifdef MULTIPROCESSOR
- jal _C_LABEL(proc_trampoline_mp)
+ jal proc_trampoline_mp
NOP
#endif
/*
-/* $OpenBSD: lcore_access.S,v 1.32 2022/01/28 16:20:09 visa Exp $ */
+/* $OpenBSD: lcore_access.S,v 1.33 2022/12/08 01:25:45 guenther Exp $ */
/*
* Copyright (c) 2001-2003 Opsycon AB (www.opsycon.se / www.opsycon.com)
PTR_ADDU a0, sp, 4*REGSZ # address of sigcontext
LI v0, SYS_sigreturn # sigreturn(scp)
syscall
- .globl _C_LABEL(sigcoderet)
-_C_LABEL(sigcoderet):
+ .globl sigcoderet
+sigcoderet:
LI v0, SYS_exit # just in case sigreturn fails
syscall
.globl esigcode
-/* $OpenBSD: asm.h,v 1.17 2022/06/10 01:56:02 guenther Exp $ */
+/* $OpenBSD: asm.h,v 1.18 2022/12/08 01:25:45 guenther Exp $ */
/* $NetBSD: asm.h,v 1.1 1996/09/30 16:34:20 ws Exp $ */
/*
# define _PROF_PROLOGUE(y)
#endif
-#define ENTRY(y) _ENTRY(_C_LABEL(y)); _PROF_PROLOGUE(y)
+#define ENTRY(y) _ENTRY(y); _PROF_PROLOGUE(y)
#define ENTRY_NB(y) _ENTRY_NB(y); _PROF_PROLOGUE(y)
-#define ASENTRY(y) _ENTRY(_ASM_LABEL(y)); _PROF_PROLOGUE(y)
+#define ASENTRY(y) _ENTRY(y); _PROF_PROLOGUE(y)
#define END(y) .size y, . - y
#define STRONG_ALIAS(alias,sym) \
-/* $OpenBSD: setjmp.S,v 1.10 2020/11/28 19:49:30 gkoehler Exp $ */
+/* $OpenBSD: setjmp.S,v 1.11 2022/12/08 01:25:45 guenther Exp $ */
/* kernel version of this file, does not have signal goop */
/* int setjmp(jmp_buf env) */
#define JMP_sig 0x60
-ENTRY(_C_LABEL(setjmp))
+ENTRY(setjmp)
mflr %r12
RETGUARD_SETUP_LATE(setjmp, %r11, %r12)
stw %r31, JMP_r31(%r3)
blr
-ENTRY(_C_LABEL(longjmp))
+ENTRY(longjmp)
/* lr, r31 */
lwz %r12, JMP_lr(%r3)
RETGUARD_SETUP_LATE(longjmp, %r11, %r12)
-/* $OpenBSD: asm.h,v 1.16 2022/10/25 06:05:57 guenther Exp $ */
+/* $OpenBSD: asm.h,v 1.17 2022/12/08 01:25:45 guenther Exp $ */
/* $NetBSD: asm.h,v 1.15 2000/08/02 22:24:39 eeh Exp $ */
/*
#define _PROF_PROLOGUE
#endif
-#define ENTRY(name) _ENTRY(_C_LABEL(name)); _PROF_PROLOGUE
-#define NENTRY(name) _ENTRY(_C_LABEL(name))
+#define ENTRY(name) _ENTRY(name); _PROF_PROLOGUE
+#define NENTRY(name) _ENTRY(name)
#define ENTRY_NB(name) _ENTRY_NB(name); _PROF_PROLOGUE
-#define ASENTRY(name) _ENTRY(_ASM_LABEL(name)); _PROF_PROLOGUE
+#define ASENTRY(name) _ENTRY(name); _PROF_PROLOGUE
#define FUNC(name) ASENTRY(name)
#define END(y) .size y, . - y
-#define RODATA(name) .align 4; .text; .globl _C_LABEL(name); \
- OTYPE(_C_LABEL(name)); _C_LABEL(name):
+#define RODATA(name) .align 4; .text; .globl name; \
+ OTYPE(name); name:
#define STRONG_ALIAS(alias,sym) \
.global alias; \
-/* $OpenBSD: in_cksum.S,v 1.2 2005/05/01 05:42:43 brad Exp $ */
+/* $OpenBSD: in_cksum.S,v 1.3 2022/12/08 01:25:45 guenther Exp $ */
/* $NetBSD: in_cksum.S,v 1.2 2001/08/10 20:53:11 eeh Exp $ */
/*
ENTRY(in_cksum)
clr %o3 ! sum = 0;
clr %o2
-_ENTRY(_C_LABEL(in_cksum_internal))
+_ENTRY(in_cksum_internal)
brz %o0, Lfinish ! for (; m && len > 0; m->m_next) {
clr %g1 ! swapped = 0;
brlez %o1, Lfinish
-/* $OpenBSD: locore.s,v 1.193 2022/10/21 18:55:42 miod Exp $ */
+/* $OpenBSD: locore.s,v 1.194 2022/12/08 01:25:45 guenther Exp $ */
/* $NetBSD: locore.s,v 1.137 2001/08/13 06:10:10 jdolecek Exp $ */
/*
#endif /* 1 */
.section .sun4v_patch, "ax"
- .globl _C_LABEL(sun4v_patch)
-_C_LABEL(sun4v_patch):
+ .globl sun4v_patch
+sun4v_patch:
.previous
.section .sun4v_patch_end, "ax"
- .globl _C_LABEL(sun4v_patch_end)
-_C_LABEL(sun4v_patch_end):
+ .globl sun4v_patch_end
+sun4v_patch_end:
.previous
.section .sun4v_pause_patch, "ax"
- .globl _C_LABEL(sun4v_pause_patch)
-_C_LABEL(sun4v_pause_patch):
+ .globl sun4v_pause_patch
+sun4v_pause_patch:
.previous
.section .sun4v_pause_patch_end, "ax"
- .globl _C_LABEL(sun4v_pause_patch_end)
-_C_LABEL(sun4v_pause_patch_end):
+ .globl sun4v_pause_patch_end
+sun4v_pause_patch_end:
.previous
#ifdef MULTIPROCESSOR
.section .sun4v_mp_patch, "ax"
- .globl _C_LABEL(sun4v_mp_patch)
-_C_LABEL(sun4v_mp_patch):
+ .globl sun4v_mp_patch
+sun4v_mp_patch:
.previous
.section .sun4v_mp_patch_end, "ax"
- .globl _C_LABEL(sun4v_mp_patch_end)
-_C_LABEL(sun4v_mp_patch_end):
+ .globl sun4v_mp_patch_end
+sun4v_mp_patch_end:
.previous
.section .sun4u_mtp_patch, "ax"
- .globl _C_LABEL(sun4u_mtp_patch)
-_C_LABEL(sun4u_mtp_patch):
+ .globl sun4u_mtp_patch
+sun4u_mtp_patch:
.previous
.section .sun4u_mtp_patch_end, "ax"
- .globl _C_LABEL(sun4u_mtp_patch_end)
-_C_LABEL(sun4u_mtp_patch_end):
+ .globl sun4u_mtp_patch_end
+sun4u_mtp_patch_end:
.previous
#endif
* something like:
* foointr:
* TRAP_SETUP ... ! makes %o registers safe
- * INCR _C_LABEL(cnt)+V_FOO ! count a foo
+ * INCR cnt+V_FOO ! count a foo
*/
.macro INCR what
sethi %hi(\what), %o0
.data
- .globl _C_LABEL(data_start)
-_C_LABEL(data_start): ! Start of data segment
-#define DATA_START _C_LABEL(data_start)
+ .globl data_start
+data_start: ! Start of data segment
+#define DATA_START data_start
/*
* Process 0's u.
*
* This must be aligned on an 8 byte boundary.
*/
- .globl _C_LABEL(u0)
-_C_LABEL(u0): .xword 0
+ .globl u0
+u0: .xword 0
estack0: .xword 0
/*
* the many variations of different sun4* machines. It contains
* the value CPU_SUN4U or CPU_SUN4V.
*/
- .globl _C_LABEL(cputyp)
-_C_LABEL(cputyp):
+ .globl cputyp
+cputyp:
.word CPU_SUN4U
- .globl _C_LABEL(cold)
-_C_LABEL(cold):
+ .globl cold
+cold:
.word 1
_ALIGN
#endif /* DEBUG */
/* hardware interrupts (can be linked or made `fast') */
.macro HARDINT4U lev
- VTRAP \lev, _C_LABEL(sparc_interrupt)
+ VTRAP \lev, sparc_interrupt
.endm
/* software interrupts (may not be made direct, sorry---but you
#endif
- .globl start, _C_LABEL(kernel_text)
- _C_LABEL(kernel_text) = start ! for kvm_mkdb(8)
+ .globl start, kernel_text
+ kernel_text = start ! for kvm_mkdb(8)
start:
/* Traps from TL=0 -- traps from user mode */
- .globl _C_LABEL(trapbase)
-_C_LABEL(trapbase):
+ .globl trapbase
+trapbase:
b dostart; nop; TA8 ! 000 = reserved -- Use it to boot
/* We should not get the next 5 traps */
UTRAP 0x001 ! 001 = POR Reset -- ROM should get this
#ifdef SUN4V
.align 0x8000
- .globl _C_LABEL(trapbase_sun4v)
-_C_LABEL(trapbase_sun4v):
+ .globl trapbase_sun4v
+trapbase_sun4v:
sun4v_tl0_reserved 8 ! 0x0-0x7
VTRAP T_INST_EXCEPT, sun4v_tl0_itsb_miss ! 0x8
VTRAP T_TEXTFAULT, sun4v_tl0_itsb_miss ! 0x9
! set stack pointer redzone to base+minstack; alters base
.macro SET_SP_REDZONE base, tmp
add \base, REDSIZE, \base
- sethi %hi(_C_LABEL(redzone)), \tmp
- stx \base, [\tmp + %lo(_C_LABEL(redzone))]
+ sethi %hi(redzone), \tmp
+ stx \base, [\tmp + %lo(redzone)]
.endm
! variant with a constant
.macro SET_SP_REDZONE_CONST const, tmp1, tmp2
set (\const) + REDSIZE, \tmp1
- sethi %hi(_C_LABEL(redzone)), \tmp2
- stx \tmp1, [\tmp2 + %lo(_C_LABEL(redzone))]
+ sethi %hi(redzone), \tmp2
+ stx \tmp1, [\tmp2 + %lo(redzone)]
.endm
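
Likewise for sparc64: the redzone macros above keep the usual sethi/%lo pair, only without the _C_LABEL() wrapper. A minimal sketch of the idiom (illustrative only; some_global is a hypothetical symbol assumed to sit at a 32-bit-reachable address, as these kernel symbols do):

	sethi	%hi(some_global), %g1		! bits 31..10 of the address
	ldx	[%g1 + %lo(some_global)], %g2	! low 10 bits as the displacement
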
! check stack pointer against redzone (uses two temps)
sethi KERNBASE, \t1
cmp %sp, \t1
blu,pt %xcc, 7f
- sethi %hi(_C_LABEL(redzone)), \t1
- ldx [\t1 + %lo(_C_LABEL(redzone))], \t2
+ sethi %hi(redzone), \t1
+ ldx [\t1 + %lo(redzone)], \t2
cmp %sp, \t2 ! if sp >= \t2, not in red zone
blu panic_red
nop ! and can continue normally
panic_red:
/* move to panic stack */
- stx %g0, [t1 + %lo(_C_LABEL(redzone))];
+ stx %g0, [t1 + %lo(redzone)];
set eredstack - BIAS, %sp;
/* prevent panic() from lowering ipl */
- sethi %hi(_C_LABEL(panicstr)), t2;
+ sethi %hi(panicstr), t2;
set Lpanic_red, t2;
- st t2, [t1 + %lo(_C_LABEL(panicstr))];
+ st t2, [t1 + %lo(panicstr)];
wrpr g0, 15, %pil /* t1 = splhigh() */
save %sp, -CCF64SZ, %sp; /* preserve current window */
sethi %hi(Lpanic_red), %o0;
- call _C_LABEL(panic);
+ call panic;
or %o0, %lo(Lpanic_red), %o0;
mov TLB_TAG_ACCESS, %g3
sethi %hi(0x1fff), %g6 ! 8K context mask
ldxa [%g3] ASI_DMMU, %g3 ! Get fault addr from Tag Target
- sethi %hi(_C_LABEL(ctxbusy)), %g4
+ sethi %hi(ctxbusy), %g4
or %g6, %lo(0x1fff), %g6
- ldx [%g4 + %lo(_C_LABEL(ctxbusy))], %g4
+ ldx [%g4 + %lo(ctxbusy)], %g4
srax %g3, HOLESHIFT, %g5 ! Check for valid address
and %g3, %g6, %g6 ! Isolate context
mov TLB_TAG_ACCESS, %g3 ! Get real fault page
sethi %hi(0x1fff), %g6 ! 8K context mask
ldxa [%g3] ASI_DMMU, %g3 ! from tag access register
- sethi %hi(_C_LABEL(ctxbusy)), %g4
+ sethi %hi(ctxbusy), %g4
or %g6, %lo(0x1fff), %g6
- ldx [%g4 + %lo(_C_LABEL(ctxbusy))], %g4
+ ldx [%g4 + %lo(ctxbusy)], %g4
srax %g3, HOLESHIFT, %g5 ! Check for valid address
and %g3, %g6, %g6 ! Isolate context
! ba 0f ! DEBUG -- don't use phys addresses
wr %g0, ASI_NUCLEUS, %asi ! In case of problems finding PA
- sethi %hi(_C_LABEL(ctxbusy)), %g1
- ldx [%g1 + %lo(_C_LABEL(ctxbusy))], %g1 ! Load start of ctxbusy
+ sethi %hi(ctxbusy), %g1
+ ldx [%g1 + %lo(ctxbusy)], %g1 ! Load start of ctxbusy
#ifdef DEBUG
srax %g6, HOLESHIFT, %g7 ! Check for valid address
brz,pt %g7, 1f ! Should be zero or -1
add %sp, -CC64FSZ-BIAS, %sp ! Overwrite proc 0's stack.
#endif /* DEBUG */
ta 1; nop ! This helps out traptrace.
- call _C_LABEL(panic) ! This needs to be fixed properly but we should panic here
+ call panic ! This needs to be fixed properly but we should panic here
mov %g1, %o1
NOTREACHED
.data
TRAP_SETUP -CC64FSZ-TF_SIZE
Ldatafault_internal:
- INCR _C_LABEL(uvmexp)+V_FAULTS ! cnt.v_faults++ (clobbers %o0,%o1,%o2) should not fault
+ INCR uvmexp+V_FAULTS ! cnt.v_faults++ (clobbers %o0,%o1,%o2) should not fault
! ldx [%sp + CC64FSZ + BIAS + TF_FAULT], %g1 ! DEBUG make sure this has not changed
mov %g1, %o0 ! Move these to the out regs so we can save the globals
mov %g2, %o4
mov %o0, %o3 ! (argument: trap address)
mov %g2, %o2 ! (argument: trap pc)
- call _C_LABEL(data_access_fault) ! data_access_fault(&tf, type,
+ call data_access_fault ! data_access_fault(&tf, type,
! pc, addr, sfva, sfsr)
add %sp, CC64FSZ + BIAS, %o0 ! (argument: &tf)
NOTREACHED
data_error:
- call _C_LABEL(data_access_error) ! data_access_error(&tf, type,
+ call data_access_error ! data_access_error(&tf, type,
! afva, afsr, sfva, sfsr)
add %sp, CC64FSZ + BIAS, %o0 ! (argument: &tf)
ba data_recover
mov TLB_TAG_ACCESS, %g3 ! Get real fault page
sethi %hi(0x1fff), %g7 ! 8K context mask
ldxa [%g3] ASI_IMMU, %g3 ! from tag access register
- sethi %hi(_C_LABEL(ctxbusy)), %g4
+ sethi %hi(ctxbusy), %g4
or %g7, %lo(0x1fff), %g7
- ldx [%g4 + %lo(_C_LABEL(ctxbusy))], %g4
+ ldx [%g4 + %lo(ctxbusy)], %g4
srax %g3, HOLESHIFT, %g5 ! Check for valid address
and %g3, %g7, %g6 ! Isolate context
sllx %g6, 3, %g6 ! Make it into an offset into ctxbusy
membar #Sync ! No real reason for this XXXX
TRAP_SETUP -CC64FSZ-TF_SIZE
- INCR _C_LABEL(uvmexp)+V_FAULTS ! cnt.v_faults++ (clobbers %o0,%o1,%o2)
+ INCR uvmexp+V_FAULTS ! cnt.v_faults++ (clobbers %o0,%o1,%o2)
mov %g3, %o3
st %g4, [%sp + CC64FSZ + BIAS + TF_Y] ! set tf.tf_y
wrpr %g0, PSTATE_INTR, %pstate ! reenable interrupts
- call _C_LABEL(text_access_fault) ! mem_access_fault(&tf, type, pc, sfsr)
+ call text_access_fault ! mem_access_fault(&tf, type, pc, sfsr)
add %sp, CC64FSZ + BIAS, %o0 ! (argument: &tf)
text_recover:
CHKPT %o1,%o2,2
text_error:
wrpr %g0, PSTATE_INTR, %pstate ! reenable interrupts
- call _C_LABEL(text_access_error) ! mem_access_fault(&tfm type, sfva [pc], sfsr,
+ call text_access_error ! mem_access_fault(&tfm type, sfva [pc], sfsr,
! afva, afsr);
add %sp, CC64FSZ + BIAS, %o0 ! (argument: &tf)
ba text_recover
ldxa [%g3] ASI_PHYS_CACHED, %g3
add %g1, 0x50, %g6
ldxa [%g6] ASI_PHYS_CACHED, %g6
- sethi %hi(_C_LABEL(ctxbusy)), %g4
- ldx [%g4 + %lo(_C_LABEL(ctxbusy))], %g4
+ sethi %hi(ctxbusy), %g4
+ ldx [%g4 + %lo(ctxbusy)], %g4
sllx %g6, 3, %g6 ! Make it into an offset into ctxbusy
ldx [%g4 + %g6], %g4 ! Load up our page table.
bne,pn %xcc, 1b
or %g4, SUN4V_TLB_ACCESS, %g4 ! Update the modified bit
2:
- sethi %hi(_C_LABEL(tsb_dmmu)), %g2
- ldx [%g2 + %lo(_C_LABEL(tsb_dmmu))], %g2
+ sethi %hi(tsb_dmmu), %g2
+ ldx [%g2 + %lo(tsb_dmmu)], %g2
mov %g1, %g7
/* Construct TSB tag word. */
or %g1, %g6, %g1
srlx %g3, PTSHIFT, %g3
- sethi %hi(_C_LABEL(tsbsize)), %g5
+ sethi %hi(tsbsize), %g5
mov 512, %g6
- ld [%g5 + %lo(_C_LABEL(tsbsize))], %g5
+ ld [%g5 + %lo(tsbsize)], %g5
sllx %g6, %g5, %g5
sub %g5, 1, %g5
and %g3, %g5, %g3
ldxa [%g3] ASI_PHYS_CACHED, %g3
add %g1, 0x50, %g6
ldxa [%g6] ASI_PHYS_CACHED, %g6
- sethi %hi(_C_LABEL(ctxbusy)), %g4
- ldx [%g4 + %lo(_C_LABEL(ctxbusy))], %g4
+ sethi %hi(ctxbusy), %g4
+ ldx [%g4 + %lo(ctxbusy)], %g4
sllx %g6, 3, %g6 ! Make it into an offset into ctxbusy
ldx [%g4 + %g6], %g4 ! Load up our page table.
or %g4, SUN4V_TLB_MODIFY|SUN4V_TLB_ACCESS|SUN4V_TLB_W, %g4
! Update the modified bit
2:
- sethi %hi(_C_LABEL(tsb_dmmu)), %g2
- ldx [%g2 + %lo(_C_LABEL(tsb_dmmu))], %g2
+ sethi %hi(tsb_dmmu), %g2
+ ldx [%g2 + %lo(tsb_dmmu)], %g2
mov %g1, %g7
/* Construct TSB tag word. */
or %g1, %g6, %g1
srlx %g3, PTSHIFT, %g3
- sethi %hi(_C_LABEL(tsbsize)), %g5
+ sethi %hi(tsbsize), %g5
mov 512, %g6
- ld [%g5 + %lo(_C_LABEL(tsbsize))], %g5
+ ld [%g5 + %lo(tsbsize)], %g5
sllx %g6, %g5, %g5
sub %g5, 1, %g5
and %g3, %g5, %g3
ldxa [%g3] ASI_PHYS_CACHED, %g3
add %g1, 0x50, %g6
ldxa [%g6] ASI_PHYS_CACHED, %g6
- sethi %hi(_C_LABEL(ctxbusy)), %g4
- ldx [%g4 + %lo(_C_LABEL(ctxbusy))], %g4
+ sethi %hi(ctxbusy), %g4
+ ldx [%g4 + %lo(ctxbusy)], %g4
sllx %g6, 3, %g6 ! Make it into an offset into ctxbusy
ldx [%g4 + %g6], %g4 ! Load up our page table.
bne,pn %xcc, 1b
or %g4, SUN4V_TLB_ACCESS, %g4 ! Update the modified bit
2:
- sethi %hi(_C_LABEL(tsb_dmmu)), %g2
- ldx [%g2 + %lo(_C_LABEL(tsb_dmmu))], %g2
+ sethi %hi(tsb_dmmu), %g2
+ ldx [%g2 + %lo(tsb_dmmu)], %g2
mov %g1, %g7
/* Construct TSB tag word. */
or %g1, %g6, %g1
srlx %g3, PTSHIFT, %g3
- sethi %hi(_C_LABEL(tsbsize)), %g5
+ sethi %hi(tsbsize), %g5
mov 512, %g6
- ld [%g5 + %lo(_C_LABEL(tsbsize))], %g5
+ ld [%g5 + %lo(tsbsize)], %g5
sllx %g6, %g5, %g5
sub %g5, 1, %g5
and %g3, %g5, %g3
ldxa [%g3] ASI_PHYS_CACHED, %g3
add %g1, 0x50, %g6
ldxa [%g6] ASI_PHYS_CACHED, %g6
- sethi %hi(_C_LABEL(ctxbusy)), %g4
- ldx [%g4 + %lo(_C_LABEL(ctxbusy))], %g4
+ sethi %hi(ctxbusy), %g4
+ ldx [%g4 + %lo(ctxbusy)], %g4
sllx %g6, 3, %g6 ! Make it into an offset into ctxbusy
ldx [%g4 + %g6], %g4 ! Load up our page table.
or %g4, SUN4V_TLB_MODIFY|SUN4V_TLB_ACCESS|SUN4V_TLB_W, %g4
! Update the modified bit
2:
- sethi %hi(_C_LABEL(tsb_dmmu)), %g2
- ldx [%g2 + %lo(_C_LABEL(tsb_dmmu))], %g2
+ sethi %hi(tsb_dmmu), %g2
+ ldx [%g2 + %lo(tsb_dmmu)], %g2
mov %g1, %g7
/* Construct TSB tag word. */
or %g1, %g6, %g1
srlx %g3, PTSHIFT, %g3
- sethi %hi(_C_LABEL(tsbsize)), %g5
+ sethi %hi(tsbsize), %g5
mov 512, %g6
- ld [%g5 + %lo(_C_LABEL(tsbsize))], %g5
+ ld [%g5 + %lo(tsbsize)], %g5
sllx %g6, %g5, %g5
sub %g5, 1, %g5
and %g3, %g5, %g3
ldxa [%g3] ASI_PHYS_CACHED, %g3
add %g1, 0x10, %g6
ldxa [%g6] ASI_PHYS_CACHED, %g6
- sethi %hi(_C_LABEL(ctxbusy)), %g4
- ldx [%g4 + %lo(_C_LABEL(ctxbusy))], %g4
+ sethi %hi(ctxbusy), %g4
+ ldx [%g4 + %lo(ctxbusy)], %g4
sllx %g6, 3, %g6 ! Make it into an offset into ctxbusy
ldx [%g4 + %g6], %g4 ! Load up our page table.
bne,pn %xcc, 1b
or %g4, SUN4V_TLB_ACCESS, %g4 ! Update the modified bit
2:
- sethi %hi(_C_LABEL(tsb_dmmu)), %g2
- ldx [%g2 + %lo(_C_LABEL(tsb_dmmu))], %g2
+ sethi %hi(tsb_dmmu), %g2
+ ldx [%g2 + %lo(tsb_dmmu)], %g2
mov %g1, %g7
/* Construct TSB tag word. */
or %g1, %g6, %g1
srlx %g3, PTSHIFT, %g3
- sethi %hi(_C_LABEL(tsbsize)), %g5
+ sethi %hi(tsbsize), %g5
mov 512, %g6
- ld [%g5 + %lo(_C_LABEL(tsbsize))], %g5
+ ld [%g5 + %lo(tsbsize)], %g5
sllx %g6, %g5, %g5
sub %g5, 1, %g5
and %g3, %g5, %g3
wr %g0, ASI_PHYS_CACHED, %asi
ldxa [%g6 + CI_CPCB] %asi, %g6
- sethi %hi(_C_LABEL(ctxbusy)), %g1
- ldx [%g1 + %lo(_C_LABEL(ctxbusy))], %g1
+ sethi %hi(ctxbusy), %g1
+ ldx [%g1 + %lo(ctxbusy)], %g1
ldx [%g1], %g1
srlx %g6, STSHIFT, %g7
wr %g0, ASI_PRIMARY_NOFAULT, %asi ! Restore default ASI
wrpr %g0, PSTATE_INTR, %pstate ! traps on again
- call _C_LABEL(data_access_fault) ! data_acces_fault(tf, type, ...)
+ call data_access_fault ! data_acces_fault(tf, type, ...)
nop
ba,a,pt %icc, return_from_trap
wr %g0, ASI_PRIMARY_NOFAULT, %asi ! Restore default ASI
wrpr %g0, PSTATE_INTR, %pstate ! traps on again
- call _C_LABEL(text_access_fault) ! text_access_fault(tf, type, ...)
+ call text_access_fault ! text_access_fault(tf, type, ...)
nop
ba,a,pt %icc, return_from_trap
wr %g0, ASI_PRIMARY_NOFAULT, %asi ! Restore default ASI
wrpr %g0, PSTATE_INTR, %pstate ! traps on again
- call _C_LABEL(trap) ! trap(tf, type, pc, pstate)
+ call trap ! trap(tf, type, pc, pstate)
nop
ba,a,pt %icc, return_from_trap
wr %g0, ASI_PRIMARY_NOFAULT, %asi ! Restore default ASI
GET_CPUINFO_VA(%g7)
- call _C_LABEL(syscall) ! syscall(&tf, code, pc)
+ call syscall ! syscall(&tf, code, pc)
wrpr %g0, PSTATE_INTR, %pstate ! turn on interrupts
/* see `proc_trampoline' for the reason for this label */
#define INTRDEBUG_LEVEL 0x2
#define INTRDEBUG_FUNC 0x4
#define INTRDEBUG_SPUR 0x8
- .globl _C_LABEL(intrdebug)
-_C_LABEL(intrdebug): .word 0x0
+ .globl intrdebug
+intrdebug: .word 0x0
/*
* Note: we use the local label `97' to branch forward to, to skip
* actual debugging code following a `intrdebug' bit test.
stxa %g0, [%g0] ASI_IRSR ! Ack IRQ
membar #Sync ! Should not be needed due to retry
- sethi %hi(_C_LABEL(intrlev)), %g3
- or %g3, %lo(_C_LABEL(intrlev)), %g3
+ sethi %hi(intrlev), %g3
+ or %g3, %lo(intrlev), %g3
sllx %g5, 3, %g5 ! Calculate entry number
ldx [%g3 + %g5], %g5 ! We have a pointer to the handler
#ifdef DEBUG
stx %g5, [%g1]
#ifdef DEBUG
- set _C_LABEL(intrdebug), %g7
+ set intrdebug, %g7
ld [%g7], %g7
btst INTRDEBUG_VECTOR, %g7
bz,pt %icc, 97f
3:
#ifdef DEBUG
- set _C_LABEL(intrdebug), %g7
+ set intrdebug, %g7
ld [%g7], %g7
btst INTRDEBUG_SPUR, %g7
bz,pt %icc, 97f
bgeu,pt %xcc, 1f
nop
- sethi %hi(_C_LABEL(intrlev)), %g3
- or %g3, %lo(_C_LABEL(intrlev)), %g3
+ sethi %hi(intrlev), %g3
+ or %g3, %lo(intrlev), %g3
sllx %g5, 3, %g5 ! Calculate entry number
ldx [%g3 + %g5], %g5 ! We have a pointer to the handler
1:
* IRQ# = %tt - 0x40
*/
- .globl _C_LABEL(sparc_interrupt) ! This is for interrupt debugging
-_C_LABEL(sparc_interrupt):
+ .globl sparc_interrupt ! This is for interrupt debugging
+sparc_interrupt:
/*
* If this is a %tick softint, clear it then call interrupt_vector.
*/
#endif
rd %y, %l6
- INCR _C_LABEL(uvmexp)+V_INTR ! cnt.v_intr++; (clobbers %o0,%o1,%o2)
+ INCR uvmexp+V_INTR ! cnt.v_intr++; (clobbers %o0,%o1,%o2)
rdpr %tt, %l5 ! Find out our current IPL
rdpr %tstate, %l0
rdpr %tpc, %l1
! At this point, the current ih could already be added
! back to the pending table.
- call _C_LABEL(intr_handler)
+ call intr_handler
mov %l2, %o1
brz,pn %l1, 0f
mov 1, %l5 ! initialize intr count for next run
#ifdef DEBUG
- set _C_LABEL(intrdebug), %o2
+ set intrdebug, %o2
ld [%o2], %o2
btst INTRDEBUG_FUNC, %o2
bz,a,pt %icc, 97f
wrpr %g1, %g7, %tstate
/* XXX Rewrite sun4u code to handle faults like sun4v. */
- sethi %hi(_C_LABEL(cputyp)), %g2
- ld [%g2 + %lo(_C_LABEL(cputyp))], %g2
+ sethi %hi(cputyp), %g2
+ ld [%g2 + %lo(cputyp)], %g2
cmp %g2, CPU_SUN4V
bne,pt %icc, 1f
nop
retry
! exported end marker for kernel gdb
- .globl _C_LABEL(endtrapcode)
-_C_LABEL(endtrapcode):
+ .globl endtrapcode
+endtrapcode:
#ifdef DDB
!!!
membar #Sync
inc %l2
set 2f, %o0
- call _C_LABEL(db_printf)
+ call db_printf
inc 8, %l1
ldxa [%l1] ASI_DMMU_TLB_TAG, %o2
membar #Sync
inc %l2
set 3f, %o0
- call _C_LABEL(db_printf)
+ call db_printf
inc 8, %l1
cmp %l1, %l3
nop
ldx [%o1+8], %l4
- sethi %hi(_C_LABEL(esym)), %l3 ! store esym
- stx %l4, [%l3 + %lo(_C_LABEL(esym))]
+ sethi %hi(esym), %l3 ! store esym
+ stx %l4, [%l3 + %lo(esym)]
ldx [%o1+16], %l4
- sethi %hi(_C_LABEL(ssym)), %l3 ! store ssym
- stx %l4, [%l3 + %lo(_C_LABEL(ssym))]
+ sethi %hi(ssym), %l3 ! store ssym
+ stx %l4, [%l3 + %lo(ssym)]
1:
#endif /* defined(DDB) || NKSYMS > 0 */
/*
*/
1:
set 0x2000, %o0 ! fixed: 8192 contexts
- call _C_LABEL(bootstrap)
+ call bootstrap
clr %g4 ! Clear data segment pointer
/*
* stack now.
*/
- sethi %hi(_C_LABEL(cpus)), %g2
- ldx [%g2 + %lo(_C_LABEL(cpus))], %g2
+ sethi %hi(cpus), %g2
+ ldx [%g2 + %lo(cpus)], %g2
ldx [%g2 + CI_PADDR], %g2 ! Load the interrupt stack's PA
/*
* Call the routine passed in in cpu_info->ci_spinup.
*/
-_C_LABEL(cpu_initialize):
+cpu_initialize:
wrpr %g0, 0, %tl ! Make sure we're not in NUCLEUS mode
flushw
/* Change the trap base register */
- set _C_LABEL(trapbase), %l1
+ set trapbase, %l1
#ifdef SUN4V
- sethi %hi(_C_LABEL(cputyp)), %l0
- ld [%l0 + %lo(_C_LABEL(cputyp))], %l0
+ sethi %hi(cputyp), %l0
+ ld [%l0 + %lo(cputyp)], %l0
cmp %l0, CPU_SUN4V
bne,pt %icc, 1f
nop
- set _C_LABEL(trapbase_sun4v), %l1
+ set trapbase_sun4v, %l1
GET_MMFSA(%o1)
1:
#endif
- call _C_LABEL(prom_set_trap_table) ! Now we should be running 100% from our handlers
+ call prom_set_trap_table ! Now we should be running 100% from our handlers
mov %l1, %o0
wrpr %l1, 0, %tba ! Make sure the PROM didn't foul up.
wrpr %g0, WSTATE_KERN, %wstate
NOTREACHED
set 1f, %o0 ! Main should never come back here
- call _C_LABEL(panic)
+ call panic
nop
.data
1:
/* Set the dmmu tsb */
sethi %hi(0x1fff), %o2
- set _C_LABEL(tsb_dmmu), %o0
+ set tsb_dmmu, %o0
ldx [%o0], %o0
- set _C_LABEL(tsbsize), %o1
+ set tsbsize, %o1
or %o2, %lo(0x1fff), %o2
ld [%o1], %o1
andn %o0, %o2, %o0 ! Mask off size and split bits
/* Set the immu tsb */
sethi %hi(0x1fff), %o2
- set _C_LABEL(tsb_immu), %o0
+ set tsb_immu, %o0
ldx [%o0], %o0
- set _C_LABEL(tsbsize), %o1
+ set tsbsize, %o1
or %o2, %lo(0x1fff), %o2
ld [%o1], %o1
andn %o0, %o2, %o0 ! Mask off size and split bits
set tmpstack-CC64FSZ-BIAS, %sp
- call _C_LABEL(pmap_bootstrap_cpu)
+ call pmap_bootstrap_cpu
nop
ba,a,pt %xcc, cpu_initialize
* OpenFirmware entry point
*/
.align 8
- .globl _C_LABEL(openfirmware)
+ .globl openfirmware
.proc 1
FTYPE(openfirmware)
-_C_LABEL(openfirmware):
+openfirmware:
sethi %hi(romp), %o4
ldx [%o4+%lo(romp)], %o4
save %sp, -CC64FSZ, %sp
*
*/
.align 8
- .globl _C_LABEL(sp_tlb_flush_pte)
+ .globl sp_tlb_flush_pte
.proc 1
FTYPE(sp_tlb_flush_pte)
-_C_LABEL(sp_tlb_flush_pte):
+sp_tlb_flush_pte:
#ifdef DEBUG
set DATA_START, %o4 ! Forget any recent TLB misses
stx %g0, [%o4]
mov %i1, %o1
andn %i0, 0xfff, %o3
or %o3, 0x010, %o3
- call _C_LABEL(printf)
+ call printf
mov %i0, %o2
restore
.data
*
*/
.align 8
- .globl _C_LABEL(sp_tlb_flush_ctx)
+ .globl sp_tlb_flush_ctx
.proc 1
FTYPE(sp_tlb_flush_ctx)
-_C_LABEL(sp_tlb_flush_ctx):
+sp_tlb_flush_ctx:
#ifdef DEBUG
set DATA_START, %o4 ! Forget any recent TLB misses
stx %g0, [%o4]
*
*/
.align 8
- .globl _C_LABEL(us_dcache_flush_page)
+ .globl us_dcache_flush_page
.proc 1
FTYPE(us_dcache_flush_page)
-_C_LABEL(us_dcache_flush_page):
+us_dcache_flush_page:
!! Try using cache_flush_phys for a change.
END(us_dcache_flush_page)
.align 8
- .globl _C_LABEL(us3_dcache_flush_page)
+ .globl us3_dcache_flush_page
.proc 1
FTYPE(us3_dcache_flush_page)
-_C_LABEL(us3_dcache_flush_page):
+us3_dcache_flush_page:
ldxa [%g0] ASI_MCCR, %o1
btst MCCR_DCACHE_EN, %o1
bz,pn %icc, 1f
*
*/
.align 8
- .globl _C_LABEL(cache_flush_virt)
+ .globl cache_flush_virt
.proc 1
FTYPE(cache_flush_virt)
-_C_LABEL(cache_flush_virt):
+cache_flush_virt:
brz,pn %o1, 2f ! What? nothing to clear?
add %o0, %o1, %o2
mov 0x1ff, %o3
*/
.align 8
- .globl _C_LABEL(cache_flush_phys)
+ .globl cache_flush_phys
.proc 1
FTYPE(cache_flush_phys)
-_C_LABEL(cache_flush_phys):
+cache_flush_phys:
#ifdef DEBUG
tst %o2 ! Want to clear E$?
tnz 1 ! Error!
* work out.
*/
.section .rodata
- .globl _C_LABEL(sigcode)
-_C_LABEL(sigcode):
+ .globl sigcode
+sigcode:
/*
* XXX the `save' and `restore' below are unnecessary: should
* replace with simple arithmetic on %sp
restore %g0, SYS_sigreturn, %g1 ! get registers back & set syscall #
add %sp, BIAS + 128 + 16, %o0 ! compute scp
! andn %o0, 0x0f, %o0
- .globl _C_LABEL(sigcoderet)
-_C_LABEL(sigcoderet):
+ .globl sigcoderet
+sigcoderet:
t ST_SYSCALL ! sigreturn(scp)
! sigreturn does not return unless it fails
mov SYS_exit, %g1 ! exit(errno)
t ST_SYSCALL
- .globl _C_LABEL(esigcode)
-_C_LABEL(esigcode):
+ .globl esigcode
+esigcode:
- .globl _C_LABEL(sigfill)
-_C_LABEL(sigfill):
+ .globl sigfill
+sigfill:
unimp
-_C_LABEL(esigfill):
+esigfill:
- .globl _C_LABEL(sigfillsiz)
-_C_LABEL(sigfillsiz):
- .word _C_LABEL(esigfill) - _C_LABEL(sigfill)
+ .globl sigfillsiz
+sigfillsiz:
+ .word esigfill - sigfill
.text
#ifdef GPROF
.globl _mcount
#define ENTRY(x) \
- .globl _C_LABEL(x); _C_LABEL(x): ; \
+ .globl x; x: ; \
.data; \
.align 8; \
0: .uaword 0; .uaword 0; \
or %o0, %lo(0b), %o0; \
restore
#else /* GPROF */
-#define ENTRY(x) .globl _C_LABEL(x); _C_LABEL(x):
+#define ENTRY(x) .globl x; x:
#endif /* GPROF */
-#define ALTENTRY(x) .globl _C_LABEL(x); _C_LABEL(x):
+#define ALTENTRY(x) .globl x; x:
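
The ENTRY/ALTENTRY hunk above is the pattern behind the whole diff: on OpenBSD's ELF targets _C_LABEL() has, as far as the asm.h definitions go, long been an identity macro, only ever adding the leading underscore that a.out symbol names needed, so spelling the symbol directly produces identical assembler output. A stand-alone sketch; example_sym is a made-up name:

/* What the dropped wrapper expands to on ELF (hypothetical stand-in): */
#define _C_LABEL(x)	x
/*
 * so after preprocessing both spellings emit the same assembler text:
 *	.globl _C_LABEL(example_sym); _C_LABEL(example_sym):
 *	.globl example_sym;           example_sym:
 */
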
/*
* getfp() - get stack frame pointer
#ifdef DIAGNOSTIC
4:
sethi %hi(5f), %o0
- call _C_LABEL(panic)
+ call panic
or %lo(5f), %o0, %o0
.data
5:
* zero so it is safe to have interrupts going here.)
*/
ldx [%i1 + P_VMSPACE], %o3 ! vm = newproc->p_vmspace;
- sethi %hi(_C_LABEL(kernel_pmap_)), %o1
+ sethi %hi(kernel_pmap_), %o1
mov CTX_SECONDARY, %l5 ! Recycle %l5
ldx [%o3 + VM_PMAP], %o2 ! if (vm->vm_pmap != kernel_pmap_)
- or %o1, %lo(_C_LABEL(kernel_pmap_)), %o1
+ or %o1, %lo(kernel_pmap_), %o1
cmp %o2, %o1
bz,pn %xcc, Lsw_havectx ! Don't replace kernel context!
ld [%o2 + PM_CTX], %o0
nop
/* p does not have a context: call ctx_alloc to get one */
- call _C_LABEL(ctx_alloc) ! ctx_alloc(&vm->vm_pmap);
+ call ctx_alloc ! ctx_alloc(&vm->vm_pmap);
mov %o2, %o0
set DEMAP_CTX_SECONDARY, %o1 ! This context has been recycled
ENTRY(proc_trampoline)
#ifdef MULTIPROCESSOR
save %sp, -CC64FSZ, %sp
- call _C_LABEL(proc_trampoline_mp)
+ call proc_trampoline_mp
nop
restore
#endif
mov %o2, %o4
! %o0 = addr, %o1 = asi, %o4 = (1,2,4)
GET_CPCB(%o2) ! cpcb->pcb_onfault = Lfsprobe;
- set _C_LABEL(Lfsprobe), %o5
+ set Lfsprobe, %o5
stx %o5, [%o2 + PCB_ONFAULT]
or %o0, 0x9, %o3 ! if (PHYS_ASI(asi)) {
sub %o3, 0x1d, %o3
/*
* Fault handler for probeget
*/
- .globl _C_LABEL(Lfsprobe)
-_C_LABEL(Lfsprobe):
+ .globl Lfsprobe
+Lfsprobe:
stx %g0, [%o2 + PCB_ONFAULT]! error in r/w, clear pcb_onfault
mov -1, %o1
wr %g0, ASI_PRIMARY_NOFAULT, %asi ! Restore default ASI
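
For context on the probeget hunks: pcb_onfault is the usual BSD recover-instead-of-panic hook. probeget points it at Lfsprobe before touching the address; if the access faults, the trap handler resumes at Lfsprobe, which clears the hook and returns -1. The C below only mirrors that control-flow shape with setjmp (it does not actually catch faults); probe_read and onfault_env are invented names:

#include <setjmp.h>

static jmp_buf onfault_env;		/* stands in for pcb_onfault */

long
probe_read(volatile long *addr)
{
	if (setjmp(onfault_env))	/* a fault handler would longjmp here */
		return -1;		/* the same -1 Lfsprobe hands back */
	return *addr;			/* the access that may fault */
}
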
.data
_ALIGN
- .globl _C_LABEL(cecclast), _C_LABEL(ceccerrs)
-_C_LABEL(cecclast):
+ .globl cecclast, ceccerrs
+cecclast:
.xword 0
-_C_LABEL(ceccerrs):
+ceccerrs:
.word 0
_ALIGN
.text
#define MICROPERSEC (1000000)
.data
.align 16
- .globl _C_LABEL(cpu_clockrate)
-_C_LABEL(cpu_clockrate):
+ .globl cpu_clockrate
+cpu_clockrate:
!! Pretend we have a 200MHz clock -- cpu_attach will fix this
.xword 200000000
!! Here we'll store cpu_clockrate/1000000 so we can calculate usecs
*/
ENTRY(delay) ! %o0 = n
rdpr %tick, %o1 ! Take timer snapshot
- sethi %hi(_C_LABEL(cpu_clockrate)), %o2
+ sethi %hi(cpu_clockrate), %o2
sethi %hi(MICROPERSEC), %o3
- ldx [%o2 + %lo(_C_LABEL(cpu_clockrate) + 8)], %o4 ! Get scale factor
+ ldx [%o2 + %lo(cpu_clockrate + 8)], %o4 ! Get scale factor
brnz,pt %o4, 0f
or %o3, %lo(MICROPERSEC), %o3
!! Calculate ticks/usec
- ldx [%o2 + %lo(_C_LABEL(cpu_clockrate))], %o4 ! No, we need to calculate it
+ ldx [%o2 + %lo(cpu_clockrate)], %o4 ! No, we need to calculate it
udivx %o4, %o3, %o4
- stx %o4, [%o2 + %lo(_C_LABEL(cpu_clockrate) + 8)] ! Save it so we don't need to divide again
+ stx %o4, [%o2 + %lo(cpu_clockrate + 8)] ! Save it so we don't need to divide again
0:
mulx %o0, %o4, %o0 ! Convert usec -> ticks
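
The cpu_clockrate arithmetic is worth spelling out: the second xword caches cpu_clockrate / MICROPERSEC (ticks per microsecond) the first time delay() needs it, and later calls just multiply. A rough C rendering under that reading; the spin loop itself lies past this excerpt, so it is assumed here, and every name ending in _c is illustrative:

#include <stdint.h>

#define MICROPERSEC	1000000

/* Hz, then cached ticks/usec; 200 MHz is the placeholder from the .data above */
static uint64_t cpu_clockrate_c[2] = { 200000000, 0 };

static void
delay_c(uint64_t usec, uint64_t (*rdtick)(void))
{
	uint64_t start = rdtick();		/* timer snapshot, as in the asm */
	uint64_t ticks;

	if (cpu_clockrate_c[1] == 0)		/* first call: derive the scale */
		cpu_clockrate_c[1] = cpu_clockrate_c[0] / MICROPERSEC;
	ticks = usec * cpu_clockrate_c[1];	/* usec -> ticks */
	while (rdtick() - start < ticks)	/* assumed spin, not shown above */
		continue;
}

Caching the quotient is the point of the "Save it so we don't need to divide again" comment: the divide happens once, every later delay() is a single multiply.
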
.data
_ALIGN
#if defined(DDB) || NKSYMS > 0
- .globl _C_LABEL(esym)
-_C_LABEL(esym):
+ .globl esym
+esym:
.xword 0
- .globl _C_LABEL(ssym)
-_C_LABEL(ssym):
+ .globl ssym
+ssym:
.xword 0
#endif /* defined(DDB) || NKSYMS > 0 */
- .globl _C_LABEL(proc0paddr)
-_C_LABEL(proc0paddr):
- .xword _C_LABEL(u0) ! KVA of proc0 uarea
+ .globl proc0paddr
+proc0paddr:
+ .xword u0 ! KVA of proc0 uarea
#ifdef DEBUG
- .comm _C_LABEL(trapdebug), 4
- .comm _C_LABEL(pmapdebug), 4
+ .comm trapdebug, 4
+ .comm pmapdebug, 4
#endif /* DEBUG */
- .globl _C_LABEL(dlflush_start)
-_C_LABEL(dlflush_start):
+ .globl dlflush_start
+dlflush_start:
.xword dlflush1
.xword dlflush2
.xword dlflush3
-/* $OpenBSD: srt0.s,v 1.6 2006/07/09 19:36:57 miod Exp $ */
+/* $OpenBSD: srt0.s,v 1.7 2022/12/08 01:25:45 guenther Exp $ */
/* $NetBSD: srt0.s,v 1.1 2000/08/20 14:58:42 mrg Exp $ */
/*
.globl _esym
.data
_esym: .word 0 /* end of symbol table */
- .globl _C_LABEL(romp)
+ .globl romp
.align 8
.register %g2, #scratch
.register %g3, #scratch
-_C_LABEL(romp): .xword 0 /* openfirmware entry point */
+romp: .xword 0 /* openfirmware entry point */
/*
* Startup entry
*/
.text
- .globl _start, _C_LABEL(kernel_text)
- _C_LABEL(kernel_text) = _start
+ .globl _start, kernel_text
+ kernel_text = _start
_start:
nop ! For some reason this is needed to fixup the text section
*/
mov %o4, %g7 ! save prom vector pointer
- set _C_LABEL(romp), %g1
+ set romp, %g1
stx %o4, [%g1] ! It's initialized data, I hope
/*
/*
* XXXXXXXX Need to determine what params are passed
*/
- call _C_LABEL(setup)
+ call setup
nop
mov %i1, %o1
- call _C_LABEL(main)
+ call main
mov %i2, %o0
- call _C_LABEL(exit)
+ call exit
nop
- call _C_LABEL(_rtt)
+ call _rtt
nop
/*
* I$ flush. Really simple. Just flush over the whole range.
*/
.align 8
- .globl _C_LABEL(syncicache)
-_C_LABEL(syncicache):
+ .globl syncicache
+syncicache:
dec 4, %o1
flush %o0
- brgz,a,pt %o1, _C_LABEL(syncicache)
+ brgz,a,pt %o1, syncicache
inc 4, %o0
retl
nop
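
syncicache's loop is a do/while in disguise: drop the length by 4, flush the current word, and bump the address in the annulled delay slot only when another pass is taken. Roughly, in C (syncicache_c is a made-up name; the inline asm is only compiled on sparc):

void
syncicache_c(char *addr, int len)
{
	do {
		len -= 4;
#if defined(__sparc__) || defined(__sparc64__)
		__asm volatile("flush %0" : : "r" (addr) : "memory");
#endif
		addr += 4;
	} while (len > 0);
}
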
* and 64-bit cells. The cells we'll allocate off the stack for simplicity.
*/
.align 8
- .globl _C_LABEL(openfirmware)
+ .globl openfirmware
.proc 1
FTYPE(openfirmware)
-_C_LABEL(openfirmware):
+openfirmware:
andcc %sp, 1, %g0
bz,pt %icc, 1f
- sethi %hi(_C_LABEL(romp)), %o1
+ sethi %hi(romp), %o1
- ldx [%o1+%lo(_C_LABEL(romp))], %o4 ! v9 stack, just load the addr and callit
+ ldx [%o1+%lo(romp)], %o4 ! v9 stack, just load the addr and call it
save %sp, -CC64FSZ, %sp
mov %i0, %o0 ! Copy over our parameter
mov %g1, %l1
1: ! v8 -- need to screw with stack & params
save %sp, -CC64FSZ, %sp ! Get a new 64-bit stack frame
add %sp, -BIAS, %sp
- sethi %hi(_C_LABEL(romp)), %o1
+ sethi %hi(romp), %o1
rdpr %pstate, %l0
- ldx [%o1+%lo(_C_LABEL(romp))], %o1 ! Do the actual call
+ ldx [%o1+%lo(romp)], %o1 ! Do the actual call
srl %sp, 0, %sp
mov %i0, %o0
mov %g1, %l1
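
The andcc %sp, 1 test above distinguishes caller stacks because 64-bit (v9) SPARC frames carry the stack BIAS, which I believe is 2047, so a biased %sp is always odd, while 32-bit (v8) frames are word aligned. In C terms (on_v9_stack is a made-up helper):

#define BIAS_C	2047			/* assumed sparc64 stack bias value */

static inline int
on_v9_stack(unsigned long sp)
{
	return (sp & 1) != 0;		/* biased 64-bit frames are odd */
}
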
-/* $OpenBSD: bcopy.S,v 1.4 2013/06/15 19:16:53 miod Exp $ */
+/* $OpenBSD: bcopy.S,v 1.5 2022/12/08 01:25:45 guenther Exp $ */
/* $NetBSD: bcopy.S,v 1.2 2001/07/16 05:50:06 matt Exp $ */
/*-
eor r0, r1, r0
eor r1, r0, r1
eor r0, r1, r0
- b PIC_SYM(_C_LABEL(memmove), PLT)
+ b PIC_SYM(memmove, PLT)
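
The three eor instructions are the classic XOR swap of r0 and r1, turning bcopy's (src, dst, len) into memmove's (dst, src, len) before the tail call; in C the stub amounts to:

#include <string.h>

void
bcopy(const void *src, void *dst, size_t len)
{
	memmove(dst, src, len);		/* arguments swapped into memmove order */
}
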
-/* $OpenBSD: bzero.S,v 1.3 2008/06/26 05:42:20 ray Exp $ */
+/* $OpenBSD: bzero.S,v 1.4 2022/12/08 01:25:46 guenther Exp $ */
/* $NetBSD: bzero.S,v 1.1 2000/12/29 20:51:57 bjh21 Exp $ */
/*-
ENTRY(bzero)
mov r2, r1
mov r1, #0
- b _C_LABEL(memset)
+ b memset
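
In the same spirit, the bzero stub above shuffles (b, len) into memset's (b, 0, len):

#include <string.h>

void
bzero(void *b, size_t len)
{
	memset(b, 0, len);
}
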
-/* $OpenBSD: divsi3.S,v 1.5 2017/10/26 15:23:59 jsg Exp $ */
+/* $OpenBSD: divsi3.S,v 1.6 2022/12/08 01:25:46 guenther Exp $ */
/* $NetBSD: divsi3.S,v 1.2 2001/11/13 20:06:40 chris Exp $ */
/*
L_overflow:
#if !defined(_KERNEL) && !defined(_STANDALONE)
mov r0, #8 /* SIGFPE */
- bl PIC_SYM(_C_LABEL(raise), PLT) /* raise it */
+ bl PIC_SYM(raise, PLT) /* raise it */
mov r0, #0
#else
/* XXX should cause a fatal error */
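
On the userland path the error case just delivers SIGFPE through raise() and then hands back 0 as the quotient; the literal #8 is SIGFPE's signal number. Roughly (divide_error is an invented name):

#include <signal.h>

static int
divide_error(void)
{
	raise(SIGFPE);		/* matches "mov r0, #8" above */
	return 0;		/* quotient returned if a handler comes back */
}
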
-/* $OpenBSD: htonl.S,v 1.3 2014/12/30 08:12:52 jsg Exp $ */
+/* $OpenBSD: htonl.S,v 1.4 2022/12/08 01:25:46 guenther Exp $ */
/* $NetBSD: byte_swap_4.S,v 1.1 2000/12/29 20:51:57 bjh21 Exp $ */
/*-
#include <machine/asm.h>
-_ENTRY(_C_LABEL(htonl))
-_ENTRY(_C_LABEL(ntohl))
-_ENTRY(_C_LABEL(bswap32))
+_ENTRY(htonl)
+_ENTRY(ntohl)
+_ENTRY(bswap32)
_PROF_PROLOGUE
eor r1, r0, r0, ror #16
bic r1, r1, #0x00FF0000
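
htonl, ntohl and bswap32 share one body; the eor/bic pair shown here opens the standard ARM byte-reversal idiom (the rest of the routine falls outside this excerpt). What the routine computes, in portable C (bswap32_c is an illustrative name):

#include <stdint.h>

static uint32_t
bswap32_c(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0x0000ff00) |
	    ((x << 8) & 0x00ff0000) | (x << 24);
}

The htons/ntohs/bswap16 stub that follows is the analogous 16-bit swap.
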
-/* $OpenBSD: htons.S,v 1.3 2014/12/30 08:12:52 jsg Exp $ */
+/* $OpenBSD: htons.S,v 1.4 2022/12/08 01:25:46 guenther Exp $ */
/* $NetBSD: byte_swap_2.S,v 1.1.20.1 2002/07/02 06:50:59 lukem Exp $ */
/*-
#include <machine/asm.h>
-_ENTRY(_C_LABEL(htons))
-_ENTRY(_C_LABEL(ntohs))
-_ENTRY(_C_LABEL(bswap16))
+_ENTRY(htons)
+_ENTRY(ntohs)
+_ENTRY(bswap16)
_PROF_PROLOGUE
and r1, r0, #0xff
mov r0, r0, lsr #8
-/* $OpenBSD: ffs.S,v 1.5 2019/11/05 08:16:43 mpi Exp $ */
+/* $OpenBSD: ffs.S,v 1.6 2022/12/08 01:25:46 guenther Exp $ */
/* $NetBSD: ffs.S,v 1.2 1998/11/26 19:32:10 eeh Exp $ */
/*
*/
ENTRY(ffs)
#ifdef PIC
- PICCY_SET(_C_LABEL(__ffstab), %o2, %o5)
+ PICCY_SET(__ffstab, %o2, %o5)
#else
- set _C_LABEL(__ffstab), %o2
+ set __ffstab, %o2
#endif
andcc %o0, 0xff, %o1 ! get low byte
be,a 1f ! try again if 0
add %o0, 24, %o0
END(ffs)
- .globl _C_LABEL(__ffstab)
-_C_LABEL(__ffstab):
+ .globl __ffstab
+__ffstab:
.byte -24,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1 /* 00-0f */
.byte 5,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1 /* 10-1f */
.byte 6,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1 /* 20-2f */
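
__ffstab maps each byte value to the 1-based position of its lowest set bit, so ffs() only needs to find the lowest nonzero byte and add 0, 8, 16 or 24; the -24 entry for zero appears to be what makes ffs(0) come out as 0 once the final add of 24 happens. A table-driven sketch in C that regenerates an equivalent table rather than copying this one (all names are made up):

#include <stdio.h>

static signed char ffstab_c[256];	/* ffstab_c[0] stays 0; handled below */

static void
ffstab_init(void)
{
	int i, b;

	for (i = 1; i < 256; i++)
		for (b = 0; b < 8; b++)
			if (i & (1 << b)) {
				ffstab_c[i] = b + 1;	/* 1-based bit position */
				break;
			}
}

/* byte-at-a-time ffs(), same structure as the assembly above */
static int
ffs_c(unsigned int v)
{
	int shift;

	for (shift = 0; shift < 32; shift += 8, v >>= 8)
		if (v & 0xff)
			return ffstab_c[v & 0xff] + shift;
	return 0;			/* ffs(0) == 0 */
}

int
main(void)
{
	ffstab_init();
	printf("%d %d %d\n", ffs_c(0), ffs_c(1), ffs_c(0x80000000U));	/* 0 1 32 */
	return 0;
}
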