-/* $OpenBSD: acpi_machdep.c,v 1.103 2022/02/21 11:03:39 mpi Exp $ */
+/* $OpenBSD: acpi_machdep.c,v 1.104 2022/08/07 23:56:06 guenther Exp $ */
/*
* Copyright (c) 2005 Thorsten Lockert <tholo@sigmasoft.com>
*
*/
if (acpi_savecpu()) {
/* Suspend path */
- KASSERT((curcpu()->ci_flags & CPUF_USERXSTATE) == 0);
+ KASSERT((curcpu()->ci_pflags & CPUPF_USERXSTATE) == 0);
wbinvd();
#ifdef HIBERNATE
ci->ci_idepth = 0;
ci->ci_handled_intr_level = IPL_NONE;
- ci->ci_flags &= ~CPUF_PRESENT;
+ atomic_clearbits_int(&ci->ci_flags, CPUF_PRESENT);
cpu_start_secondary(ci);
}
cpu_boot_secondary_processors();
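The pattern that repeats through this diff: plain read-modify-write on the shared ci_flags word is replaced by the atomic_setbits_int()/atomic_clearbits_int() helpers, because other CPUs read and modify ci_flags during boot, resume and halt. As a rough sketch of what those helpers amount to on amd64 (the real definitions live in the machine-dependent atomic.h; this is illustrative, not the kernel's exact code):

	static inline void
	atomic_setbits_int(volatile unsigned int *p, unsigned int bits)
	{
		/* lock-prefixed OR: the whole read-modify-write is one atomic op */
		__asm volatile("lock; orl %1,%0" : "+m" (*p) : "ir" (bits) : "cc");
	}

	static inline void
	atomic_clearbits_int(volatile unsigned int *p, unsigned int bits)
	{
		/* lock-prefixed AND with the complement clears the requested bits */
		__asm volatile("lock; andl %1,%0" : "+m" (*p) : "ir" (~bits) : "cc");
	}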
-/* $OpenBSD: cpu.c,v 1.156 2022/04/26 08:35:30 claudio Exp $ */
+/* $OpenBSD: cpu.c,v 1.157 2022/08/07 23:56:06 guenther Exp $ */
/* $NetBSD: cpu.c,v 1.1 2003/04/26 18:39:26 fvdl Exp $ */
/*-
switch (caa->cpu_role) {
case CPU_ROLE_SP:
printf("(uniprocessor)\n");
- ci->ci_flags |= CPUF_PRESENT | CPUF_SP | CPUF_PRIMARY;
+ atomic_setbits_int(&ci->ci_flags,
+ CPUF_PRESENT | CPUF_SP | CPUF_PRIMARY);
cpu_intr_init(ci);
#ifndef SMALL_KERNEL
cpu_ucode_apply(ci);
case CPU_ROLE_BP:
printf("apid %d (boot processor)\n", caa->cpu_apicid);
- ci->ci_flags |= CPUF_PRESENT | CPUF_BSP | CPUF_PRIMARY;
+ atomic_setbits_int(&ci->ci_flags,
+ CPUF_PRESENT | CPUF_BSP | CPUF_PRIMARY);
cpu_intr_init(ci);
identifycpu(ci);
#ifdef MTRR
#endif /* NVMM > 0 */
#ifdef MULTIPROCESSOR
- ci->ci_flags |= CPUF_RUNNING;
+ atomic_setbits_int(&ci->ci_flags, CPUF_RUNNING);
/*
* Big hammer: flush all TLB entries, including ones from PTEs
* with the G bit set. This should only be necessary if TLB
int i;
u_long s;
- ci->ci_flags |= CPUF_AP;
+ atomic_setbits_int(&ci->ci_flags, CPUF_AP);
pmap_kenter_pa(MP_TRAMPOLINE, MP_TRAMPOLINE, PROT_READ | PROT_EXEC);
pmap_kenter_pa(MP_TRAMP_DATA, MP_TRAMP_DATA, PROT_READ | PROT_WRITE);
* off at this point.
*/
wbinvd();
- ci->ci_flags |= CPUF_PRESENT;
+ atomic_setbits_int(&ci->ci_flags, CPUF_PRESENT);
ci->ci_tsc_skew = 0; /* reset on resume */
tsc_sync_ap(ci);
-/* $OpenBSD: fpu.c,v 1.42 2020/11/30 02:56:42 jsg Exp $ */
+/* $OpenBSD: fpu.c,v 1.43 2022/08/07 23:56:06 guenther Exp $ */
/* $NetBSD: fpu.c,v 1.1 2003/04/26 18:39:28 fvdl Exp $ */
/*-
u_int32_t mxcsr, statbits;
u_int16_t cw;
- KASSERT(ci->ci_flags & CPUF_USERXSTATE);
- ci->ci_flags &= ~CPUF_USERXSTATE;
+ KASSERT(ci->ci_pflags & CPUPF_USERXSTATE);
+ ci->ci_pflags &= ~CPUPF_USERXSTATE;
fpusavereset(sfp);
if (type == T_XMM) {
struct cpu_info *ci = curcpu();
/* save curproc's FPU state if we haven't already */
- if (ci->ci_flags & CPUF_USERXSTATE) {
- ci->ci_flags &= ~CPUF_USERXSTATE;
+ if (ci->ci_pflags & CPUPF_USERXSTATE) {
+ ci->ci_pflags &= ~CPUPF_USERXSTATE;
fpusavereset(&curproc->p_addr->u_pcb.pcb_savefpu);
} else {
fpureset();
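The fpu.c, machdep.c and vmm.c hunks all repeat the same move: CPUPF_USERXSTATE set means the CPU's FPU registers currently hold curproc's extended state, so before anything else may clobber them that state is spilled to the PCB and the bit is cleared. A hedged sketch of the recurring step as a standalone helper (fpu_flush_to_pcb() is a hypothetical name, not something this diff adds):

	/* Hypothetical helper showing the "spill user xstate to the PCB" pattern */
	static void
	fpu_flush_to_pcb(struct proc *p)
	{
		struct cpu_info *ci = curcpu();

		if (ci->ci_pflags & CPUPF_USERXSTATE) {
			ci->ci_pflags &= ~CPUPF_USERXSTATE;	/* PCB copy becomes authoritative */
			fpusavereset(&p->p_addr->u_pcb.pcb_savefpu);
		}
	}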
-# $OpenBSD: genassym.cf,v 1.42 2021/06/18 06:17:28 guenther Exp $
+# $OpenBSD: genassym.cf,v 1.43 2022/08/07 23:56:06 guenther Exp $
# Written by Artur Grabowski art@openbsd.org, Public Domain
include <sys/param.h>
endif
member CPU_INFO_GDT ci_gdt
member CPU_INFO_TSS ci_tss
-member CPU_INFO_FLAGS ci_flags
+member CPU_INFO_PFLAGS ci_pflags
member CPU_INFO_KERN_CR3 ci_kern_cr3
member CPU_INFO_USER_CR3 ci_user_cr3
member CPU_INFO_KERN_RSP ci_kern_rsp
member CPU_INFO_MDS_BUF ci_mds_buf
member CPU_INFO_MDS_TMP ci_mds_tmp
-export CPUF_USERSEGS
-export CPUF_USERXSTATE
+export CPUPF_USERSEGS
+export CPUPF_USERXSTATE
struct intrsource
member is_recurse
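genassym.cf feeds the assym.h generator: each "member" line becomes a struct-offset define and each "export" line becomes a constant define, which is what lets the locore.S hunks below reference CPUVAR(PFLAGS) and the CPUPF_* bits from assembly. Illustrative only (the offset below is a made-up placeholder; the real one is computed from struct cpu_info at build time):

	/* assym.h excerpt, hypothetical values */
	#define CPU_INFO_PFLAGS		0x1f8	/* placeholder offset of ci_pflags */
	#define CPUPF_USERSEGS		0x01
	#define CPUPF_USERXSTATE	0x02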
-/* $OpenBSD: identcpu.c,v 1.125 2022/07/12 04:46:00 jsg Exp $ */
+/* $OpenBSD: identcpu.c,v 1.126 2022/08/07 23:56:06 guenther Exp $ */
/* $NetBSD: identcpu.c,v 1.1 2003/04/26 18:39:28 fvdl Exp $ */
/*
if (!strcmp(cpu_vendor, "GenuineIntel")) {
if ((ci->ci_family == 0x0f && ci->ci_model >= 0x03) ||
(ci->ci_family == 0x06 && ci->ci_model >= 0x0e)) {
- ci->ci_flags |= CPUF_CONST_TSC;
+ atomic_setbits_int(&ci->ci_flags, CPUF_CONST_TSC);
}
} else if (!strcmp(cpu_vendor, "CentaurHauls")) {
/* VIA */
if (ci->ci_model >= 0x0f) {
- ci->ci_flags |= CPUF_CONST_TSC;
+ atomic_setbits_int(&ci->ci_flags, CPUF_CONST_TSC);
}
} else if (!strcmp(cpu_vendor, "AuthenticAMD")) {
if (cpu_apmi_edx & CPUIDEDX_ITSC) {
- /* Invariant TSC indicates constant TSC on
- * AMD.
- */
- ci->ci_flags |= CPUF_CONST_TSC;
+ /* Invariant TSC indicates constant TSC on AMD */
+ atomic_setbits_int(&ci->ci_flags, CPUF_CONST_TSC);
}
}
/* Check if it's an invariant TSC */
if (cpu_apmi_edx & CPUIDEDX_ITSC)
- ci->ci_flags |= CPUF_INVAR_TSC;
+ atomic_setbits_int(&ci->ci_flags, CPUF_INVAR_TSC);
tsc_identify(ci);
}
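The CONST_TSC/INVAR_TSC classification above is driven by CPUID: CPUIDEDX_ITSC is the invariant-TSC bit in edx of extended leaf 0x80000007. A minimal userland-flavoured sketch of the same probe (a robust version would first confirm via leaf 0x80000000 that leaf 0x80000007 exists):

	#include <stdint.h>

	/* Sketch: invariant TSC is reported in CPUID.80000007H:EDX bit 8 */
	static int
	has_invariant_tsc(void)
	{
		uint32_t eax, ebx, ecx, edx;

		__asm volatile("cpuid"
		    : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
		    : "a" (0x80000007U));
		return (edx & (1U << 8)) != 0;
	}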
-/* $OpenBSD: ipifuncs.c,v 1.36 2021/08/31 17:40:59 dv Exp $ */
+/* $OpenBSD: ipifuncs.c,v 1.37 2022/08/07 23:56:06 guenther Exp $ */
/* $NetBSD: ipifuncs.c,v 1.1 2003/04/26 18:39:28 fvdl Exp $ */
/*-
intr_disable();
lapic_disable();
wbinvd();
- ci->ci_flags &= ~CPUF_RUNNING;
+ atomic_clearbits_int(&ci->ci_flags, CPUF_RUNNING);
wbinvd();
for(;;) {
-/* $OpenBSD: locore.S,v 1.127 2021/12/31 10:40:30 jsg Exp $ */
+/* $OpenBSD: locore.S,v 1.128 2022/08/07 23:56:06 guenther Exp $ */
/* $NetBSD: locore.S,v 1.13 2004/03/25 18:33:17 drochner Exp $ */
/*
* If the old proc ran in userspace then save the
* floating-point/"extended state" registers
*/
- testl $CPUF_USERXSTATE,CPUVAR(FLAGS)
+ testl $CPUPF_USERXSTATE,CPUVAR(PFLAGS)
jz .Lxstate_reset
movq %r13, %rdi
CODEPATCH_START
fxrstor64 (%rdi)
CODEPATCH_END(CPTAG_XRSTOR)
- andl $~CPUF_USERXSTATE,CPUVAR(FLAGS)
+ andl $~CPUPF_USERXSTATE,CPUVAR(PFLAGS)
.Lxstate_reset:
/*
* If the segment registers haven't been reset since the old proc
* ran in userspace then reset them now
*/
- testl $CPUF_USERSEGS,CPUVAR(FLAGS)
+ testl $CPUPF_USERSEGS,CPUVAR(PFLAGS)
jz restore_saved
- andl $~CPUF_USERSEGS,CPUVAR(FLAGS)
+ andl $~CPUPF_USERSEGS,CPUVAR(PFLAGS)
/* set %ds, %es, %fs, and %gs to expected value to prevent info leak */
movw $(GSEL(GUDATA_SEL, SEL_UPL)),%ax
jne intr_user_exit_post_ast
/* Restore FPU/"extended CPU state" if it's not already in the CPU */
- testl $CPUF_USERXSTATE,CPUVAR(FLAGS)
+ testl $CPUPF_USERXSTATE,CPUVAR(PFLAGS)
jz .Lsyscall_restore_xstate
/* Restore FS.base if it's not already in the CPU */
- testl $CPUF_USERSEGS,CPUVAR(FLAGS)
+ testl $CPUPF_USERSEGS,CPUVAR(PFLAGS)
jz .Lsyscall_restore_fsbase
.Lsyscall_restore_registers:
_ALIGN_TRAPS
/* in this case, need FS.base but not xstate, rarely happens */
.Lsyscall_restore_fsbase: /* CPU doesn't have curproc's FS.base */
- orl $CPUF_USERSEGS,CPUVAR(FLAGS)
+ orl $CPUPF_USERSEGS,CPUVAR(PFLAGS)
movq CPUVAR(CURPCB),%rdi
jmp .Lsyscall_restore_fsbase_real
_ALIGN_TRAPS
.Lsyscall_restore_xstate: /* CPU doesn't have curproc's xstate */
- orl $(CPUF_USERXSTATE|CPUF_USERSEGS),CPUVAR(FLAGS)
+ orl $(CPUPF_USERXSTATE|CPUPF_USERSEGS),CPUVAR(PFLAGS)
movq CPUVAR(CURPCB),%rdi
movq xsave_mask(%rip),%rdx
movl %edx,%eax
intr_user_exit_post_ast:
/* Restore FPU/"extended CPU state" if it's not already in the CPU */
- testl $CPUF_USERXSTATE,CPUVAR(FLAGS)
+ testl $CPUPF_USERXSTATE,CPUVAR(PFLAGS)
jz .Lintr_restore_xstate
#ifdef DIAGNOSTIC
#endif /* DIAGNOSTIC */
/* Restore FS.base if it's not already in the CPU */
- testl $CPUF_USERSEGS,CPUVAR(FLAGS)
+ testl $CPUPF_USERSEGS,CPUVAR(PFLAGS)
jz .Lintr_restore_fsbase
.Lintr_restore_registers:
.text
_ALIGN_TRAPS
.Lintr_restore_xstate: /* CPU doesn't have curproc's xstate */
- orl $CPUF_USERXSTATE,CPUVAR(FLAGS)
+ orl $CPUPF_USERXSTATE,CPUVAR(PFLAGS)
movq CPUVAR(CURPCB),%rdi
#if PCB_SAVEFPU != 0
addq $PCB_SAVEFPU,%rdi
testl %eax,%eax
jnz .Lintr_xrstor_faulted
.Lintr_restore_fsbase: /* CPU doesn't have curproc's FS.base */
- orl $CPUF_USERSEGS,CPUVAR(FLAGS)
+ orl $CPUPF_USERSEGS,CPUVAR(PFLAGS)
movq CPUVAR(CURPCB),%rdx
movq PCB_FSBASE(%rdx),%rdx
movl %edx,%eax
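Worth noting about the locore.S hunks: the testl/orl/andl on CPUVAR(PFLAGS) stay un-locked, and that is the point of the split. ci_pflags is marked [o], touched only by the CPU that owns it, so ordinary read-modify-write is safe there, while the bits that remain in ci_flags are visible to other CPUs and now go through the atomic helpers. Schematically, in C (not literal kernel code):

	/* Sketch of the ownership rule this diff introduces */
	static void
	flag_usage_example(struct cpu_info *ci)
	{
		/* ci_pflags is [o]: only the owning CPU reads/writes it */
		ci->ci_pflags |= CPUPF_USERSEGS;

		/* ci_flags is [a]: other CPUs read/modify it, use the atomic helpers */
		atomic_setbits_int(&ci->ci_flags, CPUF_RUNNING);
	}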
-/* $OpenBSD: machdep.c,v 1.278 2022/06/29 07:51:54 kettenis Exp $ */
+/* $OpenBSD: machdep.c,v 1.279 2022/08/07 23:56:06 guenther Exp $ */
/* $NetBSD: machdep.c,v 1.3 2003/05/07 22:58:18 fvdl Exp $ */
/*-
sss = (sizeof(ksc) + 15) & ~15;
/* Save FPU state to PCB if necessary, then copy it out */
- if (curcpu()->ci_flags & CPUF_USERXSTATE) {
- curcpu()->ci_flags &= ~CPUF_USERXSTATE;
+ if (curcpu()->ci_pflags & CPUPF_USERXSTATE) {
+ curcpu()->ci_pflags &= ~CPUPF_USERXSTATE;
fpusavereset(&p->p_addr->u_pcb.pcb_savefpu);
}
sp -= fpu_save_len;
tf->tf_ss = GSEL(GUDATA_SEL, SEL_UPL);
/* The reset state _is_ the userspace state for this thread now */
- curcpu()->ci_flags |= CPUF_USERXSTATE;
+ curcpu()->ci_pflags |= CPUPF_USERXSTATE;
return 0;
}
return (EINVAL);
/* Current state is obsolete; toss it and force a reload */
- if (curcpu()->ci_flags & CPUF_USERXSTATE) {
- curcpu()->ci_flags &= ~CPUF_USERXSTATE;
+ if (curcpu()->ci_pflags & CPUPF_USERXSTATE) {
+ curcpu()->ci_pflags &= ~CPUPF_USERXSTATE;
fpureset();
}
* This operates like the cpu_switchto() sequence: if we
* haven't reset %[defg]s already, do so now.
*/
- if (curcpu()->ci_flags & CPUF_USERSEGS) {
- curcpu()->ci_flags &= ~CPUF_USERSEGS;
+ if (curcpu()->ci_pflags & CPUPF_USERSEGS) {
+ curcpu()->ci_pflags &= ~CPUPF_USERSEGS;
__asm volatile(
"movw %%ax,%%ds\n\t"
"movw %%ax,%%es\n\t"
memcpy(&p->p_addr->u_pcb.pcb_savefpu,
&proc0.p_addr->u_pcb.pcb_savefpu, fpu_save_len);
- if (curcpu()->ci_flags & CPUF_USERXSTATE) {
+ if (curcpu()->ci_pflags & CPUPF_USERXSTATE) {
/* state in CPU is obsolete; reset it */
fpureset();
} else {
/* the reset state _is_ the userspace state now */
- curcpu()->ci_flags |= CPUF_USERXSTATE;
+ curcpu()->ci_pflags |= CPUPF_USERXSTATE;
}
/* To reset all registers we have to return via iretq */
-/* $OpenBSD: vm_machdep.c,v 1.45 2022/02/05 09:37:06 kettenis Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.46 2022/08/07 23:56:06 guenther Exp $ */
/* $NetBSD: vm_machdep.c,v 1.1 2003/04/26 18:39:33 fvdl Exp $ */
/*-
struct switchframe *sf;
/* Save the fpu h/w state to p1's pcb so that we can copy it. */
- if (p1 != &proc0 && (ci->ci_flags & CPUF_USERXSTATE))
+ if (p1 != &proc0 && (ci->ci_pflags & CPUPF_USERXSTATE))
fpusave(&pcb1->pcb_savefpu);
p2->p_md.md_flags = p1->p_md.md_flags;
-/* $OpenBSD: vmm.c,v 1.318 2022/07/12 04:52:38 jsg Exp $ */
+/* $OpenBSD: vmm.c,v 1.319 2022/08/07 23:56:06 guenther Exp $ */
/*
* Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org>
*
}
}
- ci->ci_flags |= CPUF_VMM;
+ atomic_setbits_int(&ci->ci_flags, CPUF_VMM);
}
/*
lcr4(cr4);
}
- ci->ci_flags &= ~CPUF_VMM;
+ atomic_clearbits_int(&ci->ci_flags, CPUF_VMM);
}
/*
rw_assert_wrlock(&vcpu->vc_lock);
/* save vmm's FPU state if we haven't already */
- if (ci->ci_flags & CPUF_USERXSTATE) {
- ci->ci_flags &= ~CPUF_USERXSTATE;
+ if (ci->ci_pflags & CPUPF_USERXSTATE) {
+ ci->ci_pflags &= ~CPUPF_USERXSTATE;
fpusavereset(&curproc->p_addr->u_pcb.pcb_savefpu);
}
-/* $OpenBSD: cpu.h,v 1.145 2022/07/12 04:46:00 jsg Exp $ */
+/* $OpenBSD: cpu.h,v 1.146 2022/08/07 23:56:06 guenther Exp $ */
/* $NetBSD: cpu.h,v 1.1 2003/04/26 18:39:39 fvdl Exp $ */
/*-
struct svm vcc_svm;
};
+/*
+ * Locks used to protect struct members in this file:
+ * I immutable after creation
+ * a atomic operations
+ * o owned (read/modified only) by this CPU
+ */
struct x86_64_tss;
struct cpu_info {
/*
* the part that is *not* visible begins, so don't put anything
* above it that must be kept hidden from userspace!
*/
- u_int64_t ci_kern_cr3; /* U+K page table */
- u_int64_t ci_scratch; /* for U<-->K transition */
+ u_int64_t ci_kern_cr3; /* [o] U+K page table */
+ u_int64_t ci_scratch; /* [o] for U<-->K transition */
#define ci_PAGEALIGN ci_dev
- struct device *ci_dev;
- struct cpu_info *ci_self;
+ struct device *ci_dev; /* [I] */
+ struct cpu_info *ci_self; /* [I] */
struct schedstate_percpu ci_schedstate; /* scheduler state */
- struct cpu_info *ci_next;
+ struct cpu_info *ci_next; /* [I] */
- struct proc *ci_curproc;
- u_int ci_cpuid;
- u_int ci_apicid;
- u_int ci_acpi_proc_id;
- u_int32_t ci_randseed;
+ struct proc *ci_curproc; /* [o] */
+ u_int ci_cpuid; /* [I] */
+ u_int ci_apicid; /* [I] */
+ u_int ci_acpi_proc_id; /* [I] */
+ u_int32_t ci_randseed; /* [o] */
- u_int64_t ci_kern_rsp; /* kernel-only stack */
- u_int64_t ci_intr_rsp; /* U<-->K trampoline stack */
- u_int64_t ci_user_cr3; /* U-K page table */
+ u_int64_t ci_kern_rsp; /* [o] kernel-only stack */
+ u_int64_t ci_intr_rsp; /* [o] U<-->K trampoline stack */
+ u_int64_t ci_user_cr3; /* [o] U-K page table */
/* bits for mitigating Micro-architectural Data Sampling */
- char ci_mds_tmp[32]; /* 32byte aligned */
- void *ci_mds_buf;
+ char ci_mds_tmp[32]; /* [o] 32byte aligned */
+ void *ci_mds_buf; /* [I] */
struct pmap *ci_proc_pmap; /* last userspace pmap */
- struct pcb *ci_curpcb;
- struct pcb *ci_idle_pcb;
+ struct pcb *ci_curpcb; /* [o] */
+ struct pcb *ci_idle_pcb; /* [o] */
+
+ u_int ci_pflags; /* [o] */
+#define CPUPF_USERSEGS 0x01 /* CPU has curproc's segs and FS.base */
+#define CPUPF_USERXSTATE 0x02 /* CPU has curproc's xsave state */
struct intrsource *ci_isources[MAX_INTR_SOURCES];
u_int64_t ci_ipending;
int ci_mutex_level;
#endif
- volatile u_int ci_flags;
- u_int32_t ci_ipis;
-
- u_int32_t ci_feature_flags;
- u_int32_t ci_feature_eflags;
- u_int32_t ci_feature_sefflags_ebx;
- u_int32_t ci_feature_sefflags_ecx;
- u_int32_t ci_feature_sefflags_edx;
- u_int32_t ci_feature_amdspec_ebx;
- u_int32_t ci_feature_tpmflags;
- u_int32_t ci_pnfeatset;
- u_int32_t ci_efeature_eax;
- u_int32_t ci_efeature_ecx;
- u_int32_t ci_brand[12];
- u_int32_t ci_signature;
- u_int32_t ci_family;
- u_int32_t ci_model;
- u_int32_t ci_cflushsz;
-
- int ci_inatomic;
+ volatile u_int ci_flags; /* [a] */
+ u_int32_t ci_ipis; /* [a] */
+
+ u_int32_t ci_feature_flags; /* [I] */
+ u_int32_t ci_feature_eflags; /* [I] */
+ u_int32_t ci_feature_sefflags_ebx;/* [I] */
+ u_int32_t ci_feature_sefflags_ecx;/* [I] */
+ u_int32_t ci_feature_sefflags_edx;/* [I] */
+ u_int32_t ci_feature_amdspec_ebx; /* [I] */
+ u_int32_t ci_feature_tpmflags; /* [I] */
+ u_int32_t ci_pnfeatset; /* [I] */
+ u_int32_t ci_efeature_eax; /* [I] */
+ u_int32_t ci_efeature_ecx; /* [I] */
+ u_int32_t ci_brand[12]; /* [I] */
+ u_int32_t ci_signature; /* [I] */
+ u_int32_t ci_family; /* [I] */
+ u_int32_t ci_model; /* [I] */
+ u_int32_t ci_cflushsz; /* [I] */
+
+ int ci_inatomic; /* [o] */
#define __HAVE_CPU_TOPOLOGY
- u_int32_t ci_smt_id;
- u_int32_t ci_core_id;
- u_int32_t ci_pkg_id;
+ u_int32_t ci_smt_id; /* [I] */
+ u_int32_t ci_core_id; /* [I] */
+ u_int32_t ci_pkg_id; /* [I] */
- struct cpu_functions *ci_func;
- void (*cpu_setup)(struct cpu_info *);
- void (*ci_info)(struct cpu_info *);
+ struct cpu_functions *ci_func; /* [I] */
+ void (*cpu_setup)(struct cpu_info *); /* [I] */
- struct device *ci_acpicpudev;
- volatile u_int ci_mwait;
+ struct device *ci_acpicpudev; /* [I] */
+ volatile u_int ci_mwait; /* [a] */
#define MWAIT_IN_IDLE 0x1 /* don't need IPI to wake */
#define MWAIT_KEEP_IDLING 0x2 /* cleared by other cpus to wake me */
#define MWAIT_ONLY 0x4 /* set if all idle states use mwait */
int ci_want_resched;
- struct x86_64_tss *ci_tss;
- void *ci_gdt;
+ struct x86_64_tss *ci_tss; /* [o] */
+ void *ci_gdt; /* [o] */
volatile int ci_ddb_paused;
#define CI_DDB_RUNNING 0
#define CPUF_IDENTIFIED 0x0020 /* CPU has been identified */
#define CPUF_CONST_TSC 0x0040 /* CPU has constant TSC */
-#define CPUF_USERSEGS 0x0080 /* CPU has curproc's segs and FS.base */
#define CPUF_INVAR_TSC 0x0100 /* CPU has invariant TSC */
-#define CPUF_USERXSTATE 0x0200 /* CPU has curproc's xsave state */
#define CPUF_SYNCTSC 0x0800 /* Synchronize TSC */
#define CPUF_PRESENT 0x1000 /* CPU is present */
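After the split, everything left in ci_flags is cross-CPU state (AP/BSP role, present/running, identification and TSC properties, VMM mode), which is why it carries the [a] marker and why other CPUs may simply poll it. A hedged sketch of the observer side, in the spirit of the existing secondary-CPU startup code (the real code adds timeouts; CPU_BUSY_CYCLE() is the usual spin-wait hint):

	/* Illustrative only: wait for an AP to set CPUF_RUNNING in its ci_flags */
	static void
	wait_until_running(struct cpu_info *ci)
	{
		while ((ci->ci_flags & CPUF_RUNNING) == 0)
			CPU_BUSY_CYCLE();
	}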