-/* $OpenBSD: locore.S,v 1.124 2021/06/01 21:12:11 guenther Exp $ */
+/* $OpenBSD: locore.S,v 1.125 2021/06/18 06:17:28 guenther Exp $ */
/* $NetBSD: locore.S,v 1.13 2004/03/25 18:33:17 drochner Exp $ */
/*
RETGUARD_SETUP_OFF(cpu_switchto, r11, 6*8)
/* don't switch cr3 to the same thing it already was */
- movq %cr3,%rax
- cmpq PCB_CR3(%r13),%rax
- movq PCB_CR3(%r13),%rax /* flags from cmpq unchanged */
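+	/*
+	 * XOR the old and new CR3 values so only the differing bits
+	 * remain, then clear bit 63 (CR3_REUSE_PCID): that bit merely
+	 * selects whether the PCID's cached translations survive the
+	 * load and is not part of the page-table address.
+	 */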
+ movq PCB_CR3(%r13),%rax
+ movq %cr3,%rdi
+ xorq %rax,%rdi
+ btrq $63,%rdi /* ignore CR3_REUSE_PCID */
+ testq %rdi,%rdi
jz .Lsame_cr3
- /* set the new pmap's bit for the cpu */
- testq %rbx,%rbx
- jz .Lno_new_pmap
- lock
- btsq %r9,PM_CPUS(%rbx)
#ifdef DIAGNOSTIC
- jc _C_LABEL(switch_pmcpu_set)
+	/* verify that ci_proc_pmap still points to the previous pmap */
+ cmpq %rcx,CPUVAR(PROC_PMAP)
+ jnz .Lbogus_proc_pmap
#endif
-.Lno_new_pmap:
+ /* record which pmap this CPU should get IPIs for */
+ movq %rbx,CPUVAR(PROC_PMAP)
+.Lset_cr3:
movq %rax,%cr3 /* %rax used below too */
- /* clear the old pmap's bit for the cpu */
- testq %rcx,%rcx
- jz .Lno_old_pmap
- lock
- btrq %r9,PM_CPUS(%rcx)
-.Lno_old_pmap:
-
.Lsame_cr3:
/*
* If we switched from a userland thread with a shallow call stack
popq %rbx
RETGUARD_CHECK(cpu_switchto, r11)
ret
+
+#ifdef DIAGNOSTIC
+ .globl _C_LABEL(panic)
+.Lbogus_proc_pmap:
+ leaq bogus_proc_pmap,%rdi
+ call _C_LABEL(panic)
+ int3 /* NOTREACHED */
+ .pushsection .rodata
+bogus_proc_pmap:
+ .asciz "curcpu->ci_proc_pmap didn't point to previous pmap"
+ .popsection
+#endif /* DIAGNOSTIC */
END(cpu_switchto)
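
For reference, the masked comparison above is equivalent to this C
sketch (hypothetical helper name; assumes CR3_REUSE_PCID is bit 63,
as the btrq implies):

	#include <stdint.h>

	#define CR3_REUSE_PCID	(1ULL << 63)	/* keep PCID-tagged TLB entries */

	/* nonzero iff the two CR3 values name different page tables,
	 * ignoring the reuse/no-flush bit */
	static inline int
	cr3_differs(uint64_t old_cr3, uint64_t new_cr3)
	{
		return ((old_cr3 ^ new_cr3) & ~CR3_REUSE_PCID) != 0;
	}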
ENTRY(cpu_idle_enter)
ret
END(cpu_idle_cycle)
- .globl _C_LABEL(panic)
-
-#ifdef DIAGNOSTIC
-NENTRY(switch_pmcpu_set)
- leaq switch_active(%rip),%rdi
- call _C_LABEL(panic)
-END(switch_pmcpu_set)
- /* NOTREACHED */
-
- .section .rodata
-switch_active:
- .asciz "activate already active pmap"
- .text
-#endif /* DIAGNOSTIC */
/*
* savectx(struct pcb *pcb);
* Update pcb, saving current processor state.
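
The pmap.c half of the change replaces the per-pmap bitmask of CPUs
(pm_cpus, maintained with atomic set/clear on every context switch)
with a per-CPU pointer to the user pmap currently loaded, written
only by the owning CPU. A condensed sketch of the two schemes, with
simplified types (the real structures carry far more state):

	struct pmap {
		uint64_t pm_cpus;		/* before: bit N set while CPU N uses it */
	};
	struct cpu_info {
		struct pmap *ci_proc_pmap;	/* after: user pmap loaded here, or NULL */
	};

	/* before: */ active = (pm->pm_cpus & (1ULL << ci->ci_cpuid)) != 0;
	/* after:  */ active = (pm == ci->ci_proc_pmap);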
-/* $OpenBSD: pmap.c,v 1.144 2021/06/16 09:02:21 mpi Exp $ */
+/* $OpenBSD: pmap.c,v 1.145 2021/06/18 06:17:28 guenther Exp $ */
/* $NetBSD: pmap.c,v 1.3 2003/05/08 18:13:13 thorpej Exp $ */
/*
vaddr_t, struct pg_to_free *);
void pmap_freepage(struct pmap *, struct vm_page *, int, struct pg_to_free *);
#ifdef MULTIPROCESSOR
-static int pmap_is_active(struct pmap *, int);
+static int pmap_is_active(struct pmap *, struct cpu_info *);
#endif
paddr_t pmap_map_ptes(struct pmap *);
struct pv_entry *pmap_remove_pv(struct vm_page *, struct pmap *, vaddr_t);
* of course the kernel is always loaded
*/
-static __inline int
+static inline int
pmap_is_curpmap(struct pmap *pmap)
{
return((pmap == pmap_kernel()) ||
*/
#ifdef MULTIPROCESSOR
-static __inline int
-pmap_is_active(struct pmap *pmap, int cpu_id)
+static inline int
+pmap_is_active(struct pmap *pmap, struct cpu_info *ci)
{
- return (pmap == pmap_kernel() ||
- (pmap->pm_cpus & (1ULL << cpu_id)) != 0);
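+	/*
+	 * the kernel pmap is loaded on every CPU; a user pmap is
+	 * active only on a CPU that currently has it loaded
+	 */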
+ return pmap == pmap_kernel() || pmap == ci->ci_proc_pmap;
}
#endif
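+
+/*
+ * ci_proc_pmap is written only by the owning CPU, in pmap_activate()
+ * and pmap_deactivate(), so unlike the pm_cpus bitmask it replaces
+ * it needs no atomic operations to maintain.
+ */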
-static __inline u_int
+static inline u_int
pmap_pte2flags(u_long pte)
{
return (((pte & PG_U) ? PG_PMAP_REF : 0) |
}
pmap->pm_stats.wired_count = 0;
pmap->pm_stats.resident_count = 1; /* count the PDP allocd below */
- pmap->pm_cpus = 0;
pmap->pm_type = PMAP_TYPE_NORMAL;
/* allocate PDP */
return;
}
- /*
- * reference count is zero, free pmap resources and then free pmap.
- */
-
-#ifdef DIAGNOSTIC
- if (__predict_false(pmap->pm_cpus != 0))
- printf("%s: pmap %p cpus=0x%llx\n", __func__,
- (void *)pmap, pmap->pm_cpus);
-#endif
-
/*
* remove it from global list of pmaps
*/
pcb->pcb_cr3 |= (pmap != pmap_kernel()) ? cr3_pcid_proc :
(PCID_KERN | cr3_reuse_pcid);
- if (p == curproc) {
- lcr3(pcb->pcb_cr3);
+ if (p != curproc)
+ return;
+
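+	/*
+	 * system processes never run in userspace, so there is no
+	 * user pmap to record for TLB shootdown targeting
+	 */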
+ if ((p->p_flag & P_SYSTEM) == 0) {
+ struct cpu_info *self = curcpu();
+
+ /* mark the pmap in use by this processor */
+ self->ci_proc_pmap = pmap;
/* in case we return to userspace without context switching */
if (cpu_meltdown) {
- struct cpu_info *self = curcpu();
-
self->ci_kern_cr3 = pcb->pcb_cr3 | cr3_reuse_pcid;
self->ci_user_cr3 = pmap->pm_pdirpa_intel |
cr3_pcid_proc_intel;
}
-
- /*
- * mark the pmap in use by this processor.
- */
- x86_atomic_setbits_u64(&pmap->pm_cpus, (1ULL << cpu_number()));
}
+
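+	/* load the new CR3 value, including its PCID bits */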
+ lcr3(pcb->pcb_cr3);
}
/*
void
pmap_deactivate(struct proc *p)
{
- struct pmap *pmap = p->p_vmspace->vm_map.pmap;
+ if ((p->p_flag & P_SYSTEM) == 0) {
+ struct cpu_info *self = curcpu();
- /*
- * mark the pmap no longer in use by this processor.
- */
- x86_atomic_clearbits_u64(&pmap->pm_cpus, (1ULL << cpu_number()));
+ /*
+ * mark the pmap no longer in use by this processor.
+ */
+ KASSERT(self->ci_proc_pmap == p->p_vmspace->vm_map.pmap);
+ self->ci_proc_pmap = NULL;
+ }
}
/*
CPU_INFO_FOREACH(cii, ci) {
if (ci == self || !(ci->ci_flags & CPUF_RUNNING))
continue;
- if (!is_kva && !pmap_is_active(pm, ci->ci_cpuid))
+ if (!is_kva && !pmap_is_active(pm, ci))
continue;
mask |= (1ULL << ci->ci_cpuid);
wait++;
CPU_INFO_FOREACH(cii, ci) {
if (ci == self || !(ci->ci_flags & CPUF_RUNNING))
continue;
- if (!is_kva && !pmap_is_active(pm, ci->ci_cpuid))
+ if (!is_kva && !pmap_is_active(pm, ci))
continue;
mask |= (1ULL << ci->ci_cpuid);
wait++;
KASSERT(pm != pmap_kernel());
CPU_INFO_FOREACH(cii, ci) {
- if (ci == self || !pmap_is_active(pm, ci->ci_cpuid) ||
+ if (ci == self || !pmap_is_active(pm, ci) ||
!(ci->ci_flags & CPUF_RUNNING))
continue;
mask |= (1ULL << ci->ci_cpuid);