descriptor (pted) pool in the [riscv64] pmap implementation. This
significantly reduces the side-effects of lock contention on the kernel
map lock that is (incorrectly) translated into excessive page daemon
wakeups. This is not a perfect solution but it does lead to significant
speedups [on the HiFive Unmatched].
Improvement and commit message adapted from kettenis' rev 1.110 commit
to arm64/pmap.c. ok phessler@ kettenis@
-/* $OpenBSD: pmap.h,v 1.9 2023/12/11 22:12:53 kettenis Exp $ */
+/* $OpenBSD: pmap.h,v 1.10 2023/12/13 18:26:41 jca Exp $ */
/*
* Copyright (c) 2019-2020 Brian Bamsch <bbamsch@google.com>
struct pv_entry;
/* investigate */
-#define pmap_init_percpu() do { /* nothing */ } while (0)
#define pmap_unuse_final(p) do { /* nothing */ } while (0)
int pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t);
void pmap_postinit(void);
+void pmap_init_percpu(void);
#endif /* _KERNEL && !_LOCORE */
-/* $OpenBSD: pmap.c,v 1.36 2023/11/28 09:10:18 jsg Exp $ */
+/* $OpenBSD: pmap.c,v 1.37 2023/12/13 18:26:41 jca Exp $ */
/*
* Copyright (c) 2019-2020 Brian Bamsch <bbamsch@google.com>
npages * PAGE_SIZE, VM_MAP_INTRSAFE, FALSE, NULL);
}
+/*
+ * Set up the per-CPU caches for the pted and vp pools, reducing
+ * contention on the shared pool locks (see commit message above).
+ */
+void
+pmap_init_percpu(void)
+{
+	pool_cache_init(&pmap_pted_pool);
+	pool_cache_init(&pmap_vp_pool);
+}
+
void
pmap_update(pmap_t pm)
{