From: claudio
Date: Thu, 14 Dec 2023 11:58:09 +0000 (+0000)
Subject: Bring default logic to set nkmempages into the 21st century.
X-Git-Url: http://artulab.com/gitweb/?a=commitdiff_plain;h=ca803f33a3107b49c2f500d66d23348002892cfb;p=openbsd

Bring default logic to set nkmempages into the 21st century.

The new logic is:
Up to 1G of physmem, use physical memory / 4;
above 1G, add an extra 16MB per 1G of memory.

Clamp it down depending on the available kernel virtual address space:
- up to and including 512M -> 64MB (macppc, arm, sh)
- between 512M and 1024M -> 128MB (hppa, i386, mips, luna88k)
- over 1024M, clamp to VM_KERNEL_SPACE_SIZE / 4

The result is much more malloc(9) space on 64-bit archs with lots of
memory and large kva space.

Note: amd64 only has 4G of kva and therefore nkmempages is limited to 262144.

As a side effect, NKMEMPAGES_MAX and nkmempages_max are no longer used.

Tested and OK miod@
---

diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
index 104a6bda605..4c1446bb626 100644
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: kern_malloc.c,v 1.149 2023/11/29 11:47:15 claudio Exp $	*/
+/*	$OpenBSD: kern_malloc.c,v 1.150 2023/12/14 11:58:09 claudio Exp $	*/
 /*	$NetBSD: kern_malloc.c,v 1.15.4.2 1996/06/13 17:10:56 cgd Exp $	*/
 
 /*
@@ -87,15 +87,6 @@ struct vm_map *kmem_map = NULL;
 #endif
 u_int	nkmempages = NKMEMPAGES;
 
-/*
- * Defaults for upper-bounds for the kmem_map page count.
- * Can be overridden by kernel config options.
- */
-#ifndef NKMEMPAGES_MAX
-#define	NKMEMPAGES_MAX	NKMEMPAGES_MAX_DEFAULT
-#endif
-u_int	nkmempages_max = NKMEMPAGES_MAX;
-
 struct mutex malloc_mtx = MUTEX_INITIALIZER(IPL_VM);
 struct kmembuckets bucket[MINBUCKET + 16];
 #ifdef KMEMSTATS
@@ -515,16 +506,26 @@ kmeminit_nkmempages(void)
 	/*
 	 * We use the following (simple) formula:
 	 *
-	 * - Starting point is physical memory / 4.
-	 *
-	 * - Clamp it down to nkmempages_max.
+	 * Up to 1G physmem use physical memory / 4,
+	 * above 1G add an extra 16MB per 1G of memory.
 	 *
-	 * - Round it up to nkmempages_min.
+	 * Clamp it down depending on VM_KERNEL_SPACE_SIZE
+	 * - up and including 512M -> 64MB
+	 * - between 512M and 1024M -> 128MB
+	 * - over 1024M clamping to VM_KERNEL_SPACE_SIZE / 4
 	 */
-	npages = physmem / 4;
-
-	if (npages > nkmempages_max)
-		npages = nkmempages_max;
+	npages = MIN(physmem, atop(1024 * 1024 * 1024)) / 4;
+	if (physmem > atop(1024 * 1024 * 1024))
+		npages += (physmem - atop(1024 * 1024 * 1024)) / 64;
+
+	if (VM_KERNEL_SPACE_SIZE <= 512 * 1024 * 1024) {
+		if (npages > atop(64 * 1024 * 1024))
+			npages = atop(64 * 1024 * 1024);
+	} else if (VM_KERNEL_SPACE_SIZE <= 1024 * 1024 * 1024) {
+		if (npages > atop(128 * 1024 * 1024))
+			npages = atop(128 * 1024 * 1024);
+	} else if (npages > atop(VM_KERNEL_SPACE_SIZE) / 4)
+		npages = atop(VM_KERNEL_SPACE_SIZE) / 4;
 
 	nkmempages = npages;
 }
@@ -573,7 +574,8 @@ kmeminit(void)
 		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
 	}
 	for (indx = 0; indx < M_LAST; indx++)
-		kmemstats[indx].ks_limit = nkmempages * PAGE_SIZE * 6 / 10;
+		kmemstats[indx].ks_limit =
+		    (long)nkmempages * PAGE_SIZE * 6 / 10;
 #endif
 }
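
For illustration only, here is a minimal userland sketch of the new calculation; it is
not part of the committed code. It assumes 4K pages (PAGE_SHIFT 12), amd64's 4G of
kernel virtual address space, and a hypothetical 64G machine, and it shows only the
large-kva clamp branch since amd64's kva exceeds 1024M. The formula yields
65536 + 258048 = 323584 pages, which the clamp reduces to 262144, matching the amd64
note above.

/*
 * Illustrative sketch only -- not part of the commit.
 * PAGE_SHIFT, VM_KERNEL_SPACE_SIZE and the 64G physmem value are
 * assumed example numbers.
 */
#include <stdio.h>

#define PAGE_SHIFT		12	/* assume 4K pages */
#define atop(x)			((x) >> PAGE_SHIFT)
#define MIN(a, b)		((a) < (b) ? (a) : (b))
#define GB			(1024ULL * 1024 * 1024)
#define VM_KERNEL_SPACE_SIZE	(4 * GB)	/* amd64: 4G of kva */

int
main(void)
{
	unsigned long long physmem = atop(64 * GB);	/* example: 64G of RAM */
	unsigned long long npages;

	/* physical memory / 4 for the first 1G ... */
	npages = MIN(physmem, atop(GB)) / 4;
	/* ... plus 16MB (4096 pages at 4K) per additional 1G */
	if (physmem > atop(GB))
		npages += (physmem - atop(GB)) / 64;

	/* kva is over 1024M here, so clamp to VM_KERNEL_SPACE_SIZE / 4 */
	if (npages > atop(VM_KERNEL_SPACE_SIZE) / 4)
		npages = atop(VM_KERNEL_SPACE_SIZE) / 4;

	/* prints 262144: 65536 + 258048 = 323584, clamped to 262144 */
	printf("nkmempages = %llu\n", npages);
	return 0;
}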