The contention on uvm_lock_fpageq() is now reduced by using per-CPU caches,
so we want to put pages in the cache rather than give them back directly to
the allocator.
ok kettenis@
-/* $OpenBSD: uvm_amap.c,v 1.93 2024/04/16 08:53:02 mpi Exp $ */
+/* $OpenBSD: uvm_amap.c,v 1.94 2024/04/17 13:17:31 mpi Exp $ */
/* $NetBSD: uvm_amap.c,v 1.27 2000/11/25 06:27:59 chs Exp $ */
/*
int slot;
struct vm_anon *anon;
struct vm_amap_chunk *chunk;
- struct pglist pgl;
KASSERT(rw_write_held(amap->am_lock));
KASSERT(amap->am_ref == 0);
return;
}
- TAILQ_INIT(&pgl);
amap_list_remove(amap);
AMAP_CHUNK_FOREACH(chunk, amap) {
*/
refs = --anon->an_ref;
if (refs == 0) {
- uvm_anfree_list(anon, &pgl);
+ uvm_anfree(anon);
}
}
}
- /* free the pages */
- uvm_pglistfree(&pgl);
/*
* Finally, destroy the amap.