Remove a micro optimization to free pages in batch in amap_wipeout().
author: mpi <mpi@openbsd.org>
Wed, 17 Apr 2024 13:17:31 +0000 (13:17 +0000)
committer: mpi <mpi@openbsd.org>
Wed, 17 Apr 2024 13:17:31 +0000 (13:17 +0000)
The contention on uvm_lock_fpageq() is now reduced by using per-CPU caches,
so we want to put pages on the cache and not give them back directly to the
allocator.

ok kettenis@

sys/uvm/uvm_amap.c

index fb85a3c..1d1a5f7 100644 (file)
@@ -1,4 +1,4 @@
-/*     $OpenBSD: uvm_amap.c,v 1.93 2024/04/16 08:53:02 mpi Exp $       */
+/*     $OpenBSD: uvm_amap.c,v 1.94 2024/04/17 13:17:31 mpi Exp $       */
 /*     $NetBSD: uvm_amap.c,v 1.27 2000/11/25 06:27:59 chs Exp $        */
 
 /*
@@ -482,7 +482,6 @@ amap_wipeout(struct vm_amap *amap)
        int slot;
        struct vm_anon *anon;
        struct vm_amap_chunk *chunk;
-       struct pglist pgl;
 
        KASSERT(rw_write_held(amap->am_lock));
        KASSERT(amap->am_ref == 0);
@@ -495,7 +494,6 @@ amap_wipeout(struct vm_amap *amap)
                return;
        }
 
-       TAILQ_INIT(&pgl);
        amap_list_remove(amap);
 
        AMAP_CHUNK_FOREACH(chunk, amap) {
@@ -515,12 +513,10 @@ amap_wipeout(struct vm_amap *amap)
                         */
                        refs = --anon->an_ref;
                        if (refs == 0) {
-                               uvm_anfree_list(anon, &pgl);
+                               uvm_anfree(anon);
                        }
                }
        }
-       /* free the pages */
-       uvm_pglistfree(&pgl);
 
        /*
         * Finally, destroy the amap.