From 2a71da199878e7fa35aa125dc803f5d67665f3a9 Mon Sep 17 00:00:00 2001
From: mpi
Date: Tue, 5 Oct 2021 15:37:21 +0000
Subject: [PATCH] Unref/free amaps before grabbing the KERNEL_LOCK().

This is possible now that amaps & anons are protected by a per-map rwlock.

Tested by many as part of a bigger diff.

ok kettenis@
---
 sys/uvm/uvm_map.c | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index 3c4e8851f4b..e36c761b425 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: uvm_map.c,v 1.277 2021/06/17 16:10:39 mpi Exp $	*/
+/*	$OpenBSD: uvm_map.c,v 1.278 2021/10/05 15:37:21 mpi Exp $	*/
 /*	$NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $	*/
 
 /*
@@ -1570,9 +1570,15 @@ uvm_unmap_detach(struct uvm_map_deadq *deadq, int flags)
 	int waitok = flags & UVM_PLA_WAITOK;
 
 	TAILQ_FOREACH_SAFE(entry, deadq, dfree.deadq, tmp) {
+		/* Drop reference to amap, if we've got one. */
+		if (entry->aref.ar_amap)
+			amap_unref(entry->aref.ar_amap,
+			    entry->aref.ar_pageoff,
+			    atop(entry->end - entry->start),
+			    flags & AMAP_REFALL);
+
 		/* Skip entries for which we have to grab the kernel lock. */
-		if (entry->aref.ar_amap || UVM_ET_ISSUBMAP(entry) ||
-		    UVM_ET_ISOBJ(entry))
+		if (UVM_ET_ISSUBMAP(entry) || UVM_ET_ISOBJ(entry))
 			continue;
 
 		TAILQ_REMOVE(deadq, entry, dfree.deadq);
@@ -1586,13 +1592,6 @@ uvm_unmap_detach(struct uvm_map_deadq *deadq, int flags)
 	while ((entry = TAILQ_FIRST(deadq)) != NULL) {
 		if (waitok)
 			uvm_pause();
-		/* Drop reference to amap, if we've got one. */
-		if (entry->aref.ar_amap)
-			amap_unref(entry->aref.ar_amap,
-			    entry->aref.ar_pageoff,
-			    atop(entry->end - entry->start),
-			    flags & AMAP_REFALL);
-
 		/* Drop reference to our backing object, if we've got one. */
 		if (UVM_ET_ISSUBMAP(entry)) {
 			/* ... unlikely to happen, but play it safe */
-- 
2.20.1
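
For context, a minimal, self-contained userland sketch of the two-pass
pattern this diff gives uvm_unmap_detach(): amap references are dropped
lock-free in a first pass, and the big lock is only taken for the entries
that still reference a submap or a backing object.  This is not kernel
code; struct dead_entry, release_amap_ref(), detach_object() and
global_lock are illustrative stand-ins for vm_map_entry, amap_unref(),
pgo_detach()/uvm_map_deallocate() and the KERNEL_LOCK(), and the
assumption that the amap drop is safe without the big lock models the
per-map rwlock mentioned in the commit message.

/*
 * Userland model of the pattern above; all names are illustrative and
 * none of this is UVM code.
 */
#include <sys/queue.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct dead_entry {
	TAILQ_ENTRY(dead_entry)	 entries;
	void			*amap;	/* stand-in for aref.ar_amap */
	void			*obj;	/* stand-in for a backing object */
};

TAILQ_HEAD(deadq, dead_entry);

static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;

static void
release_amap_ref(void *amap)
{
	/* Models amap_unref(): assumed safe without the global lock. */
	free(amap);
}

static void
detach_object(void *obj)
{
	/* Models pgo_detach(): still done under the global lock. */
	free(obj);
}

static void
dead_entries_free(struct deadq *dq)
{
	struct dead_entry *e, *tmp;

	/* Pass 1, lock-free: drop amap refs, free entries with no object. */
	TAILQ_FOREACH_SAFE(e, dq, entries, tmp) {
		if (e->amap != NULL)
			release_amap_ref(e->amap);
		if (e->obj != NULL)
			continue;	/* handled under the lock below */
		TAILQ_REMOVE(dq, e, entries);
		free(e);
	}

	if (TAILQ_EMPTY(dq))
		return;

	/* Pass 2: only entries with a backing object, under the big lock. */
	pthread_mutex_lock(&global_lock);
	while ((e = TAILQ_FIRST(dq)) != NULL) {
		detach_object(e->obj);
		TAILQ_REMOVE(dq, e, entries);
		free(e);
	}
	pthread_mutex_unlock(&global_lock);
}

int
main(void)
{
	struct deadq dq = TAILQ_HEAD_INITIALIZER(dq);
	struct dead_entry *e;

	e = calloc(1, sizeof(*e));
	e->amap = malloc(1);		/* amap only: freed in pass 1 */
	TAILQ_INSERT_TAIL(&dq, e, entries);

	e = calloc(1, sizeof(*e));
	e->amap = malloc(1);		/* amap dropped in pass 1... */
	e->obj = malloc(1);		/* ...object detached in pass 2 */
	TAILQ_INSERT_TAIL(&dq, e, entries);

	dead_entries_free(&dq);
	printf("dead entries freed\n");
	return 0;
}

The point mirrored from the diff is that the reference drop which no
longer needs the big lock moves into the first, unlocked pass, so the
locked pass only has to serialize the submap/object detach.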