Move uvm_exit() outside of the KERNEL_LOCK() in the reaper.
author: mpi <mpi@openbsd.org>
Wed, 24 Jul 2024 12:17:31 +0000 (12:17 +0000)
committer: mpi <mpi@openbsd.org>
Wed, 24 Jul 2024 12:17:31 +0000 (12:17 +0000)
Use atomic operations to reference count VM spaces.

Tested by claudio@, bluhm@, sthen@, jca@

ok jca@, claudio@

sys/kern/kern_exit.c
sys/uvm/uvm_extern.h
sys/uvm/uvm_map.c

index 1e3347e..b4151c9 100644 (file)
@@ -1,4 +1,4 @@
-/*     $OpenBSD: kern_exit.c,v 1.225 2024/07/22 08:18:53 claudio Exp $ */
+/*     $OpenBSD: kern_exit.c,v 1.226 2024/07/24 12:17:31 mpi Exp $     */
 /*     $NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $  */
 
 /*
@@ -458,8 +458,6 @@ reaper(void *arg)
 
                WITNESS_THREAD_EXIT(p);
 
-               KERNEL_LOCK();
-
                /*
                 * Free the VM resources we're still holding on to.
                 * We must do this from a valid thread because doing
@@ -470,13 +468,16 @@ reaper(void *arg)
 
                if (p->p_flag & P_THREAD) {
                        /* Just a thread */
+                       KERNEL_LOCK();
                        proc_free(p);
+                       KERNEL_UNLOCK();
                } else {
                        struct process *pr = p->p_p;
 
                        /* Release the rest of the process's vmspace */
                        uvm_exit(pr);
 
+                       KERNEL_LOCK();
                        if ((pr->ps_flags & PS_NOZOMBIE) == 0) {
                                /* Process is now a true zombie. */
                                atomic_setbits_int(&pr->ps_flags, PS_ZOMBIE);
@@ -493,9 +494,8 @@ reaper(void *arg)
                                /* No one will wait for us, just zap it. */
                                process_zap(pr);
                        }
+                       KERNEL_UNLOCK();
                }
-
-               KERNEL_UNLOCK();
        }
 }
 
index 0b9a135..847eb35 100644 (file)
@@ -1,4 +1,4 @@
-/*     $OpenBSD: uvm_extern.h,v 1.174 2024/04/02 08:39:17 deraadt Exp $        */
+/*     $OpenBSD: uvm_extern.h,v 1.175 2024/07/24 12:17:31 mpi Exp $    */
 /*     $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $      */
 
 /*
@@ -195,11 +195,12 @@ struct pmap;
  *  Locks used to protect struct members in this file:
  *     K       kernel lock
  *     I       immutable after creation
+ *     a       atomic operations
  *     v       vm_map's lock
  */
 struct vmspace {
        struct  vm_map vm_map;  /* VM address map */
-       int     vm_refcnt;      /* [K] number of references */
+       int     vm_refcnt;      /* [a] number of references */
        caddr_t vm_shm;         /* SYS5 shared memory private data XXX */
 /* we copy from vm_startcopy to the end of the structure on fork */
 #define vm_startcopy vm_rssize
index 5da14db..5fbcfe5 100644 (file)
@@ -1,4 +1,4 @@
-/*     $OpenBSD: uvm_map.c,v 1.329 2024/06/02 15:31:57 deraadt Exp $   */
+/*     $OpenBSD: uvm_map.c,v 1.330 2024/07/24 12:17:31 mpi Exp $       */
 /*     $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
 
 /*
@@ -1346,7 +1346,6 @@ void
 uvm_unmap_detach(struct uvm_map_deadq *deadq, int flags)
 {
        struct vm_map_entry *entry, *tmp;
-       int waitok = flags & UVM_PLA_WAITOK;
 
        TAILQ_FOREACH_SAFE(entry, deadq, dfree.deadq, tmp) {
                /* Drop reference to amap, if we've got one. */
@@ -1356,21 +1355,6 @@ uvm_unmap_detach(struct uvm_map_deadq *deadq, int flags)
                            atop(entry->end - entry->start),
                            flags & AMAP_REFALL);
 
-               /* Skip entries for which we have to grab the kernel lock. */
-               if (UVM_ET_ISSUBMAP(entry) || UVM_ET_ISOBJ(entry))
-                       continue;
-
-               TAILQ_REMOVE(deadq, entry, dfree.deadq);
-               uvm_mapent_free(entry);
-       }
-
-       if (TAILQ_EMPTY(deadq))
-               return;
-
-       KERNEL_LOCK();
-       while ((entry = TAILQ_FIRST(deadq)) != NULL) {
-               if (waitok)
-                       uvm_pause();
                /* Drop reference to our backing object, if we've got one. */
                if (UVM_ET_ISSUBMAP(entry)) {
                        /* ... unlikely to happen, but play it safe */
@@ -1381,11 +1365,9 @@ uvm_unmap_detach(struct uvm_map_deadq *deadq, int flags)
                            entry->object.uvm_obj);
                }
 
-               /* Step to next. */
                TAILQ_REMOVE(deadq, entry, dfree.deadq);
                uvm_mapent_free(entry);
        }
-       KERNEL_UNLOCK();
 }
 
 void
@@ -2476,10 +2458,6 @@ uvm_map_teardown(struct vm_map *map)
 #endif
        int                      i;
 
-       KERNEL_ASSERT_LOCKED();
-       KERNEL_UNLOCK();
-       KERNEL_ASSERT_UNLOCKED();
-
        KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
 
        vm_map_lock(map);
@@ -2535,9 +2513,7 @@ uvm_map_teardown(struct vm_map *map)
                numq++;
        KASSERT(numt == numq);
 #endif
-       uvm_unmap_detach(&dead_entries, UVM_PLA_WAITOK);
-
-       KERNEL_LOCK();
+       uvm_unmap_detach(&dead_entries, 0);
 
        pmap_destroy(map->pmap);
        map->pmap = NULL;
@@ -3417,10 +3393,8 @@ uvmspace_exec(struct proc *p, vaddr_t start, vaddr_t end)
 void
 uvmspace_addref(struct vmspace *vm)
 {
-       KERNEL_ASSERT_LOCKED();
        KASSERT(vm->vm_refcnt > 0);
-
-       vm->vm_refcnt++;
+       atomic_inc_int(&vm->vm_refcnt);
 }
 
 /*
@@ -3429,9 +3403,7 @@ uvmspace_addref(struct vmspace *vm)
 void
 uvmspace_free(struct vmspace *vm)
 {
-       KERNEL_ASSERT_LOCKED();
-
-       if (--vm->vm_refcnt == 0) {
+       if (atomic_dec_int_nv(&vm->vm_refcnt) == 0) {
                /*
                 * lock the map, to wait out all other references to it.  delete
                 * all of the mappings and pages they hold, then call the pmap
@@ -3439,8 +3411,11 @@ uvmspace_free(struct vmspace *vm)
                 */
 #ifdef SYSVSHM
                /* Get rid of any SYSV shared memory segments. */
-               if (vm->vm_shm != NULL)
+               if (vm->vm_shm != NULL) {
+                       KERNEL_LOCK();
                        shmexit(vm);
+                       KERNEL_UNLOCK();
+               }
 #endif
 
                uvm_map_teardown(&vm->vm_map);