uvm_km_putpage was calling into the tangled uvm guts again on non-pmap_direct architectures.
author:    tedu <tedu@openbsd.org>  Thu, 15 Jul 2010 00:14:17 +0000
committer: tedu <tedu@openbsd.org>  Thu, 15 Jul 2010 00:14:17 +0000
go back to something more like the previous design, and have the thread do
the heavy lifting.  solves vmmaplk panics.
ok deraadt oga thib
[and even simple diffs are hard to get perfect. help from mdempsky and deraadt]
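
The change below makes uvm_km_putpage() on !__HAVE_PMAP_DIRECT merely link the freed page onto a mutex-protected freelist and wake the km thread, which later drains the list and does the expensive work in uvm_km_doputpage(). The following is a minimal user-space sketch of that producer/worker pattern only, not the kernel code: pthread mutex/condvar stand in for mtx_enter()/msleep()/wakeup(), and the names (free_page, freelist_push, freelist_worker) are made up for illustration.

/*
 * Sketch of the deferred-free design: the fast path only queues, the
 * worker thread does the heavy lifting outside the caller's context.
 */
#include <pthread.h>
#include <stdlib.h>

struct free_page {
	struct free_page *next;		/* link lives in the freed page itself */
};

static pthread_mutex_t fl_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t fl_cv = PTHREAD_COND_INITIALIZER;
static struct free_page *fl_head;
static int fl_len;

/* Fast path: analogous to uvm_km_putpage() without pmap_direct. */
void
freelist_push(void *v)
{
	struct free_page *fp = v;

	pthread_mutex_lock(&fl_mtx);
	fp->next = fl_head;
	fl_head = fp;
	if (fl_len++ > 16)		/* batch a few before waking the worker */
		pthread_cond_signal(&fl_cv);
	pthread_mutex_unlock(&fl_mtx);
}

/* Worker: analogous to the uvm_km_thread() loop draining the freelist. */
void *
freelist_worker(void *arg)
{
	struct free_page *fp, *next;

	(void)arg;
	for (;;) {
		pthread_mutex_lock(&fl_mtx);
		while (fl_head == NULL)
			pthread_cond_wait(&fl_cv, &fl_mtx);
		fp = fl_head;
		fl_head = NULL;
		fl_len = 0;
		pthread_mutex_unlock(&fl_mtx);

		/* Heavy lifting happens with the mutex dropped, as in the diff. */
		for (; fp != NULL; fp = next) {
			next = fp->next;
			free(fp);	/* stand-in for uvm_km_doputpage() */
		}
	}
	return (NULL);
}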

sys/uvm/uvm_km.c
sys/uvm/uvm_km.h

index 64b3f0b..aee736d 100644
--- a/sys/uvm/uvm_km.c
+++ b/sys/uvm/uvm_km.c
@@ -1,4 +1,4 @@
-/*     $OpenBSD: uvm_km.c,v 1.83 2010/07/02 23:12:38 thib Exp $        */
+/*     $OpenBSD: uvm_km.c,v 1.84 2010/07/15 00:14:17 tedu Exp $        */
 /*     $NetBSD: uvm_km.c,v 1.42 2001/01/14 02:10:01 thorpej Exp $      */
 
 /* 
@@ -707,6 +707,7 @@ struct uvm_km_pages uvm_km_pages;
 
 void uvm_km_createthread(void *);
 void uvm_km_thread(void *);
+struct uvm_km_free_page *uvm_km_doputpage(struct uvm_km_free_page *);
 
 /*
  * Allocate the initial reserve, and create the thread which will
@@ -768,34 +769,47 @@ uvm_km_thread(void *arg)
 {
        vaddr_t pg[16];
        int i;
+       int allocmore = 0;
+       struct uvm_km_free_page *fp = NULL;
 
        for (;;) {
                mtx_enter(&uvm_km_pages.mtx);
-               if (uvm_km_pages.free >= uvm_km_pages.lowat) {
+               if (uvm_km_pages.free >= uvm_km_pages.lowat &&
+                   uvm_km_pages.freelist == NULL) {
                        msleep(&uvm_km_pages.km_proc, &uvm_km_pages.mtx,
                            PVM, "kmalloc", 0);
                }
+               allocmore = uvm_km_pages.free < uvm_km_pages.lowat;
+               fp = uvm_km_pages.freelist;
+               uvm_km_pages.freelist = NULL;
+               uvm_km_pages.freelistlen = 0;
                mtx_leave(&uvm_km_pages.mtx);
 
-               for (i = 0; i < nitems(pg); i++) {
-                       pg[i] = (vaddr_t)uvm_km_kmemalloc(kernel_map, NULL,
-                           PAGE_SIZE, UVM_KMF_VALLOC);
-               }
+               if (allocmore) {
+                       for (i = 0; i < nitems(pg); i++) {
+                               pg[i] = (vaddr_t)uvm_km_kmemalloc(kernel_map,
+                                   NULL, PAGE_SIZE, UVM_KMF_VALLOC);
+                       }
+       
+                       mtx_enter(&uvm_km_pages.mtx);
+                       for (i = 0; i < nitems(pg); i++) {
+                               if (uvm_km_pages.free ==
+                                   nitems(uvm_km_pages.page))
+                                       break;
+                               else
+                                       uvm_km_pages.page[uvm_km_pages.free++]
+                                           = pg[i];
+                       }
+                       wakeup(&uvm_km_pages.free);
+                       mtx_leave(&uvm_km_pages.mtx);
 
-               mtx_enter(&uvm_km_pages.mtx);
-               for (i = 0; i < nitems(pg); i++) {
-                       if (uvm_km_pages.free == nitems(uvm_km_pages.page))
-                               break;
-                       else
-                               uvm_km_pages.page[uvm_km_pages.free++] = pg[i];
+                       /* Cleanup left-over pages (if any). */
+                       for (; i < nitems(pg); i++)
+                               uvm_km_free(kernel_map, pg[i], PAGE_SIZE);
+               }
+               while (fp) {
+                       fp = uvm_km_doputpage(fp);
                }
-
-               wakeup(&uvm_km_pages.free);
-               mtx_leave(&uvm_km_pages.mtx);
-
-               /* Cleanup left-over pages (if any). */
-               for (; i < nitems(pg); i++)
-                       uvm_km_free(kernel_map, pg[i], PAGE_SIZE);
        }
 }
 #endif
@@ -860,14 +874,35 @@ uvm_km_getpage_pla(int flags, int *slowdown, paddr_t low, paddr_t high,
 void
 uvm_km_putpage(void *v)
 {
+#ifdef __HAVE_PMAP_DIRECT
        vaddr_t va = (vaddr_t)v;
        struct vm_page *pg;
 
-#ifdef __HAVE_PMAP_DIRECT
        pg = pmap_unmap_direct(va);
+
+       uvm_pagefree(pg);
 #else  /* !__HAVE_PMAP_DIRECT */
+       struct uvm_km_free_page *fp = v;
+
+       mtx_enter(&uvm_km_pages.mtx);
+       fp->next = uvm_km_pages.freelist;
+       uvm_km_pages.freelist = fp;
+       if (uvm_km_pages.freelistlen++ > 16)
+               wakeup(&uvm_km_pages.km_proc);
+       mtx_leave(&uvm_km_pages.mtx);
+#endif /* !__HAVE_PMAP_DIRECT */
+}
+
+#ifndef __HAVE_PMAP_DIRECT
+struct uvm_km_free_page *
+uvm_km_doputpage(struct uvm_km_free_page *fp)
+{
+       vaddr_t va = (vaddr_t)fp;
+       struct vm_page *pg;
        int     freeva = 1;
        paddr_t pa;
+       struct uvm_km_free_page *nextfp = fp->next;
+
        if (!pmap_extract(pmap_kernel(), va, &pa))
                panic("lost pa");
        pg = PHYS_TO_VM_PAGE(pa);
@@ -886,7 +921,8 @@ uvm_km_putpage(void *v)
 
        if (freeva)
                uvm_km_free(kernel_map, va, PAGE_SIZE);
-#endif /* !__HAVE_PMAP_DIRECT */
 
        uvm_pagefree(pg);
+       return (nextfp);
 }
+#endif /* !__HAVE_PMAP_DIRECT */
index b14f2e6..76a2ce9 100644
--- a/sys/uvm/uvm_km.h
+++ b/sys/uvm/uvm_km.h
@@ -1,4 +1,4 @@
-/*     $OpenBSD: uvm_km.h,v 1.10 2010/06/28 04:20:29 miod Exp $        */
+/*     $OpenBSD: uvm_km.h,v 1.11 2010/07/15 00:14:17 tedu Exp $        */
 /*     $NetBSD: uvm_km.h,v 1.9 1999/06/21 17:25:11 thorpej Exp $       */
 
 /*
@@ -58,6 +58,9 @@ void uvm_km_pgremove_intrsafe(vaddr_t, vaddr_t);
 #define UVM_KM_PAGES_LOWAT_MAX (2048)
 #define UVM_KM_PAGES_HIWAT_MAX (4 * UVM_KM_PAGES_LOWAT_MAX)
 
+struct uvm_km_free_page {
+       struct uvm_km_free_page *next;
+};
 struct uvm_km_pages {
        struct  mutex mtx;
 
@@ -69,6 +72,9 @@ struct uvm_km_pages {
        int     free;
        vaddr_t page[UVM_KM_PAGES_HIWAT_MAX];
 
+       struct uvm_km_free_page *freelist;
+       int freelistlen;
+
        struct  proc *km_proc;
 };