-/* $OpenBSD: uvm_km.c,v 1.83 2010/07/02 23:12:38 thib Exp $ */
+/* $OpenBSD: uvm_km.c,v 1.84 2010/07/15 00:14:17 tedu Exp $ */
/* $NetBSD: uvm_km.c,v 1.42 2001/01/14 02:10:01 thorpej Exp $ */
void uvm_km_createthread(void *);
void uvm_km_thread(void *);
+struct uvm_km_free_page *uvm_km_doputpage(struct uvm_km_free_page *);
/*
 * Allocate the initial reserve, and create the thread which will
 * keep the reserve full.  For bootstrapping.
 */
void
uvm_km_thread(void *arg)
{
	vaddr_t pg[16];
	int i;
+	int allocmore = 0;
+	struct uvm_km_free_page *fp = NULL;

	for (;;) {
		mtx_enter(&uvm_km_pages.mtx);
-		if (uvm_km_pages.free >= uvm_km_pages.lowat) {
+		if (uvm_km_pages.free >= uvm_km_pages.lowat &&
+		    uvm_km_pages.freelist == NULL) {
			msleep(&uvm_km_pages.km_proc, &uvm_km_pages.mtx,
			    PVM, "kmalloc", 0);
		}
+		allocmore = uvm_km_pages.free < uvm_km_pages.lowat;
+		fp = uvm_km_pages.freelist;
+		uvm_km_pages.freelist = NULL;
+		uvm_km_pages.freelistlen = 0;
		mtx_leave(&uvm_km_pages.mtx);
-		for (i = 0; i < nitems(pg); i++) {
-			pg[i] = (vaddr_t)uvm_km_kmemalloc(kernel_map, NULL,
-			    PAGE_SIZE, UVM_KMF_VALLOC);
-		}
-
-		mtx_enter(&uvm_km_pages.mtx);
-		for (i = 0; i < nitems(pg); i++) {
-			if (uvm_km_pages.free == nitems(uvm_km_pages.page))
-				break;
-			else
-				uvm_km_pages.page[uvm_km_pages.free++] = pg[i];
+		if (allocmore) {
+			for (i = 0; i < nitems(pg); i++) {
+				pg[i] = (vaddr_t)uvm_km_kmemalloc(kernel_map,
+				    NULL, PAGE_SIZE, UVM_KMF_VALLOC);
+			}
+
+			mtx_enter(&uvm_km_pages.mtx);
+			for (i = 0; i < nitems(pg); i++) {
+				if (uvm_km_pages.free ==
+				    nitems(uvm_km_pages.page))
+					break;
+				else
+					uvm_km_pages.page[uvm_km_pages.free++]
+					    = pg[i];
+			}
+			wakeup(&uvm_km_pages.free);
+			mtx_leave(&uvm_km_pages.mtx);
+
+			/* Cleanup left-over pages (if any). */
+			for (; i < nitems(pg); i++)
+				uvm_km_free(kernel_map, pg[i], PAGE_SIZE);
+		}
+		while (fp) {
+			fp = uvm_km_doputpage(fp);
		}
-
-		wakeup(&uvm_km_pages.free);
-		mtx_leave(&uvm_km_pages.mtx);
-
-		/* Cleanup left-over pages (if any). */
-		for (; i < nitems(pg); i++)
-			uvm_km_free(kernel_map, pg[i], PAGE_SIZE);
	}
}
#endif
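
/*
 * Illustrative sketch, not part of uvm_km.c or the diff above: the
 * same detach-then-process pattern uvm_km_thread uses, redone in
 * userspace C with pthreads.  The worker takes the whole freelist in
 * one step while holding the mutex, then walks the detached list with
 * the lock dropped, so producers never wait behind the actual freeing
 * work.  All names below are hypothetical.
 */
#include <pthread.h>
#include <stdlib.h>

struct free_page {
	struct free_page *next;
};

static pthread_mutex_t km_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t km_cv = PTHREAD_COND_INITIALIZER;
static struct free_page *freelist;
static int freelistlen;

void *
km_thread(void *arg)
{
	struct free_page *fp, *next;

	(void)arg;
	for (;;) {
		pthread_mutex_lock(&km_mtx);
		while (freelist == NULL)
			pthread_cond_wait(&km_cv, &km_mtx);
		fp = freelist;		/* detach the entire list */
		freelist = NULL;
		freelistlen = 0;
		pthread_mutex_unlock(&km_mtx);

		while (fp != NULL) {	/* free pages outside the lock */
			next = fp->next;
			free(fp);
			fp = next;
		}
	}
	return (NULL);			/* not reached */
}
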
void
uvm_km_putpage(void *v)
{
+#ifdef __HAVE_PMAP_DIRECT
	vaddr_t va = (vaddr_t)v;
	struct vm_page *pg;

-#ifdef __HAVE_PMAP_DIRECT
	pg = pmap_unmap_direct(va);
+
+	uvm_pagefree(pg);
#else	/* !__HAVE_PMAP_DIRECT */
+	struct uvm_km_free_page *fp = v;
+
+	mtx_enter(&uvm_km_pages.mtx);
+	fp->next = uvm_km_pages.freelist;
+	uvm_km_pages.freelist = fp;
+	if (uvm_km_pages.freelistlen++ > 16)
+		wakeup(&uvm_km_pages.km_proc);
+	mtx_leave(&uvm_km_pages.mtx);
+#endif	/* !__HAVE_PMAP_DIRECT */
+}
+
+#ifndef __HAVE_PMAP_DIRECT
+struct uvm_km_free_page *
+uvm_km_doputpage(struct uvm_km_free_page *fp)
+{
+	vaddr_t va = (vaddr_t)fp;
+	struct vm_page *pg;
	int freeva = 1;
	paddr_t pa;
+	struct uvm_km_free_page *nextfp = fp->next;
+
	if (!pmap_extract(pmap_kernel(), va, &pa))
		panic("lost pa");
	pg = PHYS_TO_VM_PAGE(pa);
	if (freeva)
		uvm_km_free(kernel_map, va, PAGE_SIZE);
-#endif	/* !__HAVE_PMAP_DIRECT */
	uvm_pagefree(pg);
+	return (nextfp);
}
+#endif	/* !__HAVE_PMAP_DIRECT */
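
/*
 * Illustrative sketch, not part of uvm_km.c: the producer side of the
 * pattern, mirroring the !__HAVE_PMAP_DIRECT uvm_km_putpage above.
 * The freed buffer itself is reused as the list node, just as
 * uvm_km_putpage reuses the freed page to hold its struct
 * uvm_km_free_page, so enqueueing allocates nothing; the worker is
 * only woken once the batch passes a threshold.  Hypothetical names;
 * builds on the km_thread sketch above.
 */
void
km_putpage(void *v)
{
	struct free_page *fp = v;	/* freed buffer doubles as node */

	pthread_mutex_lock(&km_mtx);
	fp->next = freelist;
	freelist = fp;
	if (freelistlen++ > 16)		/* wake worker past threshold */
		pthread_cond_signal(&km_cv);
	pthread_mutex_unlock(&km_mtx);
}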