-/* $OpenBSD: uvm_extern.h,v 1.161 2022/05/14 15:25:57 kettenis Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.162 2022/06/07 12:07:45 kettenis Exp $ */
/* $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $ */
/*
vaddr_t uvm_km_alloc1(vm_map_t, vsize_t, vsize_t, boolean_t);
void uvm_km_free(vm_map_t, vaddr_t, vsize_t);
-void uvm_km_free_wakeup(vm_map_t, vaddr_t, vsize_t);
vaddr_t uvm_km_kmemalloc_pla(struct vm_map *,
struct uvm_object *, vsize_t, vsize_t, int,
paddr_t, paddr_t, paddr_t, paddr_t, int);
#define uvm_km_kmemalloc(map, obj, sz, flags) \
uvm_km_kmemalloc_pla(map, obj, sz, 0, flags, 0, (paddr_t)-1, 0, 0, 0)
-vaddr_t uvm_km_valloc_prefer_wait(vm_map_t, vsize_t, voff_t);
struct vm_map *uvm_km_suballoc(vm_map_t, vaddr_t *, vaddr_t *,
vsize_t, int, boolean_t, vm_map_t);
/*
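
The header hunk above drops the prototypes for uvm_km_free_wakeup() and
uvm_km_valloc_prefer_wait(); the surviving uvm_km_kmemalloc() macro simply
fills in uvm_km_kmemalloc_pla()'s extra parameters with no alignment, no
physical-address constraints and no boundary. A hypothetical call site, as
a sketch only (kmem_map, UVM_KMF_NOWAIT and the error handling are
assumptions of this illustration, not part of the diff):

	/* Sketch: wired kernel memory from kmem_map, failing fast. */
	vaddr_t va;

	va = uvm_km_kmemalloc(kmem_map, NULL, PAGE_SIZE, UVM_KMF_NOWAIT);
	if (va == 0)
		return (ENOMEM);
	/* ... use the mapping ... */
	uvm_km_free(kmem_map, va, PAGE_SIZE);
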
-/* $OpenBSD: uvm_km.c,v 1.149 2022/05/14 15:25:57 kettenis Exp $ */
+/* $OpenBSD: uvm_km.c,v 1.150 2022/06/07 12:07:45 kettenis Exp $ */
/* $NetBSD: uvm_km.c,v 1.42 2001/01/14 02:10:01 thorpej Exp $ */
/*
uvm_unmap(map, trunc_page(addr), round_page(addr+size));
}
-/*
- * uvm_km_free_wakeup: free an area of kernel memory and wake up
- * anyone waiting for vm space.
- *
- * => XXX: "wanted" bit + unlock&wait on other end?
- */
-void
-uvm_km_free_wakeup(struct vm_map *map, vaddr_t addr, vsize_t size)
-{
- struct uvm_map_deadq dead_entries;
-
- vm_map_lock(map);
- TAILQ_INIT(&dead_entries);
- uvm_unmap_remove(map, trunc_page(addr), round_page(addr+size),
- &dead_entries, FALSE, TRUE);
- wakeup(map);
- vm_map_unlock(map);
-
- uvm_unmap_detach(&dead_entries, 0);
-}
-
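
As the removed body shows, uvm_km_free_wakeup() differed from uvm_km_free()
only in issuing wakeup(map) while the map was still locked, to rouse threads
sleeping in uvm_km_valloc_prefer_wait() (removed further down). With that
sleeper gone the wakeup is dead code, so any straggling caller converts
one-for-one (sketch, assuming the same map/addr/size arguments):

	/* Before: unmap and wake threads sleeping on the map. */
	uvm_km_free_wakeup(map, addr, size);

	/* After: plain unmap; nothing sleeps waiting for this wakeup. */
	uvm_km_free(map, addr, size);
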
/*
* uvm_km_alloc1: allocate wired down memory in the kernel map.
*
return kva;
}
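
For reference, uvm_km_alloc1() above takes (map, size, align, zeroit) per
the prototype in uvm_extern.h. A hypothetical use, as a sketch only
(kernel_map and the ENOMEM handling are assumptions of this illustration):

	/* Sketch: one wired, zeroed page from kernel_map. */
	vaddr_t va;

	va = uvm_km_alloc1(kernel_map, PAGE_SIZE, 0, TRUE);
	if (va == 0)
		return (ENOMEM);
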
-/*
- * uvm_km_valloc_prefer_wait: allocate zero-fill memory in the kernel's
- * address space, preferring the given object offset
- *
- * => memory is not allocated until fault time
- * => if no room in map, wait for space to free, unless requested size
- *    is larger than map (in which case we return 0)
- */
-vaddr_t
-uvm_km_valloc_prefer_wait(struct vm_map *map, vsize_t size, voff_t prefer)
-{
- vaddr_t kva;
-
- KASSERT(vm_map_pmap(map) == pmap_kernel());
-
- size = round_page(size);
- if (size > vm_map_max(map) - vm_map_min(map))
- return 0;
-
- while (1) {
- kva = vm_map_min(map); /* hint */
-
- /*
- * allocate some virtual space. will be demand filled
- * by kernel_object.
- */
- if (__predict_true(uvm_map(map, &kva, size, uvm.kernel_object,
- prefer, 0,
- UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
- MAP_INHERIT_NONE, MADV_RANDOM, 0)) == 0)) {
- return kva;
- }
-
- /* failed. sleep for a while (on map) */
- tsleep_nsec(map, PVM, "vallocwait", INFSLP);
- }
- /*NOTREACHED*/
-}
-
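
uvm_km_valloc_prefer_wait() looped on uvm_map(), sleeping on the map between
attempts and depending on uvm_km_free_wakeup() for its wakeup; removing one
without the other would strand sleepers, which is presumably why both go in
the same commit. Code that still needs pageable, demand-zero kernel memory
would use km_alloc(9) instead. A sketch under the assumption that
kv_any/kp_pageable/kd_waitok are the applicable standard modes (a preferred
offset could be requested via a custom struct kmem_dyn_mode with kd_prefer
set; there is no drop-in equivalent of the old prefer argument here):

	/* Sketch: demand-zero pageable kernel VA, sleeping for space. */
	void *va;

	va = km_alloc(size, &kv_any, &kp_pageable, &kd_waitok);
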
#if defined(__HAVE_PMAP_DIRECT)
/*
* uvm_km_page allocator, __HAVE_PMAP_DIRECT arch