-/* $OpenBSD: pmap7.c,v 1.62 2022/02/01 19:57:28 kettenis Exp $ */
+/* $OpenBSD: pmap7.c,v 1.63 2022/02/21 19:15:58 kettenis Exp $ */
/* $NetBSD: pmap.c,v 1.147 2004/01/18 13:03:50 scw Exp $ */
/*
/* Allocate an L1 page table */
for (;;) {
- va = uvm_km_valloc(kernel_map, L1_TABLE_SIZE);
+ va = (vaddr_t)km_alloc(L1_TABLE_SIZE, &kv_any, &kp_none,
+ &kd_nowait);
if (va != 0)
break;
uvm_wait("alloc_l1_va");
uvm_pglistfree(&mlist);
/* free backing va */
- uvm_km_free(kernel_map, (vaddr_t)l1->l1_kva, L1_TABLE_SIZE);
+ km_free(l1->l1_kva, L1_TABLE_SIZE, &kv_any, &kp_none);
free(l1, M_VMPMAP, 0);
}
-/* $OpenBSD: vm_machdep.c,v 1.27 2021/05/16 06:20:28 jsg Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.28 2022/02/21 19:15:58 kettenis Exp $ */
/* $NetBSD: vm_machdep.c,v 1.31 2004/01/04 11:33:29 jdolecek Exp $ */
/*
sched_exit(p);
}
+struct kmem_va_mode kv_physwait = {
+ .kv_map = &phys_map,
+ .kv_wait = 1,
+};
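(Context, not part of the patch: kv_physwait combines phys_map with kv_wait, so a km_alloc() caller that also passes &kd_waitok can sleep until virtual space in phys_map becomes available, which is the behaviour uvm_km_valloc_wait()/uvm_km_free_wakeup() used to provide. A minimal sketch of the intended allocate/free pairing, using only identifiers that appear in the hunks below:

	/* sleep, if necessary, for virtual space in phys_map */
	taddr = (vaddr_t)km_alloc(len, &kv_physwait, &kp_none, &kd_waitok);
	...
	/* return the virtual space to phys_map */
	km_free((void *)taddr, len, &kv_physwait, &kp_none);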
+
/*
* Map a user I/O request into kernel virtual address space.
* Note: the pages are already locked by uvm_vslock(), so we
vaddr_t faddr, taddr, off;
paddr_t fpa;
-
if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
-
faddr = trunc_page((vaddr_t)(bp->b_saveaddr = bp->b_data));
off = (vaddr_t)bp->b_data - faddr;
len = round_page(off + len);
- taddr = uvm_km_valloc_wait(phys_map, len);
+ taddr = (vaddr_t)km_alloc(len, &kv_physwait, &kp_none, &kd_waitok);
bp->b_data = (caddr_t)(taddr + off);
-
/*
* The region is locked, so we expect that pmap_pte() will return
* non-NULL.
+ * XXX: unwise to expect this in a multithreaded environment.
+ * anything can happen to a pmap between the time we lock a
+ * region, release the pmap lock, and then relock it for
+ * the pmap_extract().
+ *
+ * no need to flush TLB since we expect nothing to be mapped
+ * where we just allocated (TLB will be flushed when our
+ * mapping is removed).
*/
while (len) {
(void) pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
faddr, &fpa);
- pmap_enter(pmap_kernel(), taddr, fpa,
- PROT_READ | PROT_WRITE,
- PROT_READ | PROT_WRITE | PMAP_WIRED);
+ pmap_kenter_pa(taddr, fpa, PROT_READ | PROT_WRITE);
faddr += PAGE_SIZE;
taddr += PAGE_SIZE;
len -= PAGE_SIZE;
if ((bp->b_flags & B_PHYS) == 0)
panic("vunmapbuf");
-
- /*
- * Make sure the cache does not have dirty data for the
- * pages we had mapped.
- */
addr = trunc_page((vaddr_t)bp->b_data);
off = (vaddr_t)bp->b_data - addr;
len = round_page(off + len);
-
- pmap_remove(pmap_kernel(), addr, addr + len);
+ pmap_kremove(addr, len);
pmap_update(pmap_kernel());
- uvm_km_free_wakeup(phys_map, addr, len);
+ km_free((void *)addr, len, &kv_physwait, &kp_none);
bp->b_data = bp->b_saveaddr;
bp->b_saveaddr = NULL;
}
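(Illustration only: the vmapbuf()/vunmapbuf() hunks above replace the managed pmap_enter()/pmap_remove() pair with pmap_kenter_pa()/pmap_kremove(). pmap_kenter_pa() creates an unmanaged, implicitly wired kernel mapping, and such mappings must be torn down with pmap_kremove(). A minimal sketch of the pairing, with the same variable names used above:

	/* wired, unmanaged kernel mapping of one page at taddr */
	pmap_kenter_pa(taddr, fpa, PROT_READ | PROT_WRITE);
	...
	/* remove the kenter'd range and push the update out */
	pmap_kremove(addr, len);
	pmap_update(pmap_kernel());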