-/* $OpenBSD: uvm_map.c,v 1.302 2022/10/31 10:46:24 mpi Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.303 2022/11/04 09:36:44 mpi Exp $ */
/* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
/*
vaddr_t stack_begin, stack_end; /* Position of stack. */
KASSERT(map->flags & VM_MAP_ISVMSPACE);
+ vm_map_assert_anylock(map);
+
vm = (struct vmspace *)map;
stack_begin = MIN((vaddr_t)vm->vm_maxsaddr, (vaddr_t)vm->vm_minsaddr);
stack_end = MAX((vaddr_t)vm->vm_maxsaddr, (vaddr_t)vm->vm_minsaddr);
if (addr + sz < addr)
return 0;
+ vm_map_assert_anylock(map);
+
/*
* Kernel memory above uvm_maxkaddr is considered unavailable.
*/
entry->guard = 0;
entry->fspace = 0;
+ vm_map_assert_wrlock(map);
+
/* Reset free space in first. */
free = uvm_map_uaddr_e(map, first);
uvm_mapent_free_remove(map, free, first);
uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
struct vm_map_entry **entry)
{
+ vm_map_assert_anylock(map);
+
*entry = uvm_map_entrybyaddr(&map->addr, address);
return *entry != NULL && !UVM_ET_ISHOLE(*entry) &&
(*entry)->start <= address && (*entry)->end > address;
vaddr_t end = addr + sz;
struct vm_map_entry *first, *iter, *prev = NULL;
+ vm_map_assert_anylock(map);
+
if (!uvm_map_lookup_entry(map, addr, &first)) {
printf("map stack 0x%lx-0x%lx of map %p failed: no mapping\n",
addr, end, map);
vaddr_t addr; /* Start of freed range. */
vaddr_t end; /* End of freed range. */
+ UVM_MAP_REQ_WRITE(map);
+
prev = *prev_ptr;
if (prev == entry)
*prev_ptr = prev = NULL;
if (start >= end)
return 0;
- if ((map->flags & VM_MAP_INTRSAFE) == 0)
- splassert(IPL_NONE);
- else
- splassert(IPL_VM);
+ vm_map_assert_wrlock(map);
/* Find first affected entry. */
entry = uvm_map_entrybyaddr(&map->addr, start);
KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
+ vm_map_lock(map);
+
/* Remove address selectors. */
uvm_addr_destroy(map->uaddr_exe);
map->uaddr_exe = NULL;
entry = TAILQ_NEXT(entry, dfree.deadq);
}
+ vm_map_unlock(map);
+
#ifdef VMMAP_DEBUG
numt = numq = 0;
RBT_FOREACH(entry, uvm_map_addr, &map->addr)
{
struct vm_map_entry *entry;
+ vm_map_assert_anylock(map);
+
if (start < map->min_offset || end > map->max_offset || start > end)
return FALSE;
if (start == end)
*/
TAILQ_INIT(&dead);
uvm_tree_sanity(map, __FILE__, __LINE__);
+ vm_map_lock(map);
uvm_unmap_remove(map, map->min_offset, map->max_offset, &dead,
TRUE, FALSE, FALSE);
+ vm_map_unlock(map);
pmap_destroy(map->pmap);
KASSERT(RBT_EMPTY(uvm_map_addr, &map->addr));
free(map, M_VMMAP, sizeof *map);
vaddr_t b_start, vaddr_t b_end, vaddr_t s_start, vaddr_t s_end, int flags)
{
KDASSERT(b_end >= b_start && s_end >= s_start);
+ vm_map_assert_wrlock(map);
/* Clear all free lists. */
uvm_map_freelist_update_clear(map, dead);
KDASSERT((entry != NULL && VMMAP_FREE_END(entry) == min) ||
min == map->min_offset);
+ UVM_MAP_REQ_WRITE(map);
+
/*
* During the function, entfree will always point at the uaddr state
* for entry.
wakeup(&map->flags);
}
+void
+vm_map_assert_anylock_ln(struct vm_map *map, char *file, int line)
+{
+ LPRINTF(("map assert read or write locked: %p (at %s %d)\n", map, file, line));
+ if ((map->flags & VM_MAP_INTRSAFE) == 0)
+ rw_assert_anylock(&map->lock);
+ else
+ MUTEX_ASSERT_LOCKED(&map->mtx);
+}
+
+void
+vm_map_assert_wrlock_ln(struct vm_map *map, char *file, int line)
+{
+ LPRINTF(("map assert write locked: %p (at %s %d)\n", map, file, line));
+ if ((map->flags & VM_MAP_INTRSAFE) == 0) {
+ splassert(IPL_NONE);
+ rw_assert_wrlock(&map->lock);
+ } else
+ MUTEX_ASSERT_LOCKED(&map->mtx);
+}
+
#ifndef SMALL_KERNEL
int
uvm_map_fill_vmmap(struct vm_map *map, struct kinfo_vmentry *kve,
-/* $OpenBSD: uvm_map.h,v 1.79 2022/10/21 19:13:33 deraadt Exp $ */
+/* $OpenBSD: uvm_map.h,v 1.80 2022/11/04 09:36:44 mpi Exp $ */
/* $NetBSD: uvm_map.h,v 1.24 2001/02/18 21:19:08 chs Exp $ */
/*
void vm_map_upgrade_ln(struct vm_map*, char*, int);
void vm_map_busy_ln(struct vm_map*, char*, int);
void vm_map_unbusy_ln(struct vm_map*, char*, int);
+void vm_map_assert_anylock_ln(struct vm_map*, char*, int);
+void vm_map_assert_wrlock_ln(struct vm_map*, char*, int);
#ifdef DIAGNOSTIC
#define vm_map_lock_try(map) vm_map_lock_try_ln(map, __FILE__, __LINE__)
#define vm_map_upgrade(map) vm_map_upgrade_ln(map, __FILE__, __LINE__)
#define vm_map_busy(map) vm_map_busy_ln(map, __FILE__, __LINE__)
#define vm_map_unbusy(map) vm_map_unbusy_ln(map, __FILE__, __LINE__)
+#define vm_map_assert_anylock(map) \
+ vm_map_assert_anylock_ln(map, __FILE__, __LINE__)
+#define vm_map_assert_wrlock(map) \
+ vm_map_assert_wrlock_ln(map, __FILE__, __LINE__)
#else
#define vm_map_lock_try(map) vm_map_lock_try_ln(map, NULL, 0)
#define vm_map_lock(map) vm_map_lock_ln(map, NULL, 0)
#define vm_map_upgrade(map) vm_map_upgrade_ln(map, NULL, 0)
#define vm_map_busy(map) vm_map_busy_ln(map, NULL, 0)
#define vm_map_unbusy(map) vm_map_unbusy_ln(map, NULL, 0)
+#define vm_map_assert_anylock(map) vm_map_assert_anylock_ln(map, NULL, 0)
+#define vm_map_assert_wrlock(map) vm_map_assert_wrlock_ln(map, NULL, 0)
#endif
void uvm_map_lock_entry(struct vm_map_entry *);
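
The hunks above establish the calling convention for the two new assertions. Paths that only read the address tree (uvm_map_lookup_entry(), uvm_map_isavail(), uvm_map_checkprot()) assert that any map lock is held, while paths that modify entries or the free lists (uvm_unmap_remove(), uvm_map_freelist_update()) assert the write lock; the _ln/macro split means __FILE__ and __LINE__ are only passed under DIAGNOSTIC, matching the existing vm_map_lock()/vm_map_busy() macros. As a minimal sketch of that convention (not part of the patch; both helper functions below are invented for illustration):

/*
 * Hypothetical examples only.  vm_map_assert_anylock(),
 * vm_map_assert_wrlock(), RBT_MIN(), uvm_map_addr and struct vm_map
 * come from the sources patched above; the example_* helpers do not.
 */
#include <sys/param.h>
#include <sys/systm.h>

#include <uvm/uvm.h>

/* Read-only walk: holding the map as reader or writer is enough. */
struct vm_map_entry *
example_map_first_entry(struct vm_map *map)
{
	vm_map_assert_anylock(map);

	/* Lowest-addressed entry in the map's address tree. */
	return RBT_MIN(uvm_map_addr, &map->addr);
}

/* Mutation: changes to the address tree or free lists need the writer. */
void
example_map_mutate(struct vm_map *map)
{
	vm_map_assert_wrlock(map);

	/* ... modify map->addr or the map's free lists here ... */
}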