-/* $OpenBSD: uvm_map.c,v 1.284 2022/02/10 10:15:35 kn Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.285 2022/02/11 09:25:04 kn Exp $ */
/* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
/*
* Fills in *start_ptr and *end_ptr to be the first and last entry describing
* the space.
* If called with prefilled *start_ptr and *end_ptr, they are to be correct.
- *
- * map must be at least read-locked.
*/
int
uvm_map_isavail(struct vm_map *map, struct uvm_addr_state *uaddr,
struct uvm_map_addr *atree;
struct vm_map_entry *i, *i_end;
- vm_map_assert_anylock(map);
-
if (addr + sz < addr)
return 0;
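/*
 * Illustrative sketch (plain C99, not part of this diff): why the
 * "addr + sz < addr" guard above is a wrap-around check.  vaddr_t and
 * vsize_t are stood in for by uintptr_t here; that is an assumption made
 * only to keep the example self-contained.
 */
#include <stdint.h>
#include <stdio.h>

static int
range_does_not_wrap(uintptr_t addr, uintptr_t sz)
{
	/* Unsigned addition reduces modulo 2^N, so a wrapped end is < addr. */
	return addr + sz >= addr;
}

int
main(void)
{
	printf("%d\n", range_does_not_wrap(0x1000, 0x2000));             /* 1: fits  */
	printf("%d\n", range_does_not_wrap((uintptr_t)-0x1000, 0x2000)); /* 0: wraps */
	return 0;
}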
/*
* Invoke each address selector until an address is found.
* Will not invoke uaddr_exe.
- *
- * => caller must at least have read-locked map
*/
int
uvm_map_findspace(struct vm_map *map, struct vm_map_entry **first,
struct uvm_addr_state *uaddr;
int i;
- vm_map_assert_anylock(map);
-
/*
* Allocation for sz bytes at any address,
* using the addr selectors in order.
* uvm_map: establish a valid mapping in map
*
* => *addr and sz must be a multiple of PAGE_SIZE.
- * => map must be unlocked (we will lock it)
+ * => map must be unlocked.
* => <uobj,uoffset> value meanings (4 cases):
* [1] <NULL,uoffset> == uoffset is a hint for PMAP_PREFER
* [2] <NULL,UVM_UNKNOWN_OFFSET> == don't PMAP_PREFER
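/*
 * Hedged sketch, not part of this diff: case [2] above, an anonymous
 * mapping established with <NULL, UVM_UNKNOWN_OFFSET>.  The UVM_MAPFLAG()
 * argument spelling (PROT_*, MAP_INHERIT_NONE, MADV_NORMAL) and the
 * hypothetical helper name example_anon_kva() are assumptions based on
 * the surrounding tree, not taken from the hunks shown here.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <uvm/uvm.h>

vaddr_t
example_anon_kva(vsize_t sz)
{
	vaddr_t va = vm_map_min(kernel_map);	/* hint only */
	int error;

	error = uvm_map(kernel_map, &va, round_page(sz), NULL,
	    UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
	    MAP_INHERIT_NONE, MADV_NORMAL, 0));
	return error ? 0 : va;
}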
uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
struct vm_map_entry **entry)
{
- vm_map_assert_anylock(map);
-
*entry = uvm_map_entrybyaddr(&map->addr, address);
return *entry != NULL && !UVM_ET_ISHOLE(*entry) &&
(*entry)->start <= address && (*entry)->end > address;
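/*
 * Hedged sketch, not part of this diff and written as if it sat inside
 * uvm_map.c: a typical reader of uvm_map_lookup_entry().  The helper
 * example_is_mapped() is hypothetical; the vm_map_lock_read()/
 * vm_map_unlock_read() pair is the standard way for a caller to satisfy
 * the read-lock expectation that the backed-out assertion checked.
 */
static boolean_t
example_is_mapped(struct vm_map *map, vaddr_t va)
{
	struct vm_map_entry *entry;
	boolean_t found;

	vm_map_lock_read(map);
	found = uvm_map_lookup_entry(map, va, &entry);
	vm_map_unlock_read(map);
	return found;
}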
* If remove_holes, then remove ET_HOLE entries as well.
* If markfree, entry will be properly marked free, otherwise, no replacement
* entry will be put in the tree (corrupting the tree).
- *
- * => map must be locked by caller
*/
void
uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
{
struct vm_map_entry *prev_hint, *next, *entry;
- vm_map_assert_wrlock(map);
-
start = MAX(start, map->min_offset);
end = MIN(end, map->max_offset);
if (start >= end)
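/*
 * Hedged sketch, not part of this diff and written as if it sat inside
 * uvm_map.c: the usual caller-side pattern around uvm_unmap_remove().
 * The dead-entry queue argument and uvm_unmap_detach() are assumed from
 * the surrounding tree (they are elided in the hunk above), and
 * example_unmap_range() is hypothetical.
 */
static void
example_unmap_range(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	struct uvm_map_deadq dead;

	TAILQ_INIT(&dead);
	vm_map_lock(map);
	/* FALSE: keep ET_HOLE entries, TRUE: mark the range free. */
	uvm_unmap_remove(map, start, end, &dead, FALSE, TRUE);
	vm_map_unlock(map);
	uvm_unmap_detach(&dead, 0);
}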
{
struct vm_map_entry *iter;
- vm_map_assert_wrlock(map);
-
for (iter = first; iter != end;
iter = RBT_NEXT(uvm_map_addr, iter)) {
KDASSERT(iter->start >= start_addr && iter->end <= end_addr);
/*
* uvm_map_pageable: set pageability of a range in a map.
*
- * => map must never be read-locked
- * => if lockflags has UVM_LK_ENTER set, map is already write-locked
- * => if lockflags has UVM_LK_EXIT set, don't unlock map on exit
+ * Flags:
+ * UVM_LK_ENTER: map is already locked by caller
+ * UVM_LK_EXIT: don't unlock map on exit
*
* The full range must be in use (entries may not have fspace != 0).
* UVM_ET_HOLE counts as unmapped.
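/*
 * Hedged sketch, not part of this diff and written as if it sat inside
 * uvm_map.c: wiring then unwiring a page-aligned range.  new_pageable ==
 * FALSE wires, TRUE unwires; lockflags 0 lets uvm_map_pageable() take and
 * drop the map lock itself, while UVM_LK_ENTER/UVM_LK_EXIT adjust that as
 * described above.  example_wire_range() is hypothetical.
 */
static int
example_wire_range(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	int error;

	error = uvm_map_pageable(map, start, end, FALSE, 0);	/* wire */
	if (error)
		return error;
	/* ... access the wired range ... */
	return uvm_map_pageable(map, start, end, TRUE, 0);	/* unwire */
}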
* uvm_map_pageable_all: special case of uvm_map_pageable - affects
* all mapped regions.
*
- * => map must not be locked.
- * => if no flags are specified, all regions are unwired.
+ * Map must not be locked.
+ * If no flags are specified, all regions are unwired.
*/
int
uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit)
UVM_ASSERT(map, addr == vm_map_max(map), file, line);
}
-/*
- * map must be at least read-locked.
- */
void
uvm_tree_size_chk(struct vm_map *map, char *file, int line)
{
struct vm_map_entry *iter;
vsize_t size;
- vm_map_assert_anylock(map);
-
size = 0;
RBT_FOREACH(iter, uvm_map_addr, &map->addr) {
if (!UVM_ET_ISHOLE(iter))
* uvm_map_checkprot: check protection in map
*
* => must allow specific protection in a fully allocated region.
 * => map must be read or write locked by caller.
*/
boolean_t
uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
{
struct vm_map_entry *entry;
- vm_map_assert_anylock(map);
-
if (start < map->min_offset || end > map->max_offset || start > end)
return FALSE;
if (start == end)
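/*
 * Hedged sketch, not part of this diff and written as if it sat inside
 * uvm_map.c: gating an operation on read permission over a whole range.
 * example_can_read() is hypothetical; the read lock is taken explicitly,
 * matching the locking requirement in the comment above.
 */
static int
example_can_read(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	boolean_t ok;

	vm_map_lock_read(map);
	ok = uvm_map_checkprot(map, start, end, PROT_READ);
	vm_map_unlock_read(map);
	return ok ? 0 : EFAULT;
}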
/*
* UVM_MAP_CLIP_END implementation
- *
- * => caller should use UVM_MAP_CLIP_END macro rather than calling
- * this directly
- * => map must be locked by caller
*/
void
uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t addr)
{
struct vm_map_entry *tmp;
- vm_map_assert_wrlock(map);
-
KASSERT(entry->start < addr && VMMAP_FREE_END(entry) > addr);
tmp = uvm_mapent_alloc(map, 0);
* Since uvm_map_splitentry turns the original entry into the lowest
* entry (address wise) we do a swap between the new entry and the original
* entry, prior to calling uvm_map_splitentry.
- *
- * => caller should use UVM_MAP_CLIP_START macro rather than calling
- * this directly
- * => map must be locked by caller
*/
void
uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry, vaddr_t addr)
struct vm_map_entry *tmp;
struct uvm_addr_state *free;
- vm_map_assert_wrlock(map);
-
/* Unlink original. */
free = uvm_map_uaddr_e(map, entry);
uvm_mapent_free_remove(map, free, entry);
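/*
 * Illustrative toy (plain C, not UVM code, not part of this diff): what
 * "clipping" means.  Splitting [start,end) at addr yields [start,addr)
 * and [addr,end); after uvm_map_clip_start() the caller's entry is the
 * high half, which is why the swap described above is needed before
 * uvm_map_splitentry().
 */
#include <assert.h>
#include <stdint.h>

struct toy_entry { uintptr_t start, end; };

/* Split *e at addr; *e keeps [addr,end), *lo receives [start,addr). */
static void
toy_clip_start(struct toy_entry *e, struct toy_entry *lo, uintptr_t addr)
{
	assert(e->start < addr && addr < e->end);
	lo->start = e->start;
	lo->end = addr;
	e->start = addr;
}

int
main(void)
{
	struct toy_entry e = { 0x1000, 0x5000 }, lo;

	toy_clip_start(&e, &lo, 0x3000);
	assert(lo.start == 0x1000 && lo.end == 0x3000);
	assert(e.start == 0x3000 && e.end == 0x5000);
	return 0;
}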
wakeup(&map->flags);
}
-void
-vm_map_assert_anylock_ln(struct vm_map *map, char *file, int line)
-{
- LPRINTF(("map assert read or write locked: %p (at %s %d)\n", map, file, line));
- if ((map->flags & VM_MAP_INTRSAFE) == 0)
- rw_assert_anylock(&map->lock);
- else
- MUTEX_ASSERT_LOCKED(&map->mtx);
-}
-
-void
-vm_map_assert_rdlock_ln(struct vm_map *map, char *file, int line)
-{
- LPRINTF(("map assert read locked: %p (at %s %d)\n", map, file, line));
- if ((map->flags & VM_MAP_INTRSAFE) == 0)
- rw_assert_rdlock(&map->lock);
- else
- MUTEX_ASSERT_LOCKED(&map->mtx);
-}
-
-void
-vm_map_assert_wrlock_ln(struct vm_map *map, char *file, int line)
-{
- LPRINTF(("map assert write locked: %p (at %s %d)\n", map, file, line));
- if ((map->flags & VM_MAP_INTRSAFE) == 0)
- rw_assert_wrlock(&map->lock);
- else
- MUTEX_ASSERT_LOCKED(&map->mtx);
-}
-
-void
-vm_map_assert_unlocked_ln(struct vm_map *map, char *file, int line)
-{
- LPRINTF(("map assert unlocked: %p (at %s %d)\n", map, file, line));
- if ((map->flags & VM_MAP_INTRSAFE) == 0)
- rw_assert_unlocked(&map->lock);
- else
- MUTEX_ASSERT_UNLOCKED(&map->mtx);
-}
-
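/*
 * Hedged sketch, not part of this diff: the primitives the removed
 * helpers wrapped (rw_assert_*() and MUTEX_ASSERT_LOCKED()) remain
 * available to any subsystem that wants equivalent checks on its own
 * lock.  struct example_softc and both functions are hypothetical.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/rwlock.h>
#include <sys/mutex.h>

struct example_softc {
	struct rwlock	sc_lock;
	struct mutex	sc_mtx;
};

static void
example_modify(struct example_softc *sc)
{
	rw_assert_wrlock(&sc->sc_lock);		/* caller holds the write lock */
	/* ... mutate state protected by sc_lock ... */
}

static void
example_intr(struct example_softc *sc)
{
	MUTEX_ASSERT_LOCKED(&sc->sc_mtx);	/* interrupt-safe path */
	/* ... */
}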
#ifndef SMALL_KERNEL
int
uvm_map_fill_vmmap(struct vm_map *map, struct kinfo_vmentry *kve,
-/* $OpenBSD: uvm_map.h,v 1.72 2022/02/10 10:15:35 kn Exp $ */
+/* $OpenBSD: uvm_map.h,v 1.73 2022/02/11 09:25:04 kn Exp $ */
/* $NetBSD: uvm_map.h,v 1.24 2001/02/18 21:19:08 chs Exp $ */
/*
void vm_map_upgrade_ln(struct vm_map*, char*, int);
void vm_map_busy_ln(struct vm_map*, char*, int);
void vm_map_unbusy_ln(struct vm_map*, char*, int);
-void vm_map_assert_anylock_ln(struct vm_map*, char*, int);
-void vm_map_assert_rdlock_ln(struct vm_map*, char*, int);
-void vm_map_assert_wrlock_ln(struct vm_map*, char*, int);
-void vm_map_assert_unlocked_ln(struct vm_map*, char*, int);
#ifdef DIAGNOSTIC
#define vm_map_lock_try(map) vm_map_lock_try_ln(map, __FILE__, __LINE__)
#define vm_map_upgrade(map) vm_map_upgrade_ln(map, __FILE__, __LINE__)
#define vm_map_busy(map) vm_map_busy_ln(map, __FILE__, __LINE__)
#define vm_map_unbusy(map) vm_map_unbusy_ln(map, __FILE__, __LINE__)
-#define vm_map_assert_anylock(map) vm_map_assert_anylock_ln(map, __FILE__, __LINE__)
-#define vm_map_assert_rdlock(map) vm_map_assert_rdlock_ln(map, __FILE__, __LINE__)
-#define vm_map_assert_wrlock(map) vm_map_assert_wrlock_ln(map, __FILE__, __LINE__)
-#define vm_map_assert_unlocked(map) vm_map_assert_unlocked_ln(map, __FILE__, __LINE__)
#else
#define vm_map_lock_try(map) vm_map_lock_try_ln(map, NULL, 0)
#define vm_map_lock(map) vm_map_lock_ln(map, NULL, 0)
#define vm_map_upgrade(map) vm_map_upgrade_ln(map, NULL, 0)
#define vm_map_busy(map) vm_map_busy_ln(map, NULL, 0)
#define vm_map_unbusy(map) vm_map_unbusy_ln(map, NULL, 0)
-#define vm_map_assert_anylock(map) vm_map_assert_anylock_ln(map, NULL, 0)
-#define vm_map_assert_rdlock(map) vm_map_assert_rdlock_ln(map, NULL, 0)
-#define vm_map_assert_wrlock(map) vm_map_assert_wrlock_ln(map, NULL, 0)
-#define vm_map_assert_unlocked(map) vm_map_assert_unlocked_ln(map, NULL, 0)
#endif
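/*
 * Hedged sketch, not part of this diff: the _ln convention used by the
 * remaining macros above.  The public name forwards the call site under
 * DIAGNOSTIC and passes NULL/0 otherwise, so the out-of-line helper can
 * report where it was invoked.  example_trace/example_trace_ln are
 * hypothetical names.
 */
void	example_trace_ln(struct vm_map *, char *, int);

#ifdef DIAGNOSTIC
#define example_trace(map)	example_trace_ln(map, __FILE__, __LINE__)
#else
#define example_trace(map)	example_trace_ln(map, NULL, 0)
#endif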
void uvm_map_lock_entry(struct vm_map_entry *);