From 3ca4486c5e48ba18e59dae0787ab04202c0e20c4 Mon Sep 17 00:00:00 2001 From: kn Date: Thu, 10 Feb 2022 10:15:35 +0000 Subject: [PATCH] Assert vm map locks Introduce vm_map_assert_{wrlock,rdlock,anylock,unlocked}() in rwlock(9) fashion and back up function comments about locking assumptions with proper assertions. Also add new comments/assertions based on code analysis and sync with NetBSD as much as possible. vm_map_lock() and vm_map_lock_read() are used for exclusive and shared access respectively; currently no code path is purely protected by vm_map_lock_read() alone, i.e. functions called with a read lock held by the callee are also called with a write lock elsewhere. Thus only vm_map_assert_{wrlock,anylock}() are used as of now. This should help with unlocking UVM related syscalls Tested as part of a larger diff through - amd64 package bulk build by naddy - amd64, arm64, powerpc64 base builds and regress by bluhm - amd64 and sparc64 base builds and regress by me Input mpi Feedback OK kettenis --- sys/uvm/uvm_fault.c | 3 +- sys/uvm/uvm_map.c | 91 +++++++++++++++++++++++++++++++++++++++++---- sys/uvm/uvm_map.h | 14 ++++++- 3 files changed, 98 insertions(+), 10 deletions(-) diff --git a/sys/uvm/uvm_fault.c b/sys/uvm/uvm_fault.c index e7a54be4c5a..e2a8186fd14 100644 --- a/sys/uvm/uvm_fault.c +++ b/sys/uvm/uvm_fault.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_fault.c,v 1.126 2022/02/03 19:57:11 guenther Exp $ */ +/* $OpenBSD: uvm_fault.c,v 1.127 2022/02/10 10:15:35 kn Exp $ */ /* $NetBSD: uvm_fault.c,v 1.51 2000/08/06 00:22:53 thorpej Exp $ */ /* @@ -1616,6 +1616,7 @@ uvm_fault_unwire_locked(vm_map_t map, vaddr_t start, vaddr_t end) struct vm_page *pg; KASSERT((map->flags & VM_MAP_INTRSAFE) == 0); + vm_map_assert_anylock(map); /* * we assume that the area we are unwiring has actually been wired diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c index f37e75d038d..56fd76968ea 100644 --- a/sys/uvm/uvm_map.c +++ b/sys/uvm/uvm_map.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_map.c,v 1.283 2022/02/10 10:14:02 kn Exp $ */ +/* $OpenBSD: uvm_map.c,v 1.284 2022/02/10 10:15:35 kn Exp $ */ /* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */ /* @@ -805,6 +805,8 @@ uvm_map_sel_limits(vaddr_t *min, vaddr_t *max, vsize_t sz, int guardpg, * Fills in *start_ptr and *end_ptr to be the first and last entry describing * the space. * If called with prefilled *start_ptr and *end_ptr, they are to be correct. + * + * map must be at least read-locked. */ int uvm_map_isavail(struct vm_map *map, struct uvm_addr_state *uaddr, @@ -815,6 +817,8 @@ uvm_map_isavail(struct vm_map *map, struct uvm_addr_state *uaddr, struct uvm_map_addr *atree; struct vm_map_entry *i, *i_end; + vm_map_assert_anylock(map); + if (addr + sz < addr) return 0; @@ -892,6 +896,8 @@ uvm_map_isavail(struct vm_map *map, struct uvm_addr_state *uaddr, /* * Invoke each address selector until an address is found. * Will not invoke uaddr_exe. + * + * => caller must at least have read-locked map */ int uvm_map_findspace(struct vm_map *map, struct vm_map_entry**first, @@ -901,6 +907,8 @@ uvm_map_findspace(struct vm_map *map, struct vm_map_entry**first, struct uvm_addr_state *uaddr; int i; + vm_map_assert_anylock(map); + /* * Allocation for sz bytes at any address, * using the addr selectors in order. @@ -1160,7 +1168,7 @@ unlock: * uvm_map: establish a valid mapping in map * * => *addr and sz must be a multiple of PAGE_SIZE. - * => map must be unlocked. 
+ * => map must be unlocked (we will lock it) * => value meanings (4 cases): * [1] == uoffset is a hint for PMAP_PREFER * [2] == don't PMAP_PREFER @@ -1821,6 +1829,8 @@ boolean_t uvm_map_lookup_entry(struct vm_map *map, vaddr_t address, struct vm_map_entry **entry) { + vm_map_assert_anylock(map); + *entry = uvm_map_entrybyaddr(&map->addr, address); return *entry != NULL && !UVM_ET_ISHOLE(*entry) && (*entry)->start <= address && (*entry)->end > address; @@ -2206,6 +2216,8 @@ uvm_unmap_kill_entry(struct vm_map *map, struct vm_map_entry *entry) * If remove_holes, then remove ET_HOLE entries as well. * If markfree, entry will be properly marked free, otherwise, no replacement * entry will be put in the tree (corrupting the tree). + * + * => map must be locked by caller */ void uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end, @@ -2214,6 +2226,8 @@ uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end, { struct vm_map_entry *prev_hint, *next, *entry; + vm_map_assert_wrlock(map); + start = MAX(start, map->min_offset); end = MIN(end, map->max_offset); if (start >= end) @@ -2304,6 +2318,8 @@ uvm_map_pageable_pgon(struct vm_map *map, struct vm_map_entry *first, { struct vm_map_entry *iter; + vm_map_assert_wrlock(map); + for (iter = first; iter != end; iter = RBT_NEXT(uvm_map_addr, iter)) { KDASSERT(iter->start >= start_addr && iter->end <= end_addr); @@ -2457,9 +2473,9 @@ uvm_map_pageable_wire(struct vm_map *map, struct vm_map_entry *first, /* * uvm_map_pageable: set pageability of a range in a map. * - * Flags: - * UVM_LK_ENTER: map is already locked by caller - * UVM_LK_EXIT: don't unlock map on exit + * => map must never be read-locked + * => if lockflags has UVM_LK_ENTER set, map is already write-locked + * => if lockflags has UVM_LK_EXIT set, don't unlock map on exit * * The full range must be in use (entries may not have fspace != 0). * UVM_ET_HOLE counts as unmapped. @@ -2586,8 +2602,8 @@ out: * uvm_map_pageable_all: special case of uvm_map_pageable - affects * all mapped regions. * - * Map must not be locked. - * If no flags are specified, all ragions are unwired. + * => map must not be locked. + * => if no flags are specified, all ragions are unwired. */ int uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit) @@ -2977,12 +2993,17 @@ uvm_tree_sanity(struct vm_map *map, char *file, int line) UVM_ASSERT(map, addr == vm_map_max(map), file, line); } +/* + * map must be at least read-locked. + */ void uvm_tree_size_chk(struct vm_map *map, char *file, int line) { struct vm_map_entry *iter; vsize_t size; + vm_map_assert_anylock(map); + size = 0; RBT_FOREACH(iter, uvm_map_addr, &map->addr) { if (!UVM_ET_ISHOLE(iter)) @@ -4269,7 +4290,7 @@ uvm_map_submap(struct vm_map *map, vaddr_t start, vaddr_t end, * uvm_map_checkprot: check protection in map * * => must allow specific protection in a fully allocated region. - * => map mut be read or write locked by caller. + * => map must be read or write locked by caller. 
*/ boolean_t uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end, @@ -4277,6 +4298,8 @@ uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end, { struct vm_map_entry *entry; + vm_map_assert_anylock(map); + if (start < map->min_offset || end > map->max_offset || start > end) return FALSE; if (start == end) @@ -4803,12 +4826,18 @@ flush_object: /* * UVM_MAP_CLIP_END implementation + * + * => caller should use UVM_MAP_CLIP_END macro rather than calling + * this directly + * => map must be locked by caller */ void uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t addr) { struct vm_map_entry *tmp; + vm_map_assert_wrlock(map); + KASSERT(entry->start < addr && VMMAP_FREE_END(entry) > addr); tmp = uvm_mapent_alloc(map, 0); @@ -4824,6 +4853,10 @@ uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t addr) * Since uvm_map_splitentry turns the original entry into the lowest * entry (address wise) we do a swap between the new entry and the original * entry, prior to calling uvm_map_splitentry. + * + * => caller should use UVM_MAP_CLIP_START macro rather than calling + * this directly + * => map must be locked by caller */ void uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry, vaddr_t addr) @@ -4831,6 +4864,8 @@ uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry, vaddr_t addr) struct vm_map_entry *tmp; struct uvm_addr_state *free; + vm_map_assert_wrlock(map); + /* Unlink original. */ free = uvm_map_uaddr_e(map, entry); uvm_mapent_free_remove(map, free, entry); @@ -5560,6 +5595,46 @@ vm_map_unbusy_ln(struct vm_map *map, char *file, int line) wakeup(&map->flags); } +void +vm_map_assert_anylock_ln(struct vm_map *map, char *file, int line) +{ + LPRINTF(("map assert read or write locked: %p (at %s %d)\n", map, file, line)); + if ((map->flags & VM_MAP_INTRSAFE) == 0) + rw_assert_anylock(&map->lock); + else + MUTEX_ASSERT_LOCKED(&map->mtx); +} + +void +vm_map_assert_rdlock_ln(struct vm_map *map, char *file, int line) +{ + LPRINTF(("map assert read locked: %p (at %s %d)\n", map, file, line)); + if ((map->flags & VM_MAP_INTRSAFE) == 0) + rw_assert_rdlock(&map->lock); + else + MUTEX_ASSERT_LOCKED(&map->mtx); +} + +void +vm_map_assert_wrlock_ln(struct vm_map *map, char *file, int line) +{ + LPRINTF(("map assert write locked: %p (at %s %d)\n", map, file, line)); + if ((map->flags & VM_MAP_INTRSAFE) == 0) + rw_assert_wrlock(&map->lock); + else + MUTEX_ASSERT_LOCKED(&map->mtx); +} + +void +vm_map_assert_unlocked_ln(struct vm_map *map, char *file, int line) +{ + LPRINTF(("map assert unlocked: %p (at %s %d)\n", map, file, line)); + if ((map->flags & VM_MAP_INTRSAFE) == 0) + rw_assert_unlocked(&map->lock); + else + MUTEX_ASSERT_UNLOCKED(&map->mtx); +} + #ifndef SMALL_KERNEL int uvm_map_fill_vmmap(struct vm_map *map, struct kinfo_vmentry *kve, diff --git a/sys/uvm/uvm_map.h b/sys/uvm/uvm_map.h index 14aeba504bc..543760e153c 100644 --- a/sys/uvm/uvm_map.h +++ b/sys/uvm/uvm_map.h @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_map.h,v 1.71 2021/12/15 12:53:53 mpi Exp $ */ +/* $OpenBSD: uvm_map.h,v 1.72 2022/02/10 10:15:35 kn Exp $ */ /* $NetBSD: uvm_map.h,v 1.24 2001/02/18 21:19:08 chs Exp $ */ /* @@ -419,6 +419,10 @@ void vm_map_downgrade_ln(struct vm_map*, char*, int); void vm_map_upgrade_ln(struct vm_map*, char*, int); void vm_map_busy_ln(struct vm_map*, char*, int); void vm_map_unbusy_ln(struct vm_map*, char*, int); +void vm_map_assert_anylock_ln(struct vm_map*, char*, int); +void vm_map_assert_rdlock_ln(struct vm_map*, char*, int); 
+void vm_map_assert_wrlock_ln(struct vm_map*, char*, int); +void vm_map_assert_unlocked_ln(struct vm_map*, char*, int); #ifdef DIAGNOSTIC #define vm_map_lock_try(map) vm_map_lock_try_ln(map, __FILE__, __LINE__) @@ -430,6 +434,10 @@ void vm_map_unbusy_ln(struct vm_map*, char*, int); #define vm_map_upgrade(map) vm_map_upgrade_ln(map, __FILE__, __LINE__) #define vm_map_busy(map) vm_map_busy_ln(map, __FILE__, __LINE__) #define vm_map_unbusy(map) vm_map_unbusy_ln(map, __FILE__, __LINE__) +#define vm_map_assert_anylock(map) vm_map_assert_anylock_ln(map, __FILE__, __LINE__) +#define vm_map_assert_rdlock(map) vm_map_assert_rdlock_ln(map, __FILE__, __LINE__) +#define vm_map_assert_wrlock(map) vm_map_assert_wrlock_ln(map, __FILE__, __LINE__) +#define vm_map_assert_unlocked(map) vm_map_assert_unlocked_ln(map, __FILE__, __LINE__) #else #define vm_map_lock_try(map) vm_map_lock_try_ln(map, NULL, 0) #define vm_map_lock(map) vm_map_lock_ln(map, NULL, 0) @@ -440,6 +448,10 @@ void vm_map_unbusy_ln(struct vm_map*, char*, int); #define vm_map_upgrade(map) vm_map_upgrade_ln(map, NULL, 0) #define vm_map_busy(map) vm_map_busy_ln(map, NULL, 0) #define vm_map_unbusy(map) vm_map_unbusy_ln(map, NULL, 0) +#define vm_map_assert_anylock(map) vm_map_assert_anylock_ln(map, NULL, 0) +#define vm_map_assert_rdlock(map) vm_map_assert_rdlock_ln(map, NULL, 0) +#define vm_map_assert_wrlock(map) vm_map_assert_wrlock_ln(map, NULL, 0) +#define vm_map_assert_unlocked(map) vm_map_assert_unlocked_ln(map, NULL, 0) #endif void uvm_map_lock_entry(struct vm_map_entry *); -- 2.20.1
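
Illustrative usage (editor's note, placed after the patch signature and not part of the committed diff): the sketch below shows the intended pattern with a hypothetical helper, uvm_map_nholes(), that only reads the entry tree and therefore asserts at least a read lock via vm_map_assert_anylock(). It reuses only identifiers that appear in the diff (RBT_FOREACH, uvm_map_addr, UVM_ET_ISHOLE, struct vm_map_entry); the helper itself and its name are invented for illustration and assume the usual uvm_map.c includes (<sys/param.h>, <uvm/uvm.h>).

/*
 * uvm_map_nholes: count ET_HOLE entries in a map (hypothetical example).
 *
 * => map must be at least read-locked by the caller.
 */
int
uvm_map_nholes(struct vm_map *map)
{
	struct vm_map_entry *iter;
	int nholes = 0;

	/* Back up the locking assumption stated above with an assertion. */
	vm_map_assert_anylock(map);

	/* Walking the address tree is safe under a shared or exclusive lock. */
	RBT_FOREACH(iter, uvm_map_addr, &map->addr) {
		if (UVM_ET_ISHOLE(iter))
			nholes++;
	}

	return nholes;
}

Functions that modify the map, such as uvm_unmap_remove() in this diff, assert an exclusive lock with vm_map_assert_wrlock() instead. Under DIAGNOSTIC the macros pass __FILE__/__LINE__ to the *_ln variants, matching the existing vm_map_lock() macros; for VM_MAP_INTRSAFE maps the checks fall back to MUTEX_ASSERT_LOCKED(&map->mtx) rather than the rw_assert_*(9) calls on &map->lock.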