-/* $OpenBSD: uvm_map.c,v 1.330 2024/07/24 12:17:31 mpi Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.331 2024/10/20 11:28:17 dlg Exp $ */
/* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
/*
boolean_t
vm_map_lock_try_ln(struct vm_map *map, char *file, int line)
{
- boolean_t rv;
+ int rv;
if (map->flags & VM_MAP_INTRSAFE) {
- rv = mtx_enter_try(&map->mtx);
+ /* intrsafe maps use a plain mutex; fail rather than sleep */
+ if (!mtx_enter_try(&map->mtx))
+ return FALSE;
} else {
+ struct proc *busy;
+
mtx_enter(&map->flags_lock);
- if ((map->flags & VM_MAP_BUSY) && (map->busy != curproc)) {
- mtx_leave(&map->flags_lock);
- return (FALSE);
- }
+ /* snapshot the busy owner under flags_lock */
+ busy = map->busy;
mtx_leave(&map->flags_lock);
- rv = (rw_enter(&map->lock, RW_WRITE|RW_NOSLEEP) == 0);
- /* check if the lock is busy and back out if we won the race */
- if (rv) {
- mtx_enter(&map->flags_lock);
- if ((map->flags & VM_MAP_BUSY) &&
- (map->busy != curproc)) {
- rw_exit(&map->lock);
- rv = FALSE;
- }
- mtx_leave(&map->flags_lock);
+ if (busy != NULL && busy != curproc)
+ return FALSE;
+
+ rv = rw_enter(&map->lock, RW_WRITE|RW_NOSLEEP);
+ if (rv != 0)
+ return FALSE;
+
+ /*
+ * re-check the busy owner now that we hold the rwlock:
+ * another thread may have marked the map busy between the
+ * snapshot above and the rw_enter; back out if so.
+ */
+ mtx_enter(&map->flags_lock);
+ busy = map->busy;
+ mtx_leave(&map->flags_lock);
+ if (busy != NULL && busy != curproc) {
+ rw_exit(&map->lock);
+ return FALSE;
}
}
- if (rv) {
- map->timestamp++;
- LPRINTF(("map lock: %p (at %s %d)\n", map, file, line));
- uvm_tree_sanity(map, file, line);
- uvm_tree_size_chk(map, file, line);
- }
+ map->timestamp++;
+ LPRINTF(("map lock: %p (at %s %d)\n", map, file, line));
+ uvm_tree_sanity(map, file, line);
+ uvm_tree_size_chk(map, file, line);
- return (rv);
+ return TRUE;
}
void
vm_map_lock_ln(struct vm_map *map, char *file, int line)
{
if ((map->flags & VM_MAP_INTRSAFE) == 0) {
- do {
- mtx_enter(&map->flags_lock);
-tryagain:
- while ((map->flags & VM_MAP_BUSY) &&
- (map->busy != curproc)) {
- map->flags |= VM_MAP_WANTLOCK;
- msleep_nsec(&map->flags, &map->flags_lock,
+ mtx_enter(&map->flags_lock);
+ for (;;) {
+ while (map->busy != NULL && map->busy != curproc) {
+ /*
+ * count ourselves as a waiter and sleep on
+ * &map->busy (the wakeup channel used by
+ * vm_map_unbusy_ln).  flags_lock is the mutex
+ * held here, so it is what msleep must be
+ * given to release while sleeping.
+ */
+ map->nbusy++;
+ msleep_nsec(&map->busy, &map->flags_lock,
PVM, vmmapbsy, INFSLP);
+ map->nbusy--;
}
mtx_leave(&map->flags_lock);
- } while (rw_enter(&map->lock, RW_WRITE|RW_SLEEPFAIL) != 0);
- /* check if the lock is busy and back out if we won the race */
- mtx_enter(&map->flags_lock);
- if ((map->flags & VM_MAP_BUSY) && (map->busy != curproc)) {
- rw_exit(&map->lock);
- goto tryagain;
+
+ rw_enter_write(&map->lock);
+
+ /*
+ * re-check the busy owner now that we hold the rwlock:
+ * the map may have been marked busy again while we
+ * were acquiring it.
+ */
+ mtx_enter(&map->flags_lock);
+ if (map->busy != NULL && map->busy != curproc) {
+ /* go around again */
+ rw_exit_write(&map->lock);
+ } else {
+ /* we won */
+ break;
+ }
}
mtx_leave(&map->flags_lock);
} else {
mtx_enter(&map->mtx);
}
- if (map->busy != curproc)
+ if (map->busy != curproc) {
+ KASSERT(map->busy == NULL);
map->timestamp++;
+ }
LPRINTF(("map lock: %p (at %s %d)\n", map, file, line));
uvm_tree_sanity(map, file, line);
uvm_tree_size_chk(map, file, line);
mtx_enter(&map->flags_lock);
map->busy = curproc;
- map->flags |= VM_MAP_BUSY;
mtx_leave(&map->flags_lock);
}
void
vm_map_unbusy_ln(struct vm_map *map, char *file, int line)
{
- int oflags;
+ unsigned int nbusy;
KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
KASSERT(map->busy == curproc);
mtx_enter(&map->flags_lock);
- oflags = map->flags;
+ /* snapshot the waiter count before clearing the busy owner */
+ nbusy = map->nbusy;
map->busy = NULL;
- map->flags &= ~(VM_MAP_BUSY|VM_MAP_WANTLOCK);
mtx_leave(&map->flags_lock);
-if (oflags & VM_MAP_WANTLOCK)
- wakeup(&map->flags);
+
+ /* wake sleepers only when someone is actually waiting */
+ if (nbusy > 0)
+ wakeup(&map->busy);
}
void
-/* $OpenBSD: uvm_map.h,v 1.90 2024/06/18 12:37:29 jsg Exp $ */
+/* $OpenBSD: uvm_map.h,v 1.91 2024/10/20 11:28:17 dlg Exp $ */
/* $NetBSD: uvm_map.h,v 1.24 2001/02/18 21:19:08 chs Exp $ */
/*
* map is write-locked. may be tested
* without asserting `flags_lock'.
*
- * VM_MAP_BUSY r/w; may only be set when map is
- * write-locked, may only be cleared by
- * thread which set it, map read-locked
- * or write-locked. must be tested
- * while `flags_lock' is asserted.
- *
- * VM_MAP_WANTLOCK r/w; may only be set when the map
- * is busy, and thread is attempting
- * to write-lock. must be tested
- * while `flags_lock' is asserted.
- *
* VM_MAP_GUARDPAGES r/o; must be specified at map
* initialization time.
* If set, guards will appear between
* a atomic operations
* I immutable after creation or exec(2)
* v `vm_map_lock' (this map `lock' or `mtx')
+ * f flags_lock
*/
struct vm_map {
struct pmap *pmap; /* [I] Physical map */
vsize_t size; /* virtual size */
int ref_count; /* [a] Reference count */
- int flags; /* flags */
+ int flags; /* [f] flags */
unsigned int timestamp; /* Version number */
- struct proc *busy; /* [v] thread holding map busy*/
+ struct proc *busy; /* [f] thread holding map busy*/
+ unsigned int nbusy; /* [f] waiters for busy */
vaddr_t min_offset; /* [I] First address in map. */
vaddr_t max_offset; /* [I] Last address in map. */
#define VM_MAP_PAGEABLE 0x01 /* ro: entries are pageable */
#define VM_MAP_INTRSAFE 0x02 /* ro: interrupt safe map */
#define VM_MAP_WIREFUTURE 0x04 /* rw: wire future mappings */
-#define VM_MAP_BUSY 0x08 /* rw: map is busy */
-#define VM_MAP_WANTLOCK 0x10 /* rw: want to write-lock */
#define VM_MAP_GUARDPAGES 0x20 /* rw: add guard pgs to map */
#define VM_MAP_ISVMSPACE 0x40 /* ro: map is a vmspace */
#define VM_MAP_PINSYSCALL_ONCE 0x100 /* rw: pinsyscall done */