-/*	$OpenBSD: uvm_map.c,v 1.331 2024/10/20 11:28:17 dlg Exp $	*/
+/*	$OpenBSD: uvm_map.c,v 1.332 2024/10/21 06:07:33 dlg Exp $	*/
 /*	$NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $	*/
 
 /*
 boolean_t
 vm_map_lock_try_ln(struct vm_map *map, char *file, int line)
 {
-	int rv;
+	boolean_t rv;
 
 	if (map->flags & VM_MAP_INTRSAFE) {
-		if (!mtx_enter_try(&map->mtx))
-			return FALSE;
+		rv = mtx_enter_try(&map->mtx);
 	} else {
-		struct proc *busy;
-
-		mtx_enter(&map->flags_lock);
-		busy = map->busy;
-		mtx_leave(&map->flags_lock);
-		if (busy != NULL && busy != curproc)
-			return FALSE;
-
-		rv = rw_enter(&map->lock, RW_WRITE|RW_NOSLEEP);
-		if (rv != 0)
-			return FALSE;
-
-		/* to be sure, to be sure */
 		mtx_enter(&map->flags_lock);
-		busy = map->busy;
+		if ((map->flags & VM_MAP_BUSY) && (map->busy != curproc)) {
+			mtx_leave(&map->flags_lock);
+			return (FALSE);
+		}
 		mtx_leave(&map->flags_lock);
-		if (busy != NULL && busy != curproc) {
-			rw_exit(&map->lock);
-			return FALSE;
+		rv = (rw_enter(&map->lock, RW_WRITE|RW_NOSLEEP) == 0);
+		/* check if the lock is busy and back out if we won the race */
+		if (rv) {
+			mtx_enter(&map->flags_lock);
+			if ((map->flags & VM_MAP_BUSY) &&
+			    (map->busy != curproc)) {
+				rw_exit(&map->lock);
+				rv = FALSE;
+			}
+			mtx_leave(&map->flags_lock);
 		}
 	}
 
-	map->timestamp++;
-	LPRINTF(("map lock: %p (at %s %d)\n", map, file, line));
-	uvm_tree_sanity(map, file, line);
-	uvm_tree_size_chk(map, file, line);
+	if (rv) {
+		map->timestamp++;
+		LPRINTF(("map lock: %p (at %s %d)\n", map, file, line));
+		uvm_tree_sanity(map, file, line);
+		uvm_tree_size_chk(map, file, line);
+	}
 
-	return TRUE;
+	return (rv);
 }
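The restored vm_map_lock_try_ln() follows a check, trylock, re-check shape: VM_MAP_BUSY is tested under `flags_lock' before the non-sleeping rw_enter() and again after it, and the rwlock is dropped if the map went busy in the window between the two tests. The sketch below models the same shape in userland; it is illustrative only, with pthread primitives standing in for mtx_enter()/rw_enter(), pthread_self() for curproc, and hypothetical names (struct toy_map, TM_BUSY, toy_map_lock_try) that are not kernel API.

#include <pthread.h>
#include <stdbool.h>

#define TM_BUSY		0x08	/* models VM_MAP_BUSY */
#define TM_WANTLOCK	0x10	/* models VM_MAP_WANTLOCK */

struct toy_map {
	pthread_mutex_t	 flags_lock;	/* protects flags, busy, has_busy */
	pthread_rwlock_t lock;		/* models the map rwlock */
	int		 flags;
	pthread_t	 busy;		/* thread holding the map busy */
	bool		 has_busy;	/* whether busy is valid */
};

/* true if the map is busy and another thread holds it; flags_lock held */
static bool
busy_is_other(struct toy_map *map)
{
	return (map->flags & TM_BUSY) &&
	    !(map->has_busy && pthread_equal(map->busy, pthread_self()));
}

static bool
toy_map_lock_try(struct toy_map *map)
{
	bool rv;

	/* cheap pre-check: fail fast if another thread holds the map busy */
	pthread_mutex_lock(&map->flags_lock);
	if (busy_is_other(map)) {
		pthread_mutex_unlock(&map->flags_lock);
		return false;
	}
	pthread_mutex_unlock(&map->flags_lock);

	/* non-sleeping acquire, like rw_enter(RW_WRITE|RW_NOSLEEP) */
	rv = (pthread_rwlock_trywrlock(&map->lock) == 0);

	/* re-check: the map may have gone busy before we got the lock */
	if (rv) {
		pthread_mutex_lock(&map->flags_lock);
		if (busy_is_other(map)) {
			pthread_rwlock_unlock(&map->lock);
			rv = false;
		}
		pthread_mutex_unlock(&map->flags_lock);
	}

	return rv;
}

The pre-check is only an optimization; correctness comes from the re-check, which runs with the write lock held and so cannot race a new busy holder, since the busy bit may only be set while the map is write-locked.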
 void
 vm_map_lock_ln(struct vm_map *map, char *file, int line)
 {
 	if ((map->flags & VM_MAP_INTRSAFE) == 0) {
-		mtx_enter(&map->flags_lock);
-		for (;;) {
-			while (map->busy != NULL && map->busy != curproc) {
-				map->nbusy++;
-				msleep_nsec(&map->busy, &map->mtx,
+		do {
+			mtx_enter(&map->flags_lock);
+tryagain:
+			while ((map->flags & VM_MAP_BUSY) &&
+			    (map->busy != curproc)) {
+				map->flags |= VM_MAP_WANTLOCK;
+				msleep_nsec(&map->flags, &map->flags_lock,
 				    PVM, vmmapbsy, INFSLP);
-				map->nbusy--;
 			}
 			mtx_leave(&map->flags_lock);
-
-			rw_enter_write(&map->lock);
-
-			/* to be sure, to be sure */
-			mtx_enter(&map->flags_lock);
-			if (map->busy != NULL && map->busy != curproc) {
-				/* go around again */
-				rw_exit_write(&map->lock);
-			} else {
-				/* we won */
-				break;
-			}
+		} while (rw_enter(&map->lock, RW_WRITE|RW_SLEEPFAIL) != 0);
+		/* check if the lock is busy and back out if we won the race */
+		mtx_enter(&map->flags_lock);
+		if ((map->flags & VM_MAP_BUSY) && (map->busy != curproc)) {
+			rw_exit(&map->lock);
+			goto tryagain;
 		}
 		mtx_leave(&map->flags_lock);
 	} else {
 		mtx_enter(&map->mtx);
 	}
-	if (map->busy != curproc) {
-		KASSERT(map->busy == NULL);
+	if (map->busy != curproc)
 		map->timestamp++;
-	}
 	LPRINTF(("map lock: %p (at %s %d)\n", map, file, line));
 	uvm_tree_sanity(map, file, line);
 	uvm_tree_size_chk(map, file, line);
 	mtx_enter(&map->flags_lock);
 	map->busy = curproc;
+	map->flags |= VM_MAP_BUSY;
 	mtx_leave(&map->flags_lock);
 }
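vm_map_lock_ln() is the sleeping variant of the same handshake: a would-be locker that finds the map busy advertises itself with VM_MAP_WANTLOCK and sleeps on &map->flags, and once it holds the write lock it re-checks, going around again via `goto tryagain' if it lost the race, before recording itself as the busy holder. Continuing the hypothetical toy_map sketch above, with a condition variable in place of msleep_nsec()/wakeup(); pthreads has no RW_SLEEPFAIL equivalent, so a plain blocking wrlock plus an outer retry loop approximates the do { } while (rw_enter(...)) construction.

static pthread_cond_t toy_flags_cv = PTHREAD_COND_INITIALIZER;

static void
toy_map_lock(struct toy_map *map)
{
	for (;;) {
		/* sleep until no other thread holds the map busy */
		pthread_mutex_lock(&map->flags_lock);
		while (busy_is_other(map)) {
			/* advertise ourselves, like VM_MAP_WANTLOCK */
			map->flags |= TM_WANTLOCK;
			pthread_cond_wait(&toy_flags_cv, &map->flags_lock);
		}
		pthread_mutex_unlock(&map->flags_lock);

		pthread_rwlock_wrlock(&map->lock);

		/* back out and retry if the map went busy while we slept */
		pthread_mutex_lock(&map->flags_lock);
		if (busy_is_other(map)) {
			pthread_mutex_unlock(&map->flags_lock);
			pthread_rwlock_unlock(&map->lock);
			continue;
		}
		/* we won: mark the map busy, as the restored code does */
		map->busy = pthread_self();
		map->has_busy = true;
		map->flags |= TM_BUSY;
		pthread_mutex_unlock(&map->flags_lock);
		return;
	}
}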
 void
 vm_map_unbusy_ln(struct vm_map *map, char *file, int line)
 {
-	unsigned int nbusy;
+	int oflags;
 
 	KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
 	KASSERT(map->busy == curproc);
 
 	mtx_enter(&map->flags_lock);
-	nbusy = map->nbusy;
+	oflags = map->flags;
 	map->busy = NULL;
+	map->flags &= ~(VM_MAP_BUSY|VM_MAP_WANTLOCK);
 	mtx_leave(&map->flags_lock);
-
-	if (nbusy > 0)
-		wakeup(&map->busy);
+	if (oflags & VM_MAP_WANTLOCK)
+		wakeup(&map->flags);
 }
 
 void
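vm_map_unbusy_ln() is the wakeup half: it snapshots the flags, clears VM_MAP_BUSY and VM_MAP_WANTLOCK under `flags_lock', and calls wakeup(&map->flags) only when the snapshot shows that a waiter had set VM_MAP_WANTLOCK, so uncontended unbusy operations skip the wakeup entirely. The same logic in the toy_map model:

static void
toy_map_unbusy(struct toy_map *map)
{
	int oflags;

	pthread_mutex_lock(&map->flags_lock);
	oflags = map->flags;
	map->has_busy = false;
	map->flags &= ~(TM_BUSY | TM_WANTLOCK);
	pthread_mutex_unlock(&map->flags_lock);

	/* wakeup(&map->flags) analogue, skipped when nobody waits */
	if (oflags & TM_WANTLOCK)
		pthread_cond_broadcast(&toy_flags_cv);
}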
-/*	$OpenBSD: uvm_map.h,v 1.91 2024/10/20 11:28:17 dlg Exp $	*/
+/*	$OpenBSD: uvm_map.h,v 1.92 2024/10/21 06:07:33 dlg Exp $	*/
 /*	$NetBSD: uvm_map.h,v 1.24 2001/02/18 21:19:08 chs Exp $	*/
 
 /*
  *				map is write-locked. may be tested
  *				without asserting `flags_lock'.
  *
+ *	VM_MAP_BUSY		r/w; may only be set when map is
+ *				write-locked, may only be cleared by
+ *				thread which set it, map read-locked
+ *				or write-locked. must be tested
+ *				while `flags_lock' is asserted.
+ *
+ *	VM_MAP_WANTLOCK		r/w; may only be set when the map
+ *				is busy, and thread is attempting
+ *				to write-lock. must be tested
+ *				while `flags_lock' is asserted.
+ *
  *	VM_MAP_GUARDPAGES	r/o; must be specified at map
  *				initialization time.
  *				If set, guards will appear between
  *	a	atomic operations
  *	I	immutable after creation or exec(2)
  *	v	`vm_map_lock' (this map `lock' or `mtx')
- *	f	flags_lock
  */
 struct vm_map {
 	struct pmap		*pmap;		/* [I] Physical map */
 	vsize_t			size;		/* virtual size */
 	int			ref_count;	/* [a] Reference count */
-	int			flags;		/* [f] flags */
+	int			flags;		/* flags */
 	unsigned int		timestamp;	/* Version number */
-	struct proc		*busy;		/* [f] thread holding map busy*/
-	unsigned int		nbusy;		/* [f] waiters for busy */
+	struct proc		*busy;		/* [v] thread holding map busy*/
 	vaddr_t			min_offset;	/* [I] First address in map. */
 	vaddr_t			max_offset;	/* [I] Last address in map. */
 #define	VM_MAP_PAGEABLE		0x01	/* ro: entries are pageable */
 #define	VM_MAP_INTRSAFE		0x02	/* ro: interrupt safe map */
 #define	VM_MAP_WIREFUTURE	0x04	/* rw: wire future mappings */
+#define	VM_MAP_BUSY		0x08	/* rw: map is busy */
+#define	VM_MAP_WANTLOCK		0x10	/* rw: want to write-lock */
 #define	VM_MAP_GUARDPAGES	0x20	/* rw: add guard pgs to map */
 #define	VM_MAP_ISVMSPACE	0x40	/* ro: map is a vmspace */
 #define	VM_MAP_PINSYSCALL_ONCE	0x100	/* rw: pinsyscall done */
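Both restored bits come with the rule, documented above, that they may only be tested while `flags_lock' is asserted. For illustration, a hypothetical helper (not a function in uvm_map.h) performing that test correctly would look like:

static inline boolean_t
vm_map_busy_held_by_other(struct vm_map *map)
{
	boolean_t rv;

	/* VM_MAP_BUSY and VM_MAP_WANTLOCK: test only under `flags_lock' */
	mtx_enter(&map->flags_lock);
	rv = (map->flags & VM_MAP_BUSY) && (map->busy != curproc);
	mtx_leave(&map->flags_lock);

	return (rv);
}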