summaryrefslogtreecommitdiff
path: root/sys/uvm
diff options
context:
space:
mode:
author David Gwynne <dlg@cvs.openbsd.org> 2024-10-31 05:00:01 +0000
committer David Gwynne <dlg@cvs.openbsd.org> 2024-10-31 05:00:01 +0000
commit 8711058ca477ba8064e1c586fb2bd19e1d32722f (patch)
tree 62f619b41f61f4f4ebacf82007483422840c0878 /sys/uvm
parent 0aefd52dc5a68259cc4802af2eeb0c5682273fa3 (diff)
try to simplify the locking code around busy maps.
this is src/sys/uvm/uvm_map.c r1.331 again with a small fix to pass the right mutex to msleep when waiting for the busy proc pointer. vm_maps have a "feature" where they can mark that they're being operated on by a specific proc, and then release the rwlock protecting their state. to relock, you have to be the same proc that marked it busy. this diff tries to simplify it a bit. it basically has threads check the busy field up front and rechecks the busy field inside the rwlock. if you can sleep, it will sleep up front for the busy field to become clear, rather than sleep on either the busy field or the rwlock. some code paths clear the busy field without holding the rwlock, so it doesn't make sense to me to be waiting for the busy field but sleeping somewhere else. ok claudio@
Diffstat (limited to 'sys/uvm')
-rw-r--r--sys/uvm/uvm_map.c96
-rw-r--r--sys/uvm/uvm_map.h21
2 files changed, 57 insertions, 60 deletions
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index 02e2c0fa0f5..2feb604516a 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map.c,v 1.332 2024/10/21 06:07:33 dlg Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.333 2024/10/31 05:00:00 dlg Exp $ */
/* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
/*
@@ -5201,68 +5201,77 @@ out:
boolean_t
vm_map_lock_try_ln(struct vm_map *map, char *file, int line)
{
- boolean_t rv;
+ int rv;
if (map->flags & VM_MAP_INTRSAFE) {
- rv = mtx_enter_try(&map->mtx);
+ if (!mtx_enter_try(&map->mtx))
+ return FALSE;
} else {
+ struct proc *busy;
+
mtx_enter(&map->flags_lock);
- if ((map->flags & VM_MAP_BUSY) && (map->busy != curproc)) {
- mtx_leave(&map->flags_lock);
- return (FALSE);
- }
+ busy = map->busy;
mtx_leave(&map->flags_lock);
- rv = (rw_enter(&map->lock, RW_WRITE|RW_NOSLEEP) == 0);
- /* check if the lock is busy and back out if we won the race */
- if (rv) {
- mtx_enter(&map->flags_lock);
- if ((map->flags & VM_MAP_BUSY) &&
- (map->busy != curproc)) {
- rw_exit(&map->lock);
- rv = FALSE;
- }
- mtx_leave(&map->flags_lock);
+ if (busy != NULL && busy != curproc)
+ return FALSE;
+
+ rv = rw_enter(&map->lock, RW_WRITE|RW_NOSLEEP);
+ if (rv != 0)
+ return FALSE;
+
+ /* to be sure, to be sure */
+ mtx_enter(&map->flags_lock);
+ busy = map->busy;
+ mtx_leave(&map->flags_lock);
+ if (busy != NULL && busy != curproc) {
+ rw_exit(&map->lock);
+ return FALSE;
}
}
- if (rv) {
- map->timestamp++;
- LPRINTF(("map lock: %p (at %s %d)\n", map, file, line));
- uvm_tree_sanity(map, file, line);
- uvm_tree_size_chk(map, file, line);
- }
+ map->timestamp++;
+ LPRINTF(("map lock: %p (at %s %d)\n", map, file, line));
+ uvm_tree_sanity(map, file, line);
+ uvm_tree_size_chk(map, file, line);
- return (rv);
+ return TRUE;
}
void
vm_map_lock_ln(struct vm_map *map, char *file, int line)
{
if ((map->flags & VM_MAP_INTRSAFE) == 0) {
- do {
- mtx_enter(&map->flags_lock);
-tryagain:
- while ((map->flags & VM_MAP_BUSY) &&
- (map->busy != curproc)) {
- map->flags |= VM_MAP_WANTLOCK;
- msleep_nsec(&map->flags, &map->flags_lock,
+ mtx_enter(&map->flags_lock);
+ for (;;) {
+ while (map->busy != NULL && map->busy != curproc) {
+ map->nbusy++;
+ msleep_nsec(&map->busy, &map->flags_lock,
PVM, vmmapbsy, INFSLP);
+ map->nbusy--;
}
mtx_leave(&map->flags_lock);
- } while (rw_enter(&map->lock, RW_WRITE|RW_SLEEPFAIL) != 0);
- /* check if the lock is busy and back out if we won the race */
- mtx_enter(&map->flags_lock);
- if ((map->flags & VM_MAP_BUSY) && (map->busy != curproc)) {
- rw_exit(&map->lock);
- goto tryagain;
+
+ rw_enter_write(&map->lock);
+
+ /* to be sure, to be sure */
+ mtx_enter(&map->flags_lock);
+ if (map->busy != NULL && map->busy != curproc) {
+ /* go around again */
+ rw_exit_write(&map->lock);
+ } else {
+ /* we won */
+ break;
+ }
}
mtx_leave(&map->flags_lock);
} else {
mtx_enter(&map->mtx);
}
- if (map->busy != curproc)
+ if (map->busy != curproc) {
+ KASSERT(map->busy == NULL);
map->timestamp++;
+ }
LPRINTF(("map lock: %p (at %s %d)\n", map, file, line));
uvm_tree_sanity(map, file, line);
uvm_tree_size_chk(map, file, line);
@@ -5314,25 +5323,24 @@ vm_map_busy_ln(struct vm_map *map, char *file, int line)
mtx_enter(&map->flags_lock);
map->busy = curproc;
- map->flags |= VM_MAP_BUSY;
mtx_leave(&map->flags_lock);
}
void
vm_map_unbusy_ln(struct vm_map *map, char *file, int line)
{
- int oflags;
+ unsigned int nbusy;
KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
KASSERT(map->busy == curproc);
mtx_enter(&map->flags_lock);
- oflags = map->flags;
+ nbusy = map->nbusy;
map->busy = NULL;
- map->flags &= ~(VM_MAP_BUSY|VM_MAP_WANTLOCK);
mtx_leave(&map->flags_lock);
- if (oflags & VM_MAP_WANTLOCK)
- wakeup(&map->flags);
+
+ if (nbusy > 0)
+ wakeup(&map->busy);
}
void
diff --git a/sys/uvm/uvm_map.h b/sys/uvm/uvm_map.h
index 0c98020d9d2..6bcc5572bd1 100644
--- a/sys/uvm/uvm_map.h
+++ b/sys/uvm/uvm_map.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map.h,v 1.92 2024/10/21 06:07:33 dlg Exp $ */
+/* $OpenBSD: uvm_map.h,v 1.93 2024/10/31 05:00:00 dlg Exp $ */
/* $NetBSD: uvm_map.h,v 1.24 2001/02/18 21:19:08 chs Exp $ */
/*
@@ -214,17 +214,6 @@ RBT_PROTOTYPE(uvm_map_addr, vm_map_entry, daddrs.addr_entry,
* map is write-locked. may be tested
* without asserting `flags_lock'.
*
- * VM_MAP_BUSY r/w; may only be set when map is
- * write-locked, may only be cleared by
- * thread which set it, map read-locked
- * or write-locked. must be tested
- * while `flags_lock' is asserted.
- *
- * VM_MAP_WANTLOCK r/w; may only be set when the map
- * is busy, and thread is attempting
- * to write-lock. must be tested
- * while `flags_lock' is asserted.
- *
* VM_MAP_GUARDPAGES r/o; must be specified at map
* initialization time.
* If set, guards will appear between
@@ -257,6 +246,7 @@ RBT_PROTOTYPE(uvm_map_addr, vm_map_entry, daddrs.addr_entry,
* a atomic operations
* I immutable after creation or exec(2)
* v `vm_map_lock' (this map `lock' or `mtx')
+ * f flags_lock
*/
struct vm_map {
struct pmap *pmap; /* [I] Physical map */
@@ -266,9 +256,10 @@ struct vm_map {
vsize_t size; /* virtual size */
int ref_count; /* [a] Reference count */
- int flags; /* flags */
+ int flags; /* [f] flags */
unsigned int timestamp; /* Version number */
- struct proc *busy; /* [v] thread holding map busy*/
+ struct proc *busy; /* [f] thread holding map busy*/
+ unsigned int nbusy; /* [f] waiters for busy */
vaddr_t min_offset; /* [I] First address in map. */
vaddr_t max_offset; /* [I] Last address in map. */
@@ -323,8 +314,6 @@ struct vm_map {
#define VM_MAP_PAGEABLE 0x01 /* ro: entries are pageable */
#define VM_MAP_INTRSAFE 0x02 /* ro: interrupt safe map */
#define VM_MAP_WIREFUTURE 0x04 /* rw: wire future mappings */
-#define VM_MAP_BUSY 0x08 /* rw: map is busy */
-#define VM_MAP_WANTLOCK 0x10 /* rw: want to write-lock */
#define VM_MAP_GUARDPAGES 0x20 /* rw: add guard pgs to map */
#define VM_MAP_ISVMSPACE 0x40 /* ro: map is a vmspace */
#define VM_MAP_PINSYSCALL_ONCE 0x100 /* rw: pinsyscall done */