summaryrefslogtreecommitdiff
path: root/sys/uvm/uvm_map.c
diff options
context:
space:
mode:
author: Mark Kettenis <kettenis@cvs.openbsd.org> 2022-03-11 19:24:20 +0000
committer: Mark Kettenis <kettenis@cvs.openbsd.org> 2022-03-11 19:24:20 +0000
commit 315898cb62285655b4c17c0ba377bbd9f2d1dac3 (patch)
tree c413e6535ccbef742814a54138b7922675f592a4 /sys/uvm/uvm_map.c
parent fa139867613fcf0817e691d2579ab5d086cd1bbf (diff)
Hold a read lock on the map while copying out data during a sysctl(2) call
to prevent another thread from unmapping the memory and triggering an assertion or even corrupting random physical memory pages. This fix is similar to the change in uvm_glue.c rev. 1.74. However in this case we need to be careful since some sysctl(2) calls look at the map of the current process. In those cases we must not attempt to lock the map again. ok mpi@ Should fix: Reported-by: syzbot+be89fe83d6c004fcb412@syzkaller.appspotmail.com
Diffstat (limited to 'sys/uvm/uvm_map.c')
-rw-r--r--  sys/uvm/uvm_map.c  |  20
1 file changed, 15 insertions(+), 5 deletions(-)
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index ab114029180..98aa7d4326e 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map.c,v 1.288 2022/02/15 11:54:19 kn Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.289 2022/03/11 19:24:19 kettenis Exp $ */
/* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
/*
@@ -4522,7 +4522,12 @@ uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
return 0;
/* Acquire lock on srcmap. */
- vm_map_lock(srcmap);
+ if (flags & UVM_EXTRACT_RDLOCKED) {
+ vm_map_busy(srcmap);
+ vm_map_upgrade(srcmap);
+ vm_map_unbusy(srcmap);
+ } else
+ vm_map_lock(srcmap);
/* Lock srcmap, lookup first and last entry in <start,len>. */
first = uvm_map_entrybyaddr(&srcmap->addr, start);
@@ -4624,7 +4629,10 @@ fail2:
vm_map_unlock(kernel_map);
fail:
- vm_map_unlock(srcmap);
+ if (flags & UVM_EXTRACT_RDLOCKED)
+ vm_map_downgrade(srcmap);
+ else
+ vm_map_unlock(srcmap);
uvm_unmap_detach(&dead, 0);
@@ -5581,7 +5589,9 @@ uvm_map_fill_vmmap(struct vm_map *map, struct kinfo_vmentry *kve,
*/
start = (vaddr_t)kve[0].kve_start;
- vm_map_lock(map);
+ vm_map_busy(map);
+ vm_map_upgrade(map);
+ vm_map_unbusy(map);
RBT_FOREACH(entry, uvm_map_addr, &map->addr) {
if (cnt == maxcnt) {
error = ENOMEM;
@@ -5605,7 +5615,7 @@ uvm_map_fill_vmmap(struct vm_map *map, struct kinfo_vmentry *kve,
kve++;
cnt++;
}
- vm_map_unlock(map);
+ vm_map_downgrade(map);
KASSERT(cnt <= maxcnt);