-rw-r--r--  sys/uvm/uvm_extern.h   6
-rw-r--r--  sys/uvm/uvm_fault.c   68
-rw-r--r--  sys/uvm/uvm_fault.h    5
-rw-r--r--  sys/uvm/uvm_glue.c    29
-rw-r--r--  sys/uvm/uvm_map.c     14
5 files changed, 90 insertions, 32 deletions
diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h
index f6dd3d2aa24..95b93c48569 100644
--- a/sys/uvm/uvm_extern.h
+++ b/sys/uvm/uvm_extern.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_extern.h,v 1.10 2001/03/09 14:20:50 art Exp $ */
-/* $NetBSD: uvm_extern.h,v 1.28 1999/06/15 23:27:47 thorpej Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.11 2001/05/07 16:08:40 art Exp $ */
+/* $NetBSD: uvm_extern.h,v 1.29 1999/06/17 15:47:22 thorpej Exp $ */
/*
*
@@ -288,7 +288,7 @@ boolean_t uvm_kernacc __P((caddr_t, size_t, int));
__dead void uvm_scheduler __P((void)) __attribute__((__noreturn__));
void uvm_swapin __P((struct proc *));
boolean_t uvm_useracc __P((caddr_t, size_t, int));
-void uvm_vslock __P((struct proc *, caddr_t, size_t,
+int uvm_vslock __P((struct proc *, caddr_t, size_t,
vm_prot_t));
void uvm_vsunlock __P((struct proc *, caddr_t, size_t));
diff --git a/sys/uvm/uvm_fault.c b/sys/uvm/uvm_fault.c
index 3d3b75b82bc..50f29232244 100644
--- a/sys/uvm/uvm_fault.c
+++ b/sys/uvm/uvm_fault.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_fault.c,v 1.10 2001/03/22 23:36:52 niklas Exp $ */
-/* $NetBSD: uvm_fault.c,v 1.35 1999/06/16 18:43:28 thorpej Exp $ */
+/* $OpenBSD: uvm_fault.c,v 1.11 2001/05/07 16:08:40 art Exp $ */
+/* $NetBSD: uvm_fault.c,v 1.37 1999/06/16 23:02:40 thorpej Exp $ */
/*
*
@@ -1707,10 +1707,10 @@ Case2:
/*
* uvm_fault_wire: wire down a range of virtual addresses in a map.
*
- * => map should be locked by caller? If so how can we call
- * uvm_fault? WRONG.
- * => XXXCDC: locking here is all screwed up!!! start with
- * uvm_map_pageable and fix it.
+ * => map may be read-locked by caller, but MUST NOT be write-locked.
+ * => if map is read-locked, any operations which may cause map to
+ * be write-locked in uvm_fault() must be taken care of by
+ * the caller. See uvm_map_pageable().
*/
int
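
The new contract means that any operation which could force uvm_fault() to
write-lock the map must be settled before uvm_fault_wire() is entered. A
minimal sketch of the intended caller pattern, in the spirit of the
uvm_map_pageable() approach the comment points at (illustrative only, not
part of this commit):

	vm_map_lock_read(map);
	/* entries covering [start, end) already validated by the caller */
	rv = uvm_fault_wire(map, start, end, VM_PROT_READ);
	vm_map_unlock_read(map);
	if (rv != KERN_SUCCESS) {
		/* caller is responsible for undoing its wiring bookkeeping */
	}
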
@@ -1761,6 +1761,24 @@ uvm_fault_unwire(map, start, end)
vm_map_t map;
vaddr_t start, end;
{
+
+ vm_map_lock_read(map);
+ uvm_fault_unwire_locked(map, start, end);
+ vm_map_unlock_read(map);
+}
+
+/*
+ * uvm_fault_unwire_locked(): the guts of uvm_fault_unwire().
+ *
+ * => map must be at least read-locked.
+ */
+
+void
+uvm_fault_unwire_locked(map, start, end)
+ vm_map_t map;
+ vaddr_t start, end;
+{
+ vm_map_entry_t entry;
pmap_t pmap = vm_map_pmap(map);
vaddr_t va;
paddr_t pa;
@@ -1768,7 +1786,7 @@ uvm_fault_unwire(map, start, end)
#ifdef DIAGNOSTIC
if (map->flags & VM_MAP_INTRSAFE)
- panic("uvm_fault_unwire: intrsafe map");
+ panic("uvm_fault_unwire_locked: intrsafe map");
#endif
/*
@@ -1780,15 +1798,47 @@ uvm_fault_unwire(map, start, end)
uvm_lock_pageq();
+ /*
+ * find the beginning map entry for the region.
+ */
+#ifdef DIAGNOSTIC
+ if (start < vm_map_min(map) || end > vm_map_max(map))
+ panic("uvm_fault_unwire_locked: address out of range");
+#endif
+ if (uvm_map_lookup_entry(map, start, &entry) == FALSE)
+ panic("uvm_fault_unwire_locked: address not in map");
+
for (va = start; va < end ; va += PAGE_SIZE) {
pa = pmap_extract(pmap, va);
/* XXX: assumes PA 0 cannot be in map */
if (pa == (paddr_t) 0) {
- panic("uvm_fault_unwire: unwiring non-wired memory");
+ panic("uvm_fault_unwire_locked: unwiring "
+ "non-wired memory");
}
- pmap_change_wiring(pmap, va, FALSE); /* tell the pmap */
+ /*
+ * make sure the current entry is for the address we're
+ * dealing with. if not, grab the next entry.
+ */
+#ifdef DIAGNOSTIC
+ if (va < entry->start)
+ panic("uvm_fault_unwire_locked: hole 1");
+#endif
+ if (va >= entry->end) {
+#ifdef DIAGNOSTIC
+ if (entry->next == &map->header ||
+ entry->next->start > entry->end)
+ panic("uvm_fault_unwire_locked: hole 2");
+#endif
+ entry = entry->next;
+ }
+
+ /*
+ * if the entry is no longer wired, tell the pmap.
+ */
+ if (VM_MAPENT_ISWIRED(entry) == 0)
+ pmap_change_wiring(pmap, va, FALSE);
pg = PHYS_TO_VM_PAGE(pa);
if (pg)
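
The loop above keys the pmap notification on the map-level wire count rather
than unconditionally un-wiring every page. For reference, VM_MAPENT_ISWIRED()
is, assuming the uvm_map.h definition of this vintage, nothing more than a
test on that count:

	#define VM_MAPENT_ISWIRED(entry)	((entry)->wired_count != 0)

so pmap_change_wiring(pmap, va, FALSE) fires only once the entry has dropped
to a wire count of zero.
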
diff --git a/sys/uvm/uvm_fault.h b/sys/uvm/uvm_fault.h
index d77f23f098a..2ab79b5fda5 100644
--- a/sys/uvm/uvm_fault.h
+++ b/sys/uvm/uvm_fault.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_fault.h,v 1.6 2001/03/09 05:34:38 smart Exp $ */
-/* $NetBSD: uvm_fault.h,v 1.11 1999/06/04 23:38:41 thorpej Exp $ */
+/* $OpenBSD: uvm_fault.h,v 1.7 2001/05/07 16:08:40 art Exp $ */
+/* $NetBSD: uvm_fault.h,v 1.12 1999/06/16 22:11:23 thorpej Exp $ */
/*
*
@@ -84,6 +84,7 @@ static void uvmfault_unlockmaps __P((struct uvm_faultinfo *, boolean_t));
int uvm_fault_wire __P((vm_map_t, vaddr_t, vaddr_t, vm_prot_t));
void uvm_fault_unwire __P((vm_map_t, vaddr_t, vaddr_t));
+void uvm_fault_unwire_locked __P((vm_map_t, vaddr_t, vaddr_t));
#endif /* _KERNEL */
diff --git a/sys/uvm/uvm_glue.c b/sys/uvm/uvm_glue.c
index 800d296d669..bf467bb5a30 100644
--- a/sys/uvm/uvm_glue.c
+++ b/sys/uvm/uvm_glue.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_glue.c,v 1.13 2001/05/05 23:25:54 art Exp $ */
-/* $NetBSD: uvm_glue.c,v 1.23 1999/05/28 20:49:51 thorpej Exp $ */
+/* $OpenBSD: uvm_glue.c,v 1.14 2001/05/07 16:08:40 art Exp $ */
+/* $NetBSD: uvm_glue.c,v 1.26 1999/06/17 15:47:22 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -155,11 +155,18 @@ uvm_useracc(addr, len, rw)
size_t len;
int rw;
{
+ vm_map_t map;
boolean_t rv;
vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;
- rv = uvm_map_checkprot(&curproc->p_vmspace->vm_map,
- trunc_page((vaddr_t)addr), round_page((vaddr_t)addr+len), prot);
+ /* XXX curproc */
+ map = &curproc->p_vmspace->vm_map;
+
+ vm_map_lock_read(map);
+ rv = uvm_map_checkprot(map, trunc_page((vaddr_t)addr),
+ round_page((vaddr_t)addr+len), prot);
+ vm_map_unlock_read(map);
+
return(rv);
}
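
Note that uvm_useracc() remains a hint rather than a guarantee: the read lock
protects the entry walk inside uvm_map_checkprot(), but the answer can go
stale as soon as the lock is dropped. A hypothetical caller (names
illustrative) would still rely on the fault path for correctness:

	if (!uvm_useracc(uaddr, len, B_READ))
		return (EFAULT);		/* fast-path rejection only */
	error = copyin(uaddr, kbuf, len);	/* copyin() still handles faults */
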
@@ -210,16 +217,24 @@ uvm_chgkprot(addr, len, rw)
* - XXXCDC: consider nuking this (or making it a macro?)
*/
-void
+int
uvm_vslock(p, addr, len, access_type)
struct proc *p;
caddr_t addr;
size_t len;
vm_prot_t access_type;
{
+ vm_map_t map;
+ vaddr_t start, end;
+ int rv;
+
+ map = &p->p_vmspace->vm_map;
+ start = trunc_page((vaddr_t)addr);
+ end = round_page((vaddr_t)addr + len);
+
- uvm_fault_wire(&p->p_vmspace->vm_map, trunc_page((vaddr_t)addr),
- round_page((vaddr_t)addr+len), access_type);
+ rv = uvm_fault_wire(map, start, end, access_type);
+ return (rv);
}
/*
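
With uvm_vslock() now propagating uvm_fault_wire()'s result, callers can back
out instead of proceeding with an unwired buffer. A hypothetical caller,
assuming the KERN_* return convention of the era:

	rv = uvm_vslock(p, addr, len, VM_PROT_READ | VM_PROT_WRITE);
	if (rv != KERN_SUCCESS)
		return (EFAULT);
	/* ... transfer data through the wired region ... */
	uvm_vsunlock(p, addr, len);
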
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index 1ca7c0e883d..266e8462def 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_map.c,v 1.12 2001/05/05 23:25:55 art Exp $ */
-/* $NetBSD: uvm_map.c,v 1.56 1999/06/16 19:34:24 thorpej Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.13 2001/05/07 16:08:40 art Exp $ */
+/* $NetBSD: uvm_map.c,v 1.58 1999/06/17 00:24:10 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -293,8 +293,8 @@ uvm_map_entry_unwire(map, entry)
vm_map_entry_t entry;
{
- uvm_fault_unwire(map, entry->start, entry->end);
entry->wired_count = 0;
+ uvm_fault_unwire_locked(map, entry->start, entry->end);
}
/*
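
The swapped order in uvm_map_entry_unwire() is deliberate: wired_count is
zeroed before the call, so the VM_MAPENT_ISWIRED() test inside
uvm_fault_unwire_locked() sees the entry as unwired and actually drops the
pmap-level wiring. With the old order the entry would still look wired and,
under the new conditional, pmap_change_wiring() would never be called.
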
@@ -2043,10 +2043,6 @@ uvm_map_pageable(map, start, end, new_pageable)
* POSIX 1003.1b - a single munlock call unlocks a region,
* regardless of the number of mlock calls made on that
* region.
- *
- * Note, uvm_fault_unwire() (called via uvm_map_entry_unwire())
- * does not lock the map, so we don't have to do anything
- * special regarding locking here.
*/
entry = start_entry;
while ((entry != &map->header) && (entry->start < end)) {
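
The POSIX 1003.1b rule referenced above means user-level lock requests do not
nest. An illustrative userland sequence (hypothetical buffer):

	mlock(buf, len);	/* region wired */
	mlock(buf, len);	/* locked "again" -- no extra level of locking */
	munlock(buf, len);	/* one call, region completely unlocked */
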
@@ -2229,10 +2225,6 @@ uvm_map_pageable_all(map, flags, limit)
/*
* POSIX 1003.1b -- munlockall unlocks all regions,
* regardless of how many times mlockall has been called.
- *
- * Note, uvm_fault_unwire() (called via uvm_map_entry_unwire())
- * does not lock the map, so we don't have to do anything
- * special regarding locking here.
*/
for (entry = map->header.next; entry != &map->header;
entry = entry->next) {