author      Artur Grabowski <art@cvs.openbsd.org>  2001-03-09 14:20:53 +0000
committer   Artur Grabowski <art@cvs.openbsd.org>  2001-03-09 14:20:53 +0000
commit      c8e101baa1ab502f68cae77e98bb4fa04b0d9a71 (patch)
tree        a80b2d2a230964d20f283a88cb544442cedceb24
parent      c9d635487708a1945207b34245ab48ad8fcff8d8 (diff)
More syncing to NetBSD.
Implements mincore(2), mlockall(2) and munlockall(2). mlockall and munlockall are disabled for the moment. The rest is mostly cosmetic.
-rw-r--r--  sys/sys/mman.h        11
-rw-r--r--  sys/uvm/uvm_extern.h   5
-rw-r--r--  sys/uvm/uvm_fault.c   11
-rw-r--r--  sys/uvm/uvm_map.c    300
-rw-r--r--  sys/uvm/uvm_map_i.h    6
-rw-r--r--  sys/uvm/uvm_mmap.c   204
-rw-r--r--  sys/vm/vm_map.h       30
7 files changed, 495 insertions, 72 deletions
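
For context, a minimal userland sketch of the mincore(2) interface this commit wires up: the caller passes one status byte per page and the kernel sets bit 0 for resident pages. This program is not part of the commit; the mmap(2)/MAP_ANON setup is ordinary BSD usage.

#include <sys/types.h>
#include <sys/mman.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
    size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);
    size_t len = 4 * pagesz, i;
    char *vec, *p;

    p = mmap(NULL, len, PROT_READ | PROT_WRITE,
        MAP_ANON | MAP_PRIVATE, -1, 0);
    if (p == MAP_FAILED)
        err(1, "mmap");
    p[0] = 1;                       /* touch the first page only */

    if ((vec = malloc(len / pagesz)) == NULL)   /* one byte per page */
        err(1, "malloc");
    if (mincore(p, len, vec) == -1)
        err(1, "mincore");

    for (i = 0; i < len / pagesz; i++)
        printf("page %lu: %s\n", (unsigned long)i,
            (vec[i] & 1) ? "resident" : "not resident");

    free(vec);
    munmap(p, len);
    return (0);
}

Only mincore() is enabled by this commit; the mlockall()/munlockall() system calls remain compiled out under #ifdef notyet in uvm_mmap.c below.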
diff --git a/sys/sys/mman.h b/sys/sys/mman.h
index 8df2d454681..e04ee0027a8 100644
--- a/sys/sys/mman.h
+++ b/sys/sys/mman.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: mman.h,v 1.7 1998/06/02 05:22:26 deraadt Exp $ */
+/* $OpenBSD: mman.h,v 1.8 2001/03/09 14:20:50 art Exp $ */
/* $NetBSD: mman.h,v 1.11 1995/03/26 20:24:23 jtc Exp $ */
/*-
@@ -89,6 +89,13 @@
#define MS_SYNC 0x02 /* perform synchronous writes */
#define MS_INVALIDATE 0x04 /* invalidate cached data */
+/*
+ * Flags to mlockall
+ */
+#define MCL_CURRENT 0x01 /* lock all pages currently mapped */
+#define MCL_FUTURE 0x02 /* lock all pages mapped in the future */
+
+
#ifndef _KERNEL
#include <sys/cdefs.h>
@@ -101,6 +108,8 @@ int munmap __P((void *, size_t));
int msync __P((void *, size_t, int));
int mlock __P((const void *, size_t));
int munlock __P((const void *, size_t));
+int mlockall __P((int));
+int munlockall __P((void));
int madvise __P((void *, size_t, int));
int mincore __P((void *, size_t, char *));
int minherit __P((void *, size_t, int));
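
The MCL_CURRENT/MCL_FUTURE flags and prototypes added above are meant to be used roughly as in the sketch below. The kernel side of mlockall/munlockall stays under #ifdef notyet in this commit (see uvm_mmap.c further down), so this only shows the intended interface.

#include <sys/types.h>
#include <sys/mman.h>
#include <err.h>

int
main(void)
{
    /* lock everything mapped now, and everything mapped from now on */
    if (mlockall(MCL_CURRENT | MCL_FUTURE) == -1)
        err(1, "mlockall");

    /* ... latency-sensitive work that must not page ... */

    if (munlockall() == -1)
        err(1, "munlockall");
    return (0);
}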
diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h
index 742d3e63732..f6dd3d2aa24 100644
--- a/sys/uvm/uvm_extern.h
+++ b/sys/uvm/uvm_extern.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_extern.h,v 1.9 2001/03/09 05:34:38 smart Exp $ */
-/* $NetBSD: uvm_extern.h,v 1.27 1999/05/26 19:16:36 thorpej Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.10 2001/03/09 14:20:50 art Exp $ */
+/* $NetBSD: uvm_extern.h,v 1.28 1999/06/15 23:27:47 thorpej Exp $ */
/*
*
@@ -325,6 +325,7 @@ int uvm_map __P((vm_map_t, vaddr_t *, vsize_t,
struct uvm_object *, vaddr_t, uvm_flag_t));
int uvm_map_pageable __P((vm_map_t, vaddr_t,
vaddr_t, boolean_t));
+int uvm_map_pageable_all __P((vm_map_t, int, vsize_t));
boolean_t uvm_map_checkprot __P((vm_map_t, vaddr_t,
vaddr_t, vm_prot_t));
int uvm_map_protect __P((vm_map_t, vaddr_t,
diff --git a/sys/uvm/uvm_fault.c b/sys/uvm/uvm_fault.c
index 0834cd17eb2..4be4a10c3f7 100644
--- a/sys/uvm/uvm_fault.c
+++ b/sys/uvm/uvm_fault.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_fault.c,v 1.7 2001/03/08 15:21:36 smart Exp $ */
-/* $NetBSD: uvm_fault.c,v 1.33 1999/06/04 23:38:41 thorpej Exp $ */
+/* $OpenBSD: uvm_fault.c,v 1.8 2001/03/09 14:20:51 art Exp $ */
+/* $NetBSD: uvm_fault.c,v 1.35 1999/06/16 18:43:28 thorpej Exp $ */
/*
*
@@ -646,7 +646,7 @@ ReFault:
*/
enter_prot = ufi.entry->protection;
- wired = (ufi.entry->wired_count != 0) || (fault_type == VM_FAULT_WIRE);
+ wired = VM_MAPENT_ISWIRED(ufi.entry) || (fault_type == VM_FAULT_WIRE);
if (wired)
access_type = enter_prot; /* full access for wired */
@@ -846,7 +846,7 @@ ReFault:
VM_PAGE_TO_PHYS(anon->u.an_page),
(anon->an_ref > 1) ? (enter_prot & ~VM_PROT_WRITE) :
enter_prot,
- (ufi.entry->wired_count != 0), 0);
+ VM_MAPENT_ISWIRED(ufi.entry), 0);
}
simple_unlock(&anon->an_lock);
}
@@ -1734,8 +1734,7 @@ uvm_fault_wire(map, start, end, access_type)
/*
* now fault it in page at a time. if the fault fails then we have
- * to undo what we have done. note that in uvm_fault VM_PROT_NONE
- * is replaced with the max protection if fault_type is VM_FAULT_WIRE.
+ * to undo what we have done.
*/
for (va = start ; va < end ; va += PAGE_SIZE) {
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index 6b6355e9207..f5e6bbc8c4b 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_map.c,v 1.8 2001/01/29 02:07:46 niklas Exp $ */
-/* $NetBSD: uvm_map.c,v 1.53 1999/06/07 16:31:42 thorpej Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.9 2001/03/09 14:20:51 art Exp $ */
+/* $NetBSD: uvm_map.c,v 1.56 1999/06/16 19:34:24 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -595,8 +595,8 @@ uvm_map(map, startp, size, uobj, uoffset, flags)
prev_entry->advice != advice)
goto step3;
- /* wired_count's must match (new area is unwired) */
- if (prev_entry->wired_count)
+ /* wiring status must match (new area is unwired) */
+ if (VM_MAPENT_ISWIRED(prev_entry))
goto step3;
/*
@@ -987,7 +987,7 @@ uvm_unmap_remove(map, start, end, entry_list)
* unwiring will put the entries back into the pmap (XXX).
*/
- if (entry->wired_count)
+ if (VM_MAPENT_ISWIRED(entry))
uvm_map_entry_unwire(map, entry);
/*
@@ -1112,7 +1112,7 @@ uvm_unmap_detach(first_entry, amap_unref_flags)
* sanity check
*/
/* was part of vm_map_entry_delete() */
- if (first_entry->wired_count)
+ if (VM_MAPENT_ISWIRED(first_entry))
panic("unmap: still wired!");
#endif
@@ -1974,6 +1974,8 @@ uvm_map_advice(map, start, end, new_advice)
/*
* uvm_map_pageable: sets the pageability of a range in a map.
*
+ * => wires map entries. should not be used for transient page locking.
+ * for that, use uvm_fault_wire()/uvm_fault_unwire() (see uvm_vslock()).
* => regions specified as not pageable require lock-down (wired) memory
* and page tables.
* => map must not be locked.
@@ -1986,8 +1988,7 @@ uvm_map_pageable(map, start, end, new_pageable)
vaddr_t start, end;
boolean_t new_pageable;
{
- vm_map_entry_t entry, start_entry;
- vaddr_t failed = 0;
+ vm_map_entry_t entry, start_entry, failed_entry;
int rv;
UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_pageable=0x%x)",
@@ -2021,20 +2022,17 @@ uvm_map_pageable(map, start, end, new_pageable)
* handle wiring and unwiring separately.
*/
- if (new_pageable) { /* unwire */
-
+ if (new_pageable) { /* unwire */
UVM_MAP_CLIP_START(map, entry, start);
-
/*
* unwiring. first ensure that the range to be unwired is
* really wired down and that there are no holes.
*/
while ((entry != &map->header) && (entry->start < end)) {
-
if (entry->wired_count == 0 ||
(entry->end < end &&
- (entry->next == &map->header ||
- entry->next->start > entry->end))) {
+ (entry->next == &map->header ||
+ entry->next->start > entry->end))) {
vm_map_unlock(map);
UVMHIST_LOG(maphist,
"<- done (INVALID UNWIRE ARG)",0,0,0,0);
@@ -2044,23 +2042,19 @@ uvm_map_pageable(map, start, end, new_pageable)
}
/*
- * now decrement the wiring count for each region. if a region
- * becomes completely unwired, unwire its physical pages and
- * mappings.
+ * POSIX 1003.1b - a single munlock call unlocks a region,
+ * regardless of the number of mlock calls made on that
+ * region.
*
* Note, uvm_fault_unwire() (called via uvm_map_entry_unwire())
* does not lock the map, so we don't have to do anything
* special regarding locking here.
*/
-
entry = start_entry;
while ((entry != &map->header) && (entry->start < end)) {
UVM_MAP_CLIP_END(map, entry, end);
-
- entry->wired_count--;
- if (entry->wired_count == 0)
+ if (VM_MAPENT_ISWIRED(entry))
uvm_map_entry_unwire(map, entry);
-
entry = entry->next;
}
vm_map_unlock(map);
@@ -2080,7 +2074,7 @@ uvm_map_pageable(map, start, end, new_pageable)
* be wired and increment its wiring count.
*
* 2: we downgrade to a read lock, and call uvm_fault_wire to fault
- * in the pages for any newly wired area (wired_count is 1).
+ * in the pages for any newly wired area (wired_count == 1).
*
* downgrading to a read lock for uvm_fault_wire avoids a possible
* deadlock with another thread that may have faulted on one of
@@ -2094,9 +2088,7 @@ uvm_map_pageable(map, start, end, new_pageable)
*/
while ((entry != &map->header) && (entry->start < end)) {
-
- if (entry->wired_count == 0) { /* not already wired? */
-
+ if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
/*
* perform actions of vm_map_lookup that need the
* write lock on the map: create an anonymous map
@@ -2104,22 +2096,16 @@ uvm_map_pageable(map, start, end, new_pageable)
* for a zero-fill region. (XXXCDC: submap case
* ok?)
*/
-
if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
- /*
- * XXXCDC: protection vs. max_protection??
- * (wirefault uses max?)
- * XXXCDC: used to do it always if
- * uvm_obj == NULL (wrong?)
- */
- if ( UVM_ET_ISNEEDSCOPY(entry) &&
- (entry->protection & VM_PROT_WRITE) != 0) {
+ if (UVM_ET_ISNEEDSCOPY(entry) &&
+ ((entry->protection & VM_PROT_WRITE) ||
+ (entry->object.uvm_obj == NULL))) {
amap_copy(map, entry, M_WAITOK, TRUE,
start, end);
/* XXXCDC: wait OK? */
}
}
- } /* wired_count == 0 */
+ }
UVM_MAP_CLIP_START(map, entry, start);
UVM_MAP_CLIP_END(map, entry, end);
entry->wired_count++;
@@ -2127,8 +2113,10 @@ uvm_map_pageable(map, start, end, new_pageable)
/*
* Check for holes
*/
- if (entry->end < end && (entry->next == &map->header ||
- entry->next->start > entry->end)) {
+ if (entry->protection == VM_PROT_NONE ||
+ (entry->end < end &&
+ (entry->next == &map->header ||
+ entry->next->start > entry->end))) {
/*
* found one. amap creation actions do not need to
* be undone, but the wired counts need to be restored.
@@ -2178,16 +2166,24 @@ uvm_map_pageable(map, start, end, new_pageable)
* first drop the wiring count on all the entries
* which haven't actually been wired yet.
*/
- failed = entry->start;
- while (entry != &map->header && entry->start < end)
+ failed_entry = entry;
+ while (entry != &map->header && entry->start < end) {
entry->wired_count--;
+ entry = entry->next;
+ }
/*
- * now, unlock the map, and unwire all the pages that
- * were successfully wired above.
+ * now, unwire all the entries that were successfully
+ * wired above.
*/
+ entry = start_entry;
+ while (entry != failed_entry) {
+ entry->wired_count--;
+ if (VM_MAPENT_ISWIRED(entry) == 0)
+ uvm_map_entry_unwire(map, entry);
+ entry = entry->next;
+ }
vm_map_unlock(map);
- (void) uvm_map_pageable(map, start, failed, TRUE);
UVMHIST_LOG(maphist, "<- done (RV=%d)", rv,0,0,0);
return(rv);
}
@@ -2200,6 +2196,212 @@ uvm_map_pageable(map, start, end, new_pageable)
}
/*
+ * uvm_map_pageable_all: special case of uvm_map_pageable - affects
+ * all mapped regions.
+ *
+ * => map must not be locked.
+ * => if no flags are specified, all regions are unwired.
+ * => XXXJRT: has some of the same problems as uvm_map_pageable() above.
+ */
+
+int
+uvm_map_pageable_all(map, flags, limit)
+ vm_map_t map;
+ int flags;
+ vsize_t limit;
+{
+ vm_map_entry_t entry, failed_entry;
+ vsize_t size;
+ int rv;
+ UVMHIST_FUNC("uvm_map_pageable_all"); UVMHIST_CALLED(maphist);
+ UVMHIST_LOG(maphist,"(map=0x%x,flags=0x%x)", map, flags, 0, 0);
+
+#ifdef DIAGNOSTIC
+ if ((map->flags & VM_MAP_PAGEABLE) == 0)
+ panic("uvm_map_pageable_all: map %p not pageable", map);
+#endif
+
+ vm_map_lock(map);
+
+ /*
+ * handle wiring and unwiring separately.
+ */
+
+ if (flags == 0) { /* unwire */
+ /*
+ * POSIX 1003.1b -- munlockall unlocks all regions,
+ * regardless of how many times mlockall has been called.
+ *
+ * Note, uvm_fault_unwire() (called via uvm_map_entry_unwire())
+ * does not lock the map, so we don't have to do anything
+ * special regarding locking here.
+ */
+ for (entry = map->header.next; entry != &map->header;
+ entry = entry->next) {
+ if (VM_MAPENT_ISWIRED(entry))
+ uvm_map_entry_unwire(map, entry);
+ }
+ map->flags &= ~VM_MAP_WIREFUTURE;
+ vm_map_unlock(map);
+ UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
+ return (KERN_SUCCESS);
+
+ /*
+ * end of unwire case!
+ */
+ }
+
+ if (flags & MCL_FUTURE) {
+ /*
+ * must wire all future mappings; remember this.
+ */
+ map->flags |= VM_MAP_WIREFUTURE;
+ }
+
+ if ((flags & MCL_CURRENT) == 0) {
+ /*
+ * no more work to do!
+ */
+ UVMHIST_LOG(maphist,"<- done (OK no wire)",0,0,0,0);
+ vm_map_unlock(map);
+ return (KERN_SUCCESS);
+ }
+
+ /*
+ * wire case: in three passes [XXXCDC: ugly block of code here]
+ *
+ * 1: holding the write lock, count all pages mapped by non-wired
+ * entries. if this would cause us to go over our limit, we fail.
+ *
+ * 2: still holding the write lock, we create any anonymous maps that
+ * need to be created. then we increment its wiring count.
+ *
+ * 3: we downgrade to a read lock, and call uvm_fault_wire to fault
+ * in the pages for any newly wired area (wired_count == 1).
+ *
+ * downgrading to a read lock for uvm_fault_wire avoids a possible
+ * deadlock with another thread that may have faulted on one of
+ * the pages to be wired (it would mark the page busy, blocking
+ * us, then in turn block on the map lock that we hold). because
+ * of problems in the recursive lock package, we cannot upgrade
+ * to a write lock in vm_map_lookup. thus, any actions that
+ * require the write lock must be done beforehand. because we
+ * keep the read lock on the map, the copy-on-write status of the
+ * entries we modify here cannot change.
+ */
+
+ for (size = 0, entry = map->header.next; entry != &map->header;
+ entry = entry->next) {
+ if (entry->protection != VM_PROT_NONE &&
+ VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
+ size += entry->end - entry->start;
+ }
+ }
+
+ if (atop(size) + uvmexp.wired > uvmexp.wiredmax) {
+ vm_map_unlock(map);
+ return (KERN_NO_SPACE); /* XXX overloaded */
+ }
+
+ /* XXX non-pmap_wired_count case must be handled by caller */
+#ifdef pmap_wired_count
+ if (limit != 0 &&
+ (size + ptoa(pmap_wired_count(vm_map_pmap(map))) > limit)) {
+ vm_map_unlock(map);
+ return (KERN_NO_SPACE); /* XXX overloaded */
+ }
+#endif
+
+ /*
+ * Pass 2.
+ */
+
+ for (entry = map->header.next; entry != &map->header;
+ entry = entry->next) {
+ if (entry->protection == VM_PROT_NONE)
+ continue;
+ if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
+ /*
+ * perform actions of vm_map_lookup that need the
+ * write lock on the map: create an anonymous map
+ * for a copy-on-write region, or an anonymous map
+ * for a zero-fill region. (XXXCDC: submap case
+ * ok?)
+ */
+ if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
+ if (UVM_ET_ISNEEDSCOPY(entry) &&
+ ((entry->protection & VM_PROT_WRITE) ||
+ (entry->object.uvm_obj == NULL))) {
+ amap_copy(map, entry, M_WAITOK, TRUE,
+ entry->start, entry->end);
+ /* XXXCDC: wait OK? */
+ }
+ }
+ }
+ entry->wired_count++;
+ }
+
+ /*
+ * Pass 3.
+ */
+
+ vm_map_downgrade(map);
+
+ rv = KERN_SUCCESS;
+ for (entry = map->header.next; entry != &map->header;
+ entry = entry->next) {
+ if (entry->wired_count == 1) {
+ rv = uvm_fault_wire(map, entry->start, entry->end,
+ entry->protection);
+ if (rv) {
+ /*
+ * wiring failed. break out of the loop.
+ * we'll clean up the map below, once we
+ * have a write lock again.
+ */
+ break;
+ }
+ }
+ }
+
+ if (rv) { /* failed? */
+ /*
+ * Get back an exclusive (write) lock.
+ */
+ vm_map_upgrade(map);
+
+ /*
+ * first drop the wiring count on all the entries
+ * which haven't actually been wired yet.
+ */
+ failed_entry = entry;
+ for (/* nothing */; entry != &map->header;
+ entry = entry->next)
+ entry->wired_count--;
+
+ /*
+ * now, unwire all the entries that were successfully
+ * wired above.
+ */
+ for (entry = map->header.next; entry != failed_entry;
+ entry = entry->next) {
+ entry->wired_count--;
+ if (VM_MAPENT_ISWIRED(entry) == 0)
+ uvm_map_entry_unwire(map, entry);
+ }
+ vm_map_unlock(map);
+ UVMHIST_LOG(maphist,"<- done (RV=%d)", rv,0,0,0);
+ return (rv);
+ }
+
+ /* We are holding a read lock here. */
+ vm_map_unlock_read(map);
+
+ UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
+ return (KERN_SUCCESS);
+}
+
+/*
* uvm_map_clean: push dirty pages off to backing store.
*
* => valid flags:
@@ -2477,6 +2679,14 @@ uvmspace_exec(p)
#endif
/*
+ * POSIX 1003.1b -- "lock future mappings" is revoked
+ * when a process execs another program image.
+ */
+ vm_map_lock(map);
+ map->flags &= ~VM_MAP_WIREFUTURE;
+ vm_map_unlock(map);
+
+ /*
* now unmap the old program
*/
uvm_unmap(map, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
@@ -2750,7 +2960,7 @@ uvmspace_fork(vm1)
if ((amap_flags(old_entry->aref.ar_amap) &
AMAP_SHARED) != 0 ||
- old_entry->wired_count != 0) {
+ VM_MAPENT_ISWIRED(old_entry)) {
amap_copy(new_map, new_entry, M_WAITOK, FALSE,
0, 0);
@@ -2769,7 +2979,7 @@ uvmspace_fork(vm1)
* allocated any needed amap (above).
*/
- if (old_entry->wired_count != 0) {
+ if (VM_MAPENT_ISWIRED(old_entry)) {
/*
* resolve all copy-on-write faults now
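
The wire case added above in uvm_map_pageable_all() is easiest to read as three passes plus a rollback: count what would newly be wired, bump every wiring count, then fault the newly wired entries in, undoing the counts on failure. Below is a toy, userland-compilable model of just that control flow; struct toy_entry, fake_fault_wire() and TOY_WIRED_MAX are illustrative stand-ins, not kernel interfaces.

#include <stdio.h>
#include <stddef.h>

#define TOY_WIRED_MAX (1024 * 1024)     /* stand-in for uvmexp.wiredmax */

struct toy_entry {
    size_t start, end;
    int wired_count;
    int faultable;                      /* 0 simulates a wiring failure */
    struct toy_entry *next;
};

static int
fake_fault_wire(struct toy_entry *e)
{
    return (e->faultable ? 0 : -1);     /* 0 == success */
}

static int
wire_all(struct toy_entry *head)
{
    struct toy_entry *e, *failed;
    size_t size = 0;
    int rv = 0;

    /* pass 1: count pages that would newly become wired; fail if over limit */
    for (e = head; e != NULL; e = e->next)
        if (e->wired_count == 0)
            size += e->end - e->start;
    if (size > TOY_WIRED_MAX)
        return (-1);

    /* pass 2: bump the wiring count on every entry */
    for (e = head; e != NULL; e = e->next)
        e->wired_count++;

    /* pass 3: fault in newly wired entries; stop at the first failure */
    for (e = head; e != NULL; e = e->next)
        if (e->wired_count == 1 && (rv = fake_fault_wire(e)) != 0)
            break;
    if (rv == 0)
        return (0);

    /* rollback: drop the count on entries that were never wired ... */
    for (failed = e; e != NULL; e = e->next)
        e->wired_count--;
    /* ... then on the ones that were successfully wired above */
    for (e = head; e != failed; e = e->next)
        e->wired_count--;
    return (rv);
}

int
main(void)
{
    struct toy_entry c = { 8192, 12288, 0, 0, NULL };   /* will fail */
    struct toy_entry b = { 4096, 8192, 0, 1, &c };
    struct toy_entry a = { 0, 4096, 0, 1, &b };

    printf("wire_all -> %d (counts: %d %d %d)\n",
        wire_all(&a), a.wired_count, b.wired_count, c.wired_count);
    return (0);
}

The rollback mirrors the real code's failed_entry split: entries past the failure point only had their count bumped, while the earlier ones were actually wired. The kernel additionally calls uvm_map_entry_unwire() on those once their count drops to zero; the toy only tracks the counts.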
diff --git a/sys/uvm/uvm_map_i.h b/sys/uvm/uvm_map_i.h
index 4f04761b726..f2849603770 100644
--- a/sys/uvm/uvm_map_i.h
+++ b/sys/uvm/uvm_map_i.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_map_i.h,v 1.5 2001/01/29 02:07:46 niklas Exp $ */
-/* $NetBSD: uvm_map_i.h,v 1.14 1999/06/04 23:38:42 thorpej Exp $ */
+/* $OpenBSD: uvm_map_i.h,v 1.6 2001/03/09 14:20:51 art Exp $ */
+/* $NetBSD: uvm_map_i.h,v 1.15 1999/06/14 22:05:23 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -124,7 +124,7 @@ uvm_map_setup(map, min, max, flags)
map->first_free = &map->header;
map->hint = &map->header;
map->timestamp = 0;
- lockinit(&map->lock, PVM, "thrd_sleep", 0, 0);
+ lockinit(&map->lock, PVM, "vmmaplk", 0, 0);
simple_lock_init(&map->ref_lock);
simple_lock_init(&map->hint_lock);
diff --git a/sys/uvm/uvm_mmap.c b/sys/uvm/uvm_mmap.c
index 7c6dbadca89..586631d6d10 100644
--- a/sys/uvm/uvm_mmap.c
+++ b/sys/uvm/uvm_mmap.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_mmap.c,v 1.6 2001/01/29 02:07:46 niklas Exp $ */
-/* $NetBSD: uvm_mmap.c,v 1.21 1999/05/23 06:27:13 mrg Exp $ */
+/* $OpenBSD: uvm_mmap.c,v 1.7 2001/03/09 14:20:52 art Exp $ */
+/* $NetBSD: uvm_mmap.c,v 1.23 1999/06/16 17:25:39 minoura Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -131,15 +131,140 @@ sys_mincore(p, v, retval)
void *v;
register_t *retval;
{
-#if 0
struct sys_mincore_args /* {
- syscallarg(caddr_t) addr;
+ syscallarg(void *) addr;
syscallarg(size_t) len;
syscallarg(char *) vec;
} */ *uap = v;
+ vm_page_t m;
+ char *vec, pgi;
+ struct uvm_object *uobj;
+ struct vm_amap *amap;
+ struct vm_anon *anon;
+ vm_map_entry_t entry;
+ vaddr_t start, end, lim;
+ vm_map_t map;
+ vsize_t len;
+ int error = 0, npgs;
+
+ map = &p->p_vmspace->vm_map;
+
+ start = (vaddr_t)SCARG(uap, addr);
+ len = SCARG(uap, len);
+ vec = SCARG(uap, vec);
+
+ if (start & PAGE_MASK)
+ return (EINVAL);
+ len = round_page(len);
+ end = start + len;
+ if (end <= start)
+ return (EINVAL);
+
+ npgs = len >> PAGE_SHIFT;
+
+ if (uvm_useracc(vec, npgs, B_WRITE) == FALSE)
+ return (EFAULT);
+
+ /*
+ * Lock down vec, so our returned status isn't outdated by
+ * storing the status byte for a page.
+ */
+ uvm_vslock(p, vec, npgs, VM_PROT_WRITE);
+
+ vm_map_lock_read(map);
+
+ if (uvm_map_lookup_entry(map, start, &entry) == FALSE) {
+ error = ENOMEM;
+ goto out;
+ }
+
+ for (/* nothing */;
+ entry != &map->header && entry->start < end;
+ entry = entry->next) {
+#ifdef DIAGNOSTIC
+ if (UVM_ET_ISSUBMAP(entry))
+ panic("mincore: user map has submap");
+ if (start < entry->start)
+ panic("mincore: hole");
#endif
+ /* Make sure there are no holes. */
+ if (entry->end < end &&
+ (entry->next == &map->header ||
+ entry->next->start > entry->end)) {
+ error = ENOMEM;
+ goto out;
+ }
- return (ENOSYS);
+ lim = end < entry->end ? end : entry->end;
+
+ /*
+ * Special case for mapped devices; these are always
+ * considered resident.
+ */
+ if (UVM_ET_ISOBJ(entry)) {
+ extern struct uvm_pagerops uvm_deviceops; /* XXX */
+#ifdef DIAGNOSTIC
+ if (UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj))
+ panic("mincore: user map has kernel object");
+#endif
+ if (entry->object.uvm_obj->pgops == &uvm_deviceops) {
+ for (/* nothing */; start < lim;
+ start += PAGE_SIZE, vec++)
+ subyte(vec, 1);
+ continue;
+ }
+ }
+
+ uobj = entry->object.uvm_obj; /* top layer */
+ amap = entry->aref.ar_amap; /* bottom layer */
+
+ if (amap != NULL)
+ amap_lock(amap);
+ if (uobj != NULL)
+ simple_lock(&uobj->vmobjlock);
+
+ for (/* nothing */; start < lim; start += PAGE_SIZE, vec++) {
+ pgi = 0;
+ if (amap != NULL) {
+ /* Check the top layer first. */
+ anon = amap_lookup(&entry->aref,
+ start - entry->start);
+ /* Don't need to lock anon here. */
+ if (anon != NULL && anon->u.an_page != NULL) {
+ /*
+ * Anon has the page for this entry
+ * offset.
+ */
+ pgi = 1;
+ }
+ }
+
+ if (uobj != NULL && pgi == 0) {
+ /* Check the bottom layer. */
+ m = uvm_pagelookup(uobj,
+ entry->offset + (start - entry->start));
+ if (m != NULL) {
+ /*
+ * Object has the page for this entry
+ * offset.
+ */
+ pgi = 1;
+ }
+ }
+
+ (void) subyte(vec, pgi);
+ }
+
+ if (uobj != NULL)
+ simple_unlock(&uobj->vmobjlock);
+ if (amap != NULL)
+ amap_unlock(amap);
+ }
+
+ out:
+ vm_map_unlock_read(map);
+ uvm_vsunlock(p, SCARG(uap, vec), npgs);
+ return (error);
}
#if 0
@@ -817,6 +942,75 @@ sys_munlock(p, v, retval)
return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
+#ifdef notyet
+/*
+ * sys_mlockall: lock all pages mapped into an address space.
+ */
+
+int
+sys_mlockall(p, v, retval)
+ struct proc *p;
+ void *v;
+ register_t *retval;
+{
+ struct sys_mlockall_args /* {
+ syscallarg(int) flags;
+ } */ *uap = v;
+ vsize_t limit;
+ int error, flags;
+
+ flags = SCARG(uap, flags);
+
+ if (flags == 0 ||
+ (flags & ~(MCL_CURRENT|MCL_FUTURE)) != 0)
+ return (EINVAL);
+
+#ifdef pmap_wired_count
+ /* Actually checked in uvm_map_pageable_all() */
+ limit = p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur;
+#else
+ limit = 0;
+ if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
+ return (error);
+#endif
+
+ error = uvm_map_pageable_all(&p->p_vmspace->vm_map, flags, limit);
+ switch (error) {
+ case KERN_SUCCESS:
+ error = 0;
+ break;
+
+ case KERN_NO_SPACE: /* XXX overloaded */
+ error = ENOMEM;
+ break;
+
+ default:
+ /*
+ * "Some or all of the memory could not be locked when
+ * the call was made."
+ */
+ error = EAGAIN;
+ }
+
+ return (error);
+}
+
+/*
+ * sys_munlockall: unlock all pages mapped into an address space.
+ */
+
+int
+sys_munlockall(p, v, retval)
+ struct proc *p;
+ void *v;
+ register_t *retval;
+{
+
+ (void) uvm_map_pageable_all(&p->p_vmspace->vm_map, 0, 0);
+ return (0);
+}
+#endif
+
/*
* uvm_mmap: internal version of mmap
*
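
The per-page decision in the sys_mincore() hunk above checks the anonymous overlay (amap) first and only falls through to the backing uvm_object when the anon layer has no page. A toy model of that ordering, with made-up types standing in for the amap/uobj lookups:

#include <stdio.h>

/* one page's worth of toy state; not a kernel structure */
struct toy_page {
    int in_anon_layer;      /* amap (anonymous overlay) has the page */
    int in_object_layer;    /* backing object has the page */
};

static char
page_status(const struct toy_page *pg)
{
    char pgi = 0;

    if (pg->in_anon_layer)              /* top layer wins */
        pgi = 1;
    if (pgi == 0 && pg->in_object_layer)
        pgi = 1;                        /* otherwise check the bottom layer */
    return (pgi);
}

int
main(void)
{
    struct toy_page cow_modified = { 1, 0 };    /* e.g. a written COW page */
    struct toy_page file_backed = { 0, 1 };
    struct toy_page not_resident = { 0, 0 };

    printf("%d %d %d\n", page_status(&cow_modified),
        page_status(&file_backed), page_status(&not_resident));
    return (0);
}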
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index 38f06bd7173..9878db88895 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_map.h,v 1.11 2000/03/16 22:11:05 art Exp $ */
+/* $OpenBSD: vm_map.h,v 1.12 2001/03/09 14:20:52 art Exp $ */
/* $NetBSD: vm_map.h,v 1.11 1995/03/26 20:39:10 jtc Exp $ */
/*
@@ -130,7 +130,7 @@ struct vm_map_entry {
vm_prot_t protection; /* protection code */
vm_prot_t max_protection; /* maximum protection */
vm_inherit_t inheritance; /* inheritance */
- int wired_count; /* can be paged if = 0 */
+ int wired_count; /* can be paged if == 0 */
#ifdef UVM
struct vm_aref aref; /* anonymous overlay */
int advice; /* madvise advice */
@@ -142,6 +142,8 @@ struct vm_map_entry {
#endif /* UVM */
};
+#define VM_MAPENT_ISWIRED(entry) ((entry)->wired_count != 0)
+
/*
* Maps are doubly-linked lists of map entries, kept sorted
* by address. A single hint is provided to start
@@ -163,7 +165,14 @@ struct vm_map {
simple_lock_data_t hint_lock; /* lock for hint storage */
vm_map_entry_t first_free; /* First free space hint */
#ifdef UVM
- int flags; /* flags (read-only) */
+ /*
+ * Locking note: read-only flags need not be locked to read
+ * them; they are set once at map creation time, and never
+ * changed again. Only read-write flags require that the
+ * appropriate map lock be acquired before reading or writing
+ * the flag.
+ */
+ int flags; /* flags */
#else
boolean_t entries_pageable; /* map entries pageable?? */
#endif
@@ -174,8 +183,9 @@ struct vm_map {
#ifdef UVM
/* vm_map flags */
-#define VM_MAP_PAGEABLE 0x01 /* entries are pageable*/
-#define VM_MAP_INTRSAFE 0x02 /* interrupt safe map */
+#define VM_MAP_PAGEABLE 0x01 /* ro: entries are pageable*/
+#define VM_MAP_INTRSAFE 0x02 /* ro: interrupt safe map */
+#define VM_MAP_WIREFUTURE 0x04 /* rw: wire future mappings */
/*
* Interrupt-safe maps must also be kept on a special list,
* to assist uvm_fault() in avoiding locking problems.
@@ -336,23 +346,23 @@ do { \
}
#ifdef DIAGNOSTIC
#define vm_map_lock(map) { \
- if (lockmgr(&(map)->lock, LK_EXCLUSIVE, (void *)0, curproc) != 0) { \
+ if (lockmgr(&(map)->lock, LK_EXCLUSIVE, NULL, curproc) != 0) { \
panic("vm_map_lock: failed to get lock"); \
} \
(map)->timestamp++; \
}
#else
#define vm_map_lock(map) { \
- lockmgr(&(map)->lock, LK_EXCLUSIVE, (void *)0, curproc); \
+ lockmgr(&(map)->lock, LK_EXCLUSIVE, NULL, curproc); \
(map)->timestamp++; \
}
#endif /* DIAGNOSTIC */
#define vm_map_unlock(map) \
- lockmgr(&(map)->lock, LK_RELEASE, (void *)0, curproc)
+ lockmgr(&(map)->lock, LK_RELEASE, NULL, curproc)
#define vm_map_lock_read(map) \
- lockmgr(&(map)->lock, LK_SHARED, (void *)0, curproc)
+ lockmgr(&(map)->lock, LK_SHARED, NULL, curproc)
#define vm_map_unlock_read(map) \
- lockmgr(&(map)->lock, LK_RELEASE, (void *)0, curproc)
+ lockmgr(&(map)->lock, LK_RELEASE, NULL, curproc)
#define vm_map_set_recursive(map) { \
simple_lock(&(map)->lk_interlock); \
(map)->lk_flags |= LK_CANRECURSE; \