-rw-r--r--   sys/uvm/uvm.h        |  17
-rw-r--r--   sys/uvm/uvm_anon.c   |   6
-rw-r--r--   sys/uvm/uvm_aobj.c   | 213
-rw-r--r--   sys/uvm/uvm_extern.h |  15
-rw-r--r--   sys/uvm/uvm_fault.c  |  10
-rw-r--r--   sys/uvm/uvm_glue.c   |  31
-rw-r--r--   sys/uvm/uvm_km.c     | 242
-rw-r--r--   sys/uvm/uvm_map.c    | 118
-rw-r--r--   sys/uvm/uvm_meter.c  |   4
-rw-r--r--   sys/uvm/uvm_mmap.c   |  28
-rw-r--r--   sys/uvm/uvm_vnode.c  |   4
11 files changed, 374 insertions, 314 deletions
diff --git a/sys/uvm/uvm.h b/sys/uvm/uvm.h
index 40b9f292229..e5410a9e52c 100644
--- a/sys/uvm/uvm.h
+++ b/sys/uvm/uvm.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm.h,v 1.9 2001/04/10 06:59:12 niklas Exp $ */
-/* $NetBSD: uvm.h,v 1.16 1999/06/21 17:25:11 thorpej Exp $ */
+/* $OpenBSD: uvm.h,v 1.10 2001/06/23 19:24:33 smart Exp $ */
+/* $NetBSD: uvm.h,v 1.17 1999/07/22 22:58:38 thorpej Exp $ */
/*
*
@@ -147,16 +147,17 @@ UVMHIST_DECL(pdhist);
/*
* UVM_UNLOCK_AND_WAIT: atomic unlock+wait... front end for the
- * (poorly named) thread_sleep_msg function.
+ * uvm_sleep() function.
*/
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
-#define UVM_UNLOCK_AND_WAIT(event,lock,intr,msg, timo) \
- thread_sleep_msg(event,lock,intr,msg, timo)
+#define UVM_UNLOCK_AND_WAIT(event, lock, intr, msg, timo) \
+ uvm_sleep(event, lock, intr, msg, timo)
+
#else
-#define UVM_UNLOCK_AND_WAIT(event,lock,intr,msg, timo) \
- thread_sleep_msg(event,NULL,intr,msg, timo)
-#endif
+#define UVM_UNLOCK_AND_WAIT(event, lock, intr, msg, timo) \
+ uvm_sleep(event, NULL, intr, msg, timo)
+#endif /* MULTIPROCESSOR || LOCKDEBUG */
/*
* UVM_PAGE_OWN: track page ownership (only if UVM_PAGE_TRKOWN)
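
Editor's note: the macro above is used in UVM's standard check/sleep/recheck pattern. The caller holds the object's simple lock, marks what it is waiting for, and lets UVM_UNLOCK_AND_WAIT() drop the lock and sleep atomically before re-taking the lock and re-testing. A minimal sketch of that pattern follows; the page and object names are only illustrative.

    /*
     * Sketch: wait for a busy page.  uobj->vmobjlock is held on entry;
     * UVM_UNLOCK_AND_WAIT() releases it and sleeps via uvm_sleep()/tsleep().
     */
    while ((pg->flags & (PG_BUSY|PG_RELEASED)) != 0) {
            pg->flags |= PG_WANTED;
            UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock,
                FALSE, "uvmwait", 0);
            simple_lock(&uobj->vmobjlock);  /* relock and re-check */
    }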
diff --git a/sys/uvm/uvm_anon.c b/sys/uvm/uvm_anon.c
index da7733d32d1..b393d701ddb 100644
--- a/sys/uvm/uvm_anon.c
+++ b/sys/uvm/uvm_anon.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_anon.c,v 1.6 2001/01/29 02:07:42 niklas Exp $ */
-/* $NetBSD: uvm_anon.c,v 1.2 1999/03/26 17:34:15 chs Exp $ */
+/* $OpenBSD: uvm_anon.c,v 1.7 2001/06/23 19:24:33 smart Exp $ */
+/* $NetBSD: uvm_anon.c,v 1.3 1999/08/14 06:25:48 ross Exp $ */
/*
*
@@ -76,6 +76,7 @@ uvm_anon_init()
for (lcv = 0 ; lcv < nanon ; lcv++) {
anon[lcv].u.an_nxt = uvm.afree;
uvm.afree = &anon[lcv];
+ simple_lock_init(&uvm.afree->an_lock);
}
simple_lock_init(&uvm.afreelock);
}
@@ -108,6 +109,7 @@ uvm_anon_add(pages)
simple_lock_init(&anon->an_lock);
anon[lcv].u.an_nxt = uvm.afree;
uvm.afree = &anon[lcv];
+ simple_lock_init(&uvm.afree->an_lock);
}
simple_unlock(&uvm.afreelock);
}
diff --git a/sys/uvm/uvm_aobj.c b/sys/uvm/uvm_aobj.c
index 34341c5361b..84e8a3539f5 100644
--- a/sys/uvm/uvm_aobj.c
+++ b/sys/uvm/uvm_aobj.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_aobj.c,v 1.9 2001/03/22 03:05:54 smart Exp $ */
-/* $NetBSD: uvm_aobj.c,v 1.21 1999/07/07 05:32:26 thorpej Exp $ */
+/* $OpenBSD: uvm_aobj.c,v 1.10 2001/06/23 19:24:33 smart Exp $ */
+/* $NetBSD: uvm_aobj.c,v 1.25 1999/08/21 02:19:05 thorpej Exp $ */
/*
* Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
@@ -705,28 +705,211 @@ uao_detach(uobj)
}
/*
- * uao_flush: uh, yea, sure it's flushed. really!
+ * uao_flush: "flush" pages out of a uvm object
+ *
+ * => object should be locked by caller. we may _unlock_ the object
+ * if (and only if) we need to clean a page (PGO_CLEANIT).
+ * XXXJRT Currently, however, we don't. In the case of cleaning
+ * XXXJRT a page, we simply just deactivate it. Should probably
+ * XXXJRT handle this better, in the future (although "flushing"
+ * XXXJRT anonymous memory isn't terribly important).
+ * => if PGO_CLEANIT is not set, then we will neither unlock the object
+ * nor block.
+ * => if PGO_ALLPAGES is set, then all pages in the object are valid targets
+ * for flushing.
+ * => NOTE: we rely on the fact that the object's memq is a TAILQ and
+ * that new pages are inserted on the tail end of the list. thus,
+ * we can make a complete pass through the object in one go by starting
+ * at the head and working towards the tail (new pages are put in
+ * front of us).
+ * => NOTE: we are allowed to lock the page queues, so the caller
+ * must not be holding the lock on them [e.g. pagedaemon had
+ * better not call us with the queues locked]
+ * => we return TRUE unless we encountered some sort of I/O error
+ * XXXJRT currently never happens, as we never directly initiate
+ * XXXJRT I/O
+ *
+ * comment on "cleaning" object and PG_BUSY pages:
+ * this routine is holding the lock on the object. the only time
+ * that it can run into a PG_BUSY page that it does not own is if
+ * some other process has started I/O on the page (e.g. either
+ * a pagein or a pageout). if the PG_BUSY page is being paged
+ * in, then it can not be dirty (!PG_CLEAN) because no one has
+ * had a chance to modify it yet. if the PG_BUSY page is being
+ * paged out then it means that someone else has already started
+ * cleaning the page for us (how nice!). in this case, if we
+ * have syncio specified, then after we make our pass through the
+ * object we need to wait for the other PG_BUSY pages to clear
+ * off (i.e. we need to do an iosync). also note that once a
+ * page is PG_BUSY it must stay in its object until it is un-busied.
+ * XXXJRT We never actually do this, as we are "flushing" anonymous
+ * XXXJRT memory, which doesn't have persistent backing store.
+ *
+ * note on page traversal:
+ * we can traverse the pages in an object either by going down the
+ * linked list in "uobj->memq", or we can go over the address range
+ * by page doing hash table lookups for each address. depending
+ * on how many pages are in the object it may be cheaper to do one
+ * or the other. we set "by_list" to true if we are using memq.
+ * if the cost of a hash lookup was equal to the cost of the list
+ * traversal we could compare the number of pages in the start->stop
+ * range to the total number of pages in the object. however, it
+ * seems that a hash table lookup is more expensive than the linked
+ * list traversal, so we multiply the number of pages in the
+ * start->stop range by a penalty which we define below.
*/
+
+#define UAO_HASH_PENALTY 4 /* XXX: a guess */
+
boolean_t
-uao_flush(uobj, start, end, flags)
+uao_flush(uobj, start, stop, flags)
struct uvm_object *uobj;
- vaddr_t start, end;
+ vaddr_t start, stop;
int flags;
{
+ struct uvm_aobj *aobj = (struct uvm_aobj *) uobj;
+ struct vm_page *pp, *ppnext;
+ boolean_t retval, by_list;
+ vaddr_t curoff;
+ UVMHIST_FUNC("uao_flush"); UVMHIST_CALLED(maphist);
+
+ curoff = 0; /* XXX: shut up gcc */
+
+ retval = TRUE; /* default to success */
+
+ if (flags & PGO_ALLPAGES) {
+ start = 0;
+ stop = aobj->u_pages << PAGE_SHIFT;
+ by_list = TRUE; /* always go by the list */
+ } else {
+ start = trunc_page(start);
+ stop = round_page(stop);
+ if (stop > (aobj->u_pages << PAGE_SHIFT)) {
+ printf("uao_flush: strange, got an out of range "
+ "flush (fixed)\n");
+ stop = aobj->u_pages << PAGE_SHIFT;
+ }
+ by_list = (uobj->uo_npages <=
+ ((stop - start) >> PAGE_SHIFT) * UAO_HASH_PENALTY);
+ }
+
+ UVMHIST_LOG(maphist,
+ " flush start=0x%lx, stop=0x%x, by_list=%d, flags=0x%x",
+ start, stop, by_list, flags);
/*
- * anonymous memory doesn't "flush"
- */
+ * Don't need to do any work here if we're not freeing
+ * or deactivating pages.
+ */
+ if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
+ UVMHIST_LOG(maphist,
+ "<- done (no work to do)",0,0,0,0);
+ return (retval);
+ }
+
/*
- * XXX
- * Deal with:
- *
- * PGO_DEACTIVATE for sequential access, via uvm_fault(), and
- * for MADV_DONTNEED
- *
- * PGO_FREE for MADV_FREE and MSINVALIDATE
+ * now do it. note: we must update ppnext in the body of loop or we
+ * will get stuck. we need to use ppnext because we may free "pp"
+ * before doing the next loop.
*/
- return TRUE;
+
+ if (by_list) {
+ pp = uobj->memq.tqh_first;
+ } else {
+ curoff = start;
+ pp = uvm_pagelookup(uobj, curoff);
+ }
+
+ ppnext = NULL; /* XXX: shut up gcc */
+ uvm_lock_pageq(); /* page queues locked */
+
+ /* locked: both page queues and uobj */
+ for ( ; (by_list && pp != NULL) ||
+ (!by_list && curoff < stop) ; pp = ppnext) {
+ if (by_list) {
+ ppnext = pp->listq.tqe_next;
+
+ /* range check */
+ if (pp->offset < start || pp->offset >= stop)
+ continue;
+ } else {
+ curoff += PAGE_SIZE;
+ if (curoff < stop)
+ ppnext = uvm_pagelookup(uobj, curoff);
+
+ /* null check */
+ if (pp == NULL)
+ continue;
+ }
+
+ switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
+ /*
+ * XXX In these first 3 cases, we always just
+ * XXX deactivate the page. We may want to
+ * XXX handle the different cases more specifically
+ * XXX in the future.
+ */
+ case PGO_CLEANIT|PGO_FREE:
+ case PGO_CLEANIT|PGO_DEACTIVATE:
+ case PGO_DEACTIVATE:
+ deactivate_it:
+ /* skip the page if it's loaned or wired */
+ if (pp->loan_count != 0 ||
+ pp->wire_count != 0)
+ continue;
+
+ /* zap all mappings for the page. */
+ pmap_page_protect(PMAP_PGARG(pp),
+ VM_PROT_NONE);
+
+ /* ...and deactivate the page. */
+ uvm_pagedeactivate(pp);
+
+ continue;
+
+ case PGO_FREE:
+ /*
+ * If there are multiple references to
+ * the object, just deactivate the page.
+ */
+ if (uobj->uo_refs > 1)
+ goto deactivate_it;
+
+ /* XXX skip the page if it's loaned or wired */
+ if (pp->loan_count != 0 ||
+ pp->wire_count != 0)
+ continue;
+
+ /*
+ * mark the page as released if it's busy.
+ */
+ if (pp->flags & PG_BUSY) {
+ pp->flags |= PG_RELEASED;
+ continue;
+ }
+
+ /* zap all mappings for the page. */
+ pmap_page_protect(PMAP_PGARG(pp),
+ VM_PROT_NONE);
+
+ uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);
+ uvm_pagefree(pp);
+
+ continue;
+
+ default:
+ panic("uao_flush: weird flags");
+ }
+#ifdef DIAGNOSTIC
+ panic("uao_flush: unreachable code");
+#endif
+ }
+
+ uvm_unlock_pageq();
+
+ UVMHIST_LOG(maphist,
+ "<- done, rv=%d",retval,0,0,0);
+ return (retval);
}
/*
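
Editor's note: to make the by_list heuristic above concrete, here is the same comparison with hypothetical numbers plugged in (a worked example, not part of the change).

    /*
     * Worked example (hypothetical numbers): the object has 1000
     * resident pages and the flush range covers 16 pages.
     *
     *   pages in range * UAO_HASH_PENALTY = 16 * 4 = 64
     *   uobj->uo_npages                   = 1000
     *
     * 1000 <= 64 is false, so by_list is FALSE: 16 hash lookups are
     * cheaper than walking all 1000 pages on the memq.  A small object
     * (say 40 resident pages) would flip the test and use the list.
     */
    by_list = (uobj->uo_npages <=
        ((stop - start) >> PAGE_SHIFT) * UAO_HASH_PENALTY);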
diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h
index bcfd4060cd3..5491dd744c8 100644
--- a/sys/uvm/uvm_extern.h
+++ b/sys/uvm/uvm_extern.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_extern.h,v 1.12 2001/05/10 07:59:06 art Exp $ */
-/* $NetBSD: uvm_extern.h,v 1.32 1999/07/02 23:20:58 thorpej Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.13 2001/06/23 19:24:33 smart Exp $ */
+/* $NetBSD: uvm_extern.h,v 1.34 1999/07/22 22:58:38 thorpej Exp $ */
/*
*
@@ -136,6 +136,12 @@
#define UVM_PGA_USERESERVE 0x0001
/*
+ * lockflags that control the locking behavior of various functions.
+ */
+#define UVM_LK_ENTER 0x00000001 /* map locked on entry */
+#define UVM_LK_EXIT 0x00000002 /* leave map locked on exit */
+
+/*
* structures
*/
@@ -150,6 +156,7 @@ struct vm_anon;
struct vmspace;
struct pmap;
struct vnode;
+struct simplelock;
/*
* uvmexp: global data structures that are exported to parts of the kernel
@@ -282,6 +289,8 @@ int uvm_fault __P((vm_map_t, vaddr_t,
#if defined(KGDB)
void uvm_chgkprot __P((caddr_t, size_t, int));
#endif
+void uvm_sleep __P((void *, struct simplelock *, boolean_t,
+ const char *, int));
void uvm_fork __P((struct proc *, struct proc *, boolean_t,
void *, size_t));
void uvm_exit __P((struct proc *));
@@ -360,7 +369,7 @@ struct vm_page *uvm_pagealloc_strat __P((struct uvm_object *,
#define uvm_pagealloc(obj, off, anon, flags) \
uvm_pagealloc_strat((obj), (off), (anon), (flags), \
UVM_PGA_STRAT_NORMAL, 0)
-vaddr_t uvm_pagealloc_contig __P((vaddr_t, vaddr_t,
+vaddr_t uvm_pagealloc_contig __P((vaddr_t, vaddr_t,
vaddr_t, vaddr_t));
void uvm_pagerealloc __P((struct vm_page *,
struct uvm_object *, vaddr_t));
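
Editor's note: the UVM_LK_ENTER/UVM_LK_EXIT flags added here replace the old islocked boolean of uvm_map_pageable(), letting a caller state both whether the map is already locked on entry and whether it should stay locked on exit. A minimal sketch of the two call styles; the surrounding variables (map, start, end, error) stand for whatever the caller already has.

    /* Caller does not hold the map lock; uvm_map_pageable() takes
     * and drops it itself (as sys_mlock() does later in this diff). */
    error = uvm_map_pageable(map, start, end, FALSE, 0);

    /* Caller already holds the map lock and needs to keep holding
     * it across the call (as uvm_map_protect() now does). */
    vm_map_lock(map);
    error = uvm_map_pageable(map, start, end, FALSE,
        UVM_LK_ENTER | UVM_LK_EXIT);
    /* map is still locked here */
    vm_map_unlock(map);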
diff --git a/sys/uvm/uvm_fault.c b/sys/uvm/uvm_fault.c
index dfbff5be96a..2db0ebe05e1 100644
--- a/sys/uvm/uvm_fault.c
+++ b/sys/uvm/uvm_fault.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_fault.c,v 1.13 2001/06/08 08:09:38 art Exp $ */
-/* $NetBSD: uvm_fault.c,v 1.40 1999/07/08 18:11:03 thorpej Exp $ */
+/* $OpenBSD: uvm_fault.c,v 1.14 2001/06/23 19:24:33 smart Exp $ */
+/* $NetBSD: uvm_fault.c,v 1.44 1999/07/22 22:58:38 thorpej Exp $ */
/*
*
@@ -724,7 +724,7 @@ ReFault:
npages = nback + nforw + 1;
centeridx = nback;
- narrow = FALSE; /* ensure only once per-fault */
+ narrow = TRUE; /* ensure only once per-fault */
} else {
@@ -843,8 +843,8 @@ ReFault:
uvmexp.fltnamap++;
pmap_enter(ufi.orig_map->pmap, currva,
VM_PAGE_TO_PHYS(anon->u.an_page),
- (anon->an_ref > 1) ?
- (enter_prot & ~VM_PROT_WRITE) : enter_prot,
+ (anon->an_ref > 1) ? (enter_prot & ~VM_PROT_WRITE) :
+ enter_prot,
VM_MAPENT_ISWIRED(ufi.entry), 0);
}
simple_unlock(&anon->an_lock);
diff --git a/sys/uvm/uvm_glue.c b/sys/uvm/uvm_glue.c
index 6d0d2409b83..03560f5ea94 100644
--- a/sys/uvm/uvm_glue.c
+++ b/sys/uvm/uvm_glue.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_glue.c,v 1.15 2001/06/08 08:09:39 art Exp $ */
-/* $NetBSD: uvm_glue.c,v 1.27 1999/07/08 18:11:03 thorpej Exp $ */
+/* $OpenBSD: uvm_glue.c,v 1.16 2001/06/23 19:24:33 smart Exp $ */
+/* $NetBSD: uvm_glue.c,v 1.29 1999/07/25 06:30:36 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -107,6 +107,31 @@ int readbuffers = 0; /* allow KGDB to read kern buffer pool */
/*
+ * uvm_sleep: atomic unlock and sleep for UVM_UNLOCK_AND_WAIT().
+ */
+
+void
+uvm_sleep(event, slock, canintr, msg, timo)
+ void *event;
+ struct simplelock *slock;
+ boolean_t canintr;
+ const char *msg;
+ int timo;
+{
+ int s, pri;
+
+ pri = PVM;
+ if (canintr)
+ pri |= PCATCH;
+
+ s = splhigh();
+ if (slock != NULL)
+ simple_unlock(slock);
+ (void) tsleep(event, pri, (char *)msg, timo);
+ splx(s);
+}
+
+/*
* uvm_kernacc: can the kernel access a region of memory
*
* - called from malloc [DIAGNOSTIC], and /dev/kmem driver (mem.c)
@@ -298,7 +323,7 @@ uvm_fork(p1, p2, shared, stack, stacksize)
panic("uvm_fork: uvm_fault_wire failed: %d", rv);
/*
- * p_stats currently point at fields in the user struct. Copy
+ * p_stats currently points at a field in the user struct. Copy
* parts of p_stats, and zero out the rest.
*/
p2->p_stats = &up->u_stats;
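
Editor's note: uvm_sleep() bottoms out in tsleep(), so the waking side remains the ordinary wakeup() on the same event pointer. A minimal sketch of the matching un-busy path, using the PG_WANTED convention already present in UVM (illustrative only).

    /*
     * Sketch: whoever clears PG_BUSY wakes any sleepers that were
     * parked on this page by UVM_UNLOCK_AND_WAIT(pg, ...).
     */
    if (pg->flags & PG_WANTED)
            wakeup(pg);             /* pairs with tsleep(pg, ...) */
    pg->flags &= ~(PG_WANTED | PG_BUSY);
    UVM_PAGE_OWN(pg, NULL);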
diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c
index 956c913bed5..8841554be91 100644
--- a/sys/uvm/uvm_km.c
+++ b/sys/uvm/uvm_km.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_km.c,v 1.9 2001/03/22 03:05:55 smart Exp $ */
-/* $NetBSD: uvm_km.c,v 1.27 1999/06/04 23:38:41 thorpej Exp $ */
+/* $OpenBSD: uvm_km.c,v 1.10 2001/06/23 19:24:33 smart Exp $ */
+/* $NetBSD: uvm_km.c,v 1.31 1999/07/22 22:58:38 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -161,13 +161,6 @@ struct vmi_list vmi_list;
simple_lock_data_t vmi_list_slock;
/*
- * local functions
- */
-
-static int uvm_km_get __P((struct uvm_object *, vaddr_t,
- vm_page_t *, int *, int, vm_prot_t, int, int));
-
-/*
* local data structues
*/
@@ -175,233 +168,12 @@ static struct vm_map kernel_map_store;
static struct uvm_object kmem_object_store;
static struct uvm_object mb_object_store;
-static struct uvm_pagerops km_pager = {
- NULL, /* init */
- NULL, /* reference */
- NULL, /* detach */
- NULL, /* fault */
- NULL, /* flush */
- uvm_km_get, /* get */
- /* ... rest are NULL */
-};
-
/*
- * uvm_km_get: pager get function for kernel objects
- *
- * => currently we do not support pageout to the swap area, so this
- * pager is very simple. eventually we may want an anonymous
- * object pager which will do paging.
- * => XXXCDC: this pager should be phased out in favor of the aobj pager
+ * All pager operations here are NULL, but the object must have
+ * a pager ops vector associated with it; various places assume
+ * it to be so.
*/
-
-
-static int
-uvm_km_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
- struct uvm_object *uobj;
- vaddr_t offset;
- struct vm_page **pps;
- int *npagesp;
- int centeridx, advice, flags;
- vm_prot_t access_type;
-{
- vaddr_t current_offset;
- vm_page_t ptmp;
- int lcv, gotpages, maxpages;
- boolean_t done;
- UVMHIST_FUNC("uvm_km_get"); UVMHIST_CALLED(maphist);
-
- UVMHIST_LOG(maphist, "flags=%d", flags,0,0,0);
-
- /*
- * get number of pages
- */
-
- maxpages = *npagesp;
-
- /*
- * step 1: handled the case where fault data structures are locked.
- */
-
- if (flags & PGO_LOCKED) {
-
- /*
- * step 1a: get pages that are already resident. only do
- * this if the data structures are locked (i.e. the first time
- * through).
- */
-
- done = TRUE; /* be optimistic */
- gotpages = 0; /* # of pages we got so far */
-
- for (lcv = 0, current_offset = offset ;
- lcv < maxpages ; lcv++, current_offset += PAGE_SIZE) {
-
- /* do we care about this page? if not, skip it */
- if (pps[lcv] == PGO_DONTCARE)
- continue;
-
- /* lookup page */
- ptmp = uvm_pagelookup(uobj, current_offset);
-
- /* null? attempt to allocate the page */
- if (ptmp == NULL) {
- ptmp = uvm_pagealloc(uobj, current_offset,
- NULL, 0);
- if (ptmp) {
- /* new page */
- ptmp->flags &= ~(PG_BUSY|PG_FAKE);
- UVM_PAGE_OWN(ptmp, NULL);
- uvm_pagezero(ptmp);
- }
- }
-
- /*
- * to be useful must get a non-busy, non-released page
- */
- if (ptmp == NULL ||
- (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
- if (lcv == centeridx ||
- (flags & PGO_ALLPAGES) != 0)
- /* need to do a wait or I/O! */
- done = FALSE;
- continue;
- }
-
- /*
- * useful page: busy/lock it and plug it in our
- * result array
- */
-
- /* caller must un-busy this page */
- ptmp->flags |= PG_BUSY;
- UVM_PAGE_OWN(ptmp, "uvm_km_get1");
- pps[lcv] = ptmp;
- gotpages++;
-
- } /* "for" lcv loop */
-
- /*
- * step 1b: now we've either done everything needed or we
- * to unlock and do some waiting or I/O.
- */
-
- UVMHIST_LOG(maphist, "<- done (done=%d)", done, 0,0,0);
-
- *npagesp = gotpages;
- if (done)
- return(VM_PAGER_OK); /* bingo! */
- else
- return(VM_PAGER_UNLOCK); /* EEK! Need to
- * unlock and I/O */
- }
-
- /*
- * step 2: get non-resident or busy pages.
- * object is locked. data structures are unlocked.
- */
-
- for (lcv = 0, current_offset = offset ;
- lcv < maxpages ; lcv++, current_offset += PAGE_SIZE) {
-
- /* skip over pages we've already gotten or don't want */
- /* skip over pages we don't _have_ to get */
- if (pps[lcv] != NULL ||
- (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
- continue;
-
- /*
- * we have yet to locate the current page (pps[lcv]). we
- * first look for a page that is already at the current offset.
- * if we find a page, we check to see if it is busy or
- * released. if that is the case, then we sleep on the page
- * until it is no longer busy or released and repeat the
- * lookup. if the page we found is neither busy nor
- * released, then we busy it (so we own it) and plug it into
- * pps[lcv]. this 'break's the following while loop and
- * indicates we are ready to move on to the next page in the
- * "lcv" loop above.
- *
- * if we exit the while loop with pps[lcv] still set to NULL,
- * then it means that we allocated a new busy/fake/clean page
- * ptmp in the object and we need to do I/O to fill in the
- * data.
- */
-
- while (pps[lcv] == NULL) { /* top of "pps" while loop */
-
- /* look for a current page */
- ptmp = uvm_pagelookup(uobj, current_offset);
-
- /* nope? allocate one now (if we can) */
- if (ptmp == NULL) {
-
- ptmp = uvm_pagealloc(uobj, current_offset,
- NULL, 0);
-
- /* out of RAM? */
- if (ptmp == NULL) {
- simple_unlock(&uobj->vmobjlock);
- uvm_wait("kmgetwait1");
- simple_lock(&uobj->vmobjlock);
- /* goto top of pps while loop */
- continue;
- }
-
- /*
- * got new page ready for I/O. break pps
- * while loop. pps[lcv] is still NULL.
- */
- break;
- }
-
- /* page is there, see if we need to wait on it */
- if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
- ptmp->flags |= PG_WANTED;
- UVM_UNLOCK_AND_WAIT(ptmp,&uobj->vmobjlock,
- FALSE, "uvn_get",0);
- simple_lock(&uobj->vmobjlock);
- continue; /* goto top of pps while loop */
- }
-
- /*
- * if we get here then the page has become resident
- * and unbusy between steps 1 and 2. we busy it now
- * (so we own it) and set pps[lcv] (so that we exit
- * the while loop). caller must un-busy.
- */
- ptmp->flags |= PG_BUSY;
- UVM_PAGE_OWN(ptmp, "uvm_km_get2");
- pps[lcv] = ptmp;
- }
-
- /*
- * if we own the a valid page at the correct offset, pps[lcv]
- * will point to it. nothing more to do except go to the
- * next page.
- */
-
- if (pps[lcv])
- continue; /* next lcv */
-
- /*
- * we have a "fake/busy/clean" page that we just allocated.
- * do the needed "i/o" (in this case that means zero it).
- */
-
- uvm_pagezero(ptmp);
- ptmp->flags &= ~(PG_FAKE);
- pps[lcv] = ptmp;
-
- } /* lcv loop */
-
- /*
- * finally, unlock object and return.
- */
-
- simple_unlock(&uobj->vmobjlock);
- UVMHIST_LOG(maphist, "<- done (OK)",0,0,0,0);
- return(VM_PAGER_OK);
-}
+static struct uvm_pagerops km_pager;
/*
* uvm_km_init: init kernel maps and objects to reflect reality (i.e.
@@ -1106,7 +878,7 @@ uvm_km_alloc_poolpage1(map, obj, waitok)
vaddr_t va;
again:
- pg = uvm_pagealloc(NULL, 0, NULL, 0);
+ pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
if (pg == NULL) {
if (waitok) {
uvm_wait("plpg");
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index 14e18ff3f60..bb318eb7371 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_map.c,v 1.15 2001/05/10 14:51:21 art Exp $ */
-/* $NetBSD: uvm_map.c,v 1.63 1999/07/07 21:51:35 thorpej Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.16 2001/06/23 19:24:33 smart Exp $ */
+/* $NetBSD: uvm_map.c,v 1.68 1999/08/21 02:19:05 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -1717,6 +1717,7 @@ uvm_map_protect(map, start, end, new_prot, set_max)
boolean_t set_max;
{
vm_map_entry_t current, entry;
+ int rv = KERN_SUCCESS;
UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_prot=0x%x)",
map, start, end, new_prot);
@@ -1737,11 +1738,13 @@ uvm_map_protect(map, start, end, new_prot, set_max)
current = entry;
while ((current != &map->header) && (current->start < end)) {
- if (UVM_ET_ISSUBMAP(current))
- return (KERN_INVALID_ARGUMENT);
+ if (UVM_ET_ISSUBMAP(current)) {
+ rv = KERN_INVALID_ARGUMENT;
+ goto out;
+ }
if ((new_prot & current->max_protection) != new_prot) {
- vm_map_unlock(map);
- return (KERN_PROTECTION_FAILURE);
+ rv = KERN_PROTECTION_FAILURE;
+ goto out;
}
current = current->next;
}
@@ -1773,12 +1776,43 @@ uvm_map_protect(map, start, end, new_prot, set_max)
current->protection & MASK(entry));
}
+ /*
+ * If the map is configured to lock any future mappings,
+ * wire this entry now if the old protection was VM_PROT_NONE
+ * and the new protection is not VM_PROT_NONE.
+ */
+
+ if ((map->flags & VM_MAP_WIREFUTURE) != 0 &&
+ VM_MAPENT_ISWIRED(entry) == 0 &&
+ old_prot == VM_PROT_NONE &&
+ new_prot != VM_PROT_NONE) {
+ if (uvm_map_pageable(map, entry->start,
+ entry->end, FALSE,
+ UVM_LK_ENTER|UVM_LK_EXIT) != KERN_SUCCESS) {
+ /*
+ * If locking the entry fails, remember the
+ * error if it's the first one. Note we
+ * still continue setting the protection in
+ * the map, but will return the resource
+ * shortage condition regardless.
+ *
+ * XXX Ignore what the actual error is,
+ * XXX just call it a resource shortage
+ * XXX so that it doesn't get confused
+ * XXX what uvm_map_protect() itself would
+ * XXX normally return.
+ */
+ rv = KERN_RESOURCE_SHORTAGE;
+ }
+ }
+
current = current->next;
}
+ out:
vm_map_unlock(map);
- UVMHIST_LOG(maphist, "<- done",0,0,0,0);
- return(KERN_SUCCESS);
+ UVMHIST_LOG(maphist, "<- done, rv=%d",rv,0,0,0);
+ return (rv);
}
#undef max
@@ -1825,10 +1859,6 @@ uvm_map_inherit(map, start, end, new_inheritance)
entry = temp_entry->next;
}
- /*
- * XXXJRT: disallow holes?
- */
-
while ((entry != &map->header) && (entry->start < end)) {
UVM_MAP_CLIP_END(map, entry, end);
@@ -1871,6 +1901,10 @@ uvm_map_advice(map, start, end, new_advice)
entry = temp_entry->next;
}
+ /*
+ * XXXJRT: disallow holes?
+ */
+
while ((entry != &map->header) && (entry->start < end)) {
UVM_MAP_CLIP_END(map, entry, end);
@@ -1913,10 +1947,11 @@ uvm_map_advice(map, start, end, new_advice)
*/
int
-uvm_map_pageable(map, start, end, new_pageable, islocked)
+uvm_map_pageable(map, start, end, new_pageable, lockflags)
vm_map_t map;
vaddr_t start, end;
- boolean_t new_pageable, islocked;
+ boolean_t new_pageable;
+ int lockflags;
{
vm_map_entry_t entry, start_entry, failed_entry;
int rv;
@@ -1932,8 +1967,9 @@ uvm_map_pageable(map, start, end, new_pageable, islocked)
panic("uvm_map_pageable: map %p not pageable", map);
#endif
- if (islocked == FALSE)
+ if ((lockflags & UVM_LK_ENTER) == 0)
vm_map_lock(map);
+
VM_MAP_RANGE_CHECK(map, start, end);
/*
@@ -1945,7 +1981,8 @@ uvm_map_pageable(map, start, end, new_pageable, islocked)
*/
if (uvm_map_lookup_entry(map, start, &start_entry) == FALSE) {
- vm_map_unlock(map);
+ if ((lockflags & UVM_LK_EXIT) == 0)
+ vm_map_unlock(map);
UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
return (KERN_INVALID_ADDRESS);
@@ -1967,7 +2004,8 @@ uvm_map_pageable(map, start, end, new_pageable, islocked)
(entry->end < end &&
(entry->next == &map->header ||
entry->next->start > entry->end))) {
- vm_map_unlock(map);
+ if ((lockflags & UVM_LK_EXIT) == 0)
+ vm_map_unlock(map);
UVMHIST_LOG(maphist,
"<- done (INVALID UNWIRE ARG)",0,0,0,0);
return (KERN_INVALID_ARGUMENT);
@@ -1987,7 +2025,8 @@ uvm_map_pageable(map, start, end, new_pageable, islocked)
uvm_map_entry_unwire(map, entry);
entry = entry->next;
}
- vm_map_unlock(map);
+ if ((lockflags & UVM_LK_EXIT) == 0)
+ vm_map_unlock(map);
UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
return(KERN_SUCCESS);
@@ -2055,7 +2094,8 @@ uvm_map_pageable(map, start, end, new_pageable, islocked)
entry->wired_count--;
entry = entry->prev;
}
- vm_map_unlock(map);
+ if ((lockflags & UVM_LK_EXIT) == 0)
+ vm_map_unlock(map);
UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0);
return (KERN_INVALID_ARGUMENT);
}
@@ -2123,15 +2163,24 @@ uvm_map_pageable(map, start, end, new_pageable, islocked)
uvm_map_entry_unwire(map, entry);
entry = entry->next;
}
- vm_map_unlock(map);
+ if ((lockflags & UVM_LK_EXIT) == 0)
+ vm_map_unlock(map);
UVMHIST_LOG(maphist, "<- done (RV=%d)", rv,0,0,0);
return(rv);
}
/* We are holding a read lock here. */
- vm_map_unbusy(map);
- vm_map_unlock_read(map);
-
+ if ((lockflags & UVM_LK_EXIT) == 0) {
+ vm_map_unbusy(map);
+ vm_map_unlock_read(map);
+ } else {
+ /*
+ * Get back to an exclusive (write) lock.
+ */
+ vm_map_upgrade(map);
+ vm_map_unbusy(map);
+ }
+
UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
return(KERN_SUCCESS);
}
@@ -2323,20 +2372,29 @@ uvm_map_pageable_all(map, flags, limit)
/*
* first drop the wiring count on all the entries
* which haven't actually been wired yet.
+ *
+ * Skip VM_PROT_NONE entries like we did above.
*/
failed_entry = entry;
for (/* nothing */; entry != &map->header;
- entry = entry->next)
+ entry = entry->next) {
+ if (entry->protection == VM_PROT_NONE)
+ continue;
entry->wired_count--;
+ }
/*
* now, unwire all the entries that were successfully
* wired above.
+ *
+ * Skip VM_PROT_NONE entries like we did above.
*/
for (entry = map->header.next; entry != failed_entry;
entry = entry->next) {
+ if (entry->protection == VM_PROT_NONE)
+ continue;
entry->wired_count--;
- if (VM_MAPENT_ISWIRED(entry) == 0)
+ if (VM_MAPENT_ISWIRED(entry))
uvm_map_entry_unwire(map, entry);
}
vm_map_unlock(map);
@@ -2471,6 +2529,7 @@ uvm_map_clean(map, start, end, flags)
case PGO_CLEANIT|PGO_FREE:
case PGO_CLEANIT|PGO_DEACTIVATE:
case PGO_DEACTIVATE:
+ deactivate_it:
/* skip the page if it's loaned or wired */
if (pg->loan_count != 0 ||
pg->wire_count != 0) {
@@ -2515,12 +2574,19 @@ uvm_map_clean(map, start, end, flags)
continue;
case PGO_FREE:
+ /*
+ * If there are multiple references to
+ * the amap, just deactivate the page.
+ */
+ if (amap_refs(amap) > 1)
+ goto deactivate_it;
+
/* XXX skip the page if it's wired */
if (pg->wire_count != 0) {
simple_unlock(&anon->an_lock);
continue;
}
- amap_unadd(&entry->aref, offset);
+ amap_unadd(&current->aref, offset);
refs = --anon->an_ref;
simple_unlock(&anon->an_lock);
if (refs == 0)
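
Editor's note: the VM_MAP_WIREFUTURE handling added to uvm_map_protect() above matters for a process that has asked for future mappings to be wired and later makes a PROT_NONE reservation accessible. A user-level sketch of that sequence (illustrative only, error handling omitted):

    #include <sys/types.h>
    #include <sys/mman.h>

    int
    main(void)
    {
            size_t len = 4 * 4096;
            char *p;

            mlockall(MCL_FUTURE);   /* request wiring of future mappings */

            /* PROT_NONE reservation: nothing is wired yet. */
            p = mmap(NULL, len, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);

            /*
             * Raising the protection from PROT_NONE now triggers the
             * uvm_map_pageable() call added above, wiring the pages.
             */
            mprotect(p, len, PROT_READ | PROT_WRITE);
            return (0);
    }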
diff --git a/sys/uvm/uvm_meter.c b/sys/uvm/uvm_meter.c
index b208c4740ac..c397087d7bb 100644
--- a/sys/uvm/uvm_meter.c
+++ b/sys/uvm/uvm_meter.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_meter.c,v 1.9 2001/01/29 02:07:46 niklas Exp $ */
-/* $NetBSD: uvm_meter.c,v 1.8 1999/03/25 18:48:53 mrg Exp $ */
+/* $OpenBSD: uvm_meter.c,v 1.10 2001/06/23 19:24:34 smart Exp $ */
+/* $NetBSD: uvm_meter.c,v 1.10 1999/07/25 06:30:36 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
diff --git a/sys/uvm/uvm_mmap.c b/sys/uvm/uvm_mmap.c
index 791a056d936..d52dfa4e539 100644
--- a/sys/uvm/uvm_mmap.c
+++ b/sys/uvm/uvm_mmap.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_mmap.c,v 1.14 2001/06/08 08:09:39 art Exp $ */
-/* $NetBSD: uvm_mmap.c,v 1.30 1999/07/08 00:52:45 thorpej Exp $ */
+/* $OpenBSD: uvm_mmap.c,v 1.15 2001/06/23 19:24:34 smart Exp $ */
+/* $NetBSD: uvm_mmap.c,v 1.35 1999/07/17 21:35:50 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -93,7 +93,7 @@ sys_sbrk(p, v, retval)
{
#if 0
struct sys_sbrk_args /* {
- syscallarg(int) incr;
+ syscallarg(intptr_t) incr;
} */ *uap = v;
#endif
@@ -198,16 +198,16 @@ sys_mincore(p, v, retval)
lim = end < entry->end ? end : entry->end;
/*
- * Special case for mapped devices; these are always
- * considered resident.
+ * Special case for objects with no "real" pages. Those
+ * are always considered resident (mapped devices).
*/
if (UVM_ET_ISOBJ(entry)) {
- extern struct uvm_pagerops uvm_deviceops; /* XXX */
#ifdef DIAGNOSTIC
if (UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj))
panic("mincore: user map has kernel object");
#endif
- if (entry->object.uvm_obj->pgops == &uvm_deviceops) {
+ if (entry->object.uvm_obj->pgops->pgo_releasepg
+ == NULL) {
for (/* nothing */; start < lim;
start += PAGE_SIZE, vec++)
subyte(vec, 1);
@@ -215,8 +215,8 @@ sys_mincore(p, v, retval)
}
}
- uobj = entry->object.uvm_obj; /* top layer */
- amap = entry->aref.ar_amap; /* bottom layer */
+ amap = entry->aref.ar_amap; /* top layer */
+ uobj = entry->object.uvm_obj; /* bottom layer */
if (amap != NULL)
amap_lock(amap);
@@ -621,7 +621,9 @@ sys_msync(p, v, retval)
/*
* translate MS_ flags into PGO_ flags
*/
- uvmflags = PGO_CLEANIT | (flags & MS_INVALIDATE) ? PGO_FREE : 0;
+ uvmflags = PGO_CLEANIT;
+ if (flags & MS_INVALIDATE)
+ uvmflags |= PGO_FREE;
if (flags & MS_SYNC)
uvmflags |= PGO_SYNCIO;
else
@@ -980,7 +982,7 @@ sys_mlock(p, v, retval)
#endif
error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, FALSE,
- FALSE);
+ 0);
return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
@@ -1027,7 +1029,7 @@ sys_munlock(p, v, retval)
#endif
error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, TRUE,
- FALSE);
+ 0);
return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
@@ -1258,7 +1260,7 @@ uvm_mmap(map, addr, size, prot, maxprot, flags, handle, foff, locklimit)
goto bad;
}
retval = uvm_map_pageable(map, *addr, *addr + size,
- FALSE, TRUE);
+ FALSE, UVM_LK_ENTER);
if (retval != KERN_SUCCESS) {
/* unmap the region! */
(void) uvm_unmap(map, *addr, *addr + size);
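
Editor's note: the sys_msync() hunk above fixes an operator-precedence bug: '|' binds tighter than '?:', so the old expression never kept PGO_CLEANIT and always requested PGO_FREE. A short comparison of how the two versions parse (the flag values themselves don't matter, only the grouping):

    /*
     * Old code.  Because '|' has higher precedence than '?:', this
     * parses as:
     *
     *   uvmflags = (PGO_CLEANIT | (flags & MS_INVALIDATE)) ? PGO_FREE : 0;
     *
     * PGO_CLEANIT is non-zero, so the condition is always true and
     * uvmflags is always just PGO_FREE: the clean request is lost and
     * pages are freed even for a plain msync().
     */
    uvmflags = PGO_CLEANIT | (flags & MS_INVALIDATE) ? PGO_FREE : 0;

    /* Fixed version builds the flag word explicitly. */
    uvmflags = PGO_CLEANIT;
    if (flags & MS_INVALIDATE)
            uvmflags |= PGO_FREE;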
diff --git a/sys/uvm/uvm_vnode.c b/sys/uvm/uvm_vnode.c
index 8219e4faf87..12327f97836 100644
--- a/sys/uvm/uvm_vnode.c
+++ b/sys/uvm/uvm_vnode.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_vnode.c,v 1.12 2001/03/22 03:05:57 smart Exp $ */
-/* $NetBSD: uvm_vnode.c,v 1.23 1999/04/11 04:04:11 chs Exp $ */
+/* $OpenBSD: uvm_vnode.c,v 1.13 2001/06/23 19:24:34 smart Exp $ */
+/* $NetBSD: uvm_vnode.c,v 1.25 1999/07/22 22:58:39 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.