path: root/sys/vm/vm_pageout.c
author    Niklas Hallqvist <niklas@cvs.openbsd.org>  1998-03-01 00:38:26 +0000
committer Niklas Hallqvist <niklas@cvs.openbsd.org>  1998-03-01 00:38:26 +0000
commit    b92b419a6a8ef401c8a4e022115bf3e18426eea0 (patch)
tree      ff214e6b334202d15c2b303427a2a5d2f16af4f0 /sys/vm/vm_pageout.c
parent    4f215f167e35940141001f7f31bfce350266d153 (diff)
Merge of MACHINE_NEW_CONTIG (aka MNN) code from Chuck Cranor,
<chuck@openbsd.org>. This code is as yet disabled on all platforms, and
actually not yet supported on more than mvme68k, although other platforms are
expected soon, as code is already available. This code makes handling of
multiple physical memory regions consistent over all platforms, while keeping
the performance of maintaining a single contiguous memory chunk. It is also a
requirement for the upcoming UVM replacement VM system.

What I did in this merge: just declared the pmap_map function in an MD include
file per port that needs it. It's not an exported pmap interface, says Chuck.
It ended up in different include files on different ports, as I tried to
follow the current policy on a per-arch basis.
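
As a rough illustration of the per-port declaration mentioned above (a sketch
only; the header placement, type names, and exact prototype are assumptions
here and vary between architectures, they are not taken from this commit),
such a machine-dependent include file might carry something like:

	/*
	 * Hypothetical MD prototype for the internal pmap_map() helper;
	 * real ports differ in where this lives and in the types used.
	 */
	vm_offset_t pmap_map(vm_offset_t va, vm_offset_t spa,
	    vm_offset_t epa, int prot);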
Diffstat (limited to 'sys/vm/vm_pageout.c')
-rw-r--r--  sys/vm/vm_pageout.c | 47
1 file changed, 40 insertions, 7 deletions
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index b1b41394611..e6a9fef00bb 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_pageout.c,v 1.8 1997/11/06 05:59:36 csapuntz Exp $ */
+/* $OpenBSD: vm_pageout.c,v 1.9 1998/03/01 00:38:21 niklas Exp $ */
/* $NetBSD: vm_pageout.c,v 1.23 1996/02/05 01:54:07 christos Exp $ */
/*
@@ -99,6 +99,33 @@ int doclustered_pageout = 1;
#endif
/*
+ * Activate the pageout daemon and sleep awaiting more free memory
+ */
+void vm_wait(msg)
+ char *msg;
+{
+ int timo = 0;
+
+ if(curproc == pageout_daemon) {
+ /*
+ * We might be toast here, but IF some paging operations
+ * are pending then pages will magically appear. We
+ * usually can't return an error because callers of
+ * malloc who can wait generally don't check for
+ * failure.
+ *
+ * Only the pageout_daemon wakes up this channel!
+ */
+ printf("pageout daemon has stalled\n");
+ timo = hz >> 3;
+ }
+ simple_lock(&vm_pages_needed_lock);
+ thread_wakeup(&vm_pages_needed);
+ thread_sleep_msg(&cnt.v_free_count, &vm_pages_needed_lock, FALSE, msg,
+ timo);
+}
+
+/*
* vm_pageout_scan does the dirty work for the pageout daemon.
*/
void
@@ -202,7 +229,6 @@ vm_pageout_scan()
object = m->object;
if (!vm_object_lock_try(object))
continue;
- cnt.v_pageouts++;
#ifdef CLUSTERED_PAGEOUT
if (object->pager &&
vm_pager_cancluster(object->pager, PG_CLUSTERPUT))
@@ -294,9 +320,10 @@ vm_pageout_page(m, object)
vm_object_unlock(object);
/*
- * Do a wakeup here in case the following operations block.
+ * We _used_ to wakeup page consumers here, "in case the following
+ * operations block". That leads to livelock if the pageout fails,
+ * which is actually quite a common thing for NFS paging.
*/
- thread_wakeup(&cnt.v_free_count);
/*
* If there is no pager for the page, use the default pager.
@@ -317,6 +344,9 @@ vm_pageout_page(m, object)
switch (pageout_status) {
case VM_PAGER_OK:
case VM_PAGER_PEND:
+ /* hmm, don't wakeup if memory is _very_ low? */
+ thread_wakeup(&cnt.v_free_count);
+ cnt.v_pageouts++;
cnt.v_pgpgout++;
m->flags &= ~PG_LAUNDRY;
break;
@@ -340,7 +370,7 @@ vm_pageout_page(m, object)
* XXX could get stuck here.
*/
(void)tsleep((caddr_t)&vm_pages_needed, PZERO|PCATCH,
- "pageout", hz);
+ "pageout", hz>>3);
break;
}
case VM_PAGER_FAIL:
@@ -391,6 +421,7 @@ vm_pageout_cluster(m, object)
vm_page_t plist[MAXPOCLUSTER], *plistp, p;
int postatus, ix, count;
+ cnt.v_pageouts++;
/*
* Determine the range of pages that can be part of a cluster
* for this object/offset. If it is only our single page, just
@@ -524,7 +555,8 @@ again:
void
vm_pageout()
{
- (void)spl0();
+ pageout_daemon = curproc;
+ (void) spl0();
/*
* Initialize some paging parameters.
@@ -557,7 +589,8 @@ vm_pageout()
simple_lock(&vm_pages_needed_lock);
while (TRUE) {
- thread_sleep(&vm_pages_needed, &vm_pages_needed_lock, FALSE);
+ thread_sleep_msg(&vm_pages_needed, &vm_pages_needed_lock,
+ FALSE, "paged", 0);
/*
* Compute the inactive target for this scan.
* We need to keep a reasonable amount of memory in the