Diffstat (limited to 'sys/uvm/uvm_page.c')
-rw-r--r--  sys/uvm/uvm_page.c  163
1 file changed, 107 insertions, 56 deletions
diff --git a/sys/uvm/uvm_page.c b/sys/uvm/uvm_page.c
index 4e46822894a..3207e33020c 100644
--- a/sys/uvm/uvm_page.c
+++ b/sys/uvm/uvm_page.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_page.c,v 1.29 2001/11/07 02:55:50 art Exp $ */
-/* $NetBSD: uvm_page.c,v 1.43 2000/11/09 19:15:28 christos Exp $ */
+/* $OpenBSD: uvm_page.c,v 1.30 2001/11/10 18:42:31 art Exp $ */
+/* $NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -75,6 +75,7 @@
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sched.h>
+#include <sys/kernel.h>
#define UVM_PAGE /* pull in uvm_page.h functions */
#include <uvm/uvm.h>
@@ -96,8 +97,18 @@ int vm_nphysseg = 0; /* XXXCDC: uvm.nphysseg */
* We therefore provide a way to disable it from machdep code here.
*/
+/*
+ * XXX disabled until we can find a way to do this without causing
+ * problems for either cpu caches or DMA latency.
+ */
boolean_t vm_page_zero_enable = FALSE;
+#ifdef UBC
+u_long uvm_pgcnt_anon;
+u_long uvm_pgcnt_vnode;
+extern struct uvm_pagerops uvm_vnodeops;
+#endif
+
/*
* local variables
*/
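
[Editor's note: the two UBC counters added above track how many pages are owned by anons versus vnodes, which lets pageout policy weigh the two classes against each other. A minimal standalone sketch of that idea; the balancing rule and names here are illustrative, not from this patch:

	#include <stdio.h>

	static unsigned long pgcnt_anon;	/* pages owned by anons */
	static unsigned long pgcnt_vnode;	/* pages owned by vnodes */

	/* prefer reclaiming whichever class currently holds more pages */
	static const char *
	reclaim_preference(void)
	{
		return (pgcnt_vnode > pgcnt_anon) ? "vnode" : "anon";
	}

	int
	main(void)
	{
		pgcnt_anon = 100;
		pgcnt_vnode = 300;
		printf("reclaim from %s pages first\n", reclaim_preference());
		return 0;
	}
]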
@@ -124,7 +135,7 @@ static struct pglist uvm_bootbucket;
*/
static void uvm_pageinsert __P((struct vm_page *));
-
+static void uvm_pageremove __P((struct vm_page *));
/*
* inline functions
@@ -161,7 +172,6 @@ uvm_pageinsert(pg)
TAILQ_INSERT_TAIL(&pg->uobject->memq, pg, listq); /* put in object */
pg->flags |= PG_TABLED;
pg->uobject->uo_npages++;
-
}
/*
@@ -171,21 +181,14 @@ uvm_pageinsert(pg)
* => caller must lock page queues
*/
-void __inline
+static __inline void
uvm_pageremove(pg)
struct vm_page *pg;
{
struct pglist *buck;
int s;
-#ifdef DIAGNOSTIC
- if ((pg->flags & (PG_FAULTING)) != 0)
- panic("uvm_pageremove: page is faulting");
-#endif
-
- if ((pg->flags & PG_TABLED) == 0)
- return; /* XXX: log */
-
+ KASSERT(pg->flags & PG_TABLED);
buck = &uvm.page_hash[uvm_pagehash(pg->uobject,pg->offset)];
s = splimp();
simple_lock(&uvm.hashlock);
@@ -193,6 +196,12 @@ uvm_pageremove(pg)
simple_unlock(&uvm.hashlock);
splx(s);
+#ifdef UBC
+ if (pg->uobject->pgops == &uvm_vnodeops) {
+ uvm_pgcnt_vnode--;
+ }
+#endif
+
/* object should be locked */
TAILQ_REMOVE(&pg->uobject->memq, pg, listq);
@@ -200,7 +209,6 @@ uvm_pageremove(pg)
pg->uobject->uo_npages--;
pg->uobject = NULL;
pg->version++;
-
}
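
[Editor's note: this hunk, like several below, replaces a hand-rolled #ifdef DIAGNOSTIC/panic() block with KASSERT(). A standalone sketch of what such a macro amounts to; the real definition lives in the kernel headers and may differ:

	#include <stdio.h>
	#include <stdlib.h>

	#define DIAGNOSTIC	/* enable the check for this demo */

	#ifdef DIAGNOSTIC
	#define KASSERT(e)						\
		do {							\
			if (!(e)) {					\
				fprintf(stderr,				\
				    "assertion \"%s\" failed\n", #e);	\
				abort();	/* kernel: panic() */	\
			}						\
		} while (0)
	#else
	#define KASSERT(e)	/* compiled away */
	#endif

	int
	main(void)
	{
		int tabled = 1;

		KASSERT(tabled);   /* mirrors KASSERT(pg->flags & PG_TABLED) */
		return 0;
	}
]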
/*
@@ -218,7 +226,6 @@ uvm_page_init(kvm_startp, kvm_endp)
int lcv, i;
paddr_t paddr;
-
/*
* step 1: init the page queues and page queue locks
*/
@@ -239,7 +246,7 @@ uvm_page_init(kvm_startp, kvm_endp)
*/
uvm.page_nhash = 1; /* 1 bucket */
- uvm.page_hashmask = 0; /* mask for hash function */
+ uvm.page_hashmask = 0; /* mask for hash function */
uvm.page_hash = &uvm_bootbucket; /* install bootstrap bucket */
TAILQ_INIT(uvm.page_hash); /* init hash table */
simple_lock_init(&uvm.hashlock); /* init hash table lock */
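
[Editor's note: the bootstrap bucket above works because the hash mask starts at zero: every (object, offset) pair hashes to bucket 0 until a later step of uvm_page_init allocates the real table. A standalone model of the lookup; the real uvm_pagehash() macro may differ in detail:

	#include <stdio.h>

	#define PAGE_SHIFT	12

	static unsigned long page_hashmask = 0;	/* nhash - 1, power of two */

	static unsigned long
	pagehash(void *obj, unsigned long off)
	{
		return ((unsigned long)obj + (off >> PAGE_SHIFT)) &
		    page_hashmask;
	}

	int
	main(void)
	{
		int dummy_obj;

		/* with mask 0, everything lands in the bootstrap bucket */
		printf("bucket = %lu\n", pagehash(&dummy_obj, 0x4000));
		return 0;
	}
]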
@@ -292,7 +299,6 @@ uvm_page_init(kvm_startp, kvm_endp)
*/
for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
-
n = vm_physmem[lcv].end - vm_physmem[lcv].start;
if (n > pagecount) {
printf("uvm_page_init: lost %ld page(s) in init\n",
@@ -318,6 +324,7 @@ uvm_page_init(kvm_startp, kvm_endp)
}
}
}
+
/*
* step 5: pass up the values of virtual_space_start and
* virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
@@ -328,10 +335,11 @@ uvm_page_init(kvm_startp, kvm_endp)
*kvm_endp = trunc_page(virtual_space_end);
/*
- * step 6: init pagedaemon lock
+ * step 6: init locks for kernel threads
*/
simple_lock_init(&uvm.pagedaemon_lock);
+ simple_lock_init(&uvm.aiodoned_lock);
/*
* step 7: init reserve thresholds
@@ -343,10 +351,6 @@ uvm_page_init(kvm_startp, kvm_endp)
/*
* step 8: determine if we should zero pages in the idle
* loop.
- *
- * XXXJRT - might consider zero'ing up to the target *now*,
- * but that could take an awfully long time if you
- * have a lot of memory.
*/
uvm.page_idle_zero = vm_page_zero_enable;
@@ -361,7 +365,6 @@ uvm_page_init(kvm_startp, kvm_endp)
* uvm_setpagesize: set the page size
*
* => sets page_shift and page_mask from uvmexp.pagesize.
- * => XXXCDC: move global vars.
*/
void
@@ -892,22 +895,26 @@ uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
struct pgfreelist *pgfl;
boolean_t use_reserve;
-#ifdef DIAGNOSTIC
- /* sanity check */
- if (obj && anon)
- panic("uvm_pagealloc: obj and anon != NULL");
-#endif
-
- s = uvm_lock_fpageq(); /* lock free page queue */
+ KASSERT(obj == NULL || anon == NULL);
+ KASSERT(off == trunc_page(off));
+ s = uvm_lock_fpageq();
/*
* check to see if we need to generate some free pages by waking
* the pagedaemon.
*/
+#ifdef UBC
+ if (uvmexp.free + uvmexp.paging < uvmexp.freemin ||
+ (uvmexp.free + uvmexp.paging < uvmexp.freetarg &&
+ uvmexp.inactive < uvmexp.inactarg)) {
+ wakeup(&uvm.pagedaemon);
+ }
+#else
if (uvmexp.free < uvmexp.freemin || (uvmexp.free < uvmexp.freetarg &&
uvmexp.inactive < uvmexp.inactarg))
wakeup(&uvm.pagedaemon);
+#endif
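
[Editor's note: the UBC variant of the wakeup test counts pages already being paged out (uvmexp.paging) as if they were free, so the pagedaemon is not re-woken for work that is already in flight. Restated as a standalone predicate; the struct here is a stand-in for the real uvmexp:

	#include <stdbool.h>
	#include <stdio.h>

	struct vmstats {
		int free, paging, freemin, freetarg, inactive, inactarg;
	};

	static bool
	need_pagedaemon(const struct vmstats *s)
	{
		/* pages already being paged out count toward the pool */
		return (s->free + s->paging < s->freemin ||
		    (s->free + s->paging < s->freetarg &&
		    s->inactive < s->inactarg));
	}

	int
	main(void)
	{
		struct vmstats s = { 10, 20, 25, 64, 5, 16 };

		printf("wake pagedaemon: %d\n", need_pagedaemon(&s));
		return 0;
	}
]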
/*
* fail if any of these conditions is true:
@@ -961,11 +968,7 @@ uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
case UVM_PGA_STRAT_ONLY:
case UVM_PGA_STRAT_FALLBACK:
/* Attempt to allocate from the specified free list. */
-#ifdef DIAGNOSTIC
- if (free_list >= VM_NFREELIST || free_list < 0)
- panic("uvm_pagealloc_strat: bad free list %d",
- free_list);
-#endif
+ KASSERT(free_list >= 0 && free_list < VM_NFREELIST);
pgfl = &uvm.page_free[free_list];
if ((pg = TAILQ_FIRST((freeq =
&pgfl->pgfl_queues[try1]))) != NULL ||
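
[Editor's note: the try1/try2 pair above implements a preferred-queue-then-fallback pick across the per-freelist page queues (e.g. known-zero pages versus pages of unknown contents). A standalone sketch of the pattern; the queue names are illustrative:

	#include <stdio.h>

	#define NQUEUES	2

	struct queue { int npages; };

	static struct queue *
	pick_queue(struct queue q[NQUEUES], int try1, int try2)
	{
		if (q[try1].npages > 0)
			return &q[try1];	/* preferred (e.g. zeroed) */
		if (q[try2].npages > 0)
			return &q[try2];	/* fallback (unknown) */
		return NULL;			/* both empty: alloc fails */
	}

	int
	main(void)
	{
		struct queue q[NQUEUES] = { { 0 }, { 5 } };

		printf("got queue with %d pages\n", pick_queue(q, 0, 1)->npages);
		return 0;
	}
]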
@@ -1016,11 +1019,12 @@ uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
pg->uanon = anon;
pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
pg->version++;
- pg->wire_count = 0;
- pg->loan_count = 0;
if (anon) {
anon->u.an_page = pg;
pg->pqflags = PQ_ANON;
+#ifdef UBC
+ uvm_pgcnt_anon++;
+#endif
} else {
if (obj)
uvm_pageinsert(pg);
@@ -1121,8 +1125,6 @@ uvm_pagerealloc(pg, newobj, newoff)
pg->version++;
uvm_pageinsert(pg);
}
-
- return;
}
@@ -1136,14 +1138,20 @@ uvm_pagerealloc(pg, newobj, newoff)
* => assumes all valid mappings of pg are gone
*/
-void uvm_pagefree(pg)
-
-struct vm_page *pg;
-
+void
+uvm_pagefree(pg)
+ struct vm_page *pg;
{
int s;
int saved_loan_count = pg->loan_count;
+#ifdef DEBUG
+ if (pg->uobject == (void *)0xdeadbeef &&
+ pg->uanon == (void *)0xdeadbeef) {
+ panic("uvm_pagefree: freeing free page %p\n", pg);
+ }
+#endif
+
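
[Editor's note: the DEBUG check added above assumes the free path poisons a page's owner pointers with 0xdeadbeef, so freeing an already-free page panics instead of silently corrupting the free list. A standalone model of that pattern; the poisoning site in UVM is elsewhere in the free path:

	#include <stdio.h>
	#include <stdlib.h>

	struct page {
		void *uobject;
		void *uanon;
	};

	static void
	page_free(struct page *pg)
	{
		if (pg->uobject == (void *)0xdeadbeef &&
		    pg->uanon == (void *)0xdeadbeef) {
			fprintf(stderr, "freeing free page %p\n", (void *)pg);
			abort();	/* the kernel would panic() */
		}
		pg->uobject = (void *)0xdeadbeef;	/* poison for next time */
		pg->uanon = (void *)0xdeadbeef;
	}

	int
	main(void)
	{
		struct page pg = { NULL, NULL };

		page_free(&pg);		/* ok */
		page_free(&pg);		/* aborts: double free caught */
		return 0;
	}
]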
/*
* if the page was an object page (and thus "TABLED"), remove it
* from the object.
@@ -1152,7 +1160,7 @@ struct vm_page *pg;
if (pg->flags & PG_TABLED) {
/*
- * if the object page is on loan we are going to drop ownership.
+ * if the object page is on loan we are going to drop ownership.
* it is possible that an anon will take over as owner for this
* page later on. the anon will want a !PG_CLEAN page so that
* it knows it needs to allocate swap if it wants to page the
@@ -1161,7 +1169,6 @@ struct vm_page *pg;
if (saved_loan_count)
pg->flags &= ~PG_CLEAN; /* in case an anon takes over */
-
uvm_pageremove(pg);
/*
@@ -1172,9 +1179,9 @@ struct vm_page *pg;
* return (when the last loan is dropped, then the page can be
* freed by whatever was holding the last loan).
*/
+
if (saved_loan_count)
return;
-
} else if (saved_loan_count && (pg->pqflags & PQ_ANON)) {
/*
@@ -1184,19 +1191,12 @@ struct vm_page *pg;
* note that the kernel can't change the loan status of our
* page as long as we are holding PQ lock.
*/
+
pg->pqflags &= ~PQ_ANON;
pg->uanon = NULL;
return;
}
-
-#ifdef DIAGNOSTIC
- if (saved_loan_count) {
- printf("uvm_pagefree: warning: freeing page with a loan "
- "count of %d\n", saved_loan_count);
- panic("uvm_pagefree: loan count");
- }
-#endif
-
+ KASSERT(saved_loan_count == 0);
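
[Editor's note: the loan handling above has three outcomes: a tabled page with loans is removed from its object but not freed, an anon-owned page with loans merely drops its anon, and only a page with no outstanding loans reaches the free queue. A toy model of those decisions; names are illustrative:

	#include <stdio.h>

	struct toy_page {
		int loan_count;	/* copies lent out */
		int tabled;	/* owned by an object? */
		int anon;	/* owned by an anon? */
	};

	/* returns 1 if the page was actually freed */
	static int
	toy_pagefree(struct toy_page *pg)
	{
		if (pg->tabled) {
			pg->tabled = 0;		/* drop object ownership */
			if (pg->loan_count)
				return 0;	/* last borrower frees it */
		} else if (pg->loan_count && pg->anon) {
			pg->anon = 0;		/* drop anon ownership */
			return 0;
		}
		return 1;			/* no loans: really free */
	}

	int
	main(void)
	{
		struct toy_page pg = { 1, 1, 0 };

		printf("freed now: %d\n", toy_pagefree(&pg));
		return 0;
	}
]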
/*
* now remove the page from the queues
@@ -1219,13 +1219,19 @@ struct vm_page *pg;
/*
* if the page was wired, unwire it now.
*/
+
if (pg->wire_count) {
pg->wire_count = 0;
uvmexp.wired--;
}
+#ifdef UBC
+ if (pg->uanon) {
+ uvm_pgcnt_anon--;
+ }
+#endif
/*
- * and put on free queue
+ * and put on free queue
*/
pg->flags &= ~PG_ZERO;
@@ -1247,6 +1253,51 @@ struct vm_page *pg;
uvm_unlock_fpageq(s);
}
+/*
+ * uvm_page_unbusy: unbusy an array of pages.
+ *
+ * => pages must either all belong to the same object, or all belong to anons.
+ * => if pages are object-owned, object must be locked.
+ * => if pages are anon-owned, anons must be unlocked and have 0 refcount.
+ */
+
+void
+uvm_page_unbusy(pgs, npgs)
+ struct vm_page **pgs;
+ int npgs;
+{
+ struct vm_page *pg;
+ struct uvm_object *uobj;
+ int i;
+ UVMHIST_FUNC("uvm_page_unbusy"); UVMHIST_CALLED(ubchist);
+
+ for (i = 0; i < npgs; i++) {
+ pg = pgs[i];
+
+ if (pg == NULL) {
+ continue;
+ }
+ if (pg->flags & PG_WANTED) {
+ wakeup(pg);
+ }
+ if (pg->flags & PG_RELEASED) {
+ UVMHIST_LOG(ubchist, "releasing pg %p", pg,0,0,0);
+ uobj = pg->uobject;
+ if (uobj != NULL) {
+ uobj->pgops->pgo_releasepg(pg, NULL);
+ } else {
+ pg->flags &= ~(PG_BUSY);
+ UVM_PAGE_OWN(pg, NULL);
+ uvm_anfree(pg->uanon);
+ }
+ } else {
+ UVMHIST_LOG(ubchist, "unbusying pg %p", pg,0,0,0);
+ pg->flags &= ~(PG_WANTED|PG_BUSY);
+ UVM_PAGE_OWN(pg, NULL);
+ }
+ }
+}
+
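
[Editor's note: a typical caller of uvm_page_unbusy() is a pager completing I/O on a batch of pages. A hypothetical sketch of that calling pattern; kernel context and <uvm/uvm.h> are assumed, and example_iodone is not a real UVM function:

	/*
	 * A pager that finished write I/O on a batch of pages marks
	 * them clean and unbusies them in one call.  Locking of the
	 * owning object is the caller's job, per the comments above.
	 */
	static void
	example_iodone(struct vm_page **pgs, int npgs)
	{
		int i;

		for (i = 0; i < npgs; i++)
			if (pgs[i] != NULL)
				pgs[i]->flags |= PG_CLEAN;  /* pageout done */
		uvm_page_unbusy(pgs, npgs);
	}
]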
#if defined(UVM_PAGE_TRKOWN)
/*
* uvm_page_own: set or release page ownership