Diffstat (limited to 'sys/uvm/uvm_page.c')
-rw-r--r--   sys/uvm/uvm_page.c   173
1 file changed, 125 insertions(+), 48 deletions(-)
diff --git a/sys/uvm/uvm_page.c b/sys/uvm/uvm_page.c
index 2cf45c11375..afd841ff331 100644
--- a/sys/uvm/uvm_page.c
+++ b/sys/uvm/uvm_page.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_page.c,v 1.89 2009/06/16 00:11:29 oga Exp $ */
+/* $OpenBSD: uvm_page.c,v 1.90 2009/06/16 16:42:41 ariane Exp $ */
/* $NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $ */
/*
@@ -159,11 +159,10 @@ uvm_pageinsert(struct vm_page *pg)
KASSERT((pg->pg_flags & PG_TABLED) == 0);
mtx_enter(&uvm.hashlock);
buck = &uvm.page_hash[uvm_pagehash(pg->uobject,pg->offset)];
- TAILQ_INSERT_TAIL(buck, pg, fq.queues.hashq); /* put in hash */
+ TAILQ_INSERT_TAIL(buck, pg, hashq); /* put in hash */
mtx_leave(&uvm.hashlock);
- TAILQ_INSERT_TAIL(&pg->uobject->memq, pg,
- fq.queues.listq); /* put in object */
+ TAILQ_INSERT_TAIL(&pg->uobject->memq, pg, listq); /* put in object */
atomic_setbits_int(&pg->pg_flags, PG_TABLED);
pg->uobject->uo_npages++;
}
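
The insert path goes back to naming the TAILQ_ENTRY members directly (hashq for the hash bucket, listq for the owning object's memq) instead of reaching through the fq.queues union that the reverted allocator had introduced. A minimal userland sketch of the same double-linkage pattern, with a made-up struct page standing in for the kernel's struct vm_page:

#include <sys/queue.h>
#include <stdio.h>

struct page {
        int off;                        /* stand-in for pg->offset */
        TAILQ_ENTRY(page) hashq;        /* hash-bucket linkage */
        TAILQ_ENTRY(page) listq;        /* per-object linkage */
};
TAILQ_HEAD(pglist, page);

int
main(void)
{
        struct pglist bucket, memq;
        struct page pg = { .off = 42 };

        TAILQ_INIT(&bucket);
        TAILQ_INIT(&memq);
        TAILQ_INSERT_TAIL(&bucket, &pg, hashq); /* put in hash */
        TAILQ_INSERT_TAIL(&memq, &pg, listq);   /* put in object */
        printf("bucket head offset: %d\n", TAILQ_FIRST(&bucket)->off);
        return (0);
}

Because each list has its own entry member, a page can sit on its hash chain and its object's page list at the same time.
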
@@ -184,7 +183,7 @@ uvm_pageremove(struct vm_page *pg)
KASSERT(pg->pg_flags & PG_TABLED);
mtx_enter(&uvm.hashlock);
buck = &uvm.page_hash[uvm_pagehash(pg->uobject,pg->offset)];
- TAILQ_REMOVE(buck, pg, fq.queues.hashq);
+ TAILQ_REMOVE(buck, pg, hashq);
mtx_leave(&uvm.hashlock);
#ifdef UBC
@@ -194,7 +193,7 @@ uvm_pageremove(struct vm_page *pg)
#endif
/* object should be locked */
- TAILQ_REMOVE(&pg->uobject->memq, pg, fq.queues.listq);
+ TAILQ_REMOVE(&pg->uobject->memq, pg, listq);
atomic_clearbits_int(&pg->pg_flags, PG_TABLED|PQ_AOBJ);
pg->uobject->uo_npages--;
@@ -227,12 +226,15 @@ uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
* init the page queues and page queue locks
*/
+ for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
+ for (i = 0; i < PGFL_NQUEUES; i++)
+ TAILQ_INIT(&uvm.page_free[lcv].pgfl_queues[i]);
+ }
TAILQ_INIT(&uvm.page_active);
TAILQ_INIT(&uvm.page_inactive_swp);
TAILQ_INIT(&uvm.page_inactive_obj);
simple_lock_init(&uvm.pageqlock);
mtx_init(&uvm.fpageqlock, IPL_VM);
- uvm_pmr_init();
/*
* init the <obj,offset> => <page> hash table. for now
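
With uvm_pmr_init() gone, uvm_page_init() must again set up the two-dimensional free-queue array: one struct pgfreelist per hardware freelist, each holding PGFL_NQUEUES queues. A self-contained sketch of that shape; the constants here are illustrative stand-ins (VM_NFREELIST is machine-dependent in the real kernel):

#include <sys/queue.h>

#define VM_NFREELIST    2       /* illustrative; MD constant in UVM */
#define PGFL_NQUEUES    2       /* PGFL_UNKNOWN and PGFL_ZEROS */

struct page;
TAILQ_HEAD(pglist, page);

struct pgfreelist {
        struct pglist pgfl_queues[PGFL_NQUEUES];
};

static struct pgfreelist page_free[VM_NFREELIST];

int
main(void)
{
        int lcv, i;

        for (lcv = 0; lcv < VM_NFREELIST; lcv++)
                for (i = 0; i < PGFL_NQUEUES; i++)
                        TAILQ_INIT(&page_free[lcv].pgfl_queues[i]);
        return (0);
}
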
@@ -317,13 +319,10 @@ uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
if (atop(paddr) >= vm_physmem[lcv].avail_start &&
atop(paddr) <= vm_physmem[lcv].avail_end) {
uvmexp.npages++;
+ /* add page to free pool */
+ uvm_pagefree(&vm_physmem[lcv].pgs[i]);
}
}
-
- /* add pages to free pool */
- uvm_pmr_freepages(&vm_physmem[lcv].pgs[
- vm_physmem[lcv].avail_start - vm_physmem[lcv].start],
- vm_physmem[lcv].avail_end - vm_physmem[lcv].avail_start);
}
/*
@@ -812,10 +811,10 @@ uvm_page_rehash(void)
/* ... and rehash */
for (lcv = 0 ; lcv < oldcount ; lcv++) {
while ((pg = TAILQ_FIRST(&oldbuckets[lcv])) != NULL) {
- TAILQ_REMOVE(&oldbuckets[lcv], pg, fq.queues.hashq);
+ TAILQ_REMOVE(&oldbuckets[lcv], pg, hashq);
TAILQ_INSERT_TAIL(
&uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)],
- pg, fq.queues.hashq);
+ pg, hashq);
}
}
mtx_leave(&uvm.hashlock);
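
uvm_page_rehash() drains each old bucket from the head and re-inserts every page into whichever new bucket its hash selects. The same drain-and-reinsert idiom in a compilable sketch; the HASH macro and key field are toy stand-ins for uvm_pagehash() and the real <uobject,offset> pair:

#include <sys/queue.h>

struct page {
        unsigned long key;              /* stand-in for <uobject,offset> */
        TAILQ_ENTRY(page) hashq;
};
TAILQ_HEAD(pglist, page);

/* toy hash; n must be a power of two, as UVM's bucket counts are */
#define HASH(k, n)      ((k) & ((unsigned long)(n) - 1))

void
rehash(struct pglist *oldb, int oldn, struct pglist *newb, int newn)
{
        struct page *pg;
        int lcv;

        for (lcv = 0; lcv < oldn; lcv++) {
                while ((pg = TAILQ_FIRST(&oldb[lcv])) != NULL) {
                        TAILQ_REMOVE(&oldb[lcv], pg, hashq);
                        TAILQ_INSERT_TAIL(&newb[HASH(pg->key, newn)],
                            pg, hashq);
                }
        }
}
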
@@ -893,15 +892,18 @@ struct vm_page *
uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
int flags, int strat, int free_list)
{
- struct pglist pgl;
- int pmr_flags;
+ int lcv, try1, try2, zeroit = 0;
struct vm_page *pg;
+ struct pglist *freeq;
+ struct pgfreelist *pgfl;
boolean_t use_reserve;
UVMHIST_FUNC("uvm_pagealloc_strat"); UVMHIST_CALLED(pghist);
KASSERT(obj == NULL || anon == NULL);
KASSERT(off == trunc_page(off));
+ uvm_lock_fpageq();
+
/*
* check to see if we need to generate some free pages, waking
* the pagedaemon.
@@ -928,20 +930,95 @@ uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
(curproc == syncerproc))))
goto fail;
- pmr_flags = UVM_PLA_NOWAIT;
- if (flags & UVM_PGA_ZERO)
- pmr_flags |= UVM_PLA_ZERO;
- TAILQ_INIT(&pgl);
- if (uvm_pmr_getpages(1, 0, 0, 1, 0, 1, pmr_flags, &pgl) != 0)
+#if PGFL_NQUEUES != 2
+#error uvm_pagealloc_strat needs to be updated
+#endif
+
+ /*
+ * If we want a zero'd page, try the ZEROS queue first, otherwise
+ * we try the UNKNOWN queue first.
+ */
+ if (flags & UVM_PGA_ZERO) {
+ try1 = PGFL_ZEROS;
+ try2 = PGFL_UNKNOWN;
+ } else {
+ try1 = PGFL_UNKNOWN;
+ try2 = PGFL_ZEROS;
+ }
+
+ UVMHIST_LOG(pghist, "obj=%p off=%lx anon=%p flags=%lx",
+ obj, (u_long)off, anon, flags);
+ UVMHIST_LOG(pghist, "strat=%ld free_list=%ld", strat, free_list, 0, 0);
+ again:
+ switch (strat) {
+ case UVM_PGA_STRAT_NORMAL:
+ /* Check all freelists in descending priority order. */
+ for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
+ pgfl = &uvm.page_free[lcv];
+ if ((pg = TAILQ_FIRST((freeq =
+ &pgfl->pgfl_queues[try1]))) != NULL ||
+ (pg = TAILQ_FIRST((freeq =
+ &pgfl->pgfl_queues[try2]))) != NULL)
+ goto gotit;
+ }
+
+ /* No pages free! */
+ goto fail;
+
+ case UVM_PGA_STRAT_ONLY:
+ case UVM_PGA_STRAT_FALLBACK:
+ /* Attempt to allocate from the specified free list. */
+ KASSERT(free_list >= 0 && free_list < VM_NFREELIST);
+ pgfl = &uvm.page_free[free_list];
+ if ((pg = TAILQ_FIRST((freeq =
+ &pgfl->pgfl_queues[try1]))) != NULL ||
+ (pg = TAILQ_FIRST((freeq =
+ &pgfl->pgfl_queues[try2]))) != NULL)
+ goto gotit;
+
+ /* Fall back, if possible. */
+ if (strat == UVM_PGA_STRAT_FALLBACK) {
+ strat = UVM_PGA_STRAT_NORMAL;
+ goto again;
+ }
+
+ /* No pages free! */
goto fail;
- pg = TAILQ_FIRST(&pgl);
- KASSERT(pg != NULL);
- KASSERT(TAILQ_NEXT(pg, pageq) == NULL);
+
+ default:
+ panic("uvm_pagealloc_strat: bad strat %d", strat);
+ /* NOTREACHED */
+ }
+
+ gotit:
+ TAILQ_REMOVE(freeq, pg, pageq);
+ uvmexp.free--;
+
+ /* update zero'd page count */
+ if (pg->pg_flags & PG_ZERO)
+ uvmexp.zeropages--;
+
+ /*
+ * update allocation statistics and remember if we have to
+ * zero the page
+ */
+ if (flags & UVM_PGA_ZERO) {
+ if (pg->pg_flags & PG_ZERO) {
+ uvmexp.pga_zerohit++;
+ zeroit = 0;
+ } else {
+ uvmexp.pga_zeromiss++;
+ zeroit = 1;
+ }
+ }
+
+ uvm_unlock_fpageq(); /* unlock free page queue */
pg->offset = off;
pg->uobject = obj;
pg->uanon = anon;
pg->pg_flags = PG_BUSY|PG_CLEAN|PG_FAKE;
+ pg->pg_version++;
if (anon) {
anon->an_page = pg;
atomic_setbits_int(&pg->pg_flags, PQ_ANON);
@@ -957,11 +1034,22 @@ uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
#endif
UVM_PAGE_OWN(pg, "new alloc");
+ if (flags & UVM_PGA_ZERO) {
+ /*
+ * A zero'd page is not clean. If we got a page not already
+ * zero'd, then we have to zero it ourselves.
+ */
+ atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
+ if (zeroit)
+ pmap_zero_page(pg);
+ }
+
UVMHIST_LOG(pghist, "allocated pg %p/%lx", pg,
(u_long)VM_PAGE_TO_PHYS(pg), 0, 0);
return(pg);
fail:
+ uvm_unlock_fpageq();
UVMHIST_LOG(pghist, "failed!", 0, 0, 0, 0);
return (NULL);
}
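
The restored allocator prefers whichever queue already matches the request: PGFL_ZEROS first under UVM_PGA_ZERO, PGFL_UNKNOWN first otherwise, and UVM_PGA_STRAT_FALLBACK retries the whole search with the normal strategy when the named freelist comes up empty. A compact sketch of just the two-queue preference, with toy types; the queue indices follow the usual UVM numbering but are assumptions here:

#include <sys/queue.h>

#define PGFL_UNKNOWN    0       /* contents unknown */
#define PGFL_ZEROS      1       /* known zero-filled */
#define PGFL_NQUEUES    2

struct page {
        TAILQ_ENTRY(page) pageq;
};
TAILQ_HEAD(pglist, page);

struct pgfreelist {
        struct pglist pgfl_queues[PGFL_NQUEUES];
};

/* take the head of the preferred queue, else of the other one */
struct page *
pick_page(struct pgfreelist *pgfl, int want_zero)
{
        int try1 = want_zero ? PGFL_ZEROS : PGFL_UNKNOWN;
        int try2 = want_zero ? PGFL_UNKNOWN : PGFL_ZEROS;
        struct page *pg;

        if ((pg = TAILQ_FIRST(&pgfl->pgfl_queues[try1])) == NULL)
                pg = TAILQ_FIRST(&pgfl->pgfl_queues[try2]);
        return (pg);    /* NULL: this freelist is empty */
}

A caller that asked for UVM_PGA_ZERO but only got an UNKNOWN page still has to zero it itself, which is what the zeroit flag in the hunk above tracks.
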
@@ -1012,7 +1100,6 @@ uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
void
uvm_pagefree(struct vm_page *pg)
{
- struct pglist pgl;
int saved_loan_count = pg->loan_count;
UVMHIST_FUNC("uvm_pagefree"); UVMHIST_CALLED(pghist);
@@ -1108,35 +1195,27 @@ uvm_pagefree(struct vm_page *pg)
}
/*
- * Clean page state bits.
- */
- atomic_clearbits_int(&pg->pg_flags,
- PG_ZERO|PG_FAKE|PG_BUSY|PG_RELEASED|PG_CLEAN|PG_CLEANCHK);
- /*
- * Pmap flag cleaning.
- * XXX: Shouldn't pmap do this?
+ * and put on free queue
*/
- atomic_clearbits_int(&pg->pg_flags,
- PG_PMAP0|PG_PMAP1|PG_PMAP2|PG_PMAP3);
-
-#if defined(DIAGNOSTIC)
- if (pg->pg_flags != 0) {
- panic("uvm_pagefree: expected page %p pg_flags to be 0\n"
- "uvm_pagefree: instead of pg->pg_flags = %x\n",
- VM_PAGE_TO_PHYS(pg), pg->pg_flags);
- }
-#endif
+
+ atomic_clearbits_int(&pg->pg_flags, PG_ZERO);
+
+ uvm_lock_fpageq();
+ TAILQ_INSERT_TAIL(&uvm.page_free[
+ uvm_page_lookup_freelist(pg)].pgfl_queues[PGFL_UNKNOWN], pg, pageq);
+ atomic_clearbits_int(&pg->pg_flags, PQ_MASK);
+ atomic_setbits_int(&pg->pg_flags, PQ_FREE);
#ifdef DEBUG
pg->uobject = (void *)0xdeadbeef;
pg->offset = 0xdeadbeef;
pg->uanon = (void *)0xdeadbeef;
#endif
- TAILQ_INIT(&pgl);
- TAILQ_INSERT_HEAD(&pgl, pg, pageq);
- uvm_pmr_freepageq(&pgl);
+ uvmexp.free++;
if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
uvm.page_idle_zero = vm_page_zero_enable;
+
+ uvm_unlock_fpageq();
}
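
On free, the page loses PG_ZERO and is queued on PGFL_UNKNOWN: nothing guarantees its contents anymore, and only the idle zeroer later promotes pages to PGFL_ZEROS. A sketch of that path with toy types and illustrative flag values; the kernel additionally holds fpageqlock and uses atomic flag operations:

#include <sys/queue.h>

#define PG_ZERO         0x0100  /* illustrative values, not UVM's */
#define PQ_FREE         0x0001

struct page {
        int pg_flags;
        TAILQ_ENTRY(page) pageq;
};
TAILQ_HEAD(pglist, page);

struct pglist unknown_q = TAILQ_HEAD_INITIALIZER(unknown_q);
int nfree;

void
page_free(struct page *pg)
{
        pg->pg_flags &= ~PG_ZERO;       /* contents no longer known-zero */
        pg->pg_flags |= PQ_FREE;
        TAILQ_INSERT_TAIL(&unknown_q, pg, pageq);
        nfree++;
}
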
/*
@@ -1229,7 +1308,6 @@ uvm_page_own(struct vm_page *pg, char *tag)
void
uvm_pageidlezero(void)
{
-#if 0 /* Disabled for now. */
struct vm_page *pg;
struct pgfreelist *pgfl;
int free_list;
@@ -1296,7 +1374,6 @@ uvm_pageidlezero(void)
uvmexp.zeropages++;
uvm_unlock_fpageq();
} while (curcpu_is_idle());
-#endif /* 0 */
}
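
Dropping the #if 0 turns uvm_pageidlezero() back on: while the CPU is otherwise idle, pages are pulled off a free queue, zeroed, and parked on the ZEROS queue so a later UVM_PGA_ZERO allocation can score a pga_zerohit. The rough shape of that loop, with cpu_is_idle() standing in for curcpu_is_idle() and memset() for pmap_zero_page():

#include <sys/queue.h>
#include <string.h>

#define PAGE_SZ         4096    /* illustrative page size */
#define PG_ZERO         0x0100  /* illustrative flag value */

struct page {
        int pg_flags;
        char data[PAGE_SZ];     /* stand-in for the physical frame */
        TAILQ_ENTRY(page) pageq;
};
TAILQ_HEAD(pglist, page);

struct pglist unknown_q = TAILQ_HEAD_INITIALIZER(unknown_q);
struct pglist zeros_q = TAILQ_HEAD_INITIALIZER(zeros_q);

int cpu_is_idle(void);  /* stand-in, supplied elsewhere */

void
idle_zero(void)
{
        struct page *pg;

        while (cpu_is_idle()) {
                if ((pg = TAILQ_FIRST(&unknown_q)) == NULL)
                        break;                  /* nothing left to zero */
                TAILQ_REMOVE(&unknown_q, pg, pageq);
                memset(pg->data, 0, PAGE_SZ);
                pg->pg_flags |= PG_ZERO;
                TAILQ_INSERT_TAIL(&zeros_q, pg, pageq);
        }
}
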
/*
@@ -1399,7 +1476,7 @@ uvm_pagelookup(struct uvm_object *obj, voff_t off)
mtx_enter(&uvm.hashlock);
buck = &uvm.page_hash[uvm_pagehash(obj,off)];
- TAILQ_FOREACH(pg, buck, fq.queues.hashq) {
+ TAILQ_FOREACH(pg, buck, hashq) {
if (pg->uobject == obj && pg->offset == off) {
break;
}
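
uvm_pagelookup() hashes <obj,off> to a single bucket and walks the chain under hashlock; with a sanely sized table the chains stay short. The same walk in a userland sketch, with key standing in for the <uobject,offset> pair and HASH for uvm_pagehash():

#include <sys/queue.h>

struct page {
        unsigned long key;      /* stand-in for <uobject,offset> */
        TAILQ_ENTRY(page) hashq;
};
TAILQ_HEAD(pglist, page);

#define HASH(k, n)      ((k) & ((unsigned long)(n) - 1))

struct page *
page_lookup(struct pglist *buckets, int n, unsigned long key)
{
        struct page *pg;

        TAILQ_FOREACH(pg, &buckets[HASH(key, n)], hashq) {
                if (pg->key == key)
                        break;
        }
        return (pg);    /* NULL when the bucket has no match */
}

TAILQ_FOREACH leaves pg NULL when the loop runs off the end of the chain, so the break/fall-through covers both the hit and the miss case.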