diff options
author | Owain Ainsworth <oga@cvs.openbsd.org> | 2010-04-22 19:02:56 +0000 |
---|---|---|
committer | Owain Ainsworth <oga@cvs.openbsd.org> | 2010-04-22 19:02:56 +0000 |
commit | 06f07da7e6659ce9de50f80088d0c37b13d4b829 (patch) | |
tree | 61465ef7199c8a6f5432bf287ad9dc234fdaeb79 /sys/uvm/uvm_page.c | |
parent | a9c39c19e55d16c05e07be8dfd52db61ccf53e99 (diff) |
Committing on behalf of ariane@.
recommit pmemrange:
physmem allocator: change the view of free memory from single
free pages to free ranges. Classify memory based on region with
associated use-counter (which is used to construct a priority
list of where to allocate memory).
Based on code from tedu@, help from many.
Usable now that bugs have been found and fixed in most architectures'
pmap.c
ok by everyone who has done a pmap or uvm commit in the last year.
Diffstat (limited to 'sys/uvm/uvm_page.c')
-rw-r--r-- | sys/uvm/uvm_page.c | 197 |
1 file changed, 52 insertions, 145 deletions
diff --git a/sys/uvm/uvm_page.c b/sys/uvm/uvm_page.c index 467e072ff43..d40e6a41abb 100644 --- a/sys/uvm/uvm_page.c +++ b/sys/uvm/uvm_page.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_page.c,v 1.99 2010/04/20 22:05:44 tedu Exp $ */ +/* $OpenBSD: uvm_page.c,v 1.100 2010/04/22 19:02:55 oga Exp $ */ /* $NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $ */ /* @@ -73,7 +73,6 @@ #include <sys/param.h> #include <sys/systm.h> -#include <sys/malloc.h> #include <sys/sched.h> #include <sys/kernel.h> #include <sys/vnode.h> @@ -212,15 +211,12 @@ uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp) * init the page queues and page queue locks */ - for (lcv = 0; lcv < VM_NFREELIST; lcv++) { - for (i = 0; i < PGFL_NQUEUES; i++) - TAILQ_INIT(&uvm.page_free[lcv].pgfl_queues[i]); - } TAILQ_INIT(&uvm.page_active); TAILQ_INIT(&uvm.page_inactive_swp); TAILQ_INIT(&uvm.page_inactive_obj); simple_lock_init(&uvm.pageqlock); mtx_init(&uvm.fpageqlock, IPL_VM); + uvm_pmr_init(); /* * allocate vm_page structures. @@ -271,9 +267,9 @@ uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp) for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) { n = vm_physmem[lcv].end - vm_physmem[lcv].start; if (n > pagecount) { - printf("uvm_page_init: lost %ld page(s) in init\n", + panic("uvm_page_init: lost %ld page(s) in init\n", (long)(n - pagecount)); - panic("uvm_page_init"); /* XXXCDC: shouldn't happen? */ + /* XXXCDC: shouldn't happen? */ /* n = pagecount; */ } @@ -293,10 +289,15 @@ uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp) if (atop(paddr) >= vm_physmem[lcv].avail_start && atop(paddr) <= vm_physmem[lcv].avail_end) { uvmexp.npages++; - /* add page to free pool */ - uvm_pagefree(&vm_physmem[lcv].pgs[i]); } } + + /* + * Add pages to free pool. 
+ */ + uvm_pmr_freepages(&vm_physmem[lcv].pgs[ + vm_physmem[lcv].avail_start - vm_physmem[lcv].start], + vm_physmem[lcv].avail_end - vm_physmem[lcv].avail_start); } /* @@ -651,13 +652,19 @@ uvm_page_physload_flags(paddr_t start, paddr_t end, paddr_t avail_start, } else { #if defined(VM_PHYSSEG_NOADD) panic("uvm_page_physload: tried to add RAM after vm_mem_init"); -#else - uvm_pagefree(&pgs[lcv]); #endif } } } - /* XXXCDC: incomplete: need to update uvmexp.free, what else? */ + + /* + * Add pages to free pool. + */ + if ((flags & PHYSLOAD_DEVICE) == 0) { + uvm_pmr_freepages(&pgs[avail_start - start], + avail_end - avail_start); + } + /* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */ } else { @@ -778,31 +785,21 @@ uvm_shutdown(void) * => if anon != NULL, anon must be locked (to put in anon) * => only one of obj or anon can be non-null * => caller must activate/deactivate page if it is not wired. - * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL. - * => policy decision: it is more important to pull a page off of the - * appropriate priority free list than it is to get a zero'd or - * unknown contents page. This is because we live with the - * consequences of a bad free list decision for the entire - * lifetime of the page, e.g. if the page comes from memory that - * is slower to access. 
*/ struct vm_page * -uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon, - int flags, int strat, int free_list) +uvm_pagealloc(struct uvm_object *obj, voff_t off, struct vm_anon *anon, + int flags) { - int lcv, try1, try2, zeroit = 0; struct vm_page *pg; - struct pglist *freeq; - struct pgfreelist *pgfl; + struct pglist pgl; + int pmr_flags; boolean_t use_reserve; - UVMHIST_FUNC("uvm_pagealloc_strat"); UVMHIST_CALLED(pghist); + UVMHIST_FUNC("uvm_pagealloc"); UVMHIST_CALLED(pghist); KASSERT(obj == NULL || anon == NULL); KASSERT(off == trunc_page(off)); - uvm_lock_fpageq(); - /* * check to see if we need to generate some free pages waking * the pagedaemon. @@ -829,124 +826,39 @@ uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon, (curproc == syncerproc)))) goto fail; -#if PGFL_NQUEUES != 2 -#error uvm_pagealloc_strat needs to be updated -#endif - - /* - * If we want a zero'd page, try the ZEROS queue first, otherwise - * we try the UNKNOWN queue first. - */ - if (flags & UVM_PGA_ZERO) { - try1 = PGFL_ZEROS; - try2 = PGFL_UNKNOWN; - } else { - try1 = PGFL_UNKNOWN; - try2 = PGFL_ZEROS; - } - - UVMHIST_LOG(pghist, "obj=%p off=%lx anon=%p flags=%lx", - obj, (u_long)off, anon, flags); - UVMHIST_LOG(pghist, "strat=%ld free_list=%ld", strat, free_list, 0, 0); - again: - switch (strat) { - case UVM_PGA_STRAT_NORMAL: - /* Check all freelists in descending priority order. */ - for (lcv = 0; lcv < VM_NFREELIST; lcv++) { - pgfl = &uvm.page_free[lcv]; - if ((pg = TAILQ_FIRST((freeq = - &pgfl->pgfl_queues[try1]))) != NULL || - (pg = TAILQ_FIRST((freeq = - &pgfl->pgfl_queues[try2]))) != NULL) - goto gotit; - } - - /* No pages free! */ - goto fail; - - case UVM_PGA_STRAT_ONLY: - case UVM_PGA_STRAT_FALLBACK: - /* Attempt to allocate from the specified free list. 
*/ - KASSERT(free_list >= 0 && free_list < VM_NFREELIST); - pgfl = &uvm.page_free[free_list]; - if ((pg = TAILQ_FIRST((freeq = - &pgfl->pgfl_queues[try1]))) != NULL || - (pg = TAILQ_FIRST((freeq = - &pgfl->pgfl_queues[try2]))) != NULL) - goto gotit; - - /* Fall back, if possible. */ - if (strat == UVM_PGA_STRAT_FALLBACK) { - strat = UVM_PGA_STRAT_NORMAL; - goto again; - } - - /* No pages free! */ + pmr_flags = UVM_PLA_NOWAIT; + if (flags & UVM_PGA_ZERO) + pmr_flags |= UVM_PLA_ZERO; + TAILQ_INIT(&pgl); + if (uvm_pmr_getpages(1, 0, 0, 1, 0, 1, pmr_flags, &pgl) != 0) goto fail; - default: - panic("uvm_pagealloc_strat: bad strat %d", strat); - /* NOTREACHED */ - } - - gotit: - TAILQ_REMOVE(freeq, pg, pageq); - uvmexp.free--; - - /* update zero'd page count */ - if (pg->pg_flags & PG_ZERO) - uvmexp.zeropages--; - - /* - * update allocation statistics and remember if we have to - * zero the page - */ - if (flags & UVM_PGA_ZERO) { - if (pg->pg_flags & PG_ZERO) { - uvmexp.pga_zerohit++; - zeroit = 0; - } else { - uvmexp.pga_zeromiss++; - zeroit = 1; - } - } - - uvm_unlock_fpageq(); /* unlock free page queue */ + pg = TAILQ_FIRST(&pgl); + KASSERT(pg != NULL && TAILQ_NEXT(pg, pageq) == NULL); pg->offset = off; pg->uobject = obj; pg->uanon = anon; KASSERT((pg->pg_flags & PG_DEV) == 0); - pg->pg_flags = PG_BUSY|PG_CLEAN|PG_FAKE; - pg->pg_version++; + atomic_setbits_int(&pg->pg_flags, PG_BUSY|PG_CLEAN|PG_FAKE); + if (flags & UVM_PGA_ZERO) + atomic_clearbits_int(&pg->pg_flags, PG_CLEAN); if (anon) { anon->an_page = pg; atomic_setbits_int(&pg->pg_flags, PQ_ANON); - } else { - if (obj) - uvm_pageinsert(pg); - } + } else if (obj) + uvm_pageinsert(pg); + #if defined(UVM_PAGE_TRKOWN) pg->owner_tag = NULL; #endif UVM_PAGE_OWN(pg, "new alloc"); - if (flags & UVM_PGA_ZERO) { - /* - * A zero'd page is not clean. If we got a page not already - * zero'd, then we have to zero it ourselves. 
- */ - atomic_clearbits_int(&pg->pg_flags, PG_CLEAN); - if (zeroit) - pmap_zero_page(pg); - } - UVMHIST_LOG(pghist, "allocated pg %p/%lx", pg, (u_long)VM_PAGE_TO_PHYS(pg), 0, 0); return(pg); fail: - uvm_unlock_fpageq(); UVMHIST_LOG(pghist, "failed!", 0, 0, 0, 0); return (NULL); } @@ -1030,7 +942,7 @@ uvm_pagefree(struct vm_page *pg) if (saved_loan_count) atomic_clearbits_int(&pg->pg_flags, PG_CLEAN); uvm_pageremove(pg); - + /* * if our page was on loan, then we just lost control over it * (in fact, if it was loaned to an anon, the anon may have @@ -1085,38 +997,31 @@ uvm_pagefree(struct vm_page *pg) } if (pg->uanon) { pg->uanon->an_page = NULL; -#ifdef UBC - uvm_pgcnt_anon--; -#endif + pg->uanon = NULL; + atomic_clearbits_int(&pg->pg_flags, PQ_ANON); } /* - * and put on free queue + * Clean page state bits. */ + atomic_clearbits_int(&pg->pg_flags, PQ_AOBJ); /* XXX: find culprit */ + atomic_clearbits_int(&pg->pg_flags, PQ_ENCRYPT| + PG_ZERO|PG_FAKE|PG_BUSY|PG_RELEASED|PG_CLEAN|PG_CLEANCHK); - atomic_clearbits_int(&pg->pg_flags, PG_ZERO); + /* + * and put on free queue + */ - uvm_lock_fpageq(); -#ifdef PAGEFASTRECYCLE - TAILQ_INSERT_HEAD(&uvm.page_free[ - uvm_page_lookup_freelist(pg)].pgfl_queues[PGFL_UNKNOWN], pg, pageq); -#else - TAILQ_INSERT_TAIL(&uvm.page_free[ - uvm_page_lookup_freelist(pg)].pgfl_queues[PGFL_UNKNOWN], pg, pageq); -#endif - atomic_clearbits_int(&pg->pg_flags, PQ_MASK); - atomic_setbits_int(&pg->pg_flags, PQ_FREE); #ifdef DEBUG pg->uobject = (void *)0xdeadbeef; pg->offset = 0xdeadbeef; pg->uanon = (void *)0xdeadbeef; #endif - uvmexp.free++; + + uvm_pmr_freepages(pg, 1); if (uvmexp.zeropages < UVM_PAGEZERO_TARGET) uvm.page_idle_zero = vm_page_zero_enable; - - uvm_unlock_fpageq(); } /* @@ -1216,6 +1121,7 @@ uvm_page_own(struct vm_page *pg, char *tag) void uvm_pageidlezero(void) { +#if 0 /* disabled: need new code */ struct vm_page *pg; struct pgfreelist *pgfl; int free_list; @@ -1282,6 +1188,7 @@ uvm_pageidlezero(void) uvmexp.zeropages++; 
uvm_unlock_fpageq(); } while (curcpu_is_idle()); +#endif /* 0 */ } /* |