author     Bob Beck <beck@cvs.openbsd.org>   2011-07-06 19:50:39 +0000
committer  Bob Beck <beck@cvs.openbsd.org>   2011-07-06 19:50:39 +0000
commit     fe2320a5e93a9ef965f8f839c7771fb66f471c87
tree       aaf63cc9837845352635f0bb173f5b1928b62261  /sys/uvm/uvm_pmemrange.h
parent     9bc7cbcf91abbd4993165a1fcb14af1eb0068dea
uvm changes for buffer cache improvements.
1) Make the pagedaemon aware of the memory ranges and size of allocations
where memory is being requested, and pass this information on to
bufbackoff(), which will later (not yet) be used to ensure that the
buffer cache gets out of the way in the right area of memory.
Note that this commit does not yet make it *do* that; currently the
buffer cache is all in dma-able memory, so it will simply back off.
(A hedged sketch of this handoff follows the commit message.)
2) Add uvm_pagerealloc_multi - to be used by the buffer cache code
for reallocating pages to particular regions.
much of this work by ariane, with smatterings of me, art, and oga
ok oga@, thib@, ariane@, deraadt@
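Point 1 above changes the shape of the page daemon's handoff to the buffer cache. The sketch below is a hedged illustration of that shape only: the bufbackoff() signature, the ucr_low/ucr_high member names of struct uvm_constraint_range, the stand-in typedefs, and the stub body are all assumptions for the example, not code from this commit.

/*
 * Hedged sketch: how a constrained shortage might be described and
 * handed to the buffer cache.  All names below the commit does not
 * itself show are assumed.
 */
typedef unsigned long paddr_t;	/* stand-ins for the kernel types */
typedef unsigned long psize_t;

struct uvm_constraint_range {
	paddr_t ucr_low;	/* assumed member names */
	paddr_t ucr_high;
};

/* Stub standing in for the real buffer cache hook. */
static long
bufbackoff(struct uvm_constraint_range *range, long size)
{
	(void)range;	/* today the cache ignores the range and just backs off */
	return size;	/* pretend `size` pages were released */
}

/* What the page daemon might do when an allocation in [lo, hi) fails. */
static void
pagedaemon_backoff(paddr_t lo, paddr_t hi, psize_t pages)
{
	struct uvm_constraint_range range = { lo, hi };

	/* Ask the buffer cache to get out of the way in this region. */
	(void)bufbackoff(&range, (long)pages);
}

int
main(void)
{
	pagedaemon_backoff(0x0, 0x1000000, 16);	/* hypothetical dma range */
	return 0;
}

As the commit message says, the range is currently advisory: the cache lives entirely in dma-able memory, so any backoff satisfies it.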
Diffstat (limited to 'sys/uvm/uvm_pmemrange.h')
-rw-r--r--  sys/uvm/uvm_pmemrange.h | 47
1 file changed, 45 insertions, 2 deletions
diff --git a/sys/uvm/uvm_pmemrange.h b/sys/uvm/uvm_pmemrange.h
index fa27540e77d..b80d6a3539d 100644
--- a/sys/uvm/uvm_pmemrange.h
+++ b/sys/uvm/uvm_pmemrange.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: uvm_pmemrange.h,v 1.7 2011/04/03 22:07:37 ariane Exp $	*/
+/*	$OpenBSD: uvm_pmemrange.h,v 1.8 2011/07/06 19:50:38 beck Exp $	*/
 
 /*
  * Copyright (c) 2009 Ariane van der Steldt <ariane@stack.nl>
@@ -59,6 +59,44 @@ struct uvm_pmemrange {
 			/* pmr, sorted by address */
 };
 
+/*
+ * Description of failing memory allocation.
+ *
+ * Two ways new pages can become available:
+ * [1] page daemon drops them (we notice because they are freed)
+ * [2] a process calls free
+ *
+ * The buffer cache and page daemon can decide that they don't have the
+ * ability to make pages available in the requested range. In that case,
+ * the FAIL bit will be set.
+ * XXX There's a possibility that a page is no longer on the queues but
+ * XXX has not yet been freed, or that a page was busy.
+ * XXX Also, wired pages are not considered for paging, so they could
+ * XXX cause a failure that may be recoverable.
+ */
+struct uvm_pmalloc {
+	TAILQ_ENTRY(uvm_pmalloc) pmq;
+
+	/*
+	 * Allocation request parameters.
+	 */
+	struct uvm_constraint_range pm_constraint;
+	psize_t pm_size;
+
+	/*
+	 * State flags.
+	 */
+	int pm_flags;
+};
+
+/*
+ * uvm_pmalloc flags.
+ */
+#define UVM_PMA_LINKED	0x01	/* uvm_pmalloc is on list */
+#define UVM_PMA_BUSY	0x02	/* entry is busy with fpageq unlocked */
+#define UVM_PMA_FAIL	0x10	/* page daemon cannot free pages */
+#define UVM_PMA_FREED	0x20	/* at least one page in the range was freed */
+
 RB_HEAD(uvm_pmemrange_addr, uvm_pmemrange);
 TAILQ_HEAD(uvm_pmemrange_use, uvm_pmemrange);
 
@@ -68,13 +106,18 @@ TAILQ_HEAD(uvm_pmemrange_use, uvm_pmemrange);
 struct uvm_pmr_control {
 	struct uvm_pmemrange_addr addr;
 	struct uvm_pmemrange_use use;
+
+	/* Only changed while fpageq is locked. */
+	TAILQ_HEAD(, uvm_pmalloc) allocs;
 };
 
 void	uvm_pmr_freepages(struct vm_page *, psize_t);
-void	uvm_pmr_freepageq(struct pglist *pgl);
+void	uvm_pmr_freepageq(struct pglist *);
 int	uvm_pmr_getpages(psize_t, paddr_t, paddr_t, paddr_t, paddr_t,
 	    int, int, struct pglist *);
 void	uvm_pmr_init(void);
+int	uvm_wait_pla(paddr_t, paddr_t, paddr_t, int);
+void	uvm_wakeup_pla(paddr_t, psize_t);
 
 #if defined(DDB) || defined(DEBUG)
 int	uvm_pmr_isfree(struct vm_page *pg);
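The header's new pieces fit together as a waiter/waker protocol: a failed allocation request is queued on `allocs` with its constraint range and size, and whoever frees pages flags overlapping requests FREED, while the page daemon flags FAIL when it gives up, per the flag comments above. Below is a minimal user-space sketch of that protocol, not the kernel's actual code: it assumes the ucr_low/ucr_high member names, uses a simplified three-argument wait (the real uvm_wait_pla() prototype takes a fourth int argument), and spins where the kernel would tsleep()/wakeup() under the fpageq lock.

#include <stddef.h>
#include <sys/queue.h>

typedef unsigned long paddr_t;	/* stand-ins for the kernel types */
typedef unsigned long psize_t;

struct uvm_constraint_range {
	paddr_t ucr_low;	/* assumed member names */
	paddr_t ucr_high;
};

struct uvm_pmalloc {
	TAILQ_ENTRY(uvm_pmalloc) pmq;
	struct uvm_constraint_range pm_constraint;
	psize_t pm_size;
	int pm_flags;
};

#define UVM_PMA_LINKED	0x01	/* uvm_pmalloc is on list */
#define UVM_PMA_FAIL	0x10	/* page daemon cannot free pages */
#define UVM_PMA_FREED	0x20	/* at least one page in the range was freed */

static TAILQ_HEAD(, uvm_pmalloc) allocs = TAILQ_HEAD_INITIALIZER(allocs);

/*
 * Waiter side (the shape of uvm_wait_pla): record the failed request,
 * then block until the range sees a free or the page daemon gives up.
 */
static int
wait_pla(paddr_t low, paddr_t high, psize_t size)
{
	struct uvm_pmalloc pma;

	pma.pm_constraint.ucr_low = low;
	pma.pm_constraint.ucr_high = high;
	pma.pm_size = size;
	pma.pm_flags = UVM_PMA_LINKED;
	TAILQ_INSERT_TAIL(&allocs, &pma, pmq);

	while ((pma.pm_flags & (UVM_PMA_FREED | UVM_PMA_FAIL)) == 0)
		;	/* tsleep(&pma, ...) in the kernel */

	TAILQ_REMOVE(&allocs, &pma, pmq);
	pma.pm_flags &= ~UVM_PMA_LINKED;
	return (pma.pm_flags & UVM_PMA_FAIL) ? 1 : 0;
}

/*
 * Waker side (the shape of uvm_wakeup_pla): when pages in
 * [low, low+len) are freed, flag every queued request whose
 * constraint overlaps that range.
 */
static void
wakeup_pla(paddr_t low, psize_t len)
{
	struct uvm_pmalloc *pma;

	TAILQ_FOREACH(pma, &allocs, pmq) {
		if (low < pma->pm_constraint.ucr_high &&
		    low + len > pma->pm_constraint.ucr_low) {
			pma->pm_flags |= UVM_PMA_FREED;
			/* wakeup(pma) in the kernel */
		}
	}
}

Keeping the uvm_pmalloc on the waiter's stack means the failure path allocates nothing, and the "only changed while fpageq is locked" rule on `allocs` is what makes the list manipulation and flag updates race-free in the kernel.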