author | Thordur I. Bjornsson <thib@cvs.openbsd.org> | 2010-06-27 03:03:50 +0000
---|---|---
committer | Thordur I. Bjornsson <thib@cvs.openbsd.org> | 2010-06-27 03:03:50 +0000
commit | 8a46d785199bcae03d0469a55b99e08fe33263fa (patch) |
tree | 9b784b445528b5c78da12077fd065ae9030cf6d7 /sys/uvm/uvm_pmemrange.c |
parent | b109c783d06f4af76ff573fa020fbe9d1d855f7b (diff) |
uvm constraints. Add two mandatory MD symbols, uvm_md_constraints
which contains the constraints for DMA/memory allocation for each
architecture, and dma_constraints which contains the range of addresses
that are DMA accessible by the system.
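
For illustration, an MD definition of these symbols might look roughly like the sketch below. The ucr_low/ucr_high field names and the NULL-terminated uvm_md_constraints array mirror the usage in the diff further down; the struct layout, the isa_constraints symbol and the concrete address ranges are assumptions, not any port's real values.

```c
#include <sys/types.h>		/* paddr_t */

/*
 * Illustrative sketch only: roughly what an architecture's constraint
 * definitions could look like.  Field names and the NULL-terminated array
 * follow the usage in the diff below; the struct layout and the address
 * ranges are made-up example values.
 */
struct uvm_constraint_range {
	paddr_t	ucr_low;	/* lowest usable physical address */
	paddr_t	ucr_high;	/* highest usable physical address */
};

/* E.g. a 32-bit DMA window and a 16MB ISA window (example values). */
struct uvm_constraint_range dma_constraints = { 0x0, 0xffffffffUL };
struct uvm_constraint_range isa_constraints = { 0x0, 0x00ffffffUL };

/* NULL-terminated, as the new uvm_pmr_init() loop below expects. */
struct uvm_constraint_range *uvm_md_constraints[] = {
	&isa_constraints,
	&dma_constraints,
	NULL
};
```
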
This is based on ariane@'s physcontig diff, with lots of bugfixes and
the following additions by myself:
Introduce a new function, pool_set_constraints(), which sets the address
range from which we allocate pages for a pool; this is now used
for the mbuf/mbuf cluster pools to keep them DMA accessible.
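
A rough usage sketch of the new hook follows; the three-argument prototype shown here (pool, constraint range, number of physical segments) is an assumption made for illustration and may not match the committed interface exactly.

```c
/*
 * Hypothetical usage sketch, not the committed mbuf code: restrict a pool's
 * page allocations to the DMA-reachable range.  The third argument (number
 * of physical segments) is an assumed part of the prototype.
 */
#include <sys/param.h>
#include <sys/pool.h>
#include <uvm/uvm_extern.h>

extern struct uvm_constraint_range dma_constraints;
extern struct pool mclpool_example;	/* stand-in for an mbuf cluster pool */

void
example_constrain_mclpool(void)
{
	/* From now on, pages backing this pool come from dma_constraints. */
	pool_set_constraints(&mclpool_example, &dma_constraints, 1);
}
```
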
The !direct archs no longer stuff pages into the kernel object in
uvm_km_getpage_pla but rather do a pmap_extract() in uvm_km_putpages.
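
The idea for the non-direct-map case is to recover the physical page from the mapping at free time instead of keeping it attached to the kernel object. A minimal illustrative sketch (not the actual uvm_km_putpages() code):

```c
#include <sys/param.h>
#include <sys/systm.h>
#include <uvm/uvm.h>

/*
 * Illustrative only: on an architecture without a direct map, look up the
 * vm_page backing a kernel virtual address when the mapping is torn down.
 */
struct vm_page *
example_km_va_to_page(vaddr_t va)
{
	paddr_t pa;

	/* Ask the pmap which physical address backs this mapping. */
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		panic("example_km_va_to_page: unmapped kernel va");

	/* Translate the physical address back to its vm_page. */
	return (PHYS_TO_VM_PAGE(pa));
}
```
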
Tested heavily by myself on i386, amd64 and sparc64. Some tests on
alpha and SGI.
"commit it" beck, art, oga, deraadt
"i like the diff" deraadt
Diffstat (limited to 'sys/uvm/uvm_pmemrange.c')
-rw-r--r-- | sys/uvm/uvm_pmemrange.c | 61
1 file changed, 37 insertions, 24 deletions
diff --git a/sys/uvm/uvm_pmemrange.c b/sys/uvm/uvm_pmemrange.c
index 54d3f6960d6..d4c6bd87789 100644
--- a/sys/uvm/uvm_pmemrange.c
+++ b/sys/uvm/uvm_pmemrange.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: uvm_pmemrange.c,v 1.14 2010/06/23 09:36:03 thib Exp $	*/
+/*	$OpenBSD: uvm_pmemrange.c,v 1.15 2010/06/27 03:03:49 thib Exp $	*/
 
 /*
  * Copyright (c) 2009, 2010 Ariane van der Steldt <ariane@stack.nl>
@@ -109,7 +109,6 @@ void	uvm_pmr_assertvalid(struct uvm_pmemrange *pmr);
 #define uvm_pmr_assertvalid(pmr)	do {} while (0)
 #endif
 
-
 int			 uvm_pmr_get1page(psize_t, int, struct pglist *,
 			    paddr_t, paddr_t);
 
@@ -1305,7 +1304,26 @@ uvm_pmr_split(paddr_t pageno)
 	KASSERT(pmr->low < pageno);
 	KASSERT(pmr->high > pageno);
 
+	/*
+	 * uvm_pmr_allocpmr() calls into malloc() which in turn calls into
+	 * uvm_kmemalloc which calls into pmemrange, making the locking
+	 * a bit hard, so we just race!
+	 */
+	uvm_unlock_fpageq();
 	drain = uvm_pmr_allocpmr();
+	uvm_lock_fpageq();
+	pmr = uvm_pmemrange_find(pageno);
+	if (pmr == NULL || !(pmr->low < pageno)) {
+		/*
+		 * We lost the race since someone else ran this or a related
+		 * function, however this should be triggered very rarely so
+		 * we just leak the pmr.
+		 */
+		printf("uvm_pmr_split: lost one pmr\n");
+		uvm_unlock_fpageq();
+		return;
+	}
+
 	drain->low = pageno;
 	drain->high = pmr->high;
 	drain->use = pmr->use;
@@ -1379,37 +1397,29 @@ void
 uvm_pmr_use_inc(paddr_t low, paddr_t high)
 {
 	struct uvm_pmemrange *pmr;
+	paddr_t sz;
 
-	/*
-	 * If high+1 == 0 and low == 0, then you are increasing use
-	 * of the whole address space, which won't make any difference.
-	 * Skip in that case.
-	 */
+	/* pmr uses page numbers, translate low and high. */
 	high++;
-	if (high == 0 && low == 0)
-		return;
-
-	/*
-	 * pmr uses page numbers, translate low and high.
-	 */
-	low = atop(round_page(low));
 	high = atop(trunc_page(high));
+	low = atop(round_page(low));
 	uvm_pmr_split(low);
 	uvm_pmr_split(high);
 
 	uvm_lock_fpageq();
-
 	/* Increase use count on segments in range. */
 	RB_FOREACH(pmr, uvm_pmemrange_addr, &uvm.pmr_control.addr) {
 		if (PMR_IS_SUBRANGE_OF(pmr->low, pmr->high, low, high)) {
 			TAILQ_REMOVE(&uvm.pmr_control.use, pmr, pmr_use);
 			pmr->use++;
+			sz += pmr->high - pmr->low;
 			uvm_pmemrange_use_insert(&uvm.pmr_control.use, pmr);
 		}
 		uvm_pmr_assertvalid(pmr);
 	}
-
 	uvm_unlock_fpageq();
+
+	KASSERT(sz >= high - low);
 }
 
 /*
@@ -1420,19 +1430,21 @@ uvm_pmr_use_inc(paddr_t low, paddr_t high)
  * (And if called in between, you're dead.)
  */
 struct uvm_pmemrange *
-uvm_pmr_allocpmr()
+uvm_pmr_allocpmr(void)
 {
 	struct uvm_pmemrange *nw;
 	int i;
 
+	/* We're only ever hitting the !uvm.page_init_done case for now. */
 	if (!uvm.page_init_done) {
 		nw = (struct uvm_pmemrange *)
 		    uvm_pageboot_alloc(sizeof(struct uvm_pmemrange));
-		bzero(nw, sizeof(struct uvm_pmemrange));
 	} else {
 		nw = malloc(sizeof(struct uvm_pmemrange),
-		    M_VMMAP, M_NOWAIT | M_ZERO);
+		    M_VMMAP, M_NOWAIT);
 	}
+	KASSERT(nw != NULL);
+	bzero(nw, sizeof(struct uvm_pmemrange));
 	RB_INIT(&nw->addr);
 	for (i = 0; i < UVM_PMR_MEMTYPE_MAX; i++) {
 		RB_INIT(&nw->size[i]);
@@ -1441,8 +1453,6 @@ uvm_pmr_allocpmr()
 	return nw;
 }
 
-static const struct uvm_io_ranges uvm_io_ranges[] = UVM_IO_RANGES;
-
 /*
  * Initialization of pmr.
  * Called by uvm_page_init.
@@ -1458,15 +1468,18 @@ uvm_pmr_init(void)
 	TAILQ_INIT(&uvm.pmr_control.use);
 	RB_INIT(&uvm.pmr_control.addr);
 
+	/* By default, one range for the entire address space. */
 	new_pmr = uvm_pmr_allocpmr();
 	new_pmr->low = 0;
-	new_pmr->high = atop((paddr_t)-1) + 1;
+	new_pmr->high = atop((paddr_t)-1) + 1;
 
 	RB_INSERT(uvm_pmemrange_addr, &uvm.pmr_control.addr, new_pmr);
 	uvm_pmemrange_use_insert(&uvm.pmr_control.use, new_pmr);
 
-	for (i = 0; i < nitems(uvm_io_ranges); i++)
-		uvm_pmr_use_inc(uvm_io_ranges[i].low, uvm_io_ranges[i].high);
+	for (i = 0; uvm_md_constraints[i] != NULL; i++) {
+		uvm_pmr_use_inc(uvm_md_constraints[i]->ucr_low,
+		    uvm_md_constraints[i]->ucr_high);
+	}
 }
 
 /*
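
The uvm_pmr_split() hunk above is the interesting locking change: the free-page-queue lock is dropped around the allocation (which can recurse back into pmemrange via malloc), re-taken, and the lookup redone; if another thread won the race, the freshly allocated pmr is simply leaked. The userland sketch below shows the same unlock/allocate/relock/revalidate shape with hypothetical stand-ins (struct range, the fpageq mutex, split_range()); it is not UVM code.

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct range { unsigned long low, high; };

static pthread_mutex_t fpageq = PTHREAD_MUTEX_INITIALIZER;
static struct range *ranges[16];
static int nranges;

/* Find the range strictly containing pageno; caller must hold fpageq. */
static struct range *
find_range(unsigned long pageno)
{
	int i;

	for (i = 0; i < nranges; i++)
		if (ranges[i]->low < pageno && pageno < ranges[i]->high)
			return ranges[i];
	return NULL;
}

/* Split the range containing pageno into [low, pageno) and [pageno, high). */
static void
split_range(unsigned long pageno)
{
	struct range *pmr, *drain;

	pthread_mutex_lock(&fpageq);
	pmr = find_range(pageno);
	if (pmr == NULL) {
		pthread_mutex_unlock(&fpageq);
		return;
	}

	/*
	 * The allocator may recurse into this subsystem (as malloc() can in
	 * the kernel), so drop the lock around it and accept the race.
	 */
	pthread_mutex_unlock(&fpageq);
	drain = malloc(sizeof(*drain));
	pthread_mutex_lock(&fpageq);

	/* Revalidate: another thread may have split this range meanwhile. */
	pmr = find_range(pageno);
	if (drain == NULL || pmr == NULL) {
		/* Lost the race (or allocation failed); rare, so just bail. */
		pthread_mutex_unlock(&fpageq);
		free(drain);
		return;
	}

	drain->low = pageno;
	drain->high = pmr->high;
	pmr->high = pageno;
	ranges[nranges++] = drain;	/* toy insert; the kernel uses RB trees */
	pthread_mutex_unlock(&fpageq);
}

int
main(void)
{
	static struct range initial = { 0, 1024 };

	ranges[nranges++] = &initial;
	split_range(512);
	printf("[%lu,%lu) [%lu,%lu)\n",
	    ranges[0]->low, ranges[0]->high, ranges[1]->low, ranges[1]->high);
	return 0;
}
```

Build with `cc -pthread`; the demo splits a single [0,1024) range at page 512, mirroring how uvm_pmr_split() carves a pmemrange at a constraint boundary.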