author    Thordur I. Bjornsson <thib@cvs.openbsd.org>  2010-06-27 03:03:50 +0000
committer Thordur I. Bjornsson <thib@cvs.openbsd.org>  2010-06-27 03:03:50 +0000
commit    8a46d785199bcae03d0469a55b99e08fe33263fa (patch)
tree      9b784b445528b5c78da12077fd065ae9030cf6d7 /sys/kern
parent    b109c783d06f4af76ff573fa020fbe9d1d855f7b (diff)
uvm constraints. Add two mandatory MD symbols: uvm_md_constraints,
which contains the constraints for DMA/memory allocation for each
architecture, and dma_constraint, which contains the range of
addresses that are DMA accessible by the system.

This is based on ariane@'s physcontig diff, with lots of bugfixes and
the following additions by myself:

Introduce a new function, pool_set_constraints(), which sets the
address range from which we allocate pages for the pool; this is now
used for the mbuf/mbuf cluster pools to keep them DMA accessible.

The !direct archs no longer stuff pages into the kernel object in
uvm_km_getpage_pla, but rather do a pmap_extract() in uvm_km_putpages.

Tested heavily by myself on i386, amd64 and sparc64. Some tests on
alpha and SGI.

"commit it" beck, art, oga, deraadt
"i like the diff" deraadt
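For context, a minimal sketch of how a port and a subsystem might use
these symbols. The isa_constraint declaration, its 16 MB bound, the
array shape of uvm_md_constraints, and example_restrict_pool() are
illustrative assumptions, not part of this commit; pool_set_constraints()
and struct uvm_constraint_range appear as in the diff below.

#include <sys/param.h>
#include <sys/pool.h>
#include <uvm/uvm_extern.h>

/*
 * MD code (e.g. an arch's machdep): define the mandatory constraint
 * symbols.  The 16 MB ISA bound is a made-up example value, and the
 * NULL-terminated pointer array is an assumed shape for
 * uvm_md_constraints.
 */
struct uvm_constraint_range isa_constraint = { 0x0, 0x00ffffffUL };
struct uvm_constraint_range dma_constraint = { 0x0, (paddr_t)-1 };
struct uvm_constraint_range *uvm_md_constraints[] = {
	&isa_constraint,
	NULL
};

/*
 * MI code: restrict a pool so the pages backing it stay DMA
 * reachable.  example_restrict_pool() is a hypothetical caller.
 */
void
example_restrict_pool(struct pool *pp)
{
	/* One segment; later calls may only tighten the range. */
	pool_set_constraints(pp, &dma_constraint, 1);
}

mbinit() in the uipc_mbuf.c hunk below does exactly this for the mbuf
and cluster pools.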
Diffstat (limited to 'sys/kern')
-rw-r--r--	sys/kern/subr_pool.c	43
-rw-r--r--	sys/kern/uipc_mbuf.c	9
2 files changed, 42 insertions(+), 10 deletions(-)
diff --git a/sys/kern/subr_pool.c b/sys/kern/subr_pool.c
index ba56aaf367e..a8c7771c468 100644
--- a/sys/kern/subr_pool.c
+++ b/sys/kern/subr_pool.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: subr_pool.c,v 1.92 2010/06/17 16:11:20 miod Exp $ */
+/* $OpenBSD: subr_pool.c,v 1.93 2010/06/27 03:03:48 thib Exp $ */
/* $NetBSD: subr_pool.c,v 1.61 2001/09/26 07:14:56 chs Exp $ */
/*-
@@ -94,6 +94,12 @@ struct pool_item {
((pp)->pr_nitems < (pp)->pr_minitems)
/*
+ * Default constraint range for pools, covering the whole
+ * address space.
+ */
+struct uvm_constraint_range pool_full_range = { 0x0, (paddr_t)-1 };
+
+/*
* Every pool gets a unique serial number assigned to it. If this counter
* wraps, we're screwed, but we shouldn't create so many pools anyway.
*/
@@ -393,6 +399,10 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
pool_setipl(&phpool, IPL_HIGH);
}
+ /* pglistalloc/constraint parameters */
+ pp->pr_crange = &pool_full_range;
+ pp->pr_pa_nsegs = 0;
+
/* Insert this into the list of all pools. */
TAILQ_INSERT_HEAD(&pool_head, pp, pr_poollist);
}
@@ -999,6 +1009,21 @@ done:
}
void
+pool_set_constraints(struct pool *pp, struct uvm_constraint_range *range,
+ int nsegs)
+{
+ /*
+ * Subsequent changes to the constraints are only
+ * allowed to make them _more_ strict.
+ */
+ KASSERT(pp->pr_crange->ucr_high >= range->ucr_high &&
+ pp->pr_crange->ucr_low <= range->ucr_low);
+
+ pp->pr_crange = range;
+ pp->pr_pa_nsegs = nsegs;
+}
+
+void
pool_set_ctordtor(struct pool *pp, int (*ctor)(void *, void *, int),
void (*dtor)(void *, void *), void *arg)
{
@@ -1452,15 +1477,15 @@ pool_allocator_free(struct pool *pp, void *v)
void *
pool_page_alloc(struct pool *pp, int flags, int *slowdown)
{
- boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
+ int kfl = (flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT;
- return (uvm_km_getpage(waitok, slowdown));
+ return (uvm_km_getpage_pla(kfl, slowdown, pp->pr_crange->ucr_low,
+ pp->pr_crange->ucr_high, 0, 0));
}
void
pool_page_free(struct pool *pp, void *v)
{
-
uvm_km_putpage(v);
}
@@ -1472,7 +1497,9 @@ pool_large_alloc(struct pool *pp, int flags, int *slowdown)
int s;
s = splvm();
- va = uvm_km_kmemalloc(kmem_map, NULL, pp->pr_alloc->pa_pagesz, kfl);
+ va = uvm_km_kmemalloc_pla(kmem_map, NULL, pp->pr_alloc->pa_pagesz, kfl,
+ pp->pr_crange->ucr_low, pp->pr_crange->ucr_high,
+ 0, 0, pp->pr_pa_nsegs);
splx(s);
return ((void *)va);
@@ -1493,8 +1520,10 @@ pool_large_alloc_ni(struct pool *pp, int flags, int *slowdown)
{
int kfl = (flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT;
- return ((void *)uvm_km_kmemalloc(kernel_map, uvm.kernel_object,
- pp->pr_alloc->pa_pagesz, kfl));
+ return ((void *)uvm_km_kmemalloc_pla(kernel_map, uvm.kernel_object,
+ pp->pr_alloc->pa_pagesz, kfl,
+ pp->pr_crange->ucr_low, pp->pr_crange->ucr_high,
+ 0, 0, pp->pr_pa_nsegs));
}
void
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index a7033c1ddb4..99c1decbb06 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uipc_mbuf.c,v 1.137 2010/06/07 19:47:25 blambert Exp $ */
+/* $OpenBSD: uipc_mbuf.c,v 1.138 2010/06/27 03:03:48 thib Exp $ */
/* $NetBSD: uipc_mbuf.c,v 1.15.4.1 1996/06/13 17:11:44 cgd Exp $ */
/*
@@ -90,6 +90,7 @@
#include <machine/cpu.h>
+#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>
struct mbstat mbstat; /* mbuf stats */
@@ -137,13 +138,15 @@ mbinit(void)
pool_init(&mbpool, MSIZE, 0, 0, 0, "mbpl", NULL);
pool_setlowat(&mbpool, mblowat);
+ pool_set_constraints(&mbpool, &dma_constraint, 1);
for (i = 0; i < nitems(mclsizes); i++) {
snprintf(mclnames[i], sizeof(mclnames[0]), "mcl%dk",
mclsizes[i] >> 10);
- pool_init(&mclpools[i], mclsizes[i], 0, 0, 0, mclnames[i],
- NULL);
+ pool_init(&mclpools[i], mclsizes[i], 0, 0, 0,
+ mclnames[i], NULL);
pool_setlowat(&mclpools[i], mcllowat);
+ pool_set_constraints(&mclpools[i], &dma_constraint, 1);
}
nmbclust_update();