author     Artur Grabowski <art@cvs.openbsd.org>  2011-04-06 15:52:14 +0000
committer  Artur Grabowski <art@cvs.openbsd.org>  2011-04-06 15:52:14 +0000
commit     ace3f5313d3ed5313fbfc9c4add15781b29c344a (patch)
tree       c9f5c164c2700b33d111d6518c40d515b3dcd2d1 /sys
parent     253d341f5752763e0a967d48e5f20b575864ae90 (diff)
Backout the uvm_km_getpage -> km_alloc conversion. Weird things are happening
and we aren't sure what's causing them.
Shouted oks by many before I even built a kernel with the diff.
Diffstat (limited to 'sys')
-rw-r--r--  sys/kern/dma_alloc.c  |  4
-rw-r--r--  sys/kern/subr_pool.c  | 56
-rw-r--r--  sys/kern/uipc_mbuf.c  |  6
-rw-r--r--  sys/sys/pool.h        |  8
-rw-r--r--  sys/uvm/uvm_extern.h  | 10
-rw-r--r--  sys/uvm/uvm_km.c      | 83
-rw-r--r--  sys/uvm/uvm_map.c     |  7
7 files changed, 135 insertions, 39 deletions
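
For orientation, the backout swaps between two kernel allocator interfaces. The sketch below contrasts their shapes as they appear in the diff that follows; it is a rough summary, not a complete reference, and the mode structures are shown only by name, not by their full definitions.

/*
 * Rough shape of the interface being backed out: physical-address
 * constraints and wait/slowdown behaviour travel in mode structures.
 */
void	*km_alloc(size_t sz, const struct kmem_va_mode *kv,
	    const struct kmem_pa_mode *kp, const struct kmem_dyn_mode *kd);
void	 km_free(void *v, size_t sz, const struct kmem_va_mode *kv,
	    const struct kmem_pa_mode *kp);

/*
 * Rough shape of the interface being restored: constraints are spelled
 * out as explicit low/high/alignment/boundary arguments, and a slowdown
 * hint comes back through a pointer.
 */
void	*uvm_km_getpage_pla(int flags, int *slowdown, paddr_t low,
	    paddr_t high, paddr_t alignment, paddr_t boundary);
void	 uvm_km_putpage(void *v);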
diff --git a/sys/kern/dma_alloc.c b/sys/kern/dma_alloc.c
index 84c82e517c7..55989077def 100644
--- a/sys/kern/dma_alloc.c
+++ b/sys/kern/dma_alloc.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: dma_alloc.c,v 1.6 2011/04/05 01:28:05 art Exp $ */
+/* $OpenBSD: dma_alloc.c,v 1.7 2011/04/06 15:52:13 art Exp $ */
 /*
  * Copyright (c) 2010 Theo de Raadt <deraadt@openbsd.org>
  *
@@ -37,7 +37,7 @@ dma_alloc_init(void)
 		    1 << (i + DMA_BUCKET_OFFSET));
 		pool_init(&dmapools[i], 1 << (i + DMA_BUCKET_OFFSET), 0,
 		    0, 0, dmanames[i], NULL);
-		pool_set_constraints(&dmapools[i], &kp_dma);
+		pool_set_constraints(&dmapools[i], &dma_constraint, 1);
 		pool_setipl(&dmapools[i], IPL_VM);
 		/* XXX need pool_setlowat(&dmapools[i], dmalowat); */
 	}
diff --git a/sys/kern/subr_pool.c b/sys/kern/subr_pool.c
index c6189ffe431..da4937c12d1 100644
--- a/sys/kern/subr_pool.c
+++ b/sys/kern/subr_pool.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: subr_pool.c,v 1.102 2011/04/05 01:28:05 art Exp $ */
+/* $OpenBSD: subr_pool.c,v 1.103 2011/04/06 15:52:13 art Exp $ */
 /* $NetBSD: subr_pool.c,v 1.61 2001/09/26 07:14:56 chs Exp $ */
 
 /*-
@@ -401,7 +401,8 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
 	}
 
 	/* pglistalloc/constraint parameters */
-	pp->pr_crange = &kp_dirty;
+	pp->pr_crange = &no_constraint;
+	pp->pr_pa_nsegs = 0;
 
 	/* Insert this into the list of all pools. */
 	TAILQ_INSERT_HEAD(&pool_head, pp, pr_poollist);
@@ -1012,9 +1013,18 @@ done:
 }
 
 void
-pool_set_constraints(struct pool *pp, struct kmem_pa_mode *mode)
+pool_set_constraints(struct pool *pp, struct uvm_constraint_range *range,
+    int nsegs)
 {
-	pp->pr_crange = mode;
+	/*
+	 * Subsequent changes to the constrictions are only
+	 * allowed to make them _more_ strict.
+	 */
+	KASSERT(pp->pr_crange->ucr_high >= range->ucr_high &&
+	    pp->pr_crange->ucr_low <= range->ucr_low);
+
+	pp->pr_crange = range;
+	pp->pr_pa_nsegs = nsegs;
 }
 
 void
@@ -1485,36 +1495,32 @@ pool_allocator_free(struct pool *pp, void *v)
 void *
 pool_page_alloc(struct pool *pp, int flags, int *slowdown)
 {
-	struct kmem_dyn_mode kd = KMEM_DYN_INITIALIZER;
-
-	kd.kd_waitok = (flags & PR_WAITOK);
-	kd.kd_slowdown = slowdown;
+	int kfl = (flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT;
 
-	return (km_alloc(PAGE_SIZE, &kv_page, pp->pr_crange, &kd));
+	return (uvm_km_getpage_pla(kfl, slowdown, pp->pr_crange->ucr_low,
+	    pp->pr_crange->ucr_high, 0, 0));
 }
 
 void
 pool_page_free(struct pool *pp, void *v)
 {
-	km_free(v, PAGE_SIZE, &kv_page, pp->pr_crange);
+	uvm_km_putpage(v);
 }
 
 void *
 pool_large_alloc(struct pool *pp, int flags, int *slowdown)
 {
-	struct kmem_dyn_mode kd = KMEM_DYN_INITIALIZER;
-	void *v;
+	int kfl = (flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT;
+	vaddr_t va;
 	int s;
 
-	kd.kd_waitok = (flags & PR_WAITOK);
-	kd.kd_slowdown = slowdown;
-
 	s = splvm();
-	v = km_alloc(pp->pr_alloc->pa_pagesz, &kv_intrsafe, pp->pr_crange,
-	    &kd);
+	va = uvm_km_kmemalloc_pla(kmem_map, NULL, pp->pr_alloc->pa_pagesz, 0,
+	    kfl, pp->pr_crange->ucr_low, pp->pr_crange->ucr_high,
+	    0, 0, pp->pr_pa_nsegs);
 	splx(s);
 
-	return (v);
+	return ((void *)va);
 }
 
 void
@@ -1523,23 +1529,23 @@ pool_large_free(struct pool *pp, void *v)
 	int s;
 
 	s = splvm();
-	km_free(v, pp->pr_alloc->pa_pagesz, &kv_intrsafe, pp->pr_crange);
+	uvm_km_free(kmem_map, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
 	splx(s);
 }
 
 void *
 pool_large_alloc_ni(struct pool *pp, int flags, int *slowdown)
 {
-	struct kmem_dyn_mode kd = KMEM_DYN_INITIALIZER;
-
-	kd.kd_waitok = (flags & PR_WAITOK);
-	kd.kd_slowdown = slowdown;
+	int kfl = (flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT;
 
-	return (km_alloc(pp->pr_alloc->pa_pagesz, &kv_any, pp->pr_crange, &kd));
+	return ((void *)uvm_km_kmemalloc_pla(kernel_map, uvm.kernel_object,
+	    pp->pr_alloc->pa_pagesz, 0, kfl,
+	    pp->pr_crange->ucr_low, pp->pr_crange->ucr_high,
+	    0, 0, pp->pr_pa_nsegs));
 }
 
 void
 pool_large_free_ni(struct pool *pp, void *v)
 {
-	km_free(v, pp->pr_alloc->pa_pagesz, &kv_any, pp->pr_crange);
+	uvm_km_free(kernel_map, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
 }
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index af6894429de..4f5800d9d50 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uipc_mbuf.c,v 1.152 2011/04/05 11:48:28 blambert Exp $ */
+/* $OpenBSD: uipc_mbuf.c,v 1.153 2011/04/06 15:52:13 art Exp $ */
 /* $NetBSD: uipc_mbuf.c,v 1.15.4.1 1996/06/13 17:11:44 cgd Exp $ */
 
 /*
@@ -138,7 +138,7 @@ mbinit(void)
 	int i;
 
 	pool_init(&mbpool, MSIZE, 0, 0, 0, "mbpl", NULL);
-	pool_set_constraints(&mbpool, &kp_dma);
+	pool_set_constraints(&mbpool, &dma_constraint, 1);
 	pool_setlowat(&mbpool, mblowat);
 
 	for (i = 0; i < nitems(mclsizes); i++) {
@@ -146,7 +146,7 @@ mbinit(void)
 		    mclsizes[i] >> 10);
 		pool_init(&mclpools[i], mclsizes[i], 0, 0, 0,
 		    mclnames[i], NULL);
-		pool_set_constraints(&mclpools[i], &kp_dma);
+		pool_set_constraints(&mclpools[i], &dma_constraint, 1);
 		pool_setlowat(&mclpools[i], mcllowat);
 	}
 
diff --git a/sys/sys/pool.h b/sys/sys/pool.h
index 13f40008d85..a9723cacec3 100644
--- a/sys/sys/pool.h
+++ b/sys/sys/pool.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pool.h,v 1.38 2011/04/05 01:28:05 art Exp $ */
+/* $OpenBSD: pool.h,v 1.39 2011/04/06 15:52:13 art Exp $ */
 /* $NetBSD: pool.h,v 1.27 2001/06/06 22:00:17 rafal Exp $ */
 
 /*-
@@ -132,7 +132,8 @@ struct pool {
 	unsigned long	pr_nidle;	/* # of idle pages */
 
 	/* Physical memory configuration. */
-	struct kmem_pa_mode *pr_crange;
+	struct uvm_constraint_range *pr_crange;
+	int pr_pa_nsegs;
 };
 
 #ifdef _KERNEL
@@ -147,7 +148,8 @@
 void		pool_setlowat(struct pool *, int);
 void		pool_sethiwat(struct pool *, int);
 int		pool_sethardlimit(struct pool *, u_int, const char *, int);
 struct uvm_constraint_range; /* XXX */
-void		pool_set_constraints(struct pool *, struct kmem_pa_mode *mode);
+void		pool_set_constraints(struct pool *,
+		    struct uvm_constraint_range *, int);
 void		pool_set_ctordtor(struct pool *, int (*)(void *, void *, int),
 		    void(*)(void *, void *), void *);
diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h
index cd57aaf39eb..f116e11e6f7 100644
--- a/sys/uvm/uvm_extern.h
+++ b/sys/uvm/uvm_extern.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_extern.h,v 1.93 2011/04/05 01:28:05 art Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.94 2011/04/06 15:52:13 art Exp $ */
 /* $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $ */
 
 /*
@@ -533,6 +533,14 @@
 vaddr_t			uvm_km_valloc_wait(vm_map_t, vsize_t);
 vaddr_t			uvm_km_valloc_align(struct vm_map *, vsize_t,
 				vsize_t, int);
 vaddr_t			uvm_km_valloc_prefer_wait(vm_map_t, vsize_t, voff_t);
+void			*uvm_km_getpage_pla(boolean_t, int *, paddr_t, paddr_t,
+			    paddr_t, paddr_t);
+/* Wrapper around old function prototype. */
+#define uvm_km_getpage(waitok, slowdown) \
+	uvm_km_getpage_pla(((waitok) ? 0 : UVM_KMF_NOWAIT), (slowdown), \
+	    (paddr_t)0, (paddr_t)-1, 0, 0)
+
+void			uvm_km_putpage(void *);
 struct vm_map		*uvm_km_suballoc(vm_map_t, vaddr_t *, vaddr_t *,
 				vsize_t, int,
diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c
index 38590e2624c..759feace21f 100644
--- a/sys/uvm/uvm_km.c
+++ b/sys/uvm/uvm_km.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_km.c,v 1.92 2011/04/05 01:28:05 art Exp $ */
+/* $OpenBSD: uvm_km.c,v 1.93 2011/04/06 15:52:13 art Exp $ */
 /* $NetBSD: uvm_km.c,v 1.42 2001/01/14 02:10:01 thorpej Exp $ */
 
 /*
@@ -811,7 +811,88 @@ uvm_km_thread(void *arg)
 		}
 	}
 }
+#endif
+
+void *
+uvm_km_getpage_pla(int flags, int *slowdown, paddr_t low, paddr_t high,
+    paddr_t alignment, paddr_t boundary)
+{
+	struct pglist pgl;
+	int pla_flags;
+	struct vm_page *pg;
+	vaddr_t va;
+
+	*slowdown = 0;
+	pla_flags = (flags & UVM_KMF_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
+	if (flags & UVM_KMF_ZERO)
+		pla_flags |= UVM_PLA_ZERO;
+	TAILQ_INIT(&pgl);
+	if (uvm_pglistalloc(PAGE_SIZE, low, high, alignment, boundary, &pgl,
+	    1, pla_flags) != 0)
+		return NULL;
+	pg = TAILQ_FIRST(&pgl);
+	KASSERT(pg != NULL && TAILQ_NEXT(pg, pageq) == NULL);
+	TAILQ_REMOVE(&pgl, pg, pageq);
+
+#ifdef __HAVE_PMAP_DIRECT
+	va = pmap_map_direct(pg);
+	if (__predict_false(va == 0))
+		uvm_pagefree(pg);
+
+#else	/* !__HAVE_PMAP_DIRECT */
+	mtx_enter(&uvm_km_pages.mtx);
+	while (uvm_km_pages.free == 0) {
+		if (flags & UVM_KMF_NOWAIT) {
+			mtx_leave(&uvm_km_pages.mtx);
+			uvm_pagefree(pg);
+			return NULL;
+		}
+		msleep(&uvm_km_pages.free, &uvm_km_pages.mtx, PVM, "getpage",
+		    0);
+	}
+
+	va = uvm_km_pages.page[--uvm_km_pages.free];
+	if (uvm_km_pages.free < uvm_km_pages.lowat &&
+	    curproc != uvm_km_pages.km_proc) {
+		*slowdown = 1;
+		wakeup(&uvm_km_pages.km_proc);
+	}
+	mtx_leave(&uvm_km_pages.mtx);
+
+	atomic_setbits_int(&pg->pg_flags, PG_FAKE);
+	UVM_PAGE_OWN(pg, NULL);
+
+	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), UVM_PROT_RW);
+	pmap_update(kernel_map->pmap);
+
+#endif	/* !__HAVE_PMAP_DIRECT */
+	return ((void *)va);
+}
+
+void
+uvm_km_putpage(void *v)
+{
+#ifdef __HAVE_PMAP_DIRECT
+	vaddr_t va = (vaddr_t)v;
+	struct vm_page *pg;
+
+	pg = pmap_unmap_direct(va);
+
+	uvm_pagefree(pg);
+#else	/* !__HAVE_PMAP_DIRECT */
+	struct uvm_km_free_page *fp = v;
+
+	mtx_enter(&uvm_km_pages.mtx);
+	fp->next = uvm_km_pages.freelist;
+	uvm_km_pages.freelist = fp;
+	if (uvm_km_pages.freelistlen++ > 16)
+		wakeup(&uvm_km_pages.km_proc);
+	mtx_leave(&uvm_km_pages.mtx);
+#endif	/* !__HAVE_PMAP_DIRECT */
+}
+
+#ifndef __HAVE_PMAP_DIRECT
 struct uvm_km_free_page *
 uvm_km_doputpage(struct uvm_km_free_page *fp)
 {
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index e5bdfd94b37..17e5e9ef966 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map.c,v 1.132 2011/04/05 01:28:05 art Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.133 2011/04/06 15:52:13 art Exp $ */
 /* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
 
 /*
@@ -396,7 +396,7 @@ uvm_mapent_alloc(struct vm_map *map, int flags)
 {
 	struct vm_map_entry *me, *ne;
 	int s, i;
-	int pool_flags;
+	int slowdown, pool_flags;
 	UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);
 
 	pool_flags = PR_WAITOK;
@@ -408,8 +408,7 @@ uvm_mapent_alloc(struct vm_map *map, int flags)
 		simple_lock(&uvm.kentry_lock);
 		me = uvm.kentry_free;
 		if (me == NULL) {
-			ne = km_alloc(PAGE_SIZE, &kv_page, &kp_dirty,
-			    &kd_nowait);
+			ne = uvm_km_getpage(0, &slowdown);
 			if (ne == NULL)
 				panic("uvm_mapent_alloc: cannot allocate map "
 				    "entry");
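
As a usage note, the pool-facing half of the backout restores the constraint-range interface seen in the dma_alloc.c and uipc_mbuf.c hunks above. A minimal sketch of a caller, with a hypothetical pool ("mypool"/"mypl") standing in for the real ones:

struct pool mypool;	/* hypothetical pool, for illustration only */

pool_init(&mypool, MSIZE, 0, 0, 0, "mypl", NULL);
/*
 * Restrict the pool's pages to DMA-reachable physical memory, in at
 * most one segment; per the KASSERT in pool_set_constraints(), later
 * calls may only make the range stricter.
 */
pool_set_constraints(&mypool, &dma_constraint, 1);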