author	Artur Grabowski <art@cvs.openbsd.org>	2011-04-18 19:23:47 +0000
committer	Artur Grabowski <art@cvs.openbsd.org>	2011-04-18 19:23:47 +0000
commit	35bb11f41141ac829e82402f997915ccc3ca7b4f (patch)
tree	8c24eccc7319e4d098540fdef1a17e24087e117f
parent	cd3d27d3d50cb6afa83080b4d9e04162c631351c (diff)
Put back the change of pool and malloc into the new km_alloc(9) api.
The problems during the hackathon were not caused by this (most likely).
prodded by deraadt@ and beck@
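
[For context: the km_alloc(9) interface this commit converts to takes an allocation size plus three mode descriptors — a virtual-address mode (kv_*), a physical-address mode (kp_*) and a dynamic mode (kd_*). A minimal sketch of a matched allocate/free pair, using only the prototypes and mode constants declared in the diff below; the wrapper names are hypothetical and not part of the commit:]

/*
 * Sketch only: example_dma_page()/example_dma_page_free() are
 * hypothetical wrappers. The km_alloc()/km_free() prototypes and the
 * kv_page, kp_dma_zero and kd_waitok modes are the ones declared in
 * uvm_extern.h in this commit.
 */
#include <sys/param.h>
#include <uvm/uvm_extern.h>

void *
example_dma_page(void)
{
	/* One zeroed, DMA-reachable page; kd_waitok may sleep for memory. */
	return (km_alloc(PAGE_SIZE, &kv_page, &kp_dma_zero, &kd_waitok));
}

void
example_dma_page_free(void *v)
{
	/* The free side repeats the size and va/pa modes of the allocation. */
	km_free(v, PAGE_SIZE, &kv_page, &kp_dma_zero);
}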
-rw-r--r--	sys/kern/dma_alloc.c	|   4
-rw-r--r--	sys/kern/subr_pool.c	|  56
-rw-r--r--	sys/kern/uipc_mbuf.c	|   6
-rw-r--r--	sys/sys/pool.h	|   7
-rw-r--r--	sys/uvm/uvm_extern.h	|  49
-rw-r--r--	sys/uvm/uvm_km.c	| 114
-rw-r--r--	sys/uvm/uvm_map.c	|   7
7 files changed, 75 insertions, 168 deletions
diff --git a/sys/kern/dma_alloc.c b/sys/kern/dma_alloc.c
index 55989077def..a38a48f7d2a 100644
--- a/sys/kern/dma_alloc.c
+++ b/sys/kern/dma_alloc.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: dma_alloc.c,v 1.7 2011/04/06 15:52:13 art Exp $ */
+/* $OpenBSD: dma_alloc.c,v 1.8 2011/04/18 19:23:46 art Exp $ */
 /*
  * Copyright (c) 2010 Theo de Raadt <deraadt@openbsd.org>
  *
@@ -37,7 +37,7 @@ dma_alloc_init(void)
 		    1 << (i + DMA_BUCKET_OFFSET));
 		pool_init(&dmapools[i], 1 << (i + DMA_BUCKET_OFFSET), 0, 0, 0,
 		    dmanames[i], NULL);
-		pool_set_constraints(&dmapools[i], &dma_constraint, 1);
+		pool_set_constraints(&dmapools[i], &kp_dma);
 		pool_setipl(&dmapools[i], IPL_VM);
 		/* XXX need pool_setlowat(&dmapools[i], dmalowat); */
 	}
diff --git a/sys/kern/subr_pool.c b/sys/kern/subr_pool.c
index da4937c12d1..fcc74a92fd6 100644
--- a/sys/kern/subr_pool.c
+++ b/sys/kern/subr_pool.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: subr_pool.c,v 1.103 2011/04/06 15:52:13 art Exp $ */
+/* $OpenBSD: subr_pool.c,v 1.104 2011/04/18 19:23:46 art Exp $ */
 /* $NetBSD: subr_pool.c,v 1.61 2001/09/26 07:14:56 chs Exp $ */
 
 /*-
@@ -401,8 +401,7 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
 	}
 
 	/* pglistalloc/constraint parameters */
-	pp->pr_crange = &no_constraint;
-	pp->pr_pa_nsegs = 0;
+	pp->pr_crange = &kp_dirty;
 
 	/* Insert this into the list of all pools. */
 	TAILQ_INSERT_HEAD(&pool_head, pp, pr_poollist);
@@ -1013,18 +1012,9 @@ done:
 }
 
 void
-pool_set_constraints(struct pool *pp, struct uvm_constraint_range *range,
-    int nsegs)
+pool_set_constraints(struct pool *pp, const struct kmem_pa_mode *mode)
 {
-	/*
-	 * Subsequent changes to the constrictions are only
-	 * allowed to make them _more_ strict.
-	 */
-	KASSERT(pp->pr_crange->ucr_high >= range->ucr_high &&
-	    pp->pr_crange->ucr_low <= range->ucr_low);
-
-	pp->pr_crange = range;
-	pp->pr_pa_nsegs = nsegs;
+	pp->pr_crange = mode;
 }
 
 void
@@ -1495,32 +1485,36 @@ pool_allocator_free(struct pool *pp, void *v)
 void *
 pool_page_alloc(struct pool *pp, int flags, int *slowdown)
 {
-	int kfl = (flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT;
+	struct kmem_dyn_mode kd = KMEM_DYN_INITIALIZER;
+
+	kd.kd_waitok = (flags & PR_WAITOK);
+	kd.kd_slowdown = slowdown;
 
-	return (uvm_km_getpage_pla(kfl, slowdown, pp->pr_crange->ucr_low,
-	    pp->pr_crange->ucr_high, 0, 0));
+	return (km_alloc(PAGE_SIZE, &kv_page, pp->pr_crange, &kd));
 }
 
 void
 pool_page_free(struct pool *pp, void *v)
 {
-	uvm_km_putpage(v);
+	km_free(v, PAGE_SIZE, &kv_page, pp->pr_crange);
 }
 
 void *
 pool_large_alloc(struct pool *pp, int flags, int *slowdown)
 {
-	int kfl = (flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT;
-	vaddr_t va;
+	struct kmem_dyn_mode kd = KMEM_DYN_INITIALIZER;
+	void *v;
 	int s;
 
+	kd.kd_waitok = (flags & PR_WAITOK);
+	kd.kd_slowdown = slowdown;
+
 	s = splvm();
-	va = uvm_km_kmemalloc_pla(kmem_map, NULL, pp->pr_alloc->pa_pagesz, 0,
-	    kfl, pp->pr_crange->ucr_low, pp->pr_crange->ucr_high,
-	    0, 0, pp->pr_pa_nsegs);
+	v = km_alloc(pp->pr_alloc->pa_pagesz, &kv_intrsafe, pp->pr_crange,
+	    &kd);
 	splx(s);
 
-	return ((void *)va);
+	return (v);
 }
 
 void
@@ -1529,23 +1523,23 @@ pool_large_free(struct pool *pp, void *v)
 	int s;
 
 	s = splvm();
-	uvm_km_free(kmem_map, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
+	km_free(v, pp->pr_alloc->pa_pagesz, &kv_intrsafe, pp->pr_crange);
 	splx(s);
 }
 
 void *
 pool_large_alloc_ni(struct pool *pp, int flags, int *slowdown)
 {
-	int kfl = (flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT;
+	struct kmem_dyn_mode kd = KMEM_DYN_INITIALIZER;
+
+	kd.kd_waitok = (flags & PR_WAITOK);
+	kd.kd_slowdown = slowdown;
 
-	return ((void *)uvm_km_kmemalloc_pla(kernel_map, uvm.kernel_object,
-	    pp->pr_alloc->pa_pagesz, 0, kfl,
-	    pp->pr_crange->ucr_low, pp->pr_crange->ucr_high,
-	    0, 0, pp->pr_pa_nsegs));
+	return (km_alloc(pp->pr_alloc->pa_pagesz, &kv_any, pp->pr_crange, &kd));
 }
 
 void
 pool_large_free_ni(struct pool *pp, void *v)
 {
-	uvm_km_free(kernel_map, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
+	km_free(v, pp->pr_alloc->pa_pagesz, &kv_any, pp->pr_crange);
 }
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index 6e5be38f154..221109171a9 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uipc_mbuf.c,v 1.155 2011/04/11 13:10:13 claudio Exp $ */
+/* $OpenBSD: uipc_mbuf.c,v 1.156 2011/04/18 19:23:46 art Exp $ */
 /* $NetBSD: uipc_mbuf.c,v 1.15.4.1 1996/06/13 17:11:44 cgd Exp $ */
 
 /*
@@ -141,7 +141,7 @@ mbinit(void)
 	int i;
 
 	pool_init(&mbpool, MSIZE, 0, 0, 0, "mbpl", NULL);
-	pool_set_constraints(&mbpool, &dma_constraint, 1);
+	pool_set_constraints(&mbpool, &kp_dma);
 	pool_setlowat(&mbpool, mblowat);
 
 	for (i = 0; i < nitems(mclsizes); i++) {
@@ -149,7 +149,7 @@ mbinit(void)
 		    mclsizes[i] >> 10);
 		pool_init(&mclpools[i], mclsizes[i], 0, 0, 0,
 		    mclnames[i], NULL);
-		pool_set_constraints(&mclpools[i], &dma_constraint, 1);
+		pool_set_constraints(&mclpools[i], &kp_dma);
 		pool_setlowat(&mclpools[i], mcllowat);
 	}
diff --git a/sys/sys/pool.h b/sys/sys/pool.h
index a9723cacec3..79619f12a23 100644
--- a/sys/sys/pool.h
+++ b/sys/sys/pool.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pool.h,v 1.39 2011/04/06 15:52:13 art Exp $ */
+/* $OpenBSD: pool.h,v 1.40 2011/04/18 19:23:46 art Exp $ */
 /* $NetBSD: pool.h,v 1.27 2001/06/06 22:00:17 rafal Exp $ */
 
 /*-
@@ -132,8 +132,7 @@ struct pool {
 	unsigned long	pr_nidle;	/* # of idle pages */
 
 	/* Physical memory configuration. */
-	struct uvm_constraint_range *pr_crange;
-	int pr_pa_nsegs;
+	const struct kmem_pa_mode *pr_crange;
 };
 
 #ifdef _KERNEL
@@ -149,7 +148,7 @@ void		pool_sethiwat(struct pool *, int);
 int		pool_sethardlimit(struct pool *, u_int, const char *, int);
 struct uvm_constraint_range; /* XXX */
 void		pool_set_constraints(struct pool *,
-		    struct uvm_constraint_range *, int);
+		    const struct kmem_pa_mode *mode);
 void		pool_set_ctordtor(struct pool *, int (*)(void *, void *, int),
 		    void(*)(void *, void *), void *);
diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h
index f116e11e6f7..77d0522bd14 100644
--- a/sys/uvm/uvm_extern.h
+++ b/sys/uvm/uvm_extern.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_extern.h,v 1.94 2011/04/06 15:52:13 art Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.95 2011/04/18 19:23:46 art Exp $ */
 /* $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $ */
 
 /*
@@ -533,14 +533,6 @@ vaddr_t			uvm_km_valloc_wait(vm_map_t, vsize_t);
 vaddr_t			uvm_km_valloc_align(struct vm_map *, vsize_t, vsize_t, int);
 vaddr_t			uvm_km_valloc_prefer_wait(vm_map_t, vsize_t, voff_t);
-void			*uvm_km_getpage_pla(boolean_t, int *, paddr_t, paddr_t,
-			    paddr_t, paddr_t);
-/* Wrapper around old function prototype. */
-#define uvm_km_getpage(waitok, slowdown) \
-	uvm_km_getpage_pla(((waitok) ? 0 : UVM_KMF_NOWAIT), (slowdown), \
-	    (paddr_t)0, (paddr_t)-1, 0, 0)
-
-void			uvm_km_putpage(void *);
 
 struct vm_map		*uvm_km_suballoc(vm_map_t, vaddr_t *, vaddr_t *,
 			    vsize_t, int,
@@ -617,25 +609,26 @@ struct kmem_dyn_mode {
  * The exception is kv_page which needs to wait relatively often.
  * All kv_ except kv_intrsafe will potentially sleep.
  */
-extern struct kmem_va_mode kv_any;
-extern struct kmem_va_mode kv_intrsafe;
-extern struct kmem_va_mode kv_page;
-
-extern struct kmem_pa_mode kp_dirty;
-extern struct kmem_pa_mode kp_zero;
-extern struct kmem_pa_mode kp_dma;
-extern struct kmem_pa_mode kp_dma_zero;
-extern struct kmem_pa_mode kp_pageable;
-extern struct kmem_pa_mode kp_none;
-
-extern struct kmem_dyn_mode kd_waitok;
-extern struct kmem_dyn_mode kd_nowait;
-extern struct kmem_dyn_mode kd_trylock;
-
-
-void *km_alloc(size_t, struct kmem_va_mode *, struct kmem_pa_mode *,
-    struct kmem_dyn_mode *);
-void km_free(void *, size_t, struct kmem_va_mode *, struct kmem_pa_mode *);
+extern const struct kmem_va_mode kv_any;
+extern const struct kmem_va_mode kv_intrsafe;
+extern const struct kmem_va_mode kv_page;
+
+extern const struct kmem_pa_mode kp_dirty;
+extern const struct kmem_pa_mode kp_zero;
+extern const struct kmem_pa_mode kp_dma;
+extern const struct kmem_pa_mode kp_dma_zero;
+extern const struct kmem_pa_mode kp_pageable;
+extern const struct kmem_pa_mode kp_none;
+
+extern const struct kmem_dyn_mode kd_waitok;
+extern const struct kmem_dyn_mode kd_nowait;
+extern const struct kmem_dyn_mode kd_trylock;
+
+
+void *km_alloc(size_t, const struct kmem_va_mode *, const struct kmem_pa_mode *,
+    const struct kmem_dyn_mode *);
+void km_free(void *, size_t, const struct kmem_va_mode *,
+    const struct kmem_pa_mode *);
 
 /* uvm_map.c */
 #define uvm_map(_m, _a, _sz, _u, _f, _al, _fl) uvm_map_p(_m, _a, _sz, _u, _f, _al, _fl, 0)
diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c
index f128fe21eb1..4d5c29c40e9 100644
--- a/sys/uvm/uvm_km.c
+++ b/sys/uvm/uvm_km.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_km.c,v 1.96 2011/04/15 23:04:19 deraadt Exp $ */
+/* $OpenBSD: uvm_km.c,v 1.97 2011/04/18 19:23:46 art Exp $ */
 /* $NetBSD: uvm_km.c,v 1.42 2001/01/14 02:10:01 thorpej Exp $ */
 
 /*
@@ -811,88 +811,7 @@ uvm_km_thread(void *arg)
 		}
 	}
 }
-#endif
-
-void *
-uvm_km_getpage_pla(int flags, int *slowdown, paddr_t low, paddr_t high,
-    paddr_t alignment, paddr_t boundary)
-{
-	struct pglist pgl;
-	int pla_flags;
-	struct vm_page *pg;
-	vaddr_t va;
-
-	*slowdown = 0;
-	pla_flags = (flags & UVM_KMF_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
-	if (flags & UVM_KMF_ZERO)
-		pla_flags |= UVM_PLA_ZERO;
-	TAILQ_INIT(&pgl);
-	if (uvm_pglistalloc(PAGE_SIZE, low, high, alignment, boundary, &pgl,
-	    1, pla_flags) != 0)
-		return NULL;
-	pg = TAILQ_FIRST(&pgl);
-	KASSERT(pg != NULL && TAILQ_NEXT(pg, pageq) == NULL);
-	TAILQ_REMOVE(&pgl, pg, pageq);
-
-#ifdef __HAVE_PMAP_DIRECT
-	va = pmap_map_direct(pg);
-	if (__predict_false(va == 0))
-		uvm_pagefree(pg);
-
-#else	/* !__HAVE_PMAP_DIRECT */
-	mtx_enter(&uvm_km_pages.mtx);
-	while (uvm_km_pages.free == 0) {
-		if (flags & UVM_KMF_NOWAIT) {
-			mtx_leave(&uvm_km_pages.mtx);
-			uvm_pagefree(pg);
-			return NULL;
-		}
-		msleep(&uvm_km_pages.free, &uvm_km_pages.mtx, PVM, "getpage",
-		    0);
-	}
-
-	va = uvm_km_pages.page[--uvm_km_pages.free];
-	if (uvm_km_pages.free < uvm_km_pages.lowat &&
-	    curproc != uvm_km_pages.km_proc) {
-		*slowdown = 1;
-		wakeup(&uvm_km_pages.km_proc);
-	}
-	mtx_leave(&uvm_km_pages.mtx);
-
-
-	atomic_setbits_int(&pg->pg_flags, PG_FAKE);
-	UVM_PAGE_OWN(pg, NULL);
-
-	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), UVM_PROT_RW);
-	pmap_update(kernel_map->pmap);
-
-#endif	/* !__HAVE_PMAP_DIRECT */
-	return ((void *)va);
-}
-
-void
-uvm_km_putpage(void *v)
-{
-#ifdef __HAVE_PMAP_DIRECT
-	vaddr_t va = (vaddr_t)v;
-	struct vm_page *pg;
-
-	pg = pmap_unmap_direct(va);
-
-	uvm_pagefree(pg);
-#else	/* !__HAVE_PMAP_DIRECT */
-	struct uvm_km_free_page *fp = v;
-
-	mtx_enter(&uvm_km_pages.mtx);
-	fp->next = uvm_km_pages.freelist;
-	uvm_km_pages.freelist = fp;
-	if (uvm_km_pages.freelistlen++ > 16)
-		wakeup(&uvm_km_pages.km_proc);
-	mtx_leave(&uvm_km_pages.mtx);
-#endif	/* !__HAVE_PMAP_DIRECT */
-}
-#ifndef __HAVE_PMAP_DIRECT
 
 struct uvm_km_free_page *
 uvm_km_doputpage(struct uvm_km_free_page *fp)
 {
@@ -922,8 +841,8 @@
 #endif	/* !__HAVE_PMAP_DIRECT */
 
 void *
-km_alloc(size_t sz, struct kmem_va_mode *kv, struct kmem_pa_mode *kp,
-    struct kmem_dyn_mode *kd)
+km_alloc(size_t sz, const struct kmem_va_mode *kv,
+    const struct kmem_pa_mode *kp, const struct kmem_dyn_mode *kd)
 {
 	struct vm_map *map;
 	struct vm_page *pg;
@@ -1058,7 +977,8 @@ try_map:
 }
 
 void
-km_free(void *v, size_t sz, struct kmem_va_mode *kv, struct kmem_pa_mode *kp)
+km_free(void *v, size_t sz, const struct kmem_va_mode *kv,
+    const struct kmem_pa_mode *kp)
 {
 	vaddr_t sva, eva, va;
 	struct vm_page *pg;
@@ -1113,56 +1033,56 @@ free_va:
 	wakeup(*kv->kv_map);
 }
 
-struct kmem_va_mode kv_any = {
+const struct kmem_va_mode kv_any = {
 	.kv_map = &kernel_map,
 };
 
-struct kmem_va_mode kv_intrsafe = {
+const struct kmem_va_mode kv_intrsafe = {
 	.kv_map = &kmem_map,
 };
 
-struct kmem_va_mode kv_page = {
+const struct kmem_va_mode kv_page = {
 	.kv_singlepage = 1
 };
 
-struct kmem_pa_mode kp_dirty = {
+const struct kmem_pa_mode kp_dirty = {
 	.kp_constraint = &no_constraint
 };
 
-struct kmem_pa_mode kp_dma = {
+const struct kmem_pa_mode kp_dma = {
 	.kp_constraint = &dma_constraint
 };
 
-struct kmem_pa_mode kp_dma_zero = {
+const struct kmem_pa_mode kp_dma_zero = {
 	.kp_constraint = &dma_constraint,
 	.kp_zero = 1
 };
 
-struct kmem_pa_mode kp_zero = {
+const struct kmem_pa_mode kp_zero = {
 	.kp_constraint = &no_constraint,
 	.kp_zero = 1
 };
 
-struct kmem_pa_mode kp_pageable = {
+const struct kmem_pa_mode kp_pageable = {
 	.kp_object = &uvm.kernel_object,
 	.kp_pageable = 1
 /* XXX - kp_nomem, maybe, but we'll need to fix km_free. */
 };
 
-struct kmem_pa_mode kp_none = {
+const struct kmem_pa_mode kp_none = {
 	.kp_nomem = 1
 };
 
-struct kmem_dyn_mode kd_waitok = {
+const struct kmem_dyn_mode kd_waitok = {
 	.kd_waitok = 1,
 	.kd_prefer = UVM_UNKNOWN_OFFSET
 };
 
-struct kmem_dyn_mode kd_nowait = {
+const struct kmem_dyn_mode kd_nowait = {
 	.kd_prefer = UVM_UNKNOWN_OFFSET
 };
 
-struct kmem_dyn_mode kd_trylock = {
+const struct kmem_dyn_mode kd_trylock = {
 	.kd_trylock = 1,
 	.kd_prefer = UVM_UNKNOWN_OFFSET
 };
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index 17e5e9ef966..9440e1563ed 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map.c,v 1.133 2011/04/06 15:52:13 art Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.134 2011/04/18 19:23:46 art Exp $ */
 /* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
 
 /*
@@ -396,7 +396,7 @@ uvm_mapent_alloc(struct vm_map *map, int flags)
 {
 	struct vm_map_entry *me, *ne;
 	int s, i;
-	int slowdown, pool_flags;
+	int pool_flags;
 	UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);
 
 	pool_flags = PR_WAITOK;
@@ -408,7 +408,8 @@ uvm_mapent_alloc(struct vm_map *map, int flags)
 		simple_lock(&uvm.kentry_lock);
 		me = uvm.kentry_free;
 		if (me == NULL) {
-			ne = uvm_km_getpage(0, &slowdown);
+			ne = km_alloc(PAGE_SIZE, &kv_page, &kp_dirty,
+			    &kd_nowait);
 			if (ne == NULL)
 				panic("uvm_mapent_alloc: cannot allocate map "
 				    "entry");
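
[One pattern worth noting in the subr_pool.c changes above: the pool page allocators cannot use the canned kd_waitok/kd_nowait modes because they also need to hand back the allocator's slowdown hint, so they build a kmem_dyn_mode on the stack from KMEM_DYN_INITIALIZER and fill in kd_waitok and kd_slowdown per call. A sketch of that pattern; the wrapper name is hypothetical, the calls and modes are taken from the diff:]

#include <sys/param.h>
#include <uvm/uvm_extern.h>

/*
 * Hypothetical wrapper illustrating the per-call kmem_dyn_mode setup
 * used by pool_page_alloc() and friends in this commit.
 */
void *
example_pool_backend(int can_wait, int *slowdown)
{
	struct kmem_dyn_mode kd = KMEM_DYN_INITIALIZER;

	kd.kd_waitok = can_wait;	/* sleep for memory if the pool allows */
	kd.kd_slowdown = slowdown;	/* out-param: UVM asks caller to back off */

	return (km_alloc(PAGE_SIZE, &kv_page, &kp_dma, &kd));
}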