| author | Ted Unangst <tedu@cvs.openbsd.org> | 2006-05-07 20:06:51 +0000 |
|---|---|---|
| committer | Ted Unangst <tedu@cvs.openbsd.org> | 2006-05-07 20:06:51 +0000 |
| commit | 4a02f1bb86ffaf58c243035d5c3117d4611db8eb (patch) | |
| tree | 2d3d10f3a3aef857bc7cecfe6cf9f57e61c4365a /sys/kern | |
| parent | 745fde2efc1d5a927b48753f6c63fc546ba876a4 (diff) | |
remove drain hooks from pool.
1. drain hooks and lists of allocators make the code complicated
2. the only hooks in the system are the mbuf reclaim routines (sketched below)
3. if reclaim is actually able to put a meaningful amount of memory back
in the system, i think something else is dicked up. i.e., if reclaiming
your ip fragment buffers makes the difference between thrashing swap and
not, your system is in a load of trouble.
4. it's a scary amount of code running with very weird spl requirements
and i'd say it's pretty much totally untested. raise your hand if your
router is running at the edge of swap.
5. the reclaim stuff goes back to when mbufs lived in a tiny vm_map and
you could run out of va. that's very unlikely (like impossible) now.
ok/tested pedro krw sturm
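
For readers who have not seen the interface in question: below is a self-contained toy model (not kernel code) of the drain-hook pattern this commit deletes. The struct fields and function names mirror the removed code in the diff further down; the printf stand-in for m_reclaim and the main() driver are purely illustrative.

```c
#include <stddef.h>
#include <stdio.h>

/* Toy model of the removed interface: a pool may register one callback
 * ("drain hook") that is invoked when the pool hits its hard limit, in
 * the hope the owning subsystem frees items back before we fail or sleep. */
struct pool {
	const char *pr_wchan;               /* name, for diagnostics */
	int         pr_nout, pr_hardlimit;  /* items out / ceiling */
	void      (*pr_drain_hook)(void *, int);
	void       *pr_drain_hook_arg;
};

static void
pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
{
	pp->pr_drain_hook = fn;	/* the kernel version panics if already set */
	pp->pr_drain_hook_arg = arg;
}

/* Stand-in for the mbuf layer's m_reclaim(): in the real tree this walked
 * protocol drain routines (ip fragment queues and the like). */
static void
m_reclaim(void *arg, int flags)
{
	(void)arg; (void)flags;
	printf("m_reclaim: asked to give memory back\n");
}

int
main(void)
{
	struct pool mbpool = { "mbpl", 0, 0, NULL, NULL };

	/* The only registration in the tree was the mbuf pools in mbinit(). */
	pool_set_drain_hook(&mbpool, m_reclaim, NULL);

	/* pool_get() at the hard limit: run the hook once, then recheck. */
	if (mbpool.pr_nout == mbpool.pr_hardlimit && mbpool.pr_drain_hook)
		(*mbpool.pr_drain_hook)(mbpool.pr_drain_hook_arg, 0);
	return 0;
}
```

The commit's argument is that this escape hatch buys nothing in practice: a system that only stays afloat because its fragment queues get flushed is already past saving.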
Diffstat (limited to 'sys/kern')
| -rw-r--r-- | sys/kern/subr_pool.c | 154 |
|---|---|---|
| -rw-r--r-- | sys/kern/uipc_mbuf.c | 5 |

2 files changed, 6 insertions, 153 deletions
```diff
diff --git a/sys/kern/subr_pool.c b/sys/kern/subr_pool.c
index a39f1a80e85..74ff2c3eb6b 100644
--- a/sys/kern/subr_pool.c
+++ b/sys/kern/subr_pool.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: subr_pool.c,v 1.45 2004/07/29 09:18:17 mickey Exp $	*/
+/*	$OpenBSD: subr_pool.c,v 1.46 2006/05/07 20:06:50 tedu Exp $	*/
 /*	$NetBSD: subr_pool.c,v 1.61 2001/09/26 07:14:56 chs Exp $	*/
 
 /*-
@@ -81,10 +81,7 @@ static struct pool phpool;
 /* # of seconds to retain page after last use */
 int pool_inactive_time = 10;
 
-/* Next candidate for drainage (see pool_drain()) */
-static struct pool *drainpp;
-
-/* This spin lock protects both pool_head and drainpp. */
+/* This spin lock protects both pool_head */
 struct simplelock pool_head_slock;
 
 struct pool_item_header {
@@ -450,8 +447,6 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
 	pp->pr_hardlimit_ratecap.tv_usec = 0;
 	pp->pr_hardlimit_warning_last.tv_sec = 0;
 	pp->pr_hardlimit_warning_last.tv_usec = 0;
-	pp->pr_drain_hook = NULL;
-	pp->pr_drain_hook_arg = NULL;
 	pp->pr_serial = ++pool_serial;
 	if (pool_serial == 0)
 		panic("pool_init: too much uptime");
@@ -578,9 +573,6 @@ pool_destroy(struct pool *pp)
 	/* Remove from global pool list */
 	simple_lock(&pool_head_slock);
 	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
-	if (drainpp == pp) {
-		drainpp = NULL;
-	}
 	simple_unlock(&pool_head_slock);
 
 #ifdef POOL_DIAGNOSTIC
@@ -589,18 +581,6 @@ pool_destroy(struct pool *pp)
 #endif
 }
 
-void
-pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
-{
-	/* XXX no locking -- must be used just after pool_init() */
-#ifdef DIAGNOSTIC
-	if (pp->pr_drain_hook != NULL)
-		panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
-#endif
-	pp->pr_drain_hook = fn;
-	pp->pr_drain_hook_arg = arg;
-}
-
 static struct pool_item_header *
 pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
 {
@@ -675,21 +655,6 @@ pool_get(struct pool *pp, int flags)
 	}
 #endif
 	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
-		if (pp->pr_drain_hook != NULL) {
-			/*
-			 * Since the drain hook is going to free things
-			 * back to the pool, unlock, call hook, re-lock
-			 * and check hardlimit condition again.
-			 */
-			pr_leave(pp);
-			simple_unlock(&pp->pr_slock);
-			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
-			simple_lock(&pp->pr_slock);
-			pr_enter(pp, file, line);
-			if (pp->pr_nout < pp->pr_hardlimit)
-				goto startover;
-		}
-
 		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
 			/*
 			 * XXX: A warning isn't logged in this case.  Should
@@ -944,7 +909,7 @@ pool_do_put(struct pool *pp, void *v)
 		pp->pr_flags &= ~PR_WANTED;
 		if (ph->ph_nmissing == 0)
 			pp->pr_nidle++;
-		wakeup((caddr_t)pp);
+		wakeup(pp);
 		return;
 	}
 
@@ -1279,13 +1244,6 @@ pool_reclaim(struct pool *pp)
 	struct timeval diff;
 	int s;
 
-	if (pp->pr_drain_hook != NULL) {
-		/*
-		 * The drain hook must be called with the pool unlocked.
-		 */
-		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
-	}
-
 	if (simple_lock_try(&pp->pr_slock) == 0)
 		return (0);
 	pr_enter(pp, file, line);
@@ -1342,33 +1300,6 @@ pool_reclaim(struct pool *pp)
 	return (1);
 }
 
-
-/*
- * Drain pools, one at a time.
- *
- * Note, we must never be called from an interrupt context.
- */
-void
-pool_drain(void *arg)
-{
-	struct pool *pp;
-	int s;
-
-	pp = NULL;
-	s = splvm();
-	simple_lock(&pool_head_slock);
-	if (drainpp == NULL) {
-		drainpp = TAILQ_FIRST(&pool_head);
-	}
-	if (drainpp) {
-		pp = drainpp;
-		drainpp = TAILQ_NEXT(pp, pr_poollist);
-	}
-	simple_unlock(&pool_head_slock);
-	pool_reclaim(pp);
-	splx(s);
-}
-
 #ifdef DDB
 /*
  * Diagnostic helpers.
@@ -1932,7 +1863,6 @@ sysctl_dopool(int *name, u_int namelen, char *where, size_t *sizep)
  * Pool backend allocators.
  *
  * Each pool has a backend allocator that handles allocation, deallocation
- * and any additional draining that might be needed.
  */
void *pool_page_alloc_kmem(struct pool *, int);
void pool_page_free_kmem(struct pool *, void *);
@@ -1971,37 +1901,10 @@ struct pool_allocator pool_allocator_nointr = {
  */
 
 void *
-pool_allocator_alloc(struct pool *org, int flags)
+pool_allocator_alloc(struct pool *pp, int flags)
 {
-	struct pool_allocator *pa = org->pr_alloc;
-	int freed;
-	void *res;
-	int s;
-	do {
-		if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
-			return (res);
-		if ((flags & PR_WAITOK) == 0) {
-			/*
-			 * We only run the drain hook here if PR_NOWAIT.
-			 * In other cases the hook will be run in
-			 * pool_reclaim.
-			 */
-			if (org->pr_drain_hook != NULL) {
-				(*org->pr_drain_hook)(org->pr_drain_hook_arg,
-				    flags);
-				if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
-					return (res);
-			}
-			break;
-		}
-		s = splvm();
-		simple_lock(&pa->pa_slock);
-		freed = pool_allocator_drain(pa, org, 1);
-		simple_unlock(&pa->pa_slock);
-		splx(s);
-	} while (freed);
-	return (NULL);
+	return (pp->pr_alloc->pa_alloc(pp, flags));
 }
 
 void
@@ -2033,53 +1936,6 @@ pool_allocator_free(struct pool *pp, void *v)
 	splx(s);
 }
 
-/*
- * Drain all pools, except 'org', that use this allocator.
- *
- * Must be called at appropriate spl level and with the allocator locked.
- *
- * We do this to reclaim va space. pa_alloc is responsible
- * for waiting for physical memory.
- * XXX - we risk looping forever if start if someone calls
- * pool_destroy on 'start'. But there is no other way to
- * have potentially sleeping pool_reclaim, non-sleeping
- * locks on pool_allocator and some stirring of drained
- * pools in the allocator.
- * XXX - maybe we should use pool_head_slock for locking
- * the allocators?
- */
-int
-pool_allocator_drain(struct pool_allocator *pa, struct pool *org, int need)
-{
-	struct pool *pp, *start;
-	int freed;
-
-	freed = 0;
-
-	pp = start = TAILQ_FIRST(&pa->pa_list);
-	do {
-		TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
-		TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
-		if (pp == org)
-			continue;
-		simple_unlock(&pa->pa_slock);
-		freed = pool_reclaim(pp);
-		simple_lock(&pa->pa_slock);
-	} while ((pp = TAILQ_FIRST(&pa->pa_list)) != start && (freed < need));
-
-	if (!freed) {
-		/*
-		 * We set PA_WANT here, the caller will most likely
-		 * sleep waiting for pages (if not, this won't hurt
-		 * that much) and there is no way to set this in the
-		 * caller without violating locking order.
-		 */
-		pa->pa_flags |= PA_WANT;
-	}
-
-	return (freed);
-}
-
 void *
 pool_page_alloc(struct pool *pp, int flags)
 {
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index 35a1f693213..237d143645d 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: uipc_mbuf.c,v 1.74 2006/03/17 04:21:57 brad Exp $	*/
+/*	$OpenBSD: uipc_mbuf.c,v 1.75 2006/05/07 20:06:50 tedu Exp $	*/
 /*	$NetBSD: uipc_mbuf.c,v 1.15.4.1 1996/06/13 17:11:44 cgd Exp $	*/
 
 /*
@@ -115,9 +115,6 @@ mbinit(void)
 	pool_init(&mbpool, MSIZE, 0, 0, 0, "mbpl", NULL);
 	pool_init(&mclpool, MCLBYTES, 0, 0, 0, "mclpl", NULL);
 
-	pool_set_drain_hook(&mbpool, m_reclaim, NULL);
-	pool_set_drain_hook(&mclpool, m_reclaim, NULL);
-
 	nmbclust_update();
 
 	/*
```
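
The shape of the change is easiest to see outside diff notation. Below is a minimal compilable sketch of the post-commit allocator path: the two struct definitions and the body of pool_allocator_alloc() mirror the diff above, while stub_alloc() and main() are invented here purely so the sketch is self-contained.

```c
#include <stddef.h>
#include <stdio.h>

struct pool;
struct pool_allocator {
	void *(*pa_alloc)(struct pool *, int);	/* backend page allocator */
};
struct pool {
	struct pool_allocator *pr_alloc;
};

static void *
pool_allocator_alloc(struct pool *pp, int flags)
{
	/* Post-commit body, as in the diff: a direct call into the backend.
	 * No retry loop, no pool_allocator_drain() of sibling pools. */
	return (pp->pr_alloc->pa_alloc(pp, flags));
}

/* Stub backend standing in for pool_page_alloc() and friends. */
static void *
stub_alloc(struct pool *pp, int flags)
{
	(void)pp; (void)flags;
	return NULL;	/* pretend the backing allocator is exhausted */
}

int
main(void)
{
	struct pool_allocator pa = { stub_alloc };
	struct pool p = { &pa };

	if (pool_allocator_alloc(&p, 0) == NULL)
		printf("allocation fails immediately; the caller waits or bails\n");
	return 0;
}
```

Failure now surfaces straight to the caller instead of triggering a drain pass over every other pool on the same allocator, which is what made the old spl and locking story so hairy.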