Diffstat (limited to 'sys')
-rw-r--r--  sys/kern/subr_pool.c    154
-rw-r--r--  sys/kern/uipc_mbuf.c      5
-rw-r--r--  sys/sys/pool.h           11
-rw-r--r--  sys/uvm/uvm_pdaemon.c     5
4 files changed, 8 insertions, 167 deletions
diff --git a/sys/kern/subr_pool.c b/sys/kern/subr_pool.c
index a39f1a80e85..74ff2c3eb6b 100644
--- a/sys/kern/subr_pool.c
+++ b/sys/kern/subr_pool.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: subr_pool.c,v 1.45 2004/07/29 09:18:17 mickey Exp $ */
+/* $OpenBSD: subr_pool.c,v 1.46 2006/05/07 20:06:50 tedu Exp $ */
/* $NetBSD: subr_pool.c,v 1.61 2001/09/26 07:14:56 chs Exp $ */
/*-
@@ -81,10 +81,7 @@ static struct pool phpool;
/* # of seconds to retain page after last use */
int pool_inactive_time = 10;
-/* Next candidate for drainage (see pool_drain()) */
-static struct pool *drainpp;
-
-/* This spin lock protects both pool_head and drainpp. */
+/* This spin lock protects pool_head. */
struct simplelock pool_head_slock;
struct pool_item_header {
@@ -450,8 +447,6 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
pp->pr_hardlimit_ratecap.tv_usec = 0;
pp->pr_hardlimit_warning_last.tv_sec = 0;
pp->pr_hardlimit_warning_last.tv_usec = 0;
- pp->pr_drain_hook = NULL;
- pp->pr_drain_hook_arg = NULL;
pp->pr_serial = ++pool_serial;
if (pool_serial == 0)
panic("pool_init: too much uptime");
@@ -578,9 +573,6 @@ pool_destroy(struct pool *pp)
/* Remove from global pool list */
simple_lock(&pool_head_slock);
TAILQ_REMOVE(&pool_head, pp, pr_poollist);
- if (drainpp == pp) {
- drainpp = NULL;
- }
simple_unlock(&pool_head_slock);
#ifdef POOL_DIAGNOSTIC
@@ -589,18 +581,6 @@ pool_destroy(struct pool *pp)
#endif
}
-void
-pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
-{
- /* XXX no locking -- must be used just after pool_init() */
-#ifdef DIAGNOSTIC
- if (pp->pr_drain_hook != NULL)
- panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
-#endif
- pp->pr_drain_hook = fn;
- pp->pr_drain_hook_arg = arg;
-}
-
static struct pool_item_header *
pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
{
@@ -675,21 +655,6 @@ pool_get(struct pool *pp, int flags)
}
#endif
if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
- if (pp->pr_drain_hook != NULL) {
- /*
- * Since the drain hook is going to free things
- * back to the pool, unlock, call hook, re-lock
- * and check hardlimit condition again.
- */
- pr_leave(pp);
- simple_unlock(&pp->pr_slock);
- (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
- simple_lock(&pp->pr_slock);
- pr_enter(pp, file, line);
- if (pp->pr_nout < pp->pr_hardlimit)
- goto startover;
- }
-
if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
/*
* XXX: A warning isn't logged in this case. Should
@@ -944,7 +909,7 @@ pool_do_put(struct pool *pp, void *v)
pp->pr_flags &= ~PR_WANTED;
if (ph->ph_nmissing == 0)
pp->pr_nidle++;
- wakeup((caddr_t)pp);
+ wakeup(pp);
return;
}
@@ -1279,13 +1244,6 @@ pool_reclaim(struct pool *pp)
struct timeval diff;
int s;
- if (pp->pr_drain_hook != NULL) {
- /*
- * The drain hook must be called with the pool unlocked.
- */
- (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
- }
-
if (simple_lock_try(&pp->pr_slock) == 0)
return (0);
pr_enter(pp, file, line);
@@ -1342,33 +1300,6 @@ pool_reclaim(struct pool *pp)
return (1);
}
-
-/*
- * Drain pools, one at a time.
- *
- * Note, we must never be called from an interrupt context.
- */
-void
-pool_drain(void *arg)
-{
- struct pool *pp;
- int s;
-
- pp = NULL;
- s = splvm();
- simple_lock(&pool_head_slock);
- if (drainpp == NULL) {
- drainpp = TAILQ_FIRST(&pool_head);
- }
- if (drainpp) {
- pp = drainpp;
- drainpp = TAILQ_NEXT(pp, pr_poollist);
- }
- simple_unlock(&pool_head_slock);
- pool_reclaim(pp);
- splx(s);
-}
-
#ifdef DDB
/*
* Diagnostic helpers.
@@ -1932,7 +1863,6 @@ sysctl_dopool(int *name, u_int namelen, char *where, size_t *sizep)
* Pool backend allocators.
*
- * Each pool has a backend allocator that handles allocation, deallocation
- * and any additional draining that might be needed.
+ * Each pool has a backend allocator that handles allocation and deallocation.
*/
void *pool_page_alloc_kmem(struct pool *, int);
void pool_page_free_kmem(struct pool *, void *);
@@ -1971,37 +1901,10 @@ struct pool_allocator pool_allocator_nointr = {
*/
void *
-pool_allocator_alloc(struct pool *org, int flags)
+pool_allocator_alloc(struct pool *pp, int flags)
{
- struct pool_allocator *pa = org->pr_alloc;
- int freed;
- void *res;
- int s;
- do {
- if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
- return (res);
- if ((flags & PR_WAITOK) == 0) {
- /*
- * We only run the drain hook here if PR_NOWAIT.
- * In other cases the hook will be run in
- * pool_reclaim.
- */
- if (org->pr_drain_hook != NULL) {
- (*org->pr_drain_hook)(org->pr_drain_hook_arg,
- flags);
- if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
- return (res);
- }
- break;
- }
- s = splvm();
- simple_lock(&pa->pa_slock);
- freed = pool_allocator_drain(pa, org, 1);
- simple_unlock(&pa->pa_slock);
- splx(s);
- } while (freed);
- return (NULL);
+ return (pp->pr_alloc->pa_alloc(pp, flags));
}
void
@@ -2033,53 +1936,6 @@ pool_allocator_free(struct pool *pp, void *v)
splx(s);
}
-/*
- * Drain all pools, except 'org', that use this allocator.
- *
- * Must be called at appropriate spl level and with the allocator locked.
- *
- * We do this to reclaim va space. pa_alloc is responsible
- * for waiting for physical memory.
- * XXX - we risk looping forever if someone calls
- * pool_destroy on 'start'. But there is no other way to
- * have potentially sleeping pool_reclaim, non-sleeping
- * locks on pool_allocator and some stirring of drained
- * pools in the allocator.
- * XXX - maybe we should use pool_head_slock for locking
- * the allocators?
- */
-int
-pool_allocator_drain(struct pool_allocator *pa, struct pool *org, int need)
-{
- struct pool *pp, *start;
- int freed;
-
- freed = 0;
-
- pp = start = TAILQ_FIRST(&pa->pa_list);
- do {
- TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
- TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
- if (pp == org)
- continue;
- simple_unlock(&pa->pa_slock);
- freed = pool_reclaim(pp);
- simple_lock(&pa->pa_slock);
- } while ((pp = TAILQ_FIRST(&pa->pa_list)) != start && (freed < need));
-
- if (!freed) {
- /*
- * We set PA_WANT here, the caller will most likely
- * sleep waiting for pages (if not, this won't hurt
- * that much) and there is no way to set this in the
- * caller without violating locking order.
- */
- pa->pa_flags |= PA_WANT;
- }
-
- return (freed);
-}
-
void *
pool_page_alloc(struct pool *pp, int flags)
{
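
For readers tracking the deletions above: the pr_drain_hook machinery was an ordinary callback pattern. pool_set_drain_hook() stored a function pointer and an opaque argument in the pool, and pool_get() invoked that pair (with the pool lock dropped) when pr_nout reached pr_hardlimit, then re-checked the limit before failing or sleeping. The following is a minimal userland model of that pattern; every name in it (toy_pool, toy_get, toy_reclaim) is illustrative, not the kernel API:

#include <stdio.h>

struct toy_pool {
	int nout;				/* items handed out */
	int hardlimit;				/* like pr_hardlimit */
	void (*drain_hook)(void *, int);	/* like pr_drain_hook */
	void *drain_arg;			/* like pr_drain_hook_arg */
};

/* The owner's hook: hand one item back under pressure. */
static void
toy_reclaim(void *arg, int flags)
{
	struct toy_pool *p = arg;

	(void)flags;
	if (p->nout > 0)
		p->nout--;
}

static int
toy_get(struct toy_pool *p)
{
	if (p->nout == p->hardlimit && p->drain_hook != NULL)
		(*p->drain_hook)(p->drain_arg, 0);	/* then re-check */
	if (p->nout == p->hardlimit)
		return (-1);		/* still at the hard limit: fail */
	p->nout++;
	return (0);
}

int
main(void)
{
	struct toy_pool p = { 2, 2, toy_reclaim, &p };
	int rv = toy_get(&p);		/* hook frees one, so this succeeds */

	printf("get: %d, nout now %d\n", rv, p.nout);
	return (0);
}

After this commit that retry loop is gone: an allocation at the hard limit either fails or sleeps, and reclamation happens only through pool_reclaim().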
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index 35a1f693213..237d143645d 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uipc_mbuf.c,v 1.74 2006/03/17 04:21:57 brad Exp $ */
+/* $OpenBSD: uipc_mbuf.c,v 1.75 2006/05/07 20:06:50 tedu Exp $ */
/* $NetBSD: uipc_mbuf.c,v 1.15.4.1 1996/06/13 17:11:44 cgd Exp $ */
/*
@@ -115,9 +115,6 @@ mbinit(void)
pool_init(&mbpool, MSIZE, 0, 0, 0, "mbpl", NULL);
pool_init(&mclpool, MCLBYTES, 0, 0, 0, "mclpl", NULL);
- pool_set_drain_hook(&mbpool, m_reclaim, NULL);
- pool_set_drain_hook(&mclpool, m_reclaim, NULL);
-
nmbclust_update();
/*
diff --git a/sys/sys/pool.h b/sys/sys/pool.h
index c558c928c86..ac8d7def45b 100644
--- a/sys/sys/pool.h
+++ b/sys/sys/pool.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pool.h,v 1.18 2004/07/29 09:18:17 mickey Exp $ */
+/* $OpenBSD: pool.h,v 1.19 2006/05/07 20:06:50 tedu Exp $ */
/* $NetBSD: pool.h,v 1.27 2001/06/06 22:00:17 rafal Exp $ */
/*-
@@ -183,8 +183,6 @@ struct pool {
const char *pr_entered_file; /* reentrancy check */
long pr_entered_line;
- void (*pr_drain_hook)(void *, int);
- void *pr_drain_hook_arg;
};
#ifdef _KERNEL
@@ -195,16 +193,10 @@ extern struct pool_allocator pool_allocator_nointr;
/* previous interrupt safe allocator, allocates from kmem */
extern struct pool_allocator pool_allocator_kmem;
-int pool_allocator_drain(struct pool_allocator *, struct pool *,
- int);
-
void pool_init(struct pool *, size_t, u_int, u_int, int,
const char *, struct pool_allocator *);
void pool_destroy(struct pool *);
-void pool_set_drain_hook(struct pool *, void (*)(void *, int),
- void *);
-
void *pool_get(struct pool *, int);
void pool_put(struct pool *, void *);
int pool_reclaim(struct pool *);
@@ -225,7 +217,6 @@ int pool_prime(struct pool *, int);
void pool_setlowat(struct pool *, int);
void pool_sethiwat(struct pool *, int);
int pool_sethardlimit(struct pool *, unsigned, const char *, int);
-void pool_drain(void *);
#ifdef DDB
/*
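
With the drain declarations removed, the entry points that remain in pool.h still cover the whole object lifecycle. Below is a kernel-style sketch of typical use of those surviving declarations; struct my_item, the pool and wchan names, and the hard-limit values are assumptions chosen for illustration, and the snippet only exercises the signatures declared above:

#include <sys/pool.h>

struct my_item {
	int	mi_value;
};

struct pool my_item_pool;

void
my_item_pool_init(void)
{
	/* size, align, ioff, flags, wchan name, allocator (NULL = default) */
	pool_init(&my_item_pool, sizeof(struct my_item), 0, 0, 0,
	    "myitempl", NULL);
	/* cap outstanding items; rate-limit the warning message */
	pool_sethardlimit(&my_item_pool, 1024, "my_item_pool limit", 60);
}

struct my_item *
my_item_alloc(void)
{
	/* PR_WAITOK: sleep for memory rather than returning NULL */
	return (pool_get(&my_item_pool, PR_WAITOK));
}

void
my_item_free(struct my_item *mi)
{
	pool_put(&my_item_pool, mi);
}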
diff --git a/sys/uvm/uvm_pdaemon.c b/sys/uvm/uvm_pdaemon.c
index e011f925412..95b97114504 100644
--- a/sys/uvm/uvm_pdaemon.c
+++ b/sys/uvm/uvm_pdaemon.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_pdaemon.c,v 1.25 2002/05/24 13:10:53 art Exp $ */
+/* $OpenBSD: uvm_pdaemon.c,v 1.26 2006/05/07 20:06:50 tedu Exp $ */
/* $NetBSD: uvm_pdaemon.c,v 1.23 2000/08/20 10:24:14 bjh21 Exp $ */
/*
@@ -222,9 +222,6 @@ uvm_pageout(void *arg)
uvmexp.pdwoke++;
UVMHIST_LOG(pdhist," <<WOKE UP>>",0,0,0,0);
- /* drain pool resources */
- pool_drain(0);
-
/*
* now lock page queues and recompute inactive count
*/