path: root/sys/kern/uipc_mbuf.c
author     Ted Unangst <tedu@cvs.openbsd.org>    2004-05-27 04:55:29 +0000
committer  Ted Unangst <tedu@cvs.openbsd.org>    2004-05-27 04:55:29 +0000
commit     6b7157229e2c9d0e8a932b56aae6383da00f02c9 (patch)
tree       39d1dc9d055be3b70591e08db11940e74b8e122d /sys/kern/uipc_mbuf.c
parent     94536f73ec48910533ea84f9bbbc1922764cb755 (diff)
change uvm_km_getpage to take a waitok argument and sleep if appropriate.
change both the nointr and default pool allocators to use uvm_km_getpage.
change pools to default to a maxpages value of 8, so they hoard less memory.
change mbuf pools to use the default pool allocator.
pools are now more efficient, use less of kmem_map, and are a bit faster.
tested by mcbride, deraadt, pedro, drahn, miod to work everywhere
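
For context on the patch below: the removed mclpool_alloc()/mclpool_release() hooks called uvm_km_getpage() with no wait preference at all. The following is a minimal sketch of the shape the reworked default page allocator presumably takes after this commit; the names pool_page_alloc/pool_page_free and the exact flag handling are illustrative, not copied from the committed subr_pool.c.

#include <sys/param.h>
#include <sys/pool.h>
#include <uvm/uvm_extern.h>

/*
 * Sketch only: forward the caller's wait preference to uvm_km_getpage(),
 * which after this commit may sleep for a free page when waitok is set.
 */
void *
pool_page_alloc(struct pool *pp, int flags)
{
	int waitok = (flags & PR_WAITOK) ? 1 : 0;

	return (uvm_km_getpage(waitok));
}

void
pool_page_free(struct pool *pp, void *v)
{
	uvm_km_putpage(v);
}

With the wait preference forwarded like this, a pool_get(..., PR_WAITOK) caller can sleep for a fresh page instead of failing when none is immediately available.
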
Diffstat (limited to 'sys/kern/uipc_mbuf.c')
-rw-r--r--  sys/kern/uipc_mbuf.c | 26
1 file changed, 3 insertions(+), 23 deletions(-)
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index 732676c49ec..425587781b9 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uipc_mbuf.c,v 1.69 2004/05/23 19:41:23 tedu Exp $ */
+/* $OpenBSD: uipc_mbuf.c,v 1.70 2004/05/27 04:55:28 tedu Exp $ */
/* $NetBSD: uipc_mbuf.c,v 1.15.4.1 1996/06/13 17:11:44 cgd Exp $ */
/*
@@ -99,8 +99,6 @@ int max_protohdr; /* largest protocol header */
int max_hdr; /* largest link+protocol header */
int max_datalen; /* MHLEN - max_hdr */
-void *mclpool_alloc(struct pool *, int);
-void mclpool_release(struct pool *, void *);
struct mbuf *m_copym0(struct mbuf *, int, int, int, int);
void nmbclust_update(void);
@@ -108,18 +106,14 @@ void nmbclust_update(void);
const char *mclpool_warnmsg =
"WARNING: mclpool limit reached; increase kern.maxclusters";
-struct pool_allocator mclpool_allocator = {
- mclpool_alloc, mclpool_release, 0,
-};
-
/*
* Initialize the mbuf allocator.
*/
void
mbinit()
{
- pool_init(&mbpool, MSIZE, 0, 0, 0, "mbpl", &mclpool_allocator);
- pool_init(&mclpool, MCLBYTES, 0, 0, 0, "mclpl", &mclpool_allocator);
+ pool_init(&mbpool, MSIZE, 0, 0, 0, "mbpl", NULL);
+ pool_init(&mclpool, MCLBYTES, 0, 0, 0, "mclpl", NULL);
pool_set_drain_hook(&mbpool, m_reclaim, NULL);
pool_set_drain_hook(&mclpool, m_reclaim, NULL);
@@ -147,20 +141,6 @@ nmbclust_update(void)
(void)pool_sethardlimit(&mclpool, nmbclust, mclpool_warnmsg, 60);
}
-
-
-void *
-mclpool_alloc(struct pool *pp, int flags)
-{
- return uvm_km_getpage();
-}
-
-void
-mclpool_release(struct pool *pp, void *v)
-{
- uvm_km_putpage(v);
-}
-
void
m_reclaim(void *arg, int flags)
{
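
Usage note (not part of the patch): passing NULL as the last pool_init() argument, as the mbinit() hunk above now does for mbpool and mclpool, selects the kernel's default page-backed allocator. A hypothetical pool set up the same way might look like this; the pool name, item size, and helper functions below are made up for illustration.

#include <sys/param.h>
#include <sys/pool.h>

struct pool example_pool;	/* hypothetical pool, not in the tree */

void
example_init(void)
{
	/* NULL allocator selects the default page allocator. */
	pool_init(&example_pool, 128, 0, 0, 0, "expl", NULL);
}

void *
example_get(int canwait)
{
	/*
	 * PR_WAITOK lets the backend sleep for a fresh page;
	 * PR_NOWAIT keeps the non-sleeping behaviour.
	 */
	return (pool_get(&example_pool, canwait ? PR_WAITOK : PR_NOWAIT));
}
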