| author | Niels Provos <provos@cvs.openbsd.org> | 2001-05-17 18:41:51 +0000 |
| --- | --- | --- |
| committer | Niels Provos <provos@cvs.openbsd.org> | 2001-05-17 18:41:51 +0000 |
| commit | bff2b5c93dc47bb0d17d1480618ead119fc3a4c0 | |
| tree | a86b2f3d063adb61db0fb67479d25ee1e6437a6a /sys/kern | |
| parent | af8da448cf644f7e1e008d634771b23915ab7402 | |
convert mbuf and cluster allocation to pool, mostly from NetBSD
okay art@ miod@
Diffstat (limited to 'sys/kern')
| -rw-r--r-- | sys/kern/init_main.c | 12 |
| -rw-r--r-- | sys/kern/uipc_mbuf.c | 282 |
| -rw-r--r-- | sys/kern/uipc_mbuf2.c | 4 |
3 files changed, 136 insertions, 162 deletions
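The gist of the patch below: the hand-rolled cluster free list (m_clalloc(), mclfree, and the mclrefcnt reference-count array) is retired, and both mbufs and clusters are handed to the kernel pool allocator. Condensed from the new mbinit() in sys/kern/uipc_mbuf.c as it appears in the diff (comments and the UVM-specific page hooks trimmed; nmbclusters, mblowat and mcllowat are tunables the patch references but does not define in these files), the initialization now reads roughly:

```c
#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/pool.h>

struct pool mbpool;     /* MSIZE-sized mbufs */
struct pool mclpool;    /* MCLBYTES-sized clusters, paged in and out of mb_map */

void
mbinit()
{
    /* One pool per object size; the cluster pool gets its own page alloc/free hooks. */
    pool_init(&mbpool, MSIZE, 0, 0, 0, "mbpl", 0, NULL, NULL, 0);
    pool_init(&mclpool, MCLBYTES, 0, 0, 0, "mclpl", 0, mclpool_alloc,
        mclpool_release, 0);

    /* Cap clusters at nmbclusters; log the warning at most once a minute. */
    pool_sethardlimit(&mclpool, nmbclusters, mclpool_warnmsg, 60);

    /* Keep a reserve of both so allocation still works under memory starvation. */
    pool_setlowat(&mbpool, mblowat);
    pool_setlowat(&mclpool, mcllowat);
}
```

This is only the patch's own code rearranged for readability; mclpool_alloc(), mclpool_release() and mclpool_warnmsg are defined in the full diff below.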
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index bde4bd42c10..6a0e8ea168f 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: init_main.c,v 1.64 2001/05/16 12:49:45 art Exp $ */
+/* $OpenBSD: init_main.c,v 1.65 2001/05/17 18:41:44 provos Exp $ */
 /* $NetBSD: init_main.c,v 1.84.4.1 1996/06/02 09:08:06 mrg Exp $ */
 
 /*
@@ -222,6 +222,13 @@ main(framep)
     disk_init();        /* must come before autoconfiguration */
     tty_init();         /* initialise tty's */
     cpu_startup();
+
+    /*
+     * Initialize mbuf's. Do this now because we might attempt to
+     * allocate mbufs or mbuf clusters during autoconfiguration.
+     */
+    mbinit();
+
     cpu_configure();
 
     /* Initialize sysctls (must be done before any processes run) */
@@ -339,9 +346,6 @@ main(framep)
     /* Start real time and statistics clocks. */
     initclocks();
 
-    /* Initialize mbuf's. */
-    mbinit();
-
 #ifdef REAL_CLISTS
     /* Initialize clists. */
     clist_init();
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index 6ca1e718531..25e7854c879 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uipc_mbuf.c,v 1.28 2001/05/16 08:59:04 art Exp $ */
+/* $OpenBSD: uipc_mbuf.c,v 1.29 2001/05/17 18:41:44 provos Exp $ */
 /* $NetBSD: uipc_mbuf.c,v 1.15.4.1 1996/06/13 17:11:44 cgd Exp $ */
 
 /*
@@ -59,6 +59,7 @@ didn't get a copy, you may request one from <license@ipv6.nrl.navy.mil>.
 #include <sys/syslog.h>
 #include <sys/domain.h>
 #include <sys/protosw.h>
+#include <sys/pool.h>
 
 #include <machine/cpu.h>
 
@@ -68,71 +69,75 @@ didn't get a copy, you may request one from <license@ipv6.nrl.navy.mil>.
 #include <uvm/uvm_extern.h>
 #endif
 
+struct pool mbpool;     /* mbuf pool */
+struct pool mclpool;    /* mbuf cluster pool */
+
 extern vm_map_t mb_map;
 struct mbuf *mbutl;
-char *mclrefcnt;
 int needqueuedrain;
 
+void *mclpool_alloc __P((unsigned long, int, int));
+void mclpool_release __P((void *, unsigned long, int));
+struct mbuf *m_copym0 __P((struct mbuf *, int, int, int, int));
+
+const char *mclpool_warnmsg =
+    "WARNING: mclpool limit reached; increase NMBCLUSTERS";
+
+/*
+ * Initialize the mbuf allcator.
+ */
 void
 mbinit()
 {
-    int s;
+    pool_init(&mbpool, MSIZE, 0, 0, 0, "mbpl", 0, NULL, NULL, 0);
+    pool_init(&mclpool, MCLBYTES, 0, 0, 0, "mclpl", 0, mclpool_alloc,
+        mclpool_release, 0);
 
-    s = splimp();
-    if (m_clalloc(max(4096 / PAGE_SIZE, 1), M_DONTWAIT) == 0)
-        goto bad;
-    splx(s);
-    return;
-bad:
-    splx(s);
-    panic("mbinit");
+    /*
+     * Set the hard limit on the mclpool to the number of
+     * mbuf clusters the kernel is to support.  Log the limit
+     * reached message max once a minute.
+     */
+    pool_sethardlimit(&mclpool, nmbclusters, mclpool_warnmsg, 60);
+
+    /*
+     * Set a low water mark for both mbufs and clusters.  This should
+     * help ensure that they can be allocated in a memory starvation
+     * situation.  This is important for e.g. diskless systems which
+     * must allocate mbufs in order for the pagedaemon to clean pages.
+     */
+    pool_setlowat(&mbpool, mblowat);
+    pool_setlowat(&mclpool, mcllowat);
 }
 
-/*
- * Allocate some number of mbuf clusters
- * and place on cluster free list.
- * Must be called at splimp.
- */
-/* ARGSUSED */
-int
-m_clalloc(ncl, nowait)
-    register int ncl;
-    int nowait;
+
+void *
+mclpool_alloc(sz, flags, mtype)
    unsigned long sz;
    int flags;
    int mtype;
 {
-    volatile static struct timeval lastlogged;
-    struct timeval curtime, logdiff;
-    register caddr_t p;
-    register int i;
-    int npg, s;
+#if defined(UVM)
+    boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
 
-    npg = ncl;
+    return ((void *)uvm_km_alloc_poolpage1(mb_map, uvmexp.mb_object,
+        waitok));
+#else
+    return pool_page_alloc(sz, flags, mtype);
+#endif
+}
+
+void
+mclpool_release(v, sz, mtype)
+    void *v;
+    unsigned long sz;
+    int mtype;
+{
 #if defined(UVM)
-    p = (caddr_t)uvm_km_kmemalloc(mb_map, uvmexp.mb_object, ctob(npg),
-        nowait ? 0 : UVM_KMF_NOWAIT);
+    uvm_km_free_poolpage1(mb_map, (vaddr_t)v);
 #else
-    p = (caddr_t)kmem_malloc(mb_map, ctob(npg), !nowait);
+    pool_page_free(v, sz, mtype);
 #endif
-    if (p == NULL) {
-        s = splclock();
-        curtime = time;
-        splx(s);
-        timersub(&curtime, &lastlogged, &logdiff);
-        if (logdiff.tv_sec >= 60) {
-            lastlogged = curtime;
-            log(LOG_ERR, "mb_map full\n");
-        }
-        m_reclaim();
-        return (mclfree != NULL);
-    }
-    ncl = ncl * PAGE_SIZE / MCLBYTES;
-    for (i = 0; i < ncl; i++) {
-        ((union mcluster *)p)->mcl_next = mclfree;
-        mclfree = (union mcluster *)p;
-        p += MCLBYTES;
-        mbstat.m_clfree++;
-    }
-    mbstat.m_clusters += ncl;
-    return (1);
 }
 
 /*
@@ -154,6 +159,10 @@ m_retry(i, t)
 #define m_retry(i, t) NULL
     MGET(m, i, t);
 #undef m_retry
+    if (m != NULL)
+        mbstat.m_wait++;
+    else
+        mbstat.m_drops++;
     return (m);
 }
 
@@ -175,6 +184,10 @@ m_retryhdr(i, t)
 #define m_retryhdr(i, t) NULL
     MGETHDR(m, i, t);
 #undef m_retryhdr
+    if (m != NULL)
+        mbstat.m_wait++;
+    else
+        mbstat.m_drops++;
     return (m);
 }
 
@@ -228,7 +241,7 @@ m_getclr(nowait, type)
     MGET(m, nowait, type);
     if (m == NULL)
         return (NULL);
-    bzero(mtod(m, caddr_t), MLEN);
+    memset(mtod(m, caddr_t), 0, MLEN);
     return (m);
 }
 
@@ -298,74 +311,11 @@ int MCFail;
 
 struct mbuf *
 m_copym(m, off0, len, wait)
-    register struct mbuf *m;
+    struct mbuf *m;
     int off0, wait;
-    register int len;
+    int len;
 {
-    register struct mbuf *n, **np;
-    register int off = off0;
-    struct mbuf *top;
-    int copyhdr = 0;
-
-    if (off < 0)
-        panic("m_copym: off %d < 0", off);
-    if (len < 0)
-        panic("m_copym: len %d < 0", len);
-    if (off == 0 && m->m_flags & M_PKTHDR)
-        copyhdr = 1;
-    while (off > 0) {
-        if (m == NULL)
-            panic("m_copym: null mbuf");
-        if (off < m->m_len)
-            break;
-        off -= m->m_len;
-        m = m->m_next;
-    }
-    np = &top;
-    top = NULL;
-    while (len > 0) {
-        if (m == NULL) {
-            if (len != M_COPYALL)
-                panic("m_copym: %d not M_COPYALL", len);
-            break;
-        }
-        MGET(n, wait, m->m_type);
-        *np = n;
-        if (n == NULL)
-            goto nospace;
-        if (copyhdr) {
-            M_DUP_PKTHDR(n, m);
-            if (len == M_COPYALL)
-                n->m_pkthdr.len -= off0;
-            else
-                n->m_pkthdr.len = len;
-            copyhdr = 0;
-        }
-        n->m_len = min(len, m->m_len - off);
-        if (m->m_flags & M_EXT) {
-            n->m_data = m->m_data + off;
-            if (!m->m_ext.ext_ref)
-                mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
-            else
-                (*(m->m_ext.ext_ref))(m);
-            n->m_ext = m->m_ext;
-            n->m_flags |= M_EXT;
-        } else
-            bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
-                (unsigned)n->m_len);
-        if (len != M_COPYALL)
-            len -= n->m_len;
-        off = 0;
-        m = m->m_next;
-        np = &n->m_next;
-    }
-    if (top == NULL)
-        MCFail++;
-    return (top);
-nospace:
-    m_freem(top);
-    MCFail++;
-    return (NULL);
+    return m_copym0(m, off0, len, wait, 0); /* shallow copy on M_EXT */
 }
 
 /*
@@ -374,40 +324,48 @@ nospace:
  */
 struct mbuf *
 m_copym2(m, off0, len, wait)
-    register struct mbuf *m;
+    struct mbuf *m;
     int off0, wait;
-    register int len;
+    int len;
 {
-    register struct mbuf *n, **np;
-    register int off = off0;
+    return m_copym0(m, off0, len, wait, 1); /* deep copy */
+}
+
+struct mbuf *
+m_copym0(m, off0, len, wait, deep)
+    struct mbuf *m;
+    int off0, wait;
+    int len;
+    int deep;   /* deep copy */
+{
+    struct mbuf *n, **np;
+    int off = off0;
     struct mbuf *top;
     int copyhdr = 0;
 
-    if (len < 0)
-        panic("m_copym2: len %d < 0", len);
-    if (off < 0)
-        panic("m_copym2: off %d < 0", off);
+    if (off < 0 || len < 0)
+        panic("m_copym0: off %d, len %d", off, len);
     if (off == 0 && m->m_flags & M_PKTHDR)
         copyhdr = 1;
     while (off > 0) {
-        if (m == NULL)
-            panic("m_copym2: null mbuf");
+        if (m == 0)
+            panic("m_copym0: null mbuf");
         if (off < m->m_len)
             break;
         off -= m->m_len;
        m = m->m_next;
     }
     np = &top;
-    top = NULL;
+    top = 0;
     while (len > 0) {
-        if (m == NULL) {
+        if (m == 0) {
             if (len != M_COPYALL)
-                panic("m_copym2: %d != M_COPYALL", len);
+                panic("m_copym0: m == 0 and not COPYALL");
             break;
         }
         MGET(n, wait, m->m_type);
         *np = n;
-        if (n == NULL)
+        if (n == 0)
             goto nospace;
         if (copyhdr) {
             M_DUP_PKTHDR(n, m);
@@ -418,30 +376,47 @@ m_copym2(m, off0, len, wait)
             copyhdr = 0;
         }
         n->m_len = min(len, m->m_len - off);
-        if ((m->m_flags & M_EXT) && (n->m_len > MHLEN)) {
-            /* This is a cheesy hack. */
-            MCLGET(n, wait);
-            if (n->m_flags & M_EXT)
-                bcopy(mtod(m, caddr_t) + off, mtod(n, caddr_t),
+        if (m->m_flags & M_EXT) {
+            if (!deep) {
+                n->m_data = m->m_data + off;
+                n->m_ext = m->m_ext;
+                MCLADDREFERENCE(m, n);
+            } else {
+                /*
+                 * we are unsure about the way m was allocated.
+                 * copy into multiple MCLBYTES cluster mbufs.
+                 */
+                MCLGET(n, wait);
+                n->m_len = 0;
+                n->m_len = M_TRAILINGSPACE(n);
+                n->m_len = min(n->m_len, len);
+                n->m_len = min(n->m_len, m->m_len - off);
+                memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off,
                     (unsigned)n->m_len);
-            else
-                goto nospace;
+            }
         } else
-            bcopy(mtod(m, caddr_t) + off, mtod(n, caddr_t),
+            memcpy(mtod(n, caddr_t), mtod(m, caddr_t)+off,
                 (unsigned)n->m_len);
         if (len != M_COPYALL)
             len -= n->m_len;
-        off = 0;
-        m = m->m_next;
+        off += n->m_len;
+#ifdef DIAGNOSTIC
+        if (off > m->m_len)
+            panic("m_copym0 overrun");
+#endif
+        if (off == m->m_len) {
+            m = m->m_next;
+            off = 0;
+        }
         np = &n->m_next;
     }
-    if (top == NULL)
+    if (top == 0)
         MCFail++;
     return (top);
 nospace:
     m_freem(top);
     MCFail++;
-    return (NULL);
+    return (0);
 }
 
 /*
@@ -875,12 +850,7 @@ m_split(m0, len0, wait)
 extpacket:
     if (m->m_flags & M_EXT) {
         n->m_flags |= M_EXT;
-        n->m_ext = m->m_ext;
-        if(!m->m_ext.ext_ref)
-            mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
-        else
-            (*(m->m_ext.ext_ref))(m);
-        m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
+        MCLADDREFERENCE(m, n);
         n->m_data = m->m_data + len;
     } else {
         bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
@@ -973,14 +943,14 @@ m_zero(m)
 {
     while (m) {
         if (m->m_flags & M_PKTHDR)
-            bzero((void *)m + sizeof(struct m_hdr) +
-                sizeof(struct pkthdr), MHLEN);
+            memset((void *)m + sizeof(struct m_hdr) +
+                sizeof(struct pkthdr), 0, MHLEN);
         else
-            bzero((void *)m + sizeof(struct m_hdr), MLEN);
+            memset((void *)m + sizeof(struct m_hdr), 0, MLEN);
         if ((m->m_flags & M_EXT) && (m->m_ext.ext_free == NULL) &&
-            !mclrefcnt[mtocl((m)->m_ext.ext_buf)])
-            bzero(m->m_ext.ext_buf, m->m_ext.ext_size);
+            !MCLISREFERENCED(m))
+            memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
         m = m->m_next;
     }
 }
@@ -1056,4 +1026,4 @@ _sk_mclget(struct mbuf *m, int how)
 {
     _MCLGET(m, how);
 }
-#endif /* SMALL_KERNEL */
\ No newline at end of file
+#endif /* SMALL_KERNEL */
diff --git a/sys/kern/uipc_mbuf2.c b/sys/kern/uipc_mbuf2.c
index d1b437b5944..3b335c320ef 100644
--- a/sys/kern/uipc_mbuf2.c
+++ b/sys/kern/uipc_mbuf2.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uipc_mbuf2.c,v 1.6 2001/03/28 20:03:00 angelos Exp $ */
+/* $OpenBSD: uipc_mbuf2.c,v 1.7 2001/05/17 18:41:45 provos Exp $ */
 /* $KAME: uipc_mbuf2.c,v 1.29 2001/02/14 13:42:10 itojun Exp $ */
 /* $NetBSD: uipc_mbuf.c,v 1.40 1999/04/01 00:23:25 thorpej Exp $ */
 
 /*
@@ -74,7 +74,7 @@
 
 #define M_SHAREDCLUSTER(m) \
     (((m)->m_flags & M_EXT) != 0 && \
-     ((m)->m_ext.ext_free || mclrefcnt[mtocl((m)->m_ext.ext_buf)] > 1))
+     ((m)->m_ext.ext_free || MCLISREFERENCED((m))))
 
 /* can't call it m_dup(), as freebsd[34] uses m_dup() with different arg */
 static struct mbuf *m_dup1 __P((struct mbuf *, int, int, int));
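Nothing changes for mbuf consumers: code keeps using the standard macros, which after this commit draw their memory from mbpool and mclpool instead of the old free lists. The macro side of the commit (and the MCLADDREFERENCE()/MCLISREFERENCED() macros used above) lives outside the 'sys/kern' diffstat shown here, presumably in sys/sys/mbuf.h. A minimal, illustrative caller, not part of this commit, would still look like:

```c
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>

/*
 * Hypothetical helper: allocate a packet-header mbuf and attach a
 * cluster to it, the usual way drivers do before and after this change.
 */
struct mbuf *
example_get_cluster(void)
{
    struct mbuf *m;

    MGETHDR(m, M_DONTWAIT, MT_DATA);    /* header mbuf (now from mbpool) */
    if (m == NULL)
        return (NULL);
    MCLGET(m, M_DONTWAIT);              /* cluster (now from mclpool) */
    if ((m->m_flags & M_EXT) == 0) {    /* no cluster could be attached */
        m_freem(m);
        return (NULL);
    }
    return (m);
}
```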