diff options
author | Artur Grabowski <art@cvs.openbsd.org> | 2002-01-23 00:39:49 +0000 |
---|---|---|
committer | Artur Grabowski <art@cvs.openbsd.org> | 2002-01-23 00:39:49 +0000 |
commit | 428a9d0c41ac6e6a1e3b34fe87ab6ef38f3764cb (patch) | |
tree | bca3c796baa50ba1a667d9fc1450766d842763b4 /sys/kern | |
parent | ad9498378fb50081ca58cbd745f9705b789f2da8 (diff) |
Pool deals fairly well with physical memory shortage, but it doesn't deal
well (not at all) with shortages of the vm_map where the pages are mapped
(usually kmem_map).
Try to deal with it:
- group all information about the backend allocator for a pool in a separate
struct. The pool will only have a pointer to that struct.
- change the pool_init API to reflect that.
- link all pools allocating from the same allocator on a linked list.
- Since an allocator is responsible for waiting for physical memory, it will
only fail (waitok) when it runs out of its backing vm_map; carefully
drain pools using the same allocator so that va space is freed.
(see comments in code for caveats and details).
- change pool_reclaim to return if it actually succeeded to free some
memory, use that information to make draining easier and more efficient.
- get rid of PR_URGENT, no one uses it.
Diffstat (limited to 'sys/kern')
-rw-r--r-- | sys/kern/kern_descrip.c | 6 | ||||
-rw-r--r-- | sys/kern/kern_event.c | 4 | ||||
-rw-r--r-- | sys/kern/kern_malloc_debug.c | 4 | ||||
-rw-r--r-- | sys/kern/kern_proc.c | 4 | ||||
-rw-r--r-- | sys/kern/kern_sig.c | 4 | ||||
-rw-r--r-- | sys/kern/subr_extent.c | 4 | ||||
-rw-r--r-- | sys/kern/subr_pool.c | 308 | ||||
-rw-r--r-- | sys/kern/sys_pipe.c | 5 | ||||
-rw-r--r-- | sys/kern/uipc_mbuf.c | 25 | ||||
-rw-r--r-- | sys/kern/uipc_socket.c | 5 | ||||
-rw-r--r-- | sys/kern/vfs_bio.c | 5 | ||||
-rw-r--r-- | sys/kern/vfs_cache.c | 4 | ||||
-rw-r--r-- | sys/kern/vfs_subr.c | 4 |
13 files changed, 240 insertions, 142 deletions
diff --git a/sys/kern/kern_descrip.c b/sys/kern/kern_descrip.c index ad9d97d6cb1..625f5d62d99 100644 --- a/sys/kern/kern_descrip.c +++ b/sys/kern/kern_descrip.c @@ -1,4 +1,4 @@ -/* $OpenBSD: kern_descrip.c,v 1.43 2001/11/15 13:07:53 niklas Exp $ */ +/* $OpenBSD: kern_descrip.c,v 1.44 2002/01/23 00:39:47 art Exp $ */ /* $NetBSD: kern_descrip.c,v 1.42 1996/03/30 22:24:38 christos Exp $ */ /* @@ -87,9 +87,9 @@ void filedesc_init() { pool_init(&file_pool, sizeof(struct file), 0, 0, 0, "filepl", - 0, pool_page_alloc_nointr, pool_page_free_nointr, M_PROC); + &pool_allocator_nointr); pool_init(&fdesc_pool, sizeof(struct filedesc0), 0, 0, 0, "fdescpl", - 0, pool_page_alloc_nointr, pool_page_free_nointr, M_FILEDESC); + &pool_allocator_nointr); LIST_INIT(&filehead); } diff --git a/sys/kern/kern_event.c b/sys/kern/kern_event.c index b5f15fcb6cb..2ea236be7aa 100644 --- a/sys/kern/kern_event.c +++ b/sys/kern/kern_event.c @@ -1,4 +1,4 @@ -/* $OpenBSD: kern_event.c,v 1.10 2001/10/26 12:03:27 art Exp $ */ +/* $OpenBSD: kern_event.c,v 1.11 2002/01/23 00:39:47 art Exp $ */ /*- * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org> @@ -890,7 +890,7 @@ void knote_init(void) { pool_init(&knote_pool, sizeof(struct knote), 0, 0, 0, "knotepl", - 0, pool_page_alloc_nointr, pool_page_free_nointr, M_KNOTE); + &pool_allocator_nointr); } struct knote * diff --git a/sys/kern/kern_malloc_debug.c b/sys/kern/kern_malloc_debug.c index 56a9319067d..582467d5de0 100644 --- a/sys/kern/kern_malloc_debug.c +++ b/sys/kern/kern_malloc_debug.c @@ -1,4 +1,4 @@ -/* $OpenBSD: kern_malloc_debug.c,v 1.15 2001/12/08 02:24:07 art Exp $ */ +/* $OpenBSD: kern_malloc_debug.c,v 1.16 2002/01/23 00:39:47 art Exp $ */ /* * Copyright (c) 1999, 2000 Artur Grabowski <art@openbsd.org> @@ -216,7 +216,7 @@ debug_malloc_init(void) debug_malloc_chunks_on_freelist = 0; pool_init(&debug_malloc_pool, sizeof(struct debug_malloc_entry), - 0, 0, 0, "mdbepl", 0, NULL, NULL, 0); + 0, 0, 0, "mdbepl", NULL); } /* diff --git 
a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c index e8fdc6b4084..3725838dfa0 100644 --- a/sys/kern/kern_proc.c +++ b/sys/kern/kern_proc.c @@ -1,4 +1,4 @@ -/* $OpenBSD: kern_proc.c,v 1.9 2002/01/16 20:50:17 miod Exp $ */ +/* $OpenBSD: kern_proc.c,v 1.10 2002/01/23 00:39:47 art Exp $ */ /* $NetBSD: kern_proc.c,v 1.14 1996/02/09 18:59:41 christos Exp $ */ /* @@ -110,7 +110,7 @@ procinit() uihashtbl = hashinit(maxproc / 16, M_PROC, M_WAITOK, &uihash); pool_init(&proc_pool, sizeof(struct proc), 0, 0, 0, "procpl", - 0, pool_page_alloc_nointr, pool_page_free_nointr, M_PROC); + &pool_allocator_nointr); } /* diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c index c0fd97f1021..6d1c0be45ef 100644 --- a/sys/kern/kern_sig.c +++ b/sys/kern/kern_sig.c @@ -1,4 +1,4 @@ -/* $OpenBSD: kern_sig.c,v 1.52 2002/01/19 19:00:14 millert Exp $ */ +/* $OpenBSD: kern_sig.c,v 1.53 2002/01/23 00:39:47 art Exp $ */ /* $NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $ */ /* @@ -152,7 +152,7 @@ void signal_init() { pool_init(&sigacts_pool, sizeof(struct sigacts), 0, 0, 0, "sigapl", - 0, pool_page_alloc_nointr, pool_page_free_nointr, M_SUBPROC); + &pool_allocator_nointr); } /* diff --git a/sys/kern/subr_extent.c b/sys/kern/subr_extent.c index 1b879c6755c..be00d755fc2 100644 --- a/sys/kern/subr_extent.c +++ b/sys/kern/subr_extent.c @@ -1,4 +1,4 @@ -/* $OpenBSD: subr_extent.c,v 1.18 2001/08/06 11:19:26 art Exp $ */ +/* $OpenBSD: subr_extent.c,v 1.19 2002/01/23 00:39:47 art Exp $ */ /* $NetBSD: subr_extent.c,v 1.7 1996/11/21 18:46:34 cgd Exp $ */ /*- @@ -114,7 +114,7 @@ extent_pool_init(void) if (!inited) { pool_init(&ex_region_pl, sizeof(struct extent_region), 0, 0, 0, - "extentpl", 0, 0, 0, 0); + "extentpl", NULL); inited = 1; } } diff --git a/sys/kern/subr_pool.c b/sys/kern/subr_pool.c index 18d1ca0a1a4..f8a42136896 100644 --- a/sys/kern/subr_pool.c +++ b/sys/kern/subr_pool.c @@ -1,4 +1,4 @@ -/* $OpenBSD: subr_pool.c,v 1.19 2002/01/10 18:56:03 art Exp $ */ +/* $OpenBSD: 
subr_pool.c,v 1.20 2002/01/23 00:39:47 art Exp $ */ /* $NetBSD: subr_pool.c,v 1.61 2001/09/26 07:14:56 chs Exp $ */ /*- @@ -109,7 +109,7 @@ struct pool_item { }; #define PR_HASH_INDEX(pp,addr) \ - (((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1)) + (((u_long)(addr) >> (pp)->pr_alloc->pa_pageshift) & (PR_HASHTABSIZE - 1)) #define POOL_NEEDS_CATCHUP(pp) \ ((pp)->pr_nitems < (pp)->pr_minitems) @@ -164,8 +164,9 @@ static void pool_cache_reclaim(struct pool_cache *); static int pool_catchup(struct pool *); static void pool_prime_page(struct pool *, caddr_t, struct pool_item_header *); -static void *pool_page_alloc(unsigned long, int, int); -static void pool_page_free(void *, unsigned long, int); + +void *pool_allocator_alloc(struct pool *, int); +void pool_allocator_free(struct pool *, void *); static void pool_print1(struct pool *, const char *, int (*)(const char *, ...)); @@ -339,7 +340,7 @@ pr_rmpage(struct pool *pp, struct pool_item_header *ph, if (pq) { TAILQ_INSERT_HEAD(pq, ph, ph_pagelist); } else { - (*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype); + pool_allocator_free(pp, ph->ph_page); if ((pp->pr_roflags & PR_PHINPAGE) == 0) { LIST_REMOVE(ph, ph_hashlist); s = splhigh(); @@ -372,10 +373,7 @@ pr_rmpage(struct pool *pp, struct pool_item_header *ph, */ void pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags, - const char *wchan, size_t pagesz, - void *(*alloc)(unsigned long, int, int), - void (*release)(void *, unsigned long, int), - int mtype) + const char *wchan, struct pool_allocator *palloc) { int off, slack, i; @@ -390,20 +388,19 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags, /* * Check arguments and construct default values. */ - if (!powerof2(pagesz)) - panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz); - - if (alloc == NULL && release == NULL) { - alloc = pool_page_alloc; - release = pool_page_free; - pagesz = PAGE_SIZE; /* Rounds to PAGE_SIZE anyhow. 
*/ - } else if ((alloc != NULL && release != NULL) == 0) { - /* If you specifiy one, must specify both. */ - panic("pool_init: must specify alloc and release together"); + if (palloc == NULL) + palloc = &pool_allocator_kmem; + if ((palloc->pa_flags & PA_INITIALIZED) == 0) { + if (palloc->pa_pagesz == 0) + palloc->pa_pagesz = PAGE_SIZE; + + TAILQ_INIT(&palloc->pa_list); + + simple_lock_init(&palloc->pa_slock); + palloc->pa_pagemask = ~(palloc->pa_pagesz - 1); + palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1; + palloc->pa_flags |= PA_INITIALIZED; } - - if (pagesz == 0) - pagesz = PAGE_SIZE; if (align == 0) align = ALIGN(1); @@ -412,9 +409,11 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags, size = sizeof(struct pool_item); size = ALIGN(size); - if (size > pagesz) +#ifdef DIAGNOSTIC + if (size > palloc->pa_pagesz) panic("pool_init: pool item size (%lu) too large", (u_long)size); +#endif /* * Initialize the pool structure. @@ -431,12 +430,7 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags, pp->pr_size = size; pp->pr_align = align; pp->pr_wchan = wchan; - pp->pr_mtype = mtype; - pp->pr_alloc = alloc; - pp->pr_free = release; - pp->pr_pagesz = pagesz; - pp->pr_pagemask = ~(pagesz - 1); - pp->pr_pageshift = ffs(pagesz) - 1; + pp->pr_alloc = palloc; pp->pr_nitems = 0; pp->pr_nout = 0; pp->pr_hardlimit = UINT_MAX; @@ -456,15 +450,15 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags, * with its header based on the page address. 
* We use 1/16 of the page size as the threshold (XXX: tune) */ - if (pp->pr_size < pagesz/16) { + if (pp->pr_size < palloc->pa_pagesz/16) { /* Use the end of the page for the page header */ pp->pr_roflags |= PR_PHINPAGE; pp->pr_phoffset = off = - pagesz - ALIGN(sizeof(struct pool_item_header)); + palloc->pa_pagesz - ALIGN(sizeof(struct pool_item_header)); } else { /* The page header will be taken from our page header pool */ pp->pr_phoffset = 0; - off = pagesz; + off = palloc->pa_pagesz; for (i = 0; i < PR_HASHTABSIZE; i++) { LIST_INIT(&pp->pr_hashtab[i]); } @@ -520,15 +514,20 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags, */ if (phpool.pr_size == 0) { pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, - 0, "phpool", 0, 0, 0, 0); + 0, "phpool", NULL); pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0, - 0, "pcgpool", 0, 0, 0, 0); + 0, "pcgpool", NULL); } /* Insert into the list of all pools. */ simple_lock(&pool_head_slock); TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist); simple_unlock(&pool_head_slock); + + /* Insert into the list of pools using this allocator. */ + simple_lock(&palloc->pa_slock); + TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list); + simple_unlock(&palloc->pa_slock); } /* @@ -540,6 +539,13 @@ pool_destroy(struct pool *pp) struct pool_item_header *ph; struct pool_cache *pc; + /* + * Locking order: pool_allocator -> pool + */ + simple_lock(&pp->pr_alloc->pa_slock); + TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list); + simple_unlock(&pp->pr_alloc->pa_slock); + /* Destroy all caches for this pool. 
*/ while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL) pool_cache_destroy(pc); @@ -661,9 +667,6 @@ pool_get(struct pool *pp, int flags) &pp->pr_hardlimit_ratecap)) log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning); - if (flags & PR_URGENT) - panic("pool_get: urgent"); - pp->pr_nfail++; pr_leave(pp); @@ -694,7 +697,7 @@ pool_get(struct pool *pp, int flags) */ pr_leave(pp); simple_unlock(&pp->pr_slock); - v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype); + v = pool_allocator_alloc(pp, flags); if (__predict_true(v != NULL)) ph = pool_alloc_item_header(pp, v, flags); simple_lock(&pp->pr_slock); @@ -702,7 +705,7 @@ pool_get(struct pool *pp, int flags) if (__predict_false(v == NULL || ph == NULL)) { if (v != NULL) - (*pp->pr_free)(v, pp->pr_pagesz, pp->pr_mtype); + pool_allocator_free(pp, v); /* * We were unable to allocate a page or item @@ -713,9 +716,6 @@ pool_get(struct pool *pp, int flags) if (pp->pr_curpage != NULL) goto startover; - if (flags & PR_URGENT) - panic("pool_get: urgent"); - if ((flags & PR_WAITOK) == 0) { pp->pr_nfail++; pr_leave(pp); @@ -726,15 +726,11 @@ pool_get(struct pool *pp, int flags) /* * Wait for items to be returned to this pool. * - * XXX: we actually want to wait just until - * the page allocator has memory again. Depending - * on this pool's usage, we might get stuck here - * for a long time. - * * XXX: maybe we should wake up once a second and * try again? 
*/ pp->pr_flags |= PR_WANTED; + /* PA_WANTED is already set on the allocator */ pr_leave(pp); ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock); pr_enter(pp, file, line); @@ -852,7 +848,7 @@ pool_do_put(struct pool *pp, void *v) LOCK_ASSERT(simple_lock_held(&pp->pr_slock)); - page = (caddr_t)((u_long)v & pp->pr_pagemask); + page = (caddr_t)((vaddr_t)v & pp->pr_alloc->pa_pagemask); #ifdef DIAGNOSTIC if (__predict_false(pp->pr_nout == 0)) { @@ -1020,7 +1016,7 @@ pool_prime(struct pool *pp, int n) while (newpages-- > 0) { simple_unlock(&pp->pr_slock); - cp = (*pp->pr_alloc)(pp->pr_pagesz, PR_NOWAIT, pp->pr_mtype); + cp = pool_allocator_alloc(pp, PR_NOWAIT); if (__predict_true(cp != NULL)) ph = pool_alloc_item_header(pp, cp, PR_NOWAIT); simple_lock(&pp->pr_slock); @@ -1028,7 +1024,7 @@ pool_prime(struct pool *pp, int n) if (__predict_false(cp == NULL || ph == NULL)) { error = ENOMEM; if (cp != NULL) - (*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype); + pool_allocator_free(pp, cp); break; } @@ -1058,8 +1054,10 @@ pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph) unsigned int ioff = pp->pr_itemoffset; int n; - if (((u_long)cp & (pp->pr_pagesz - 1)) != 0) +#ifdef DIAGNOSTIC + if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0) panic("pool_prime_page: %s: unaligned page", pp->pr_wchan); +#endif if ((pp->pr_roflags & PR_PHINPAGE) == 0) LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)], @@ -1154,13 +1152,13 @@ pool_catchup(struct pool *pp) * the pool descriptor? 
*/ simple_unlock(&pp->pr_slock); - cp = (*pp->pr_alloc)(pp->pr_pagesz, PR_NOWAIT, pp->pr_mtype); + cp = pool_allocator_alloc(pp, PR_NOWAIT); if (__predict_true(cp != NULL)) ph = pool_alloc_item_header(pp, cp, PR_NOWAIT); simple_lock(&pp->pr_slock); if (__predict_false(cp == NULL || ph == NULL)) { if (cp != NULL) - (*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype); + pool_allocator_free(pp, cp); error = ENOMEM; break; } @@ -1232,48 +1230,11 @@ pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap) } /* - * Default page allocator. - */ -static void * -pool_page_alloc(unsigned long sz, int flags, int mtype) -{ - boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE; - - return ((void *)uvm_km_alloc_poolpage(waitok)); -} - -static void -pool_page_free(void *v, unsigned long sz, int mtype) -{ - - uvm_km_free_poolpage((vaddr_t)v); -} - -/* - * Alternate pool page allocator for pools that know they will - * never be accessed in interrupt context. - */ -void * -pool_page_alloc_nointr(unsigned long sz, int flags, int mtype) -{ - boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE; - - return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object, - waitok)); -} - -void -pool_page_free_nointr(void *v, unsigned long sz, int mtype) -{ - - uvm_km_free_poolpage1(kernel_map, (vaddr_t)v); -} - - -/* * Release all complete pages that have not been used recently. + * + * Returns non-zero if any pages have been reclaimed. 
*/ -void +int #ifdef POOL_DIAGNOSTIC _pool_reclaim(struct pool *pp, const char *file, long line) #else @@ -1287,10 +1248,10 @@ pool_reclaim(struct pool *pp) int s; if (pp->pr_roflags & PR_STATIC) - return; + return 0; if (simple_lock_try(&pp->pr_slock) == 0) - return; + return 0; pr_enter(pp, file, line); TAILQ_INIT(&pq); @@ -1332,11 +1293,11 @@ pool_reclaim(struct pool *pp) pr_leave(pp); simple_unlock(&pp->pr_slock); if (TAILQ_EMPTY(&pq)) { - return; + return 0; } while ((ph = TAILQ_FIRST(&pq)) != NULL) { TAILQ_REMOVE(&pq, ph, ph_pagelist); - (*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype); + pool_allocator_free(pp, ph->ph_page); if (pp->pr_roflags & PR_PHINPAGE) { continue; } @@ -1345,6 +1306,8 @@ pool_reclaim(struct pool *pp) pool_put(&phpool, ph); splx(s); } + + return 1; } @@ -1374,7 +1337,6 @@ pool_drain(void *arg) splx(s); } - /* * Diagnostic helpers. */ @@ -1420,8 +1382,7 @@ pool_print1(struct pool *pp, const char *modif, int (*pr)(const char *, ...)) (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n", pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset, pp->pr_roflags); - (*pr)("\tpagesz %u, mtype %d\n", pp->pr_pagesz, pp->pr_mtype); - (*pr)("\talloc %p, release %p\n", pp->pr_alloc, pp->pr_free); + (*pr)("\talloc %p\n", pp->pr_alloc); (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n", pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages); (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n", @@ -1502,7 +1463,7 @@ pool_chk(struct pool *pp, const char *label) int n; caddr_t page; - page = (caddr_t)((u_long)ph & pp->pr_pagemask); + page = (caddr_t)((vaddr_t)ph & pp->pr_alloc->pa_pagemask); if (page != ph->ph_page && (pp->pr_roflags & PR_PHINPAGE) != 0) { if (label != NULL) @@ -1531,7 +1492,7 @@ pool_chk(struct pool *pp, const char *label) panic("pool"); } #endif - page = (caddr_t)((u_long)pi & pp->pr_pagemask); + page = (caddr_t)((vaddr_t)pi & pp->pr_alloc->pa_pagemask); if (page == ph->ph_page) 
continue; @@ -1899,3 +1860,146 @@ sysctl_dopool(int *name, u_int namelen, char *where, size_t *sizep) /* NOTREACHED */ return (0); /* XXX - Stupid gcc */ } + +/* + * Pool backend allocators. + * + * Each pool has a backend allocator that handles allocation, deallocation + * and any additional draining that might be needed. + * + * We provide two standard allocators. + * pool_alloc_kmem - the default used when no allocator is specified. + * pool_alloc_nointr - used for pools that will not be accessed in + * interrupt context. + */ +void *pool_page_alloc(struct pool *, int); +void pool_page_free(struct pool *, void *); +void *pool_page_alloc_nointr(struct pool *, int); +void pool_page_free_nointr(struct pool *, void *); + +struct pool_allocator pool_allocator_kmem = { + pool_page_alloc, pool_page_free, 0, +}; +struct pool_allocator pool_allocator_nointr = { + pool_page_alloc_nointr, pool_page_free_nointr, 0, +}; + +/* + * XXX - we have at least three different resources for the same allocation + * and each resource can be depleted. First we have the ready elements in + * the pool. Then we have the resource (typically a vm_map) for this + * allocator, then we have physical memory. Waiting for any of these can + * be unnecessary when any other is freed, but the kernel doesn't support + * sleeping on multiple addresses, so we have to fake. The caller sleeps on + * the pool (so that we can be awakened when an item is returned to the pool), + * but we set PA_WANT on the allocator. When a page is returned to + * the allocator and PA_WANT is set pool_allocator_free will wakeup all + * sleeping pools belonging to this allocator. (XXX - thundering herd). 
+ */ + +void * +pool_allocator_alloc(struct pool *org, int flags) +{ + struct pool_allocator *pa = org->pr_alloc; + struct pool *pp, *start; + int s, freed; + void *res; + + do { + if ((res = (*pa->pa_alloc)(org, flags)) != NULL) + return (res); + if ((flags & PR_WAITOK) == 0) + break; + + /* + * Drain all pools, except 'org', that use this allocator. + * We do this to reclaim va space. pa_alloc is responsible + * for waiting for physical memory. + * XXX - we risk looping forever if start if someone calls + * pool_destroy on 'start'. But there is no other way to + * have potentially sleeping pool_reclaim, non-sleeping + * locks on pool_allocator and some stirring of drained + * pools in the allocator. + */ + freed = 0; + + s = splvm(); + simple_lock(&pa->pa_slock); + pp = start = TAILQ_FIRST(&pa->pa_list); + do { + TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list); + TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list); + if (pp == org) + continue; + simple_unlock(&pa->pa_list); + freed = pool_reclaim(pp) + simple_lock(&pa->pa_list); + } while ((pp = TAILQ_FIRST(&pa->pa_list)) != start && !freed); + + if (!freed) { + /* + * We set PA_WANT here, the caller will most likely + * sleep waiting for pages (if not, this won't hurt + * that much) and there is no way to set this in the + * caller without violating locking order. 
+ */ + pa->pa_flags |= PA_WANT; + } + simple_unlock(&pa->pa_slock); + splx(s); + } while (freed); + return (NULL); +} + +void +pool_allocator_free(struct pool *pp, void *v) +{ + struct pool_allocator *pa = pp->pr_alloc; + + (*pa->pa_free)(pp, v); + + simple_lock(&pa->pa_slock); + if ((pa->pa_flags & PA_WANT) == 0) { + simple_unlock(&pa->pa_slock); + return; + } + + TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) { + simple_lock(&pp->pr_slock); + if ((pp->pr_flags & PR_WANTED) != 0) { + pp->pr_flags &= ~PR_WANTED; + wakeup(pp); + } + } + pa->pa_flags &= ~PA_WANT; + simple_unlock(&pa->pa_slock); +} + +void * +pool_page_alloc(struct pool *pp, int flags) +{ + boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE; + + return ((void *)uvm_km_alloc_poolpage(waitok)); +} + +void +pool_page_free(struct pool *pp, void *v) +{ + uvm_km_free_poolpage((vaddr_t)v); +} + +void * +pool_page_alloc_nointr(struct pool *pp, int flags) +{ + boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE; + + return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object, + waitok)); +} + +void +pool_page_free_nointr(struct pool *pp, void *v) +{ + uvm_km_free_poolpage1(kernel_map, (vaddr_t)v); +} diff --git a/sys/kern/sys_pipe.c b/sys/kern/sys_pipe.c index 39bc68dfc84..4543cbe9678 100644 --- a/sys/kern/sys_pipe.c +++ b/sys/kern/sys_pipe.c @@ -1,4 +1,4 @@ -/* $OpenBSD: sys_pipe.c,v 1.40 2001/11/06 19:53:20 miod Exp $ */ +/* $OpenBSD: sys_pipe.c,v 1.41 2002/01/23 00:39:47 art Exp $ */ /* * Copyright (c) 1996 John S. 
Dyson @@ -846,7 +846,6 @@ void pipe_init() { pool_init(&pipe_pool, sizeof(struct pipe), 0, 0, 0, "pipepl", - 0, pool_page_alloc_nointr, pool_page_free_nointr, - M_PIPE); + &pool_allocator_nointr); } diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c index 1e12eb88086..e28e36058d4 100644 --- a/sys/kern/uipc_mbuf.c +++ b/sys/kern/uipc_mbuf.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uipc_mbuf.c,v 1.45 2002/01/16 20:50:17 miod Exp $ */ +/* $OpenBSD: uipc_mbuf.c,v 1.46 2002/01/23 00:39:47 art Exp $ */ /* $NetBSD: uipc_mbuf.c,v 1.15.4.1 1996/06/13 17:11:44 cgd Exp $ */ /* @@ -98,22 +98,25 @@ struct pool mclpool; /* mbuf cluster pool */ extern struct vm_map *mb_map; int needqueuedrain; -void *mclpool_alloc __P((unsigned long, int, int)); -void mclpool_release __P((void *, unsigned long, int)); +void *mclpool_alloc __P((struct pool *, int)); +void mclpool_release __P((struct pool *, void *)); struct mbuf *m_copym0 __P((struct mbuf *, int, int, int, int)); const char *mclpool_warnmsg = "WARNING: mclpool limit reached; increase NMBCLUSTERS"; +struct pool_allocator mclpool_allocator = { + mclpool_alloc, mclpool_release, 0, +}; + /* * Initialize the mbuf allcator. */ void mbinit() { - pool_init(&mbpool, MSIZE, 0, 0, 0, "mbpl", 0, NULL, NULL, 0); - pool_init(&mclpool, MCLBYTES, 0, 0, 0, "mclpl", 0, mclpool_alloc, - mclpool_release, 0); + pool_init(&mbpool, MSIZE, 0, 0, 0, "mbpl", NULL); + pool_init(&mclpool, MCLBYTES, 0, 0, 0, "mclpl", &mclpool_allocator); /* * Set the hard limit on the mclpool to the number of @@ -134,10 +137,7 @@ mbinit() void * -mclpool_alloc(sz, flags, mtype) - unsigned long sz; - int flags; - int mtype; +mclpool_alloc(struct pool *pp, int flags) { boolean_t waitok = (flags & PR_WAITOK) ? 
TRUE : FALSE; @@ -146,10 +146,7 @@ mclpool_alloc(sz, flags, mtype) } void -mclpool_release(v, sz, mtype) - void *v; - unsigned long sz; - int mtype; +mclpool_release(struct pool *pp, void *v) { uvm_km_free_poolpage1(mb_map, (vaddr_t)v); } diff --git a/sys/kern/uipc_socket.c b/sys/kern/uipc_socket.c index 23840d6e98b..e667e230798 100644 --- a/sys/kern/uipc_socket.c +++ b/sys/kern/uipc_socket.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uipc_socket.c,v 1.39 2001/11/28 17:18:00 ericj Exp $ */ +/* $OpenBSD: uipc_socket.c,v 1.40 2002/01/23 00:39:48 art Exp $ */ /* $NetBSD: uipc_socket.c,v 1.21 1996/02/04 02:17:52 christos Exp $ */ /* @@ -79,8 +79,7 @@ void soinit(void) { - pool_init(&socket_pool, sizeof(struct socket), 0, 0, 0, - "sockpl", 0, NULL, NULL, M_SOCKET); + pool_init(&socket_pool, sizeof(struct socket), 0, 0, 0, "sockpl", NULL); } /* diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c index 22ef4dfb385..50ffa1f3880 100644 --- a/sys/kern/vfs_bio.c +++ b/sys/kern/vfs_bio.c @@ -1,4 +1,4 @@ -/* $OpenBSD: vfs_bio.c,v 1.55 2001/12/19 08:58:06 art Exp $ */ +/* $OpenBSD: vfs_bio.c,v 1.56 2002/01/23 00:39:48 art Exp $ */ /* $NetBSD: vfs_bio.c,v 1.44 1996/06/11 11:15:36 pk Exp $ */ /*- @@ -188,8 +188,7 @@ bufinit() register int i; int base, residual; - pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", 0, - NULL, NULL, M_DEVBUF); + pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", NULL); for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) TAILQ_INIT(dp); bufhashtbl = hashinit(nbuf, M_CACHE, M_WAITOK, &bufhash); diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c index 6bf013dcb07..4d367cf3f3c 100644 --- a/sys/kern/vfs_cache.c +++ b/sys/kern/vfs_cache.c @@ -1,4 +1,4 @@ -/* $OpenBSD: vfs_cache.c,v 1.5 2001/05/02 05:55:13 fgsch Exp $ */ +/* $OpenBSD: vfs_cache.c,v 1.6 2002/01/23 00:39:48 art Exp $ */ /* $NetBSD: vfs_cache.c,v 1.13 1996/02/04 02:18:09 christos Exp $ */ /* @@ -231,7 +231,7 @@ nchinit() TAILQ_INIT(&nclruhead); nchashtbl = hashinit(desiredvnodes, 
M_CACHE, M_WAITOK, &nchash); pool_init(&nch_pool, sizeof(struct namecache), 0, 0, 0, "nchpl", - 0, pool_page_alloc_nointr, pool_page_free_nointr, M_CACHE); + &pool_allocator_nointr); } /* diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c index aaff1342b67..da612f77f05 100644 --- a/sys/kern/vfs_subr.c +++ b/sys/kern/vfs_subr.c @@ -1,4 +1,4 @@ -/* $OpenBSD: vfs_subr.c,v 1.80 2001/12/19 08:58:06 art Exp $ */ +/* $OpenBSD: vfs_subr.c,v 1.81 2002/01/23 00:39:48 art Exp $ */ /* $NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $ */ /* @@ -131,7 +131,7 @@ vntblinit() { pool_init(&vnode_pool, sizeof(struct vnode), 0, 0, 0, "vnodes", - 0, pool_page_alloc_nointr, pool_page_free_nointr, M_VNODE); + &pool_allocator_nointr); simple_lock_init(&mntvnode_slock); simple_lock_init(&mntid_slock); simple_lock_init(&spechash_slock); |