summaryrefslogtreecommitdiff
path: root/sys
diff options
context:
space:
mode:
Diffstat (limited to 'sys')
-rw-r--r--sys/arch/alpha/alpha/pmap.c39
-rw-r--r--sys/arch/i386/i386/pmap.c4
-rw-r--r--sys/arch/m68k/m68k/pmap_motorola.c4
-rw-r--r--sys/arch/macppc/macppc/machdep.c5
-rw-r--r--sys/arch/mvme88k/mvme88k/pmap.c9
-rw-r--r--sys/arch/powerpc/powerpc/pmap.c9
-rw-r--r--sys/arch/sparc/sparc/pmap.c40
-rw-r--r--sys/arch/sparc64/sparc64/pmap.c5
-rw-r--r--sys/crypto/crypto.c6
-rw-r--r--sys/dev/ic/ncr53c9x.c4
-rw-r--r--sys/dev/ic/wdc.c4
-rw-r--r--sys/dev/raidframe/rf_openbsdkintf.c5
-rw-r--r--sys/kern/kern_descrip.c6
-rw-r--r--sys/kern/kern_event.c4
-rw-r--r--sys/kern/kern_malloc_debug.c4
-rw-r--r--sys/kern/kern_proc.c4
-rw-r--r--sys/kern/kern_sig.c4
-rw-r--r--sys/kern/subr_extent.c4
-rw-r--r--sys/kern/subr_pool.c308
-rw-r--r--sys/kern/sys_pipe.c5
-rw-r--r--sys/kern/uipc_mbuf.c25
-rw-r--r--sys/kern/uipc_socket.c5
-rw-r--r--sys/kern/vfs_bio.c5
-rw-r--r--sys/kern/vfs_cache.c4
-rw-r--r--sys/kern/vfs_subr.c4
-rw-r--r--sys/net/pf.c17
-rw-r--r--sys/net/pf_norm.c6
-rw-r--r--sys/net/pfkeyv2.c5
-rw-r--r--sys/net/route.c4
-rw-r--r--sys/netinet/ip_input.c4
-rw-r--r--sys/netinet/ip_spd.c8
-rw-r--r--sys/netinet/tcp_subr.c6
-rw-r--r--sys/nfs/nfs_node.c4
-rw-r--r--sys/scsi/scsi_base.c4
-rw-r--r--sys/sys/pool.h48
-rw-r--r--sys/ufs/ffs/ffs_vfsops.c4
-rw-r--r--sys/uvm/uvm_amap.c5
-rw-r--r--sys/uvm/uvm_aobj.c7
-rw-r--r--sys/uvm/uvm_map.c8
-rw-r--r--sys/uvm/uvm_swap.c8
40 files changed, 374 insertions, 280 deletions
diff --git a/sys/arch/alpha/alpha/pmap.c b/sys/arch/alpha/alpha/pmap.c
index 22fb769b976..043179c55f9 100644
--- a/sys/arch/alpha/alpha/pmap.c
+++ b/sys/arch/alpha/alpha/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.32 2001/12/19 08:58:05 art Exp $ */
+/* $OpenBSD: pmap.c,v 1.33 2002/01/23 00:39:46 art Exp $ */
/* $NetBSD: pmap.c,v 1.154 2000/12/07 22:18:55 thorpej Exp $ */
/*-
@@ -512,8 +512,12 @@ void pmap_l3pt_delref(pmap_t, vaddr_t, pt_entry_t *, long,
void pmap_l2pt_delref(pmap_t, pt_entry_t *, pt_entry_t *, long);
void pmap_l1pt_delref(pmap_t, pt_entry_t *, long);
-void *pmap_l1pt_alloc(unsigned long, int, int);
-void pmap_l1pt_free(void *, unsigned long, int);
+void *pmap_l1pt_alloc(struct pool *, int);
+void pmap_l1pt_free(struct pool *, void *);
+
+struct pool_allocator pmap_l1pt_allocator = {
+ pmap_l1pt_alloc, pmap_l1pt_free, 0,
+};
int pmap_l1pt_ctor(void *, void *, int);
@@ -525,8 +529,11 @@ void pmap_pv_remove(pmap_t, paddr_t, vaddr_t, boolean_t,
struct pv_entry **);
struct pv_entry *pmap_pv_alloc(void);
void pmap_pv_free(struct pv_entry *);
-void *pmap_pv_page_alloc(u_long, int, int);
-void pmap_pv_page_free(void *, u_long, int);
+void *pmap_pv_page_alloc(struct pool *, int);
+void pmap_pv_page_free(struct pool *, void *);
+struct pool_allocator pmap_pv_allocator = {
+ pmap_pv_page_alloc, pmap_pv_page_free, 0,
+};
#ifdef DEBUG
void pmap_pv_dump(paddr_t);
#endif
@@ -947,19 +954,17 @@ pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids)
*/
pmap_ncpuids = ncpuids;
pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
- 0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
+ &pool_allocator_nointr);
pool_init(&pmap_l1pt_pool, PAGE_SIZE, 0, 0, 0, "l1ptpl",
- 0, pmap_l1pt_alloc, pmap_l1pt_free, M_VMPMAP);
+ &pmap_l1pt_allocator);
pool_cache_init(&pmap_l1pt_cache, &pmap_l1pt_pool, pmap_l1pt_ctor,
NULL, NULL);
pool_init(&pmap_asn_pool, pmap_ncpuids * sizeof(u_int), 0, 0, 0,
- "pmasnpl",
- 0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
+ "pmasnpl", &pool_allocator_nointr);
pool_init(&pmap_asngen_pool, pmap_ncpuids * sizeof(u_long), 0, 0, 0,
- "pmasngenpl",
- 0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
+ "pmasngenpl", &pool_allocator_nointr);
pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
- 0, pmap_pv_page_alloc, pmap_pv_page_free, M_VMPMAP);
+ &pmap_pv_allocator);
TAILQ_INIT(&pmap_all_pmaps);
@@ -1003,7 +1008,7 @@ pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids)
*/
pool_init(&pmap_tlb_shootdown_job_pool,
sizeof(struct pmap_tlb_shootdown_job), 0, 0, 0, "pmaptlbpl",
- 0, NULL, NULL, M_VMPMAP);
+ NULL);
for (i = 0; i < ALPHA_MAXPROCS; i++) {
TAILQ_INIT(&pmap_tlb_shootdown_q[i].pq_head);
simple_lock_init(&pmap_tlb_shootdown_q[i].pq_slock);
@@ -3203,7 +3208,7 @@ pmap_pv_free(struct pv_entry *pv)
* Allocate a page for the pv_entry pool.
*/
void *
-pmap_pv_page_alloc(u_long size, int flags, int mtype)
+pmap_pv_page_alloc(struct pool *pp, int flags)
{
paddr_t pg;
@@ -3218,7 +3223,7 @@ pmap_pv_page_alloc(u_long size, int flags, int mtype)
* Free a pv_entry pool page.
*/
void
-pmap_pv_page_free(void *v, u_long size, int mtype)
+pmap_pv_page_free(struct pool *pp, void *v)
{
pmap_physpage_free(ALPHA_K0SEG_TO_PHYS((vaddr_t)v));
@@ -3600,7 +3605,7 @@ pmap_l1pt_ctor(void *arg, void *object, int flags)
 * Page allocator for L1 PT pages.
*/
void *
-pmap_l1pt_alloc(unsigned long sz, int flags, int mtype)
+pmap_l1pt_alloc(struct pool *pp, int flags)
{
paddr_t ptpa;
@@ -3627,7 +3632,7 @@ pmap_l1pt_alloc(unsigned long sz, int flags, int mtype)
* Page freer for L1 PT pages.
*/
void
-pmap_l1pt_free(void *v, unsigned long sz, int mtype)
+pmap_l1pt_free(struct pool *pp, void *v)
{
pmap_physpage_free(ALPHA_K0SEG_TO_PHYS((vaddr_t) v));
diff --git a/sys/arch/i386/i386/pmap.c b/sys/arch/i386/i386/pmap.c
index d1b677fc309..d9c5df4b464 100644
--- a/sys/arch/i386/i386/pmap.c
+++ b/sys/arch/i386/i386/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.57 2001/12/19 08:58:05 art Exp $ */
+/* $OpenBSD: pmap.c,v 1.58 2002/01/23 00:39:47 art Exp $ */
/* $NetBSD: pmap.c,v 1.91 2000/06/02 17:46:37 thorpej Exp $ */
/*
@@ -837,7 +837,7 @@ pmap_bootstrap(kva_start)
*/
pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
- 0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
+ &pool_allocator_nointr);
#ifdef __NetBSD__
/*
diff --git a/sys/arch/m68k/m68k/pmap_motorola.c b/sys/arch/m68k/m68k/pmap_motorola.c
index aeff6d43056..02c67f39482 100644
--- a/sys/arch/m68k/m68k/pmap_motorola.c
+++ b/sys/arch/m68k/m68k/pmap_motorola.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap_motorola.c,v 1.15 2002/01/10 21:08:43 miod Exp $ */
+/* $OpenBSD: pmap_motorola.c,v 1.16 2002/01/23 00:39:47 art Exp $ */
/*
* Copyright (c) 1999 The NetBSD Foundation, Inc.
@@ -574,7 +574,7 @@ pmap_init()
* Initialize the pmap pools.
*/
pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
- 0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
+ &pool_allocator_nointr);
/*
* Now it is safe to enable pv_table recording.
diff --git a/sys/arch/macppc/macppc/machdep.c b/sys/arch/macppc/macppc/machdep.c
index 2602917e5d2..042176b0df8 100644
--- a/sys/arch/macppc/macppc/machdep.c
+++ b/sys/arch/macppc/macppc/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.21 2002/01/16 20:50:16 miod Exp $ */
+/* $OpenBSD: machdep.c,v 1.22 2002/01/23 00:39:47 art Exp $ */
/* $NetBSD: machdep.c,v 1.4 1996/10/16 19:33:11 ws Exp $ */
/*
@@ -481,8 +481,7 @@ where = 3;
(void)power4e_get_eth_addr();
#ifdef PPC_VECTOR_SUPPORTED
- pool_init(&ppc_vecpl, sizeof(struct vreg), 16, 0, 0, "ppcvec",
- 0, NULL, NULL, M_SUBPROC);
+ pool_init(&ppc_vecpl, sizeof(struct vreg), 16, 0, 0, "ppcvec", NULL);
#endif /* PPC_VECTOR_SUPPORTED */
}
diff --git a/sys/arch/mvme88k/mvme88k/pmap.c b/sys/arch/mvme88k/mvme88k/pmap.c
index f8e0e907526..ac2638f72cb 100644
--- a/sys/arch/mvme88k/mvme88k/pmap.c
+++ b/sys/arch/mvme88k/mvme88k/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.61 2001/12/27 22:33:46 miod Exp $ */
+/* $OpenBSD: pmap.c,v 1.62 2002/01/23 00:39:47 art Exp $ */
/*
* Copyright (c) 2001 Miodrag Vallat
* Copyright (c) 1998-2001 Steve Murphree, Jr.
@@ -1303,10 +1303,9 @@ pmap_init(void)
attr += npages;
}
- pool_init(&pmappool, sizeof(struct pmap), 0, 0, 0, "pmappl", 0,
- pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
- pool_init(&pvpool, sizeof(pv_entry_t), 0, 0, 0, "pvpl", 0,
- NULL, NULL, M_VMPVENT);
+ pool_init(&pmappool, sizeof(struct pmap), 0, 0, 0, "pmappl",
+ &pool_allocator_nointr);
+ pool_init(&pvpool, sizeof(pv_entry_t), 0, 0, 0, "pvpl", NULL);
pmap_initialized = TRUE;
} /* pmap_init() */
diff --git a/sys/arch/powerpc/powerpc/pmap.c b/sys/arch/powerpc/powerpc/pmap.c
index 3cf66b09549..bc4e8e14e6b 100644
--- a/sys/arch/powerpc/powerpc/pmap.c
+++ b/sys/arch/powerpc/powerpc/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.55 2002/01/13 05:27:40 drahn Exp $ */
+/* $OpenBSD: pmap.c,v 1.56 2002/01/23 00:39:47 art Exp $ */
/* $NetBSD: pmap.c,v 1.1 1996/09/30 16:34:52 ws Exp $ */
/*
@@ -729,13 +729,12 @@ pmap_init()
for (i = npgs; --i >= 0;)
pv++->pv_idx = -1;
#ifdef USE_PMAP_VP
- pool_init(&pmap_vp_pool, PAGE_SIZE, 0, 0, 0, "ppvl",
- 0, NULL, NULL, M_VMPMAP);
+ pool_init(&pmap_vp_pool, PAGE_SIZE, 0, 0, 0, "ppvl", NULL);
#endif
pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
- 0, NULL, NULL, M_VMPMAP);
+ NULL);
pool_init(&pmap_po_pool, sizeof(struct pte_ovfl), 0, 0, 0, "popl",
- 0, NULL, NULL, M_VMPMAP);
+ NULL);
pmap_attrib = (char *)pv;
bzero(pv, npgs);
pv = (struct pv_entry *)addr;
diff --git a/sys/arch/sparc/sparc/pmap.c b/sys/arch/sparc/sparc/pmap.c
index f0ef4375d49..23cd2348e08 100644
--- a/sys/arch/sparc/sparc/pmap.c
+++ b/sys/arch/sparc/sparc/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.117 2001/12/19 08:58:05 art Exp $ */
+/* $OpenBSD: pmap.c,v 1.118 2002/01/23 00:39:47 art Exp $ */
/* $NetBSD: pmap.c,v 1.118 1998/05/19 19:00:18 thorpej Exp $ */
/*
@@ -214,8 +214,12 @@ pvfree(pv)
*/
static struct pool L1_pool;
static struct pool L23_pool;
-void *pgt_page_alloc __P((unsigned long, int, int));
-void pgt_page_free __P((void *, unsigned long, int));
+void *pgt_page_alloc(struct pool *, int);
+void pgt_page_free(struct pool *, void *);
+
+struct pool_allocator pgt_page_allocator = {
+	pgt_page_alloc, pgt_page_free, 0,
+};
void pcache_flush __P((caddr_t, caddr_t, int));
void
@@ -233,30 +237,23 @@ pcache_flush(va, pa, n)
* Page table pool back-end.
*/
void *
-pgt_page_alloc(sz, flags, mtype)
- unsigned long sz;
- int flags;
- int mtype;
+pgt_page_alloc(struct pool *pp, int flags)
{
caddr_t p;
p = (caddr_t)uvm_km_kmemalloc(kernel_map, uvm.kernel_object,
- (vsize_t)sz, UVM_KMF_NOWAIT);
-
+ PAGE_SIZE, UVM_KMF_NOWAIT);
if (p != NULL && ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0)) {
- pcache_flush(p, (caddr_t)VA2PA(p), sz);
- kvm_uncache(p, atop(sz));
+ pcache_flush(p, (caddr_t)VA2PA(p), PAGE_SIZE);
+ kvm_uncache(p, atop(PAGE_SIZE));
}
return (p);
}
void
-pgt_page_free(v, sz, mtype)
- void *v;
- unsigned long sz;
- int mtype;
+pgt_page_free(struct pool *pp, void *v)
{
- uvm_km_free(kernel_map, (vaddr_t)v, sz);
+ uvm_km_free(kernel_map, (vaddr_t)v, PAGE_SIZE);
}
#endif /* SUN4M */
@@ -3359,8 +3356,7 @@ pmap_init()
sizeof(struct pvlist);
}
- pool_init(&pvpool, sizeof(struct pvlist), 0, 0, 0, "pvpl", 0,
- NULL, NULL, 0);
+ pool_init(&pvpool, sizeof(struct pvlist), 0, 0, 0, "pvpl", NULL);
/*
* We can set it here since it's only used in pmap_enter to see
@@ -3378,12 +3374,12 @@ pmap_init()
int n;
n = SRMMU_L1SIZE * sizeof(int);
- pool_init(&L1_pool, n, n, 0, 0, "L1 pagetable", 0,
- pgt_page_alloc, pgt_page_free, 0);
+ pool_init(&L1_pool, n, n, 0, 0, "L1 pagetable",
+ &pgt_page_allocator);
n = SRMMU_L2SIZE * sizeof(int);
- pool_init(&L23_pool, n, n, 0, 0, "L2/L3 pagetable", 0,
- pgt_page_alloc, pgt_page_free, 0);
+ pool_init(&L23_pool, n, n, 0, 0, "L2/L3 pagetable",
+ &pgt_page_allocator);
}
#endif
}
diff --git a/sys/arch/sparc64/sparc64/pmap.c b/sys/arch/sparc64/sparc64/pmap.c
index 460480c3d83..3d5635483de 100644
--- a/sys/arch/sparc64/sparc64/pmap.c
+++ b/sys/arch/sparc64/sparc64/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.8 2001/12/04 23:22:42 art Exp $ */
+/* $OpenBSD: pmap.c,v 1.9 2002/01/23 00:39:47 art Exp $ */
/* $NetBSD: pmap.c,v 1.107 2001/08/31 16:47:41 eeh Exp $ */
#undef NO_VCACHE /* Don't forget the locked TLB in dostart */
#define HWREF
@@ -1526,8 +1526,7 @@ pmap_init()
}
/* Setup a pool for additional pvlist structures */
- pool_init(&pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pv_entry", 0,
- NULL, NULL, 0);
+ pool_init(&pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pv_entry", NULL);
vm_first_phys = avail_start;
vm_num_phys = avail_end - avail_start;
diff --git a/sys/crypto/crypto.c b/sys/crypto/crypto.c
index a9192d0dcce..caf09bbde2f 100644
--- a/sys/crypto/crypto.c
+++ b/sys/crypto/crypto.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: crypto.c,v 1.30 2001/11/13 18:54:32 deraadt Exp $ */
+/* $OpenBSD: crypto.c,v 1.31 2002/01/23 00:39:47 art Exp $ */
/*
* The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
*
@@ -412,9 +412,9 @@ crypto_getreq(int num)
if (crypto_pool_initialized == 0) {
pool_init(&cryptop_pool, sizeof(struct cryptop), 0, 0,
- PR_FREEHEADER, "cryptop", 0, NULL, NULL, M_CRYPTO_OPS);
+ PR_FREEHEADER, "cryptop", NULL);
pool_init(&cryptodesc_pool, sizeof(struct cryptodesc), 0, 0,
- PR_FREEHEADER, "cryptodesc", 0, NULL, NULL, M_CRYPTO_OPS);
+ PR_FREEHEADER, "cryptodesc", NULL);
crypto_pool_initialized = 1;
}
diff --git a/sys/dev/ic/ncr53c9x.c b/sys/dev/ic/ncr53c9x.c
index 71801c15489..7f468eee780 100644
--- a/sys/dev/ic/ncr53c9x.c
+++ b/sys/dev/ic/ncr53c9x.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ncr53c9x.c,v 1.11 2001/12/17 23:13:41 nordin Exp $ */
+/* $OpenBSD: ncr53c9x.c,v 1.12 2002/01/23 00:39:47 art Exp $ */
/* $NetBSD: ncr53c9x.c,v 1.56 2000/11/30 14:41:46 thorpej Exp $ */
/*
@@ -376,7 +376,7 @@ ncr53c9x_init(sc, doreset)
if (!ecb_pool_initialized) {
/* All instances share this pool */
pool_init(&ecb_pool, sizeof(struct ncr53c9x_ecb), 0, 0, 0,
- "ncr53c9x_ecb", 0, NULL, NULL, 0);
+ "ncr53c9x_ecb", NULL);
ecb_pool_initialized = 1;
}
diff --git a/sys/dev/ic/wdc.c b/sys/dev/ic/wdc.c
index a26d906990d..3a37dbe17b9 100644
--- a/sys/dev/ic/wdc.c
+++ b/sys/dev/ic/wdc.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: wdc.c,v 1.45 2002/01/12 05:36:09 jason Exp $ */
+/* $OpenBSD: wdc.c,v 1.46 2002/01/23 00:39:47 art Exp $ */
/* $NetBSD: wdc.c,v 1.68 1999/06/23 19:00:17 bouyer Exp $ */
@@ -659,7 +659,7 @@ wdcattach(chp)
if (inited == 0) {
/* Initialize the wdc_xfer pool. */
pool_init(&wdc_xfer_pool, sizeof(struct wdc_xfer), 0,
- 0, 0, "wdcspl", 0, NULL, NULL, M_DEVBUF);
+ 0, 0, "wdcspl", NULL);
inited++;
}
TAILQ_INIT(&chp->ch_queue->sc_xfer);
diff --git a/sys/dev/raidframe/rf_openbsdkintf.c b/sys/dev/raidframe/rf_openbsdkintf.c
index b1f536952b0..0c6b5861bbc 100644
--- a/sys/dev/raidframe/rf_openbsdkintf.c
+++ b/sys/dev/raidframe/rf_openbsdkintf.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rf_openbsdkintf.c,v 1.12 2001/12/29 21:51:18 tdeval Exp $ */
+/* $OpenBSD: rf_openbsdkintf.c,v 1.13 2002/01/23 00:39:47 art Exp $ */
/* $NetBSD: rf_netbsdkintf.c,v 1.109 2001/07/27 03:30:07 oster Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@@ -1641,8 +1641,7 @@ raidinit(raidPtr)
rs = &raid_softc[unit];
pool_init(&rs->sc_cbufpool, sizeof(struct raidbuf), 0,
- 0, 0, "raidpl", 0, NULL, NULL, M_RAIDFRAME);
-
+ 0, 0, "raidpl", NULL);
/* XXX should check return code first... */
rs->sc_flags |= RAIDF_INITED;
diff --git a/sys/kern/kern_descrip.c b/sys/kern/kern_descrip.c
index ad9d97d6cb1..625f5d62d99 100644
--- a/sys/kern/kern_descrip.c
+++ b/sys/kern/kern_descrip.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_descrip.c,v 1.43 2001/11/15 13:07:53 niklas Exp $ */
+/* $OpenBSD: kern_descrip.c,v 1.44 2002/01/23 00:39:47 art Exp $ */
/* $NetBSD: kern_descrip.c,v 1.42 1996/03/30 22:24:38 christos Exp $ */
/*
@@ -87,9 +87,9 @@ void
filedesc_init()
{
pool_init(&file_pool, sizeof(struct file), 0, 0, 0, "filepl",
- 0, pool_page_alloc_nointr, pool_page_free_nointr, M_PROC);
+ &pool_allocator_nointr);
pool_init(&fdesc_pool, sizeof(struct filedesc0), 0, 0, 0, "fdescpl",
- 0, pool_page_alloc_nointr, pool_page_free_nointr, M_FILEDESC);
+ &pool_allocator_nointr);
LIST_INIT(&filehead);
}
diff --git a/sys/kern/kern_event.c b/sys/kern/kern_event.c
index b5f15fcb6cb..2ea236be7aa 100644
--- a/sys/kern/kern_event.c
+++ b/sys/kern/kern_event.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_event.c,v 1.10 2001/10/26 12:03:27 art Exp $ */
+/* $OpenBSD: kern_event.c,v 1.11 2002/01/23 00:39:47 art Exp $ */
/*-
* Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
@@ -890,7 +890,7 @@ void
knote_init(void)
{
pool_init(&knote_pool, sizeof(struct knote), 0, 0, 0, "knotepl",
- 0, pool_page_alloc_nointr, pool_page_free_nointr, M_KNOTE);
+ &pool_allocator_nointr);
}
struct knote *
diff --git a/sys/kern/kern_malloc_debug.c b/sys/kern/kern_malloc_debug.c
index 56a9319067d..582467d5de0 100644
--- a/sys/kern/kern_malloc_debug.c
+++ b/sys/kern/kern_malloc_debug.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_malloc_debug.c,v 1.15 2001/12/08 02:24:07 art Exp $ */
+/* $OpenBSD: kern_malloc_debug.c,v 1.16 2002/01/23 00:39:47 art Exp $ */
/*
* Copyright (c) 1999, 2000 Artur Grabowski <art@openbsd.org>
@@ -216,7 +216,7 @@ debug_malloc_init(void)
debug_malloc_chunks_on_freelist = 0;
pool_init(&debug_malloc_pool, sizeof(struct debug_malloc_entry),
- 0, 0, 0, "mdbepl", 0, NULL, NULL, 0);
+ 0, 0, 0, "mdbepl", NULL);
}
/*
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index e8fdc6b4084..3725838dfa0 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_proc.c,v 1.9 2002/01/16 20:50:17 miod Exp $ */
+/* $OpenBSD: kern_proc.c,v 1.10 2002/01/23 00:39:47 art Exp $ */
/* $NetBSD: kern_proc.c,v 1.14 1996/02/09 18:59:41 christos Exp $ */
/*
@@ -110,7 +110,7 @@ procinit()
uihashtbl = hashinit(maxproc / 16, M_PROC, M_WAITOK, &uihash);
pool_init(&proc_pool, sizeof(struct proc), 0, 0, 0, "procpl",
- 0, pool_page_alloc_nointr, pool_page_free_nointr, M_PROC);
+ &pool_allocator_nointr);
}
/*
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index c0fd97f1021..6d1c0be45ef 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_sig.c,v 1.52 2002/01/19 19:00:14 millert Exp $ */
+/* $OpenBSD: kern_sig.c,v 1.53 2002/01/23 00:39:47 art Exp $ */
/* $NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $ */
/*
@@ -152,7 +152,7 @@ void
signal_init()
{
pool_init(&sigacts_pool, sizeof(struct sigacts), 0, 0, 0, "sigapl",
- 0, pool_page_alloc_nointr, pool_page_free_nointr, M_SUBPROC);
+ &pool_allocator_nointr);
}
/*
diff --git a/sys/kern/subr_extent.c b/sys/kern/subr_extent.c
index 1b879c6755c..be00d755fc2 100644
--- a/sys/kern/subr_extent.c
+++ b/sys/kern/subr_extent.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: subr_extent.c,v 1.18 2001/08/06 11:19:26 art Exp $ */
+/* $OpenBSD: subr_extent.c,v 1.19 2002/01/23 00:39:47 art Exp $ */
/* $NetBSD: subr_extent.c,v 1.7 1996/11/21 18:46:34 cgd Exp $ */
/*-
@@ -114,7 +114,7 @@ extent_pool_init(void)
if (!inited) {
pool_init(&ex_region_pl, sizeof(struct extent_region), 0, 0, 0,
- "extentpl", 0, 0, 0, 0);
+ "extentpl", NULL);
inited = 1;
}
}
diff --git a/sys/kern/subr_pool.c b/sys/kern/subr_pool.c
index 18d1ca0a1a4..f8a42136896 100644
--- a/sys/kern/subr_pool.c
+++ b/sys/kern/subr_pool.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: subr_pool.c,v 1.19 2002/01/10 18:56:03 art Exp $ */
+/* $OpenBSD: subr_pool.c,v 1.20 2002/01/23 00:39:47 art Exp $ */
/* $NetBSD: subr_pool.c,v 1.61 2001/09/26 07:14:56 chs Exp $ */
/*-
@@ -109,7 +109,7 @@ struct pool_item {
};
#define PR_HASH_INDEX(pp,addr) \
- (((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1))
+ (((u_long)(addr) >> (pp)->pr_alloc->pa_pageshift) & (PR_HASHTABSIZE - 1))
#define POOL_NEEDS_CATCHUP(pp) \
((pp)->pr_nitems < (pp)->pr_minitems)
@@ -164,8 +164,9 @@ static void pool_cache_reclaim(struct pool_cache *);
static int pool_catchup(struct pool *);
static void pool_prime_page(struct pool *, caddr_t,
struct pool_item_header *);
-static void *pool_page_alloc(unsigned long, int, int);
-static void pool_page_free(void *, unsigned long, int);
+
+void *pool_allocator_alloc(struct pool *, int);
+void pool_allocator_free(struct pool *, void *);
static void pool_print1(struct pool *, const char *,
int (*)(const char *, ...));
@@ -339,7 +340,7 @@ pr_rmpage(struct pool *pp, struct pool_item_header *ph,
if (pq) {
TAILQ_INSERT_HEAD(pq, ph, ph_pagelist);
} else {
- (*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
+ pool_allocator_free(pp, ph->ph_page);
if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
LIST_REMOVE(ph, ph_hashlist);
s = splhigh();
@@ -372,10 +373,7 @@ pr_rmpage(struct pool *pp, struct pool_item_header *ph,
*/
void
pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
- const char *wchan, size_t pagesz,
- void *(*alloc)(unsigned long, int, int),
- void (*release)(void *, unsigned long, int),
- int mtype)
+ const char *wchan, struct pool_allocator *palloc)
{
int off, slack, i;
@@ -390,20 +388,19 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
/*
* Check arguments and construct default values.
*/
- if (!powerof2(pagesz))
- panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz);
-
- if (alloc == NULL && release == NULL) {
- alloc = pool_page_alloc;
- release = pool_page_free;
- pagesz = PAGE_SIZE; /* Rounds to PAGE_SIZE anyhow. */
- } else if ((alloc != NULL && release != NULL) == 0) {
- /* If you specifiy one, must specify both. */
- panic("pool_init: must specify alloc and release together");
+ if (palloc == NULL)
+ palloc = &pool_allocator_kmem;
+ if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
+ if (palloc->pa_pagesz == 0)
+ palloc->pa_pagesz = PAGE_SIZE;
+
+ TAILQ_INIT(&palloc->pa_list);
+
+ simple_lock_init(&palloc->pa_slock);
+ palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
+ palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
+ palloc->pa_flags |= PA_INITIALIZED;
}
-
- if (pagesz == 0)
- pagesz = PAGE_SIZE;
if (align == 0)
align = ALIGN(1);
@@ -412,9 +409,11 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
size = sizeof(struct pool_item);
size = ALIGN(size);
- if (size > pagesz)
+#ifdef DIAGNOSTIC
+ if (size > palloc->pa_pagesz)
panic("pool_init: pool item size (%lu) too large",
(u_long)size);
+#endif
/*
* Initialize the pool structure.
@@ -431,12 +430,7 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
pp->pr_size = size;
pp->pr_align = align;
pp->pr_wchan = wchan;
- pp->pr_mtype = mtype;
- pp->pr_alloc = alloc;
- pp->pr_free = release;
- pp->pr_pagesz = pagesz;
- pp->pr_pagemask = ~(pagesz - 1);
- pp->pr_pageshift = ffs(pagesz) - 1;
+ pp->pr_alloc = palloc;
pp->pr_nitems = 0;
pp->pr_nout = 0;
pp->pr_hardlimit = UINT_MAX;
@@ -456,15 +450,15 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
* with its header based on the page address.
* We use 1/16 of the page size as the threshold (XXX: tune)
*/
- if (pp->pr_size < pagesz/16) {
+ if (pp->pr_size < palloc->pa_pagesz/16) {
/* Use the end of the page for the page header */
pp->pr_roflags |= PR_PHINPAGE;
pp->pr_phoffset = off =
- pagesz - ALIGN(sizeof(struct pool_item_header));
+ palloc->pa_pagesz - ALIGN(sizeof(struct pool_item_header));
} else {
/* The page header will be taken from our page header pool */
pp->pr_phoffset = 0;
- off = pagesz;
+ off = palloc->pa_pagesz;
for (i = 0; i < PR_HASHTABSIZE; i++) {
LIST_INIT(&pp->pr_hashtab[i]);
}
@@ -520,15 +514,20 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
*/
if (phpool.pr_size == 0) {
pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
- 0, "phpool", 0, 0, 0, 0);
+ 0, "phpool", NULL);
pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
- 0, "pcgpool", 0, 0, 0, 0);
+ 0, "pcgpool", NULL);
}
/* Insert into the list of all pools. */
simple_lock(&pool_head_slock);
TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
simple_unlock(&pool_head_slock);
+
+ /* Insert into the list of pools using this allocator. */
+ simple_lock(&palloc->pa_slock);
+ TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
+ simple_unlock(&palloc->pa_slock);
}
/*
@@ -540,6 +539,13 @@ pool_destroy(struct pool *pp)
struct pool_item_header *ph;
struct pool_cache *pc;
+ /*
+ * Locking order: pool_allocator -> pool
+ */
+ simple_lock(&pp->pr_alloc->pa_slock);
+ TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
+ simple_unlock(&pp->pr_alloc->pa_slock);
+
/* Destroy all caches for this pool. */
while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
pool_cache_destroy(pc);
@@ -661,9 +667,6 @@ pool_get(struct pool *pp, int flags)
&pp->pr_hardlimit_ratecap))
log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
- if (flags & PR_URGENT)
- panic("pool_get: urgent");
-
pp->pr_nfail++;
pr_leave(pp);
@@ -694,7 +697,7 @@ pool_get(struct pool *pp, int flags)
*/
pr_leave(pp);
simple_unlock(&pp->pr_slock);
- v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype);
+ v = pool_allocator_alloc(pp, flags);
if (__predict_true(v != NULL))
ph = pool_alloc_item_header(pp, v, flags);
simple_lock(&pp->pr_slock);
@@ -702,7 +705,7 @@ pool_get(struct pool *pp, int flags)
if (__predict_false(v == NULL || ph == NULL)) {
if (v != NULL)
- (*pp->pr_free)(v, pp->pr_pagesz, pp->pr_mtype);
+ pool_allocator_free(pp, v);
/*
* We were unable to allocate a page or item
@@ -713,9 +716,6 @@ pool_get(struct pool *pp, int flags)
if (pp->pr_curpage != NULL)
goto startover;
- if (flags & PR_URGENT)
- panic("pool_get: urgent");
-
if ((flags & PR_WAITOK) == 0) {
pp->pr_nfail++;
pr_leave(pp);
@@ -726,15 +726,11 @@ pool_get(struct pool *pp, int flags)
/*
* Wait for items to be returned to this pool.
*
- * XXX: we actually want to wait just until
- * the page allocator has memory again. Depending
- * on this pool's usage, we might get stuck here
- * for a long time.
- *
* XXX: maybe we should wake up once a second and
* try again?
*/
pp->pr_flags |= PR_WANTED;
+ /* PA_WANTED is already set on the allocator */
pr_leave(pp);
ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
pr_enter(pp, file, line);
@@ -852,7 +848,7 @@ pool_do_put(struct pool *pp, void *v)
LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
- page = (caddr_t)((u_long)v & pp->pr_pagemask);
+ page = (caddr_t)((vaddr_t)v & pp->pr_alloc->pa_pagemask);
#ifdef DIAGNOSTIC
if (__predict_false(pp->pr_nout == 0)) {
@@ -1020,7 +1016,7 @@ pool_prime(struct pool *pp, int n)
while (newpages-- > 0) {
simple_unlock(&pp->pr_slock);
- cp = (*pp->pr_alloc)(pp->pr_pagesz, PR_NOWAIT, pp->pr_mtype);
+ cp = pool_allocator_alloc(pp, PR_NOWAIT);
if (__predict_true(cp != NULL))
ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
simple_lock(&pp->pr_slock);
@@ -1028,7 +1024,7 @@ pool_prime(struct pool *pp, int n)
if (__predict_false(cp == NULL || ph == NULL)) {
error = ENOMEM;
if (cp != NULL)
- (*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype);
+ pool_allocator_free(pp, cp);
break;
}
@@ -1058,8 +1054,10 @@ pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
unsigned int ioff = pp->pr_itemoffset;
int n;
- if (((u_long)cp & (pp->pr_pagesz - 1)) != 0)
+#ifdef DIAGNOSTIC
+ if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
+#endif
if ((pp->pr_roflags & PR_PHINPAGE) == 0)
LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
@@ -1154,13 +1152,13 @@ pool_catchup(struct pool *pp)
* the pool descriptor?
*/
simple_unlock(&pp->pr_slock);
- cp = (*pp->pr_alloc)(pp->pr_pagesz, PR_NOWAIT, pp->pr_mtype);
+ cp = pool_allocator_alloc(pp, PR_NOWAIT);
if (__predict_true(cp != NULL))
ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
simple_lock(&pp->pr_slock);
if (__predict_false(cp == NULL || ph == NULL)) {
if (cp != NULL)
- (*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype);
+ pool_allocator_free(pp, cp);
error = ENOMEM;
break;
}
@@ -1232,48 +1230,11 @@ pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
}
/*
- * Default page allocator.
- */
-static void *
-pool_page_alloc(unsigned long sz, int flags, int mtype)
-{
- boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
-
- return ((void *)uvm_km_alloc_poolpage(waitok));
-}
-
-static void
-pool_page_free(void *v, unsigned long sz, int mtype)
-{
-
- uvm_km_free_poolpage((vaddr_t)v);
-}
-
-/*
- * Alternate pool page allocator for pools that know they will
- * never be accessed in interrupt context.
- */
-void *
-pool_page_alloc_nointr(unsigned long sz, int flags, int mtype)
-{
- boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
-
- return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object,
- waitok));
-}
-
-void
-pool_page_free_nointr(void *v, unsigned long sz, int mtype)
-{
-
- uvm_km_free_poolpage1(kernel_map, (vaddr_t)v);
-}
-
-
-/*
* Release all complete pages that have not been used recently.
+ *
+ * Returns non-zero if any pages have been reclaimed.
*/
-void
+int
#ifdef POOL_DIAGNOSTIC
_pool_reclaim(struct pool *pp, const char *file, long line)
#else
@@ -1287,10 +1248,10 @@ pool_reclaim(struct pool *pp)
int s;
if (pp->pr_roflags & PR_STATIC)
- return;
+ return 0;
if (simple_lock_try(&pp->pr_slock) == 0)
- return;
+ return 0;
pr_enter(pp, file, line);
TAILQ_INIT(&pq);
@@ -1332,11 +1293,11 @@ pool_reclaim(struct pool *pp)
pr_leave(pp);
simple_unlock(&pp->pr_slock);
if (TAILQ_EMPTY(&pq)) {
- return;
+ return 0;
}
while ((ph = TAILQ_FIRST(&pq)) != NULL) {
TAILQ_REMOVE(&pq, ph, ph_pagelist);
- (*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
+ pool_allocator_free(pp, ph->ph_page);
if (pp->pr_roflags & PR_PHINPAGE) {
continue;
}
@@ -1345,6 +1306,8 @@ pool_reclaim(struct pool *pp)
pool_put(&phpool, ph);
splx(s);
}
+
+ return 1;
}
@@ -1374,7 +1337,6 @@ pool_drain(void *arg)
splx(s);
}
-
/*
* Diagnostic helpers.
*/
@@ -1420,8 +1382,7 @@ pool_print1(struct pool *pp, const char *modif, int (*pr)(const char *, ...))
(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
pp->pr_roflags);
- (*pr)("\tpagesz %u, mtype %d\n", pp->pr_pagesz, pp->pr_mtype);
- (*pr)("\talloc %p, release %p\n", pp->pr_alloc, pp->pr_free);
+ (*pr)("\talloc %p\n", pp->pr_alloc);
(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
@@ -1502,7 +1463,7 @@ pool_chk(struct pool *pp, const char *label)
int n;
caddr_t page;
- page = (caddr_t)((u_long)ph & pp->pr_pagemask);
+ page = (caddr_t)((vaddr_t)ph & pp->pr_alloc->pa_pagemask);
if (page != ph->ph_page &&
(pp->pr_roflags & PR_PHINPAGE) != 0) {
if (label != NULL)
@@ -1531,7 +1492,7 @@ pool_chk(struct pool *pp, const char *label)
panic("pool");
}
#endif
- page = (caddr_t)((u_long)pi & pp->pr_pagemask);
+ page = (caddr_t)((vaddr_t)pi & pp->pr_alloc->pa_pagemask);
if (page == ph->ph_page)
continue;
@@ -1899,3 +1860,146 @@ sysctl_dopool(int *name, u_int namelen, char *where, size_t *sizep)
/* NOTREACHED */
return (0); /* XXX - Stupid gcc */
}
+
+/*
+ * Pool backend allocators.
+ *
+ * Each pool has a backend allocator that handles allocation, deallocation
+ * and any additional draining that might be needed.
+ *
+ * We provide two standard allocators.
+ * pool_allocator_kmem - the default used when no allocator is specified.
+ * pool_allocator_nointr - used for pools that will not be accessed in
+ * interrupt context.
+ */
+void *pool_page_alloc(struct pool *, int);
+void pool_page_free(struct pool *, void *);
+void *pool_page_alloc_nointr(struct pool *, int);
+void pool_page_free_nointr(struct pool *, void *);
+
+struct pool_allocator pool_allocator_kmem = {
+ pool_page_alloc, pool_page_free, 0,
+};
+struct pool_allocator pool_allocator_nointr = {
+ pool_page_alloc_nointr, pool_page_free_nointr, 0,
+};
+
+/*
+ * XXX - we have at least three different resources for the same allocation
+ * and each resource can be depleted. First we have the ready elements in
+ * the pool. Then we have the resource (typically a vm_map) for this
+ * allocator, then we have physical memory. Waiting for any of these can
+ * be unnecessary when any other is freed, but the kernel doesn't support
+ * sleeping on multiple addresses, so we have to fake. The caller sleeps on
+ * the pool (so that we can be awakened when an item is returned to the pool),
+ * but we set PA_WANT on the allocator. When a page is returned to
+ * the allocator and PA_WANT is set pool_allocator_free will wakeup all
+ * sleeping pools belonging to this allocator. (XXX - thundering herd).
+ */
+
+void *
+pool_allocator_alloc(struct pool *org, int flags)
+{
+	struct pool_allocator *pa = org->pr_alloc;
+	struct pool *pp, *start;
+	int s, freed;
+	void *res;
+
+	do {
+		if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
+			return (res);
+		if ((flags & PR_WAITOK) == 0)
+			break;
+
+		/*
+		 * Drain all pools, except 'org', that use this allocator.
+		 * We do this to reclaim va space. pa_alloc is responsible
+		 * for waiting for physical memory.
+		 * XXX - we risk looping forever if someone calls
+		 * pool_destroy on 'start'. But there is no other way to
+		 * have potentially sleeping pool_reclaim, non-sleeping
+		 * locks on pool_allocator and some stirring of drained
+		 * pools in the allocator.
+		 */
+		freed = 0;
+
+		s = splvm();
+		simple_lock(&pa->pa_slock);
+		pp = start = TAILQ_FIRST(&pa->pa_list);
+		do {
+			TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
+			TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
+			if (pp == org)
+				continue;
+			simple_unlock(&pa->pa_slock);
+			freed = pool_reclaim(pp);
+			simple_lock(&pa->pa_slock);
+		} while ((pp = TAILQ_FIRST(&pa->pa_list)) != start && !freed);
+
+		if (!freed) {
+			/*
+			 * We set PA_WANT here, the caller will most likely
+			 * sleep waiting for pages (if not, this won't hurt
+			 * that much) and there is no way to set this in the
+			 * caller without violating locking order.
+			 */
+			pa->pa_flags |= PA_WANT;
+		}
+		simple_unlock(&pa->pa_slock);
+		splx(s);
+	} while (freed);
+	return (NULL);
+}
+
+void
+pool_allocator_free(struct pool *pp, void *v)
+{
+	struct pool_allocator *pa = pp->pr_alloc;
+
+	(*pa->pa_free)(pp, v);
+
+	simple_lock(&pa->pa_slock);
+	if ((pa->pa_flags & PA_WANT) == 0) {
+		simple_unlock(&pa->pa_slock);
+		return;
+	}
+	TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
+		simple_lock(&pp->pr_slock);
+		if ((pp->pr_flags & PR_WANTED) != 0) {
+			pp->pr_flags &= ~PR_WANTED;
+			wakeup(pp);
+		}
+		simple_unlock(&pp->pr_slock);
+	}
+	pa->pa_flags &= ~PA_WANT;
+	simple_unlock(&pa->pa_slock);
+}
+
+void *
+pool_page_alloc(struct pool *pp, int flags)
+{
+	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
+
+	return ((void *)uvm_km_alloc_poolpage(waitok)); /* default backend; may sleep iff PR_WAITOK */
+}
+
+void
+pool_page_free(struct pool *pp, void *v)
+{
+	uvm_km_free_poolpage((vaddr_t)v); /* release a page obtained via pool_page_alloc() */
+}
+
+void *
+pool_page_alloc_nointr(struct pool *pp, int flags)
+{
+	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
+
+	return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object,
+	    waitok)); /* kernel_map page; only for pools never used in interrupt context */
+}
+
+void
+pool_page_free_nointr(struct pool *pp, void *v)
+{
+	uvm_km_free_poolpage1(kernel_map, (vaddr_t)v); /* counterpart of pool_page_alloc_nointr() */
+}
diff --git a/sys/kern/sys_pipe.c b/sys/kern/sys_pipe.c
index 39bc68dfc84..4543cbe9678 100644
--- a/sys/kern/sys_pipe.c
+++ b/sys/kern/sys_pipe.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: sys_pipe.c,v 1.40 2001/11/06 19:53:20 miod Exp $ */
+/* $OpenBSD: sys_pipe.c,v 1.41 2002/01/23 00:39:47 art Exp $ */
/*
* Copyright (c) 1996 John S. Dyson
@@ -846,7 +846,6 @@ void
pipe_init()
{
pool_init(&pipe_pool, sizeof(struct pipe), 0, 0, 0, "pipepl",
- 0, pool_page_alloc_nointr, pool_page_free_nointr,
- M_PIPE);
+ &pool_allocator_nointr);
}
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index 1e12eb88086..e28e36058d4 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uipc_mbuf.c,v 1.45 2002/01/16 20:50:17 miod Exp $ */
+/* $OpenBSD: uipc_mbuf.c,v 1.46 2002/01/23 00:39:47 art Exp $ */
/* $NetBSD: uipc_mbuf.c,v 1.15.4.1 1996/06/13 17:11:44 cgd Exp $ */
/*
@@ -98,22 +98,25 @@ struct pool mclpool; /* mbuf cluster pool */
extern struct vm_map *mb_map;
int needqueuedrain;
-void *mclpool_alloc __P((unsigned long, int, int));
-void mclpool_release __P((void *, unsigned long, int));
+void *mclpool_alloc __P((struct pool *, int));
+void mclpool_release __P((struct pool *, void *));
struct mbuf *m_copym0 __P((struct mbuf *, int, int, int, int));
const char *mclpool_warnmsg =
"WARNING: mclpool limit reached; increase NMBCLUSTERS";
+struct pool_allocator mclpool_allocator = {
+ mclpool_alloc, mclpool_release, 0,
+};
+
/*
* Initialize the mbuf allcator.
*/
void
mbinit()
{
- pool_init(&mbpool, MSIZE, 0, 0, 0, "mbpl", 0, NULL, NULL, 0);
- pool_init(&mclpool, MCLBYTES, 0, 0, 0, "mclpl", 0, mclpool_alloc,
- mclpool_release, 0);
+ pool_init(&mbpool, MSIZE, 0, 0, 0, "mbpl", NULL);
+ pool_init(&mclpool, MCLBYTES, 0, 0, 0, "mclpl", &mclpool_allocator);
/*
* Set the hard limit on the mclpool to the number of
@@ -134,10 +137,7 @@ mbinit()
void *
-mclpool_alloc(sz, flags, mtype)
- unsigned long sz;
- int flags;
- int mtype;
+mclpool_alloc(struct pool *pp, int flags)
{
boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
@@ -146,10 +146,7 @@ mclpool_alloc(sz, flags, mtype)
}
void
-mclpool_release(v, sz, mtype)
- void *v;
- unsigned long sz;
- int mtype;
+mclpool_release(struct pool *pp, void *v)
{
uvm_km_free_poolpage1(mb_map, (vaddr_t)v);
}
diff --git a/sys/kern/uipc_socket.c b/sys/kern/uipc_socket.c
index 23840d6e98b..e667e230798 100644
--- a/sys/kern/uipc_socket.c
+++ b/sys/kern/uipc_socket.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uipc_socket.c,v 1.39 2001/11/28 17:18:00 ericj Exp $ */
+/* $OpenBSD: uipc_socket.c,v 1.40 2002/01/23 00:39:48 art Exp $ */
/* $NetBSD: uipc_socket.c,v 1.21 1996/02/04 02:17:52 christos Exp $ */
/*
@@ -79,8 +79,7 @@ void
soinit(void)
{
- pool_init(&socket_pool, sizeof(struct socket), 0, 0, 0,
- "sockpl", 0, NULL, NULL, M_SOCKET);
+ pool_init(&socket_pool, sizeof(struct socket), 0, 0, 0, "sockpl", NULL);
}
/*
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 22ef4dfb385..50ffa1f3880 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vfs_bio.c,v 1.55 2001/12/19 08:58:06 art Exp $ */
+/* $OpenBSD: vfs_bio.c,v 1.56 2002/01/23 00:39:48 art Exp $ */
/* $NetBSD: vfs_bio.c,v 1.44 1996/06/11 11:15:36 pk Exp $ */
/*-
@@ -188,8 +188,7 @@ bufinit()
register int i;
int base, residual;
- pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", 0,
- NULL, NULL, M_DEVBUF);
+ pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", NULL);
for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
TAILQ_INIT(dp);
bufhashtbl = hashinit(nbuf, M_CACHE, M_WAITOK, &bufhash);
diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c
index 6bf013dcb07..4d367cf3f3c 100644
--- a/sys/kern/vfs_cache.c
+++ b/sys/kern/vfs_cache.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vfs_cache.c,v 1.5 2001/05/02 05:55:13 fgsch Exp $ */
+/* $OpenBSD: vfs_cache.c,v 1.6 2002/01/23 00:39:48 art Exp $ */
/* $NetBSD: vfs_cache.c,v 1.13 1996/02/04 02:18:09 christos Exp $ */
/*
@@ -231,7 +231,7 @@ nchinit()
TAILQ_INIT(&nclruhead);
nchashtbl = hashinit(desiredvnodes, M_CACHE, M_WAITOK, &nchash);
pool_init(&nch_pool, sizeof(struct namecache), 0, 0, 0, "nchpl",
- 0, pool_page_alloc_nointr, pool_page_free_nointr, M_CACHE);
+ &pool_allocator_nointr);
}
/*
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index aaff1342b67..da612f77f05 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vfs_subr.c,v 1.80 2001/12/19 08:58:06 art Exp $ */
+/* $OpenBSD: vfs_subr.c,v 1.81 2002/01/23 00:39:48 art Exp $ */
/* $NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $ */
/*
@@ -131,7 +131,7 @@ vntblinit()
{
pool_init(&vnode_pool, sizeof(struct vnode), 0, 0, 0, "vnodes",
- 0, pool_page_alloc_nointr, pool_page_free_nointr, M_VNODE);
+ &pool_allocator_nointr);
simple_lock_init(&mntvnode_slock);
simple_lock_init(&mntid_slock);
simple_lock_init(&spechash_slock);
diff --git a/sys/net/pf.c b/sys/net/pf.c
index 4ad3d368612..88694d58ed3 100644
--- a/sys/net/pf.c
+++ b/sys/net/pf.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pf.c,v 1.184 2002/01/12 01:34:49 jasoni Exp $ */
+/* $OpenBSD: pf.c,v 1.185 2002/01/23 00:39:48 art Exp $ */
/*
* Copyright (c) 2001 Daniel Hartmeier
@@ -959,21 +959,20 @@ pf_print_flags(u_int8_t f)
void
pfattach(int num)
{
- /* XXX - no M_* tags, but they are not used anyway */
pool_init(&pf_tree_pl, sizeof(struct pf_tree_node), 0, 0, 0, "pftrpl",
- 0, NULL, NULL, 0);
+ NULL);
pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
- 0, NULL, NULL, 0);
+ NULL);
pool_init(&pf_nat_pl, sizeof(struct pf_nat), 0, 0, 0, "pfnatpl",
- 0, NULL, NULL, 0);
+ NULL);
pool_init(&pf_binat_pl, sizeof(struct pf_binat), 0, 0, 0, "pfbinatpl",
- 0, NULL, NULL, 0);
+ NULL);
pool_init(&pf_rdr_pl, sizeof(struct pf_rdr), 0, 0, 0, "pfrdrpl",
- 0, NULL, NULL, 0);
+ NULL);
pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
- 0, NULL, NULL, 0);
+ NULL);
pool_init(&pf_sport_pl, sizeof(struct pf_port_node), 0, 0, 0, "pfsport",
- 0, NULL, NULL, 0);
+ NULL);
TAILQ_INIT(&pf_rules[0]);
TAILQ_INIT(&pf_rules[1]);
diff --git a/sys/net/pf_norm.c b/sys/net/pf_norm.c
index c5bacc9c8d7..2edd0c11ff2 100644
--- a/sys/net/pf_norm.c
+++ b/sys/net/pf_norm.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pf_norm.c,v 1.16 2001/12/03 22:25:06 dhartmei Exp $ */
+/* $OpenBSD: pf_norm.c,v 1.17 2002/01/23 00:39:48 art Exp $ */
/*
* Copyright 2001 Niels Provos <provos@citi.umich.edu>
@@ -122,9 +122,9 @@ void
pf_normalize_init(void)
{
pool_init(&pf_frent_pl, sizeof(struct pf_frent), 0, 0, 0, "pffrent",
- 0, NULL, NULL, 0);
+ NULL);
pool_init(&pf_frag_pl, sizeof(struct pf_fragment), 0, 0, 0, "pffrag",
- 0, NULL, NULL, 0);
+ NULL);
pool_sethiwat(&pf_frag_pl, PFFRAG_FRAG_HIWAT);
pool_sethardlimit(&pf_frent_pl, PFFRAG_FRENT_HIWAT, NULL, 0);
diff --git a/sys/net/pfkeyv2.c b/sys/net/pfkeyv2.c
index 123be415ca4..7dac1fb3b11 100644
--- a/sys/net/pfkeyv2.c
+++ b/sys/net/pfkeyv2.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pfkeyv2.c,v 1.77 2001/08/05 11:03:07 angelos Exp $ */
+/* $OpenBSD: pfkeyv2.c,v 1.78 2002/01/23 00:39:48 art Exp $ */
/*
* @(#)COPYRIGHT 1.1 (NRL) 17 January 1995
@@ -1543,8 +1543,7 @@ pfkeyv2_send(struct socket *socket, void *message, int len)
{
ipsec_policy_pool_initialized = 1;
pool_init(&ipsec_policy_pool, sizeof(struct ipsec_policy),
- 0, 0, PR_FREEHEADER, "ipsec policy", 0, NULL,
- NULL, M_IPSEC_POLICY);
+ 0, 0, PR_FREEHEADER, "ipsec policy", NULL);
}
/* Allocate policy entry */
diff --git a/sys/net/route.c b/sys/net/route.c
index 9813d14d614..ccef36f9320 100644
--- a/sys/net/route.c
+++ b/sys/net/route.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: route.c,v 1.27 2001/12/18 23:07:49 deraadt Exp $ */
+/* $OpenBSD: route.c,v 1.28 2002/01/23 00:39:48 art Exp $ */
/* $NetBSD: route.c,v 1.14 1996/02/13 22:00:46 christos Exp $ */
/*
@@ -886,7 +886,7 @@ rt_timer_init()
#if 0
pool_init(&rttimer_pool, sizeof(struct rttimer), 0, 0, 0, "rttmrpl",
- 0, NULL, NULL, M_RTABLE);
+ NULL);
#endif
LIST_INIT(&rttimer_queue_head);
diff --git a/sys/netinet/ip_input.c b/sys/netinet/ip_input.c
index 49e575f136a..d56f81437e0 100644
--- a/sys/netinet/ip_input.c
+++ b/sys/netinet/ip_input.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ip_input.c,v 1.96 2001/12/10 12:05:40 ho Exp $ */
+/* $OpenBSD: ip_input.c,v 1.97 2002/01/23 00:39:48 art Exp $ */
/* $NetBSD: ip_input.c,v 1.30 1996/03/16 23:53:58 christos Exp $ */
/*
@@ -225,7 +225,7 @@ ip_init()
const u_int16_t defbaddynamicports_udp[] = DEFBADDYNAMICPORTS_UDP;
pool_init(&ipqent_pool, sizeof(struct ipqent), 0, 0, 0, "ipqepl",
- 0, NULL, NULL, M_IPQ);
+ NULL);
pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
if (pr == 0)
diff --git a/sys/netinet/ip_spd.c b/sys/netinet/ip_spd.c
index e8b02490009..0ec0ed825b4 100644
--- a/sys/netinet/ip_spd.c
+++ b/sys/netinet/ip_spd.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ip_spd.c,v 1.41 2002/01/02 20:35:40 deraadt Exp $ */
+/* $OpenBSD: ip_spd.c,v 1.42 2002/01/23 00:39:48 art Exp $ */
/*
* The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
*
@@ -615,8 +615,7 @@ ipsec_add_policy(struct sockaddr_encap *dst, struct sockaddr_encap *mask,
if (ipsec_policy_pool_initialized == 0) {
ipsec_policy_pool_initialized = 1;
pool_init(&ipsec_policy_pool, sizeof(struct ipsec_policy),
- 0, 0, PR_FREEHEADER, "ipsec policy", 0, NULL, NULL,
- M_IPSEC_POLICY);
+ 0, 0, PR_FREEHEADER, "ipsec policy", NULL);
}
ipon = pool_get(&ipsec_policy_pool, 0);
@@ -708,8 +707,7 @@ ipsp_acquire_sa(struct ipsec_policy *ipo, union sockaddr_union *gw,
if (ipsec_acquire_pool_initialized == 0) {
ipsec_acquire_pool_initialized = 1;
pool_init(&ipsec_acquire_pool, sizeof(struct ipsec_acquire),
- 0, 0, PR_FREEHEADER, "ipsec acquire", 0, NULL,
- NULL, M_IPSEC_POLICY);
+ 0, 0, PR_FREEHEADER, "ipsec acquire", NULL);
}
ipa = pool_get(&ipsec_acquire_pool, 0);
diff --git a/sys/netinet/tcp_subr.c b/sys/netinet/tcp_subr.c
index d32a1252028..05bc7068064 100644
--- a/sys/netinet/tcp_subr.c
+++ b/sys/netinet/tcp_subr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: tcp_subr.c,v 1.55 2002/01/15 19:18:01 provos Exp $ */
+/* $OpenBSD: tcp_subr.c,v 1.56 2002/01/23 00:39:48 art Exp $ */
/* $NetBSD: tcp_subr.c,v 1.22 1996/02/13 23:44:00 christos Exp $ */
/*
@@ -160,10 +160,10 @@ tcp_init()
tcp_iss = 1; /* wrong */
#endif /* TCP_COMPAT_42 */
pool_init(&tcpcb_pool, sizeof(struct tcpcb), 0, 0, 0, "tcpcbpl",
- 0, NULL, NULL, M_PCB);
+ NULL);
#ifdef TCP_SACK
pool_init(&sackhl_pool, sizeof(struct sackhole), 0, 0, 0, "sackhlpl",
- 0, NULL, NULL, M_PCB);
+ NULL);
#endif /* TCP_SACK */
in_pcbinit(&tcbtable, tcbhashsize);
tcp_now = arc4random() / 2;
diff --git a/sys/nfs/nfs_node.c b/sys/nfs/nfs_node.c
index 00eb790fe97..9d858658dc7 100644
--- a/sys/nfs/nfs_node.c
+++ b/sys/nfs/nfs_node.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: nfs_node.c,v 1.20 2002/01/16 21:51:16 ericj Exp $ */
+/* $OpenBSD: nfs_node.c,v 1.21 2002/01/23 00:39:48 art Exp $ */
/* $NetBSD: nfs_node.c,v 1.16 1996/02/18 11:53:42 fvdl Exp $ */
/*
@@ -79,7 +79,7 @@ nfs_nhinit()
lockinit(&nfs_hashlock, PINOD, "nfs_hashlock", 0, 0);
pool_init(&nfs_node_pool, sizeof(struct nfsnode), 0, 0, 0, "nfsnodepl",
- 0, pool_page_alloc_nointr, pool_page_free_nointr, M_NFSNODE);
+ &pool_allocator_nointr);
}
/*
diff --git a/sys/scsi/scsi_base.c b/sys/scsi/scsi_base.c
index 17d76b6c196..3c23a215da5 100644
--- a/sys/scsi/scsi_base.c
+++ b/sys/scsi/scsi_base.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: scsi_base.c,v 1.31 2001/08/25 19:29:16 fgsch Exp $ */
+/* $OpenBSD: scsi_base.c,v 1.32 2002/01/23 00:39:48 art Exp $ */
/* $NetBSD: scsi_base.c,v 1.43 1997/04/02 02:29:36 mycroft Exp $ */
/*
@@ -75,7 +75,7 @@ scsi_init()
/* Initialize the scsi_xfer pool. */
pool_init(&scsi_xfer_pool, sizeof(struct scsi_xfer), 0,
- 0, 0, "scxspl", 0, NULL, NULL, M_DEVBUF);
+ 0, 0, "scxspl", NULL);
}
/*
diff --git a/sys/sys/pool.h b/sys/sys/pool.h
index dcd618f6224..d79e1da39f6 100644
--- a/sys/sys/pool.h
+++ b/sys/sys/pool.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pool.h,v 1.4 2001/06/24 16:00:46 art Exp $ */
+/* $OpenBSD: pool.h,v 1.5 2002/01/23 00:39:48 art Exp $ */
/* $NetBSD: pool.h,v 1.27 2001/06/06 22:00:17 rafal Exp $ */
/*-
@@ -93,6 +93,21 @@ struct pool_cache {
unsigned long pc_nitems; /* # objects currently in cache */
};
+struct pool_allocator {
+ void *(*pa_alloc)(struct pool *, int);
+ void (*pa_free)(struct pool *, void *);
+ int pa_pagesz;
+
+ /* The following fields are for internal use only */
+ struct simplelock pa_slock;
+ TAILQ_HEAD(,pool) pa_list;
+ int pa_flags;
+#define PA_INITIALIZED 0x01
+#define PA_WANT 0x02 /* wakeup any sleeping pools on free */
+ int pa_pagemask;
+ int pa_pageshift;
+};
+
struct pool {
TAILQ_ENTRY(pool)
pr_poollist;
@@ -108,9 +123,6 @@ struct pool {
unsigned int pr_minpages; /* same in page units */
unsigned int pr_maxpages; /* maximum # of pages to keep */
unsigned int pr_npages; /* # of pages allocated */
- unsigned int pr_pagesz; /* page size, must be 2^n */
- unsigned long pr_pagemask; /* abbrev. of above */
- unsigned int pr_pageshift; /* shift corr. to above */
unsigned int pr_itemsperpage;/* # items that fit in a page */
unsigned int pr_slack; /* unused space in a page */
unsigned int pr_nitems; /* number of available items in pool */
@@ -118,9 +130,8 @@ struct pool {
unsigned int pr_hardlimit; /* hard limit to number of allocated
items */
unsigned int pr_serial; /* unique serial number of the pool */
- void *(*pr_alloc)(unsigned long, int, int);
- void (*pr_free)(void *, unsigned long, int);
- int pr_mtype; /* memory allocator tag */
+ struct pool_allocator *pr_alloc;/* backend allocator */
+ TAILQ_ENTRY(pool) pr_alloc_list;/* list of pools using this allocator */
const char *pr_wchan; /* tsleep(9) identifier */
unsigned int pr_flags; /* r/w flags */
unsigned int pr_roflags; /* r/o flags */
@@ -130,7 +141,6 @@ struct pool {
#define PR_WANTED 4
#define PR_STATIC 8
#define PR_FREEHEADER 16
-#define PR_URGENT 32
#define PR_PHINPAGE 64
#define PR_LOGGING 128
#define PR_LIMITFAIL 256 /* even if waiting, fail if we hit limit */
@@ -185,16 +195,21 @@ struct pool {
#endif /* __POOL_EXPOSE */
#ifdef _KERNEL
+/*
+ * Alternate pool page allocator, provided for pools that know they
+ * will never be accessed in interrupt context.
+ */
+extern struct pool_allocator pool_allocator_nointr;
+/* Standard pool allocator, provided here for reference. */
+extern struct pool_allocator pool_allocator_kmem;
+
void pool_init(struct pool *, size_t, u_int, u_int,
- int, const char *, size_t,
- void *(*)__P((unsigned long, int, int)),
- void (*)__P((void *, unsigned long, int)),
- int);
+ int, const char *, struct pool_allocator *);
void pool_destroy(struct pool *);
void *pool_get(struct pool *, int);
void pool_put(struct pool *, void *);
-void pool_reclaim(struct pool *);
+int pool_reclaim(struct pool *);
#ifdef POOL_DIAGNOSTIC
/*
@@ -223,13 +238,6 @@ void pool_printit(struct pool *, const char *,
int pool_chk(struct pool *, const char *);
/*
- * Alternate pool page allocator, provided for pools that know they
- * will never be accessed in interrupt context.
- */
-void *pool_page_alloc_nointr(unsigned long, int, int);
-void pool_page_free_nointr(void *, unsigned long, int);
-
-/*
* Pool cache routines.
*/
void pool_cache_init(struct pool_cache *, struct pool *,
diff --git a/sys/ufs/ffs/ffs_vfsops.c b/sys/ufs/ffs/ffs_vfsops.c
index 8aec3d7de20..4fd34932ec3 100644
--- a/sys/ufs/ffs/ffs_vfsops.c
+++ b/sys/ufs/ffs/ffs_vfsops.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ffs_vfsops.c,v 1.48 2001/12/19 08:58:07 art Exp $ */
+/* $OpenBSD: ffs_vfsops.c,v 1.49 2002/01/23 00:39:48 art Exp $ */
/* $NetBSD: ffs_vfsops.c,v 1.19 1996/02/09 22:22:26 christos Exp $ */
/*
@@ -1325,7 +1325,7 @@ ffs_init(vfsp)
return (0);
done = 1;
pool_init(&ffs_ino_pool, sizeof(struct inode), 0, 0, 0, "ffsino",
- 0, pool_page_alloc_nointr, pool_page_free_nointr, M_FFSNODE);
+ &pool_allocator_nointr);
softdep_initialize();
return (ufs_init(vfsp));
}
diff --git a/sys/uvm/uvm_amap.c b/sys/uvm/uvm_amap.c
index b0e2c00bc50..f24130c93dd 100644
--- a/sys/uvm/uvm_amap.c
+++ b/sys/uvm/uvm_amap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_amap.c,v 1.20 2002/01/15 20:09:56 art Exp $ */
+/* $OpenBSD: uvm_amap.c,v 1.21 2002/01/23 00:39:48 art Exp $ */
/* $NetBSD: uvm_amap.c,v 1.27 2000/11/25 06:27:59 chs Exp $ */
/*
@@ -157,8 +157,7 @@ amap_init()
* Initialize the vm_amap pool.
*/
pool_init(&uvm_amap_pool, sizeof(struct vm_amap), 0, 0, 0,
- "amappl", 0, pool_page_alloc_nointr, pool_page_free_nointr,
- M_UVMAMAP);
+ "amappl", &pool_allocator_nointr);
}
/*
diff --git a/sys/uvm/uvm_aobj.c b/sys/uvm/uvm_aobj.c
index 9a7f135cb98..828c2658c2b 100644
--- a/sys/uvm/uvm_aobj.c
+++ b/sys/uvm/uvm_aobj.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_aobj.c,v 1.24 2001/12/19 08:58:07 art Exp $ */
+/* $OpenBSD: uvm_aobj.c,v 1.25 2002/01/23 00:39:48 art Exp $ */
/* $NetBSD: uvm_aobj.c,v 1.39 2001/02/18 21:19:08 chs Exp $ */
/*
@@ -582,11 +582,10 @@ uao_init()
* kernel map!
*/
pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
- 0, 0, 0, "uaoeltpl", 0, NULL, NULL, M_UVMAOBJ);
+ 0, 0, 0, "uaoeltpl", &pool_allocator_nointr);
pool_init(&uvm_aobj_pool, sizeof(struct uvm_aobj), 0, 0, 0,
- "aobjpl", 0,
- pool_page_alloc_nointr, pool_page_free_nointr, M_UVMAOBJ);
+ "aobjpl", &pool_allocator_nointr);
}
/*
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index 710211c79ae..f30fae1c6da 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map.c,v 1.36 2002/01/02 22:23:25 miod Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.37 2002/01/23 00:39:48 art Exp $ */
/* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
/*
@@ -355,11 +355,9 @@ uvm_map_init()
* initialize the map-related pools.
*/
pool_init(&uvm_vmspace_pool, sizeof(struct vmspace),
- 0, 0, 0, "vmsppl", 0,
- pool_page_alloc_nointr, pool_page_free_nointr, M_VMMAP);
+ 0, 0, 0, "vmsppl", &pool_allocator_nointr);
pool_init(&uvm_map_entry_pool, sizeof(struct vm_map_entry),
- 0, 0, 0, "vmmpepl", 0,
- pool_page_alloc_nointr, pool_page_free_nointr, M_VMMAP);
+ 0, 0, 0, "vmmpepl", &pool_allocator_nointr);
}
/*
diff --git a/sys/uvm/uvm_swap.c b/sys/uvm/uvm_swap.c
index 40eca607eae..a881d3f7eb9 100644
--- a/sys/uvm/uvm_swap.c
+++ b/sys/uvm/uvm_swap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_swap.c,v 1.48 2002/01/02 22:23:25 miod Exp $ */
+/* $OpenBSD: uvm_swap.c,v 1.49 2002/01/23 00:39:48 art Exp $ */
/* $NetBSD: uvm_swap.c,v 1.40 2000/11/17 11:39:39 mrg Exp $ */
/*
@@ -302,10 +302,10 @@ uvm_swap_init()
pool_init(&vndxfer_pool, sizeof(struct vndxfer), 0, 0, 0, "swp vnx",
- 0, NULL, NULL, 0);
+ NULL);
- pool_init(&vndbuf_pool, sizeof(struct vndbuf), 0, 0, 0, "swp vnd", 0,
- NULL, NULL, 0);
+ pool_init(&vndbuf_pool, sizeof(struct vndbuf), 0, 0, 0, "swp vnd",
+ NULL);
/*
* Setup the initial swap partition