summaryrefslogtreecommitdiff
path: root/sys/kern
diff options
context:
space:
mode:
authorTed Unangst <tedu@cvs.openbsd.org>2004-06-02 22:17:23 +0000
committerTed Unangst <tedu@cvs.openbsd.org>2004-06-02 22:17:23 +0000
commit1f21de0d02150532aaca3efc3165ce34e077f149 (patch)
tree7f9cae38d957c2a6db60dad3b838e0e657d63ff2 /sys/kern
parent5a39fe414db9196464fc4176ea67c2fd7c2701d6 (diff)
rearrange the allocators we provide for general use.
The new allocator remains the default and is also `_nointr`. `_kmem` is restored to its former position, and `_oldnointr` is introduced. This allows pool users that don't like the new allocator to continue working. testing/ok beck@ cedric@
Diffstat (limited to 'sys/kern')
-rw-r--r--sys/kern/subr_pool.c51
1 files changed, 36 insertions, 15 deletions
diff --git a/sys/kern/subr_pool.c b/sys/kern/subr_pool.c
index 67ec10797d4..e9114b34990 100644
--- a/sys/kern/subr_pool.c
+++ b/sys/kern/subr_pool.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: subr_pool.c,v 1.40 2004/05/27 04:55:27 tedu Exp $ */
+/* $OpenBSD: subr_pool.c,v 1.41 2004/06/02 22:17:22 tedu Exp $ */
/* $NetBSD: subr_pool.c,v 1.61 2001/09/26 07:14:56 chs Exp $ */
/*-
@@ -397,7 +397,7 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
* Check arguments and construct default values.
*/
if (palloc == NULL)
- palloc = &pool_allocator_kmem;
+ palloc = &pool_allocator_nointr;
if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
if (palloc->pa_pagesz == 0)
palloc->pa_pagesz = PAGE_SIZE;
@@ -1933,22 +1933,26 @@ sysctl_dopool(int *name, u_int namelen, char *where, size_t *sizep)
*
* Each pool has a backend allocator that handles allocation, deallocation
* and any additional draining that might be needed.
- *
- * We provide two standard allocators.
- * pool_alloc_kmem - the default used when no allocator is specified.
- * pool_alloc_nointr - used for pools that will not be accessed in
- * interrupt context.
*/
+void *pool_page_alloc_kmem(struct pool *, int);
+void pool_page_free_kmem(struct pool *, void *);
+void *pool_page_alloc_oldnointr(struct pool *, int);
+void pool_page_free_oldnointr(struct pool *, void *);
void *pool_page_alloc(struct pool *, int);
void pool_page_free(struct pool *, void *);
-void *pool_page_alloc_nointr(struct pool *, int);
-void pool_page_free_nointr(struct pool *, void *);
+/* old default allocator, interrupt safe */
struct pool_allocator pool_allocator_kmem = {
- pool_page_alloc, pool_page_free, 0,
+ pool_page_alloc_kmem, pool_page_free_kmem, 0,
+};
+/* previous nointr. handles large allocations safely */
+struct pool_allocator pool_allocator_oldnointr = {
+ pool_page_alloc_oldnointr, pool_page_free_oldnointr, 0,
};
+/* safe for interrupts, name preserved for compat
+ * this is the default allocator */
struct pool_allocator pool_allocator_nointr = {
- pool_page_alloc_nointr, pool_page_free_nointr, 0,
+ pool_page_alloc, pool_page_free, 0,
};
/*
@@ -2092,19 +2096,36 @@ pool_page_free(struct pool *pp, void *v)
}
void *
-pool_page_alloc_nointr(struct pool *pp, int flags)
+pool_page_alloc_kmem(struct pool *pp, int flags)
+{
+ boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
+
+ return ((void *)uvm_km_alloc_poolpage1(kmem_map, uvmexp.kmem_object,
+ waitok));
+}
+
+void
+pool_page_free_kmem(struct pool *pp, void *v)
+{
+
+ uvm_km_free_poolpage1(kmem_map, (vaddr_t)v);
+}
+
+void *
+pool_page_alloc_oldnointr(struct pool *pp, int flags)
{
boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
splassert(IPL_NONE);
- return (uvm_km_getpage(waitok));
+ return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object,
+ waitok));
}
void
-pool_page_free_nointr(struct pool *pp, void *v)
+pool_page_free_oldnointr(struct pool *pp, void *v)
{
splassert(IPL_NONE);
- uvm_km_putpage(v);
+ uvm_km_free_poolpage1(kernel_map, (vaddr_t)v);
}