Diffstat (limited to 'sys/kern/subr_pool.c')
 -rw-r--r--  sys/kern/subr_pool.c  41
 1 file changed, 13 insertions(+), 28 deletions(-)
diff --git a/sys/kern/subr_pool.c b/sys/kern/subr_pool.c
index ea14aa06902..95197120db1 100644
--- a/sys/kern/subr_pool.c
+++ b/sys/kern/subr_pool.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: subr_pool.c,v 1.197 2016/09/15 01:24:08 dlg Exp $ */
+/* $OpenBSD: subr_pool.c,v 1.198 2016/09/15 02:00:16 dlg Exp $ */
/* $NetBSD: subr_pool.c,v 1.61 2001/09/26 07:14:56 chs Exp $ */
/*-
@@ -220,14 +220,13 @@ pr_find_pagehead(struct pool *pp, void *v)
* static pools that must be initialized before malloc() is available.
*/
void
-pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
+pool_init(struct pool *pp, size_t size, u_int align, int ipl, int flags,
const char *wchan, struct pool_allocator *palloc)
{
int off = 0, space;
unsigned int pgsize = PAGE_SIZE, items;
#ifdef DIAGNOSTIC
struct pool *iter;
- KASSERT(ioff == 0);
#endif
if (align == 0)
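
This change replaces pool_init()'s unused item-offset argument (ioff, previously asserted to be zero) with the pool's interrupt protection level, so the IPL is fixed at construction time instead of being patched in afterwards by pool_setipl(). A caller-side sketch of the migration, using a hypothetical "examplepl" pool (the names are illustrative, not part of this commit):

	/* before: IPL set in a separate call after construction */
	pool_init(&examplepl, sizeof(struct example), 0, 0, 0,
	    "examplepl", NULL);
	pool_setipl(&examplepl, IPL_BIO);

	/* after: the IPL is passed where ioff used to be */
	pool_init(&examplepl, sizeof(struct example), 0, IPL_BIO, 0,
	    "examplepl", NULL);
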
@@ -313,15 +312,14 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
pp->pr_hiwat = 0;
pp->pr_nidle = 0;
- pp->pr_ipl = -1;
- mtx_init(&pp->pr_mtx, IPL_NONE);
- mtx_init(&pp->pr_requests_mtx, IPL_NONE);
+ pp->pr_ipl = ipl;
+ mtx_init(&pp->pr_mtx, pp->pr_ipl);
+ mtx_init(&pp->pr_requests_mtx, pp->pr_ipl);
TAILQ_INIT(&pp->pr_requests);
if (phpool.pr_size == 0) {
- pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
- 0, "phpool", NULL);
- pool_setipl(&phpool, IPL_HIGH);
+ pool_init(&phpool, sizeof(struct pool_item_header), 0,
+ IPL_HIGH, 0, "phpool", NULL);
/* make sure phpool wont "recurse" */
KASSERT(POOL_INPGHDR(&phpool));
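
Folding the IPL into construction also lets phpool come up fully initialized in one step. phpool supplies the out-of-page pool_item_header allocations for every other pool, so it is pinned at IPL_HIGH, the most restrictive level any client pool could declare, and the KASSERT guards against the header pool ever needing off-page headers of its own.
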
@@ -348,14 +346,6 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
rw_exit_write(&pool_lock);
}
-void
-pool_setipl(struct pool *pp, int ipl)
-{
- pp->pr_ipl = ipl;
- mtx_init(&pp->pr_mtx, ipl);
- mtx_init(&pp->pr_requests_mtx, ipl);
-}
-
/*
* Decommission a pool resource.
*/
@@ -433,7 +423,6 @@ pool_get(struct pool *pp, int flags)
KASSERT(flags & (PR_WAITOK | PR_NOWAIT));
-
mtx_enter(&pp->pr_mtx);
if (pp->pr_nout >= pp->pr_hardlimit) {
if (ISSET(flags, PR_NOWAIT|PR_LIMITFAIL))
@@ -449,8 +438,8 @@ pool_get(struct pool *pp, int flags)
if (v == NULL) {
struct pool_get_memory mem = {
- MUTEX_INITIALIZER((pp->pr_ipl == -1) ?
- IPL_NONE : pp->pr_ipl), NULL };
+ MUTEX_INITIALIZER(pp->pr_ipl),
+ NULL };
struct pool_request pr;
pool_request_init(&pr, pool_get_done, &mem);
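
Because pr_ipl now always holds a real IPL, the on-stack completion mutex can be initialized from it directly, without the old (pr_ipl == -1) ? IPL_NONE : pr_ipl fallback. A condensed sketch of the wait pattern this structure supports (paraphrased from the surrounding pool_get() code, not a verbatim copy):

	struct pool_get_memory {
		struct mutex mtx;
		void *v;		/* filled in by pool_get_done() */
	};

	/* waiter side: sleep until the request path delivers an item */
	mtx_enter(&mem.mtx);
	while (mem.v == NULL)
		msleep(&mem, &mem.mtx, PSWP, pp->pr_wchan, 0);
	mtx_leave(&mem.mtx);
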
@@ -552,8 +541,7 @@ pool_do_get(struct pool *pp, int flags, int *slowdown)
MUTEX_ASSERT_LOCKED(&pp->pr_mtx);
- if (pp->pr_ipl != -1)
- splassert(pp->pr_ipl);
+ splassert(pp->pr_ipl);
/*
* Account for this item now to avoid races if we need to give up
@@ -645,8 +633,7 @@ pool_put(struct pool *pp, void *v)
mtx_enter(&pp->pr_mtx);
- if (pp->pr_ipl != -1)
- splassert(pp->pr_ipl);
+ splassert(pp->pr_ipl);
ph = pr_find_pagehead(pp, v);
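
With every pool carrying a concrete IPL, the splassert() in both pool_do_get() and pool_put() runs unconditionally: callers are always checked against the level the pool declared. A hypothetical driver showing the discipline this enforces (sc_iopl, struct sc_io, and sc_intr() are invented for illustration):

	/* pool declared for use from block-I/O interrupt context */
	struct pool sc_iopl;

	pool_init(&sc_iopl, sizeof(struct sc_io), 0, IPL_BIO, 0,
	    "scio", NULL);

	/* interrupt handler established at IPL_BIO: asserts pass */
	int
	sc_intr(void *arg)
	{
		struct sc_io *io;

		io = pool_get(&sc_iopl, PR_NOWAIT);
		if (io == NULL)
			return (1);
		/* ... */
		pool_put(&sc_iopl, io);
		return (1);
	}
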
@@ -1327,8 +1314,7 @@ sysctl_dopool(int *name, u_int namelen, char *oldp, size_t *oldlenp)
case KERN_POOL_POOL:
memset(&pi, 0, sizeof(pi));
- if (pp->pr_ipl != -1)
- mtx_enter(&pp->pr_mtx);
+ mtx_enter(&pp->pr_mtx);
pi.pr_size = pp->pr_size;
pi.pr_pgsize = pp->pr_pgsize;
pi.pr_itemsperpage = pp->pr_itemsperpage;
@@ -1345,8 +1331,7 @@ sysctl_dopool(int *name, u_int namelen, char *oldp, size_t *oldlenp)
pi.pr_npagefree = pp->pr_npagefree;
pi.pr_hiwat = pp->pr_hiwat;
pi.pr_nidle = pp->pr_nidle;
- if (pp->pr_ipl != -1)
- mtx_leave(&pp->pr_mtx);
+ mtx_leave(&pp->pr_mtx);
rv = sysctl_rdstruct(oldp, oldlenp, NULL, &pi, sizeof(pi));
break;
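
The sysctl path gets the same simplification: since pool_init() now initializes pr_mtx (at the declared IPL) for every pool, sysctl_dopool() can take and release the mutex unconditionally while it snapshots the pool statistics.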