summaryrefslogtreecommitdiff
path: root/sys/kern
diff options
context:
space:
mode:
authorDavid Gwynne <dlg@cvs.openbsd.org>2016-09-15 01:24:09 +0000
committerDavid Gwynne <dlg@cvs.openbsd.org>2016-09-15 01:24:09 +0000
commite206f795257761b80c3437bfe396b6dd3b533b72 (patch)
tree3e3f0c4601978037f4d9598e6b66322abc281f21 /sys/kern
parent7d37e1bcb6ac494ae54693c8a0e450e8d95ea089 (diff)
move pools to using the subr_tree version of rb trees
this is half way to recovering the space used by the subr_tree code.
Diffstat (limited to 'sys/kern')
-rw-r--r--sys/kern/subr_pool.c20
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/sys/kern/subr_pool.c b/sys/kern/subr_pool.c
index ebd51713a8a..ea14aa06902 100644
--- a/sys/kern/subr_pool.c
+++ b/sys/kern/subr_pool.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: subr_pool.c,v 1.196 2016/09/05 09:04:31 dlg Exp $ */
+/* $OpenBSD: subr_pool.c,v 1.197 2016/09/15 01:24:08 dlg Exp $ */
/* $NetBSD: subr_pool.c,v 1.61 2001/09/26 07:14:56 chs Exp $ */
/*-
@@ -79,7 +79,7 @@ struct pool_item_header {
TAILQ_ENTRY(pool_item_header)
ph_pagelist; /* pool page list */
XSIMPLEQ_HEAD(,pool_item) ph_itemlist; /* chunk list for this page */
- RB_ENTRY(pool_item_header)
+ RBT_ENTRY(pool_item_header)
ph_node; /* Off-page page headers */
int ph_nmissing; /* # of chunks in use */
caddr_t ph_page; /* this page's address */
@@ -165,8 +165,11 @@ struct task pool_gc_task = TASK_INITIALIZER(pool_gc_pages, NULL);
int pool_wait_free = 1;
int pool_wait_gc = 8;
+RBT_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
+
static inline int
-phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
+phtree_compare(const struct pool_item_header *a,
+ const struct pool_item_header *b)
{
vaddr_t va = (vaddr_t)a->ph_page;
vaddr_t vb = (vaddr_t)b->ph_page;
@@ -180,8 +183,7 @@ phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
return (0);
}
-RB_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
-RB_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
+RBT_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
/*
* Return the pool page header based on page address.
@@ -200,7 +202,7 @@ pr_find_pagehead(struct pool *pp, void *v)
}
key.ph_page = v;
- ph = RB_NFIND(phtree, &pp->pr_phtree, &key);
+ ph = RBT_NFIND(phtree, &pp->pr_phtree, &key);
if (ph == NULL)
panic("%s: %s: page header missing", __func__, pp->pr_wchan);
@@ -292,7 +294,7 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
pp->pr_hardlimit_ratecap.tv_usec = 0;
pp->pr_hardlimit_warning_last.tv_sec = 0;
pp->pr_hardlimit_warning_last.tv_usec = 0;
- RB_INIT(&pp->pr_phtree);
+ RBT_INIT(phtree, &pp->pr_phtree);
/*
* Use the space between the chunks and the page header
@@ -847,7 +849,7 @@ pool_p_insert(struct pool *pp, struct pool_item_header *ph)
TAILQ_INSERT_TAIL(&pp->pr_emptypages, ph, ph_pagelist);
if (!POOL_INPGHDR(pp))
- RB_INSERT(phtree, &pp->pr_phtree, ph);
+ RBT_INSERT(phtree, &pp->pr_phtree, ph);
pp->pr_nitems += pp->pr_itemsperpage;
pp->pr_nidle++;
@@ -868,7 +870,7 @@ pool_p_remove(struct pool *pp, struct pool_item_header *ph)
pp->pr_nitems -= pp->pr_itemsperpage;
if (!POOL_INPGHDR(pp))
- RB_REMOVE(phtree, &pp->pr_phtree, ph);
+ RBT_REMOVE(phtree, &pp->pr_phtree, ph);
TAILQ_REMOVE(&pp->pr_emptypages, ph, ph_pagelist);
pool_update_curpage(pp);