Diffstat (limited to 'sys/uvm')
-rw-r--r--  sys/uvm/uvm_aobj.c       4
-rw-r--r--  sys/uvm/uvm_device.c     4
-rw-r--r--  sys/uvm/uvm_map.c        6
-rw-r--r--  sys/uvm/uvm_object.c     4
-rw-r--r--  sys/uvm/uvm_object.h     8
-rw-r--r--  sys/uvm/uvm_page.c      12
-rw-r--r--  sys/uvm/uvm_page.h       4
-rw-r--r--  sys/uvm/uvm_pmemrange.c 80
-rw-r--r--  sys/uvm/uvm_pmemrange.h 14
-rw-r--r--  sys/uvm/uvm_vnode.c      6
10 files changed, 70 insertions(+), 72 deletions(-)
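
This diff converts UVM's object and pmemrange trees from the RB macros to
the RBT interface, whose operations name the tree type and whose
comparators take const pointers. Below is a minimal sketch of the
conversion pattern, assuming OpenBSD's <sys/tree.h> RBT API as used in the
hunks that follow; struct node, nodetree, and offcmp are hypothetical
stand-ins for vm_page, uvm_objtree, and uvm_pagecmp.

#include <sys/tree.h>

struct node {
        RBT_ENTRY(node) entry;          /* was RB_ENTRY(node) */
        unsigned long   key;
};

RBT_HEAD(nodetree, node);               /* was RB_HEAD(nodetree, node) */

/* RBT comparators are const-qualified; RB comparators were not. */
int
offcmp(const struct node *a, const struct node *b)
{
        return (a->key < b->key ? -1 : a->key > b->key);
}

RBT_PROTOTYPE(nodetree, node, entry, offcmp);
RBT_GENERATE(nodetree, node, entry, offcmp);

Most of the churn in the per-file diffs below is the mechanical
consequence of this: calls like RBT_ROOT, RBT_EMPTY, RBT_INIT, and
RBT_PREV now take the tree name where the RB macros inferred it.
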
diff --git a/sys/uvm/uvm_aobj.c b/sys/uvm/uvm_aobj.c
index 0b2a3503d64..bdee0e30381 100644
--- a/sys/uvm/uvm_aobj.c
+++ b/sys/uvm/uvm_aobj.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_aobj.c,v 1.82 2016/09/15 02:00:18 dlg Exp $ */
+/* $OpenBSD: uvm_aobj.c,v 1.83 2016/09/16 02:35:42 dlg Exp $ */
/* $NetBSD: uvm_aobj.c,v 1.39 2001/02/18 21:19:08 chs Exp $ */
/*
@@ -869,7 +869,7 @@ uao_detach_locked(struct uvm_object *uobj)
* Release swap resources then free the page.
*/
uvm_lock_pageq();
- while((pg = RB_ROOT(&uobj->memt)) != NULL) {
+ while((pg = RBT_ROOT(uvm_objtree, &uobj->memt)) != NULL) {
if (pg->pg_flags & PG_BUSY) {
atomic_setbits_int(&pg->pg_flags, PG_WANTED);
uvm_unlock_pageq();
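
The uao_detach_locked() loop above drains the tree by repeatedly taking
the root, which stays valid across removals where a FOREACH cursor would
not; the real loop also waits out PG_BUSY pages and frees through
uvm_pagefree(). A sketch of just the skeleton, reusing the hypothetical
nodetree from the sketch above; node_free() is a stand-in destructor.

void
drain(struct nodetree *t)
{
        struct node *n;

        while ((n = RBT_ROOT(nodetree, t)) != NULL) {
                RBT_REMOVE(nodetree, t, n);
                node_free(n);           /* hypothetical destructor */
        }
}
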
diff --git a/sys/uvm/uvm_device.c b/sys/uvm/uvm_device.c
index ae91ab2653c..36cb8bf0eb1 100644
--- a/sys/uvm/uvm_device.c
+++ b/sys/uvm/uvm_device.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_device.c,v 1.52 2015/08/28 00:03:54 deraadt Exp $ */
+/* $OpenBSD: uvm_device.c,v 1.53 2016/09/16 02:35:42 dlg Exp $ */
/* $NetBSD: uvm_device.c,v 1.30 2000/11/25 06:27:59 chs Exp $ */
/*
@@ -232,7 +232,7 @@ again:
uobj->uo_refs--;
return;
}
- KASSERT(uobj->uo_npages == 0 && RB_EMPTY(&uobj->memt));
+ KASSERT(uobj->uo_npages == 0 && RBT_EMPTY(uvm_objtree, &uobj->memt));
/* is it being held? if so, wait until others are done. */
mtx_enter(&udv_lock);
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index d149a5053e5..7b3a51cd1af 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map.c,v 1.224 2016/09/16 01:09:53 dlg Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.225 2016/09/16 02:35:42 dlg Exp $ */
/* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
/*
@@ -2913,7 +2913,7 @@ uvm_object_printit(uobj, full, pr)
return;
}
(*pr)(" PAGES <pg,offset>:\n ");
- RB_FOREACH(pg, uvm_objtree, &uobj->memt) {
+ RBT_FOREACH(pg, uvm_objtree, &uobj->memt) {
(*pr)("<%p,0x%llx> ", pg, (long long)pg->offset);
if ((cnt % 3) == 2) {
(*pr)("\n ");
@@ -2975,7 +2975,7 @@ uvm_page_printit(pg, full, pr)
uobj = pg->uobject;
if (uobj) {
(*pr)(" checking object list\n");
- RB_FOREACH(tpg, uvm_objtree, &uobj->memt) {
+ RBT_FOREACH(tpg, uvm_objtree, &uobj->memt) {
if (tpg == pg) {
break;
}
diff --git a/sys/uvm/uvm_object.c b/sys/uvm/uvm_object.c
index 6a666f15965..a326d7147f6 100644
--- a/sys/uvm/uvm_object.c
+++ b/sys/uvm/uvm_object.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_object.c,v 1.13 2015/08/21 16:04:35 visa Exp $ */
+/* $OpenBSD: uvm_object.c,v 1.14 2016/09/16 02:35:42 dlg Exp $ */
/*
* Copyright (c) 2006 The NetBSD Foundation, Inc.
@@ -50,7 +50,7 @@ void
uvm_objinit(struct uvm_object *uobj, struct uvm_pagerops *pgops, int refs)
{
uobj->pgops = pgops;
- RB_INIT(&uobj->memt);
+ RBT_INIT(uvm_objtree, &uobj->memt);
uobj->uo_npages = 0;
uobj->uo_refs = refs;
}
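
RBT_INIT() is one of the few calls whose shape visibly changes: the RB
macro took only the head, while the RBT function also names the tree
type, as sketched here against the hypothetical nodetree.

void
obj_init(struct nodetree *t)
{
        RBT_INIT(nodetree, t);          /* was RB_INIT(t) */
}
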
diff --git a/sys/uvm/uvm_object.h b/sys/uvm/uvm_object.h
index 3c7d9b4b053..0c453ef3d45 100644
--- a/sys/uvm/uvm_object.h
+++ b/sys/uvm/uvm_object.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_object.h,v 1.21 2014/07/11 16:35:40 jsg Exp $ */
+/* $OpenBSD: uvm_object.h,v 1.22 2016/09/16 02:35:42 dlg Exp $ */
/* $NetBSD: uvm_object.h,v 1.11 2001/03/09 01:02:12 chs Exp $ */
/*
@@ -41,7 +41,7 @@
struct uvm_object {
struct uvm_pagerops *pgops; /* pager ops */
- RB_HEAD(uvm_objtree, vm_page) memt; /* pages in object */
+ RBT_HEAD(uvm_objtree, vm_page) memt; /* pages in object */
int uo_npages; /* # of pages in memt */
int uo_refs; /* reference count */
};
@@ -76,8 +76,8 @@ extern struct uvm_pagerops uvm_vnodeops;
extern struct uvm_pagerops uvm_deviceops;
/* For object trees */
-int uvm_pagecmp(struct vm_page *, struct vm_page *);
-RB_PROTOTYPE(uvm_objtree, vm_page, objt, uvm_pagecmp)
+int uvm_pagecmp(const struct vm_page *, const struct vm_page *);
+RBT_PROTOTYPE(uvm_objtree, vm_page, objt, uvm_pagecmp)
#define UVM_OBJ_IS_VNODE(uobj) \
((uobj)->pgops == &uvm_vnodeops)
diff --git a/sys/uvm/uvm_page.c b/sys/uvm/uvm_page.c
index 3826a4d7dd5..24a150c859f 100644
--- a/sys/uvm/uvm_page.c
+++ b/sys/uvm/uvm_page.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_page.c,v 1.144 2015/10/30 16:47:01 miod Exp $ */
+/* $OpenBSD: uvm_page.c,v 1.145 2016/09/16 02:35:42 dlg Exp $ */
/* $NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $ */
/*
@@ -78,10 +78,10 @@
/*
* for object trees
*/
-RB_GENERATE(uvm_objtree, vm_page, objt, uvm_pagecmp);
+RBT_GENERATE(uvm_objtree, vm_page, objt, uvm_pagecmp);
int
-uvm_pagecmp(struct vm_page *a, struct vm_page *b)
+uvm_pagecmp(const struct vm_page *a, const struct vm_page *b)
{
return (a->offset < b->offset ? -1 : a->offset > b->offset);
}
@@ -134,7 +134,7 @@ uvm_pageinsert(struct vm_page *pg)
struct vm_page *dupe;
KASSERT((pg->pg_flags & PG_TABLED) == 0);
- dupe = RB_INSERT(uvm_objtree, &pg->uobject->memt, pg);
+ dupe = RBT_INSERT(uvm_objtree, &pg->uobject->memt, pg);
/* not allowed to insert over another page */
KASSERT(dupe == NULL);
atomic_setbits_int(&pg->pg_flags, PG_TABLED);
@@ -150,7 +150,7 @@ static __inline void
uvm_pageremove(struct vm_page *pg)
{
KASSERT(pg->pg_flags & PG_TABLED);
- RB_REMOVE(uvm_objtree, &pg->uobject->memt, pg);
+ RBT_REMOVE(uvm_objtree, &pg->uobject->memt, pg);
atomic_clearbits_int(&pg->pg_flags, PG_TABLED);
pg->uobject->uo_npages--;
@@ -1203,7 +1203,7 @@ uvm_pagelookup(struct uvm_object *obj, voff_t off)
struct vm_page pg;
pg.offset = off;
- return (RB_FIND(uvm_objtree, &obj->memt, &pg));
+ return (RBT_FIND(uvm_objtree, &obj->memt, &pg));
}
/*
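
uvm_pagelookup() above shows the RBT_FIND() key idiom: the comparator
only reads the offset field, so a stack-allocated vm_page with just that
field set serves as the search key. The same shape under the hypothetical
nodetree from earlier:

struct node *
lookup(struct nodetree *t, unsigned long key)
{
        struct node k;

        k.key = key;                    /* the only field offcmp() reads */
        return (RBT_FIND(nodetree, t, &k));
}
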
diff --git a/sys/uvm/uvm_page.h b/sys/uvm/uvm_page.h
index 6d74f6d4c47..ca1fc255d3c 100644
--- a/sys/uvm/uvm_page.h
+++ b/sys/uvm/uvm_page.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_page.h,v 1.61 2016/03/09 16:45:43 deraadt Exp $ */
+/* $OpenBSD: uvm_page.h,v 1.62 2016/09/16 02:35:42 dlg Exp $ */
/* $NetBSD: uvm_page.h,v 1.19 2000/12/28 08:24:55 chs Exp $ */
/*
@@ -94,7 +94,7 @@ TAILQ_HEAD(pglist, vm_page);
struct vm_page {
TAILQ_ENTRY(vm_page) pageq; /* queue info for FIFO
* queue or free list (P) */
- RB_ENTRY(vm_page) objt; /* object tree */
+ RBT_ENTRY(vm_page) objt; /* object tree */
struct vm_anon *uanon; /* anon (P) */
struct uvm_object *uobject; /* object (P) */
diff --git a/sys/uvm/uvm_pmemrange.c b/sys/uvm/uvm_pmemrange.c
index d7ae21f310a..3aff6ee91de 100644
--- a/sys/uvm/uvm_pmemrange.c
+++ b/sys/uvm/uvm_pmemrange.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_pmemrange.c,v 1.50 2016/01/29 11:50:40 tb Exp $ */
+/* $OpenBSD: uvm_pmemrange.c,v 1.51 2016/09/16 02:35:42 dlg Exp $ */
/*
* Copyright (c) 2009, 2010 Ariane van der Steldt <ariane@stack.nl>
@@ -73,8 +73,6 @@
/* Tree comparators. */
int uvm_pmemrange_addr_cmp(struct uvm_pmemrange *, struct uvm_pmemrange *);
int uvm_pmemrange_use_cmp(struct uvm_pmemrange *, struct uvm_pmemrange *);
-int uvm_pmr_addr_cmp(struct vm_page *, struct vm_page *);
-int uvm_pmr_size_cmp(struct vm_page *, struct vm_page *);
int uvm_pmr_pg_to_memtype(struct vm_page *);
#ifdef DDB
@@ -95,8 +93,8 @@ uvm_pmr_pg_to_memtype(struct vm_page *pg)
}
/* Trees. */
-RB_GENERATE(uvm_pmr_addr, vm_page, objt, uvm_pmr_addr_cmp);
-RB_GENERATE(uvm_pmr_size, vm_page, objt, uvm_pmr_size_cmp);
+RBT_GENERATE(uvm_pmr_addr, vm_page, objt, uvm_pmr_addr_cmp);
+RBT_GENERATE(uvm_pmr_size, vm_page, objt, uvm_pmr_size_cmp);
RB_GENERATE(uvm_pmemrange_addr, uvm_pmemrange, pmr_addr,
uvm_pmemrange_addr_cmp);
@@ -206,7 +204,7 @@ uvm_pmemrange_use_cmp(struct uvm_pmemrange *lhs, struct uvm_pmemrange *rhs)
}
int
-uvm_pmr_addr_cmp(struct vm_page *lhs, struct vm_page *rhs)
+uvm_pmr_addr_cmp(const struct vm_page *lhs, const struct vm_page *rhs)
{
paddr_t lhs_addr, rhs_addr;
@@ -217,7 +215,7 @@ uvm_pmr_addr_cmp(struct vm_page *lhs, struct vm_page *rhs)
}
int
-uvm_pmr_size_cmp(struct vm_page *lhs, struct vm_page *rhs)
+uvm_pmr_size_cmp(const struct vm_page *lhs, const struct vm_page *rhs)
{
psize_t lhs_size, rhs_size;
int cmp;
@@ -245,14 +243,14 @@ uvm_pmr_nfindsz(struct uvm_pmemrange *pmr, psize_t sz, int mti)
if (sz == 1 && !TAILQ_EMPTY(&pmr->single[mti]))
return TAILQ_FIRST(&pmr->single[mti]);
- node = RB_ROOT(&pmr->size[mti]);
+ node = RBT_ROOT(uvm_pmr_size, &pmr->size[mti]);
best = NULL;
while (node != NULL) {
if ((node - 1)->fpgsz >= sz) {
best = (node - 1);
- node = RB_LEFT(node, objt);
+ node = RBT_LEFT(uvm_objtree, node);
} else
- node = RB_RIGHT(node, objt);
+ node = RBT_RIGHT(uvm_objtree, node);
}
return best;
}
@@ -271,9 +269,9 @@ uvm_pmr_nextsz(struct uvm_pmemrange *pmr, struct vm_page *pg, int mt)
if (TAILQ_NEXT(pg, pageq) != NULL)
return TAILQ_NEXT(pg, pageq);
else
- npg = RB_MIN(uvm_pmr_size, &pmr->size[mt]);
+ npg = RBT_MIN(uvm_pmr_size, &pmr->size[mt]);
} else
- npg = RB_NEXT(uvm_pmr_size, &pmr->size[mt], pg + 1);
+ npg = RBT_NEXT(uvm_pmr_size, pg + 1);
return npg == NULL ? NULL : npg - 1;
}
@@ -292,11 +290,11 @@ uvm_pmr_pnaddr(struct uvm_pmemrange *pmr, struct vm_page *pg,
{
KASSERT(pg_prev != NULL && pg_next != NULL);
- *pg_next = RB_NFIND(uvm_pmr_addr, &pmr->addr, pg);
+ *pg_next = RBT_NFIND(uvm_pmr_addr, &pmr->addr, pg);
if (*pg_next == NULL)
- *pg_prev = RB_MAX(uvm_pmr_addr, &pmr->addr);
+ *pg_prev = RBT_MAX(uvm_pmr_addr, &pmr->addr);
else
- *pg_prev = RB_PREV(uvm_pmr_addr, &pmr->addr, *pg_next);
+ *pg_prev = RBT_PREV(uvm_pmr_addr, *pg_next);
KDASSERT(*pg_next == NULL ||
VM_PAGE_TO_PHYS(*pg_next) > VM_PAGE_TO_PHYS(pg));
@@ -326,9 +324,9 @@ uvm_pmr_pnaddr(struct uvm_pmemrange *pmr, struct vm_page *pg,
void
uvm_pmr_remove_addr(struct uvm_pmemrange *pmr, struct vm_page *pg)
{
- KDASSERT(RB_FIND(uvm_pmr_addr, &pmr->addr, pg) == pg);
+ KDASSERT(RBT_FIND(uvm_pmr_addr, &pmr->addr, pg) == pg);
KDASSERT(pg->pg_flags & PQ_FREE);
- RB_REMOVE(uvm_pmr_addr, &pmr->addr, pg);
+ RBT_REMOVE(uvm_pmr_addr, &pmr->addr, pg);
pmr->nsegs--;
}
@@ -359,7 +357,7 @@ uvm_pmr_remove_size(struct uvm_pmemrange *pmr, struct vm_page *pg)
} else {
KDASSERT(RB_FIND(uvm_pmr_size, &pmr->size[memtype],
pg + 1) == pg + 1);
- RB_REMOVE(uvm_pmr_size, &pmr->size[memtype], pg + 1);
+ RBT_REMOVE(uvm_pmr_size, &pmr->size[memtype], pg + 1);
}
}
/* Remove from both trees. */
@@ -420,7 +418,7 @@ uvm_pmr_insert_addr(struct uvm_pmemrange *pmr, struct vm_page *pg, int no_join)
}
}
- RB_INSERT(uvm_pmr_addr, &pmr->addr, pg);
+ RBT_INSERT(uvm_pmr_addr, &pmr->addr, pg);
pmr->nsegs++;
@@ -462,7 +460,7 @@ uvm_pmr_insert_size(struct uvm_pmemrange *pmr, struct vm_page *pg)
if (pg->fpgsz == 1)
TAILQ_INSERT_TAIL(&pmr->single[memtype], pg, pageq);
else
- RB_INSERT(uvm_pmr_size, &pmr->size[memtype], pg + 1);
+ RBT_INSERT(uvm_pmr_size, &pmr->size[memtype], pg + 1);
}
/* Insert in both trees. */
struct vm_page *
@@ -1364,14 +1362,14 @@ uvm_pmr_split(paddr_t pageno)
uvm_pmr_assertvalid(drain);
KASSERT(drain->nsegs == 0);
- RB_FOREACH(rebuild, uvm_pmr_addr, &pmr->addr) {
+ RBT_FOREACH(rebuild, uvm_pmr_addr, &pmr->addr) {
if (atop(VM_PAGE_TO_PHYS(rebuild)) >= pageno)
break;
}
if (rebuild == NULL)
- prev = RB_MAX(uvm_pmr_addr, &pmr->addr);
+ prev = RBT_MAX(uvm_pmr_addr, &pmr->addr);
else
- prev = RB_PREV(uvm_pmr_addr, &pmr->addr, rebuild);
+ prev = RBT_PREV(uvm_pmr_addr, rebuild);
KASSERT(prev == NULL || atop(VM_PAGE_TO_PHYS(prev)) < pageno);
/*
@@ -1399,7 +1397,7 @@ uvm_pmr_split(paddr_t pageno)
/* Move free chunks that no longer fall in the range. */
for (; rebuild != NULL; rebuild = next) {
- next = RB_NEXT(uvm_pmr_addr, &pmr->addr, rebuild);
+ next = RBT_NEXT(uvm_pmr_addr, rebuild);
uvm_pmr_remove(pmr, rebuild);
uvm_pmr_insert(drain, rebuild, 1);
@@ -1476,9 +1474,9 @@ uvm_pmr_allocpmr(void)
}
KASSERT(nw != NULL);
memset(nw, 0, sizeof(struct uvm_pmemrange));
- RB_INIT(&nw->addr);
+ RBT_INIT(uvm_pmr_addr, &nw->addr);
for (i = 0; i < UVM_PMR_MEMTYPE_MAX; i++) {
- RB_INIT(&nw->size[i]);
+ RBT_INIT(uvm_pmr_size, &nw->size[i]);
TAILQ_INIT(&nw->single[i]);
}
return nw;
@@ -1554,11 +1552,11 @@ uvm_pmr_isfree(struct vm_page *pg)
pmr = uvm_pmemrange_find(atop(VM_PAGE_TO_PHYS(pg)));
if (pmr == NULL)
return 0;
- r = RB_NFIND(uvm_pmr_addr, &pmr->addr, pg);
+ r = RBT_NFIND(uvm_pmr_addr, &pmr->addr, pg);
if (r == NULL)
- r = RB_MAX(uvm_pmr_addr, &pmr->addr);
+ r = RBT_MAX(uvm_pmr_addr, &pmr->addr);
else if (r != pg)
- r = RB_PREV(uvm_pmr_addr, &pmr->addr, r);
+ r = RBT_PREV(uvm_pmr_addr, r);
if (r == NULL)
return 0; /* Empty tree. */
@@ -1600,9 +1598,9 @@ uvm_pmr_rootupdate(struct uvm_pmemrange *pmr, struct vm_page *init_root,
atop(VM_PAGE_TO_PHYS(root)) + root->fpgsz,
start, end)) {
if (direction == 1)
- root = RB_RIGHT(root, objt);
+ root = RBT_RIGHT(uvm_objtree, root);
else
- root = RB_LEFT(root, objt);
+ root = RBT_LEFT(uvm_objtree, root);
}
if (root == NULL || uvm_pmr_pg_to_memtype(root) == memtype)
return root;
@@ -1620,7 +1618,7 @@ uvm_pmr_rootupdate(struct uvm_pmemrange *pmr, struct vm_page *init_root,
* Cache the upper page, so we can page-walk later.
*/
high = root;
- high_next = RB_RIGHT(high, objt);
+ high_next = RBT_RIGHT(uvm_objtree, high);
while (high_next != NULL && PMR_INTERSECTS_WITH(
atop(VM_PAGE_TO_PHYS(high_next)),
atop(VM_PAGE_TO_PHYS(high_next)) + high_next->fpgsz,
@@ -1628,7 +1626,7 @@ uvm_pmr_rootupdate(struct uvm_pmemrange *pmr, struct vm_page *init_root,
high = high_next;
if (uvm_pmr_pg_to_memtype(high) == memtype)
return high;
- high_next = RB_RIGHT(high, objt);
+ high_next = RBT_RIGHT(uvm_objtree, high);
}
/*
@@ -1636,7 +1634,7 @@ uvm_pmr_rootupdate(struct uvm_pmemrange *pmr, struct vm_page *init_root,
* Cache the lower page, so we can page-walk later.
*/
low = root;
- low_next = RB_LEFT(low, objt);
+ low_next = RBT_LEFT(uvm_objtree, low);
while (low_next != NULL && PMR_INTERSECTS_WITH(
atop(VM_PAGE_TO_PHYS(low_next)),
atop(VM_PAGE_TO_PHYS(low_next)) + low_next->fpgsz,
@@ -1644,16 +1642,16 @@ uvm_pmr_rootupdate(struct uvm_pmemrange *pmr, struct vm_page *init_root,
low = low_next;
if (uvm_pmr_pg_to_memtype(low) == memtype)
return low;
- low_next = RB_LEFT(low, objt);
+ low_next = RBT_LEFT(uvm_objtree, low);
}
if (low == high)
return NULL;
/* No hits. Walk the address tree until we find something usable. */
- for (low = RB_NEXT(uvm_pmr_addr, &pmr->addr, low);
+ for (low = RBT_NEXT(uvm_pmr_addr, low);
low != high;
- low = RB_NEXT(uvm_pmr_addr, &pmr->addr, low)) {
+ low = RBT_NEXT(uvm_pmr_addr, low)) {
KDASSERT(PMR_IS_SUBRANGE_OF(atop(VM_PAGE_TO_PHYS(low)),
atop(VM_PAGE_TO_PHYS(low)) + low->fpgsz,
start, end));
@@ -1719,7 +1717,7 @@ uvm_pmr_get1page(psize_t count, int memtype_init, struct pglist *result,
* Note that a size tree gives pg[1] instead of
* pg[0].
*/
- found = RB_MIN(uvm_pmr_size,
+ found = RBT_MIN(uvm_pmr_size,
&pmr->size[memtype]);
if (found != NULL) {
found--;
@@ -1735,7 +1733,7 @@ uvm_pmr_get1page(psize_t count, int memtype_init, struct pglist *result,
* Try address-guided search to meet the page
* number constraints.
*/
- found = RB_ROOT(&pmr->addr);
+ found = RBT_ROOT(uvm_pmr_addr, &pmr->addr);
if (found != NULL) {
found = uvm_pmr_rootupdate(pmr, found,
start, end, memtype);
@@ -1858,14 +1856,14 @@ uvm_pmr_print(void)
useq_len++;
free = 0;
for (mt = 0; mt < UVM_PMR_MEMTYPE_MAX; mt++) {
- pg = RB_MAX(uvm_pmr_size, &pmr->size[mt]);
+ pg = RBT_MAX(uvm_pmr_size, &pmr->size[mt]);
if (pg != NULL)
pg--;
else
pg = TAILQ_FIRST(&pmr->single[mt]);
size[mt] = (pg == NULL ? 0 : pg->fpgsz);
- RB_FOREACH(pg, uvm_pmr_addr, &pmr->addr)
+ RBT_FOREACH(pg, uvm_pmr_addr, &pmr->addr)
free += pg->fpgsz;
}
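
The pg + 1 and pg - 1 arithmetic throughout this file follows from
vm_page carrying a single objt link: a free segment's first page is
linked into the address tree, so the size tree links through the
segment's second page, and one-page segments (which have no second page)
go on the single[] TAILQ instead. The helpers below are hypothetical
condensations of uvm_pmr_insert_size() and uvm_pmr_nfindsz() above,
shown only to make the convention explicit.

static inline void
seg_insert_size(struct uvm_pmemrange *pmr, struct vm_page *pg, int mt)
{
        if (pg->fpgsz == 1)
                TAILQ_INSERT_TAIL(&pmr->single[mt], pg, pageq);
        else
                RBT_INSERT(uvm_pmr_size, &pmr->size[mt], pg + 1);
}

static inline struct vm_page *
seg_smallest(struct uvm_pmemrange *pmr, int mt)
{
        struct vm_page *pg;

        if (!TAILQ_EMPTY(&pmr->single[mt]))
                return TAILQ_FIRST(&pmr->single[mt]);
        pg = RBT_MIN(uvm_pmr_size, &pmr->size[mt]);
        return (pg == NULL ? NULL : pg - 1);    /* step back to pg[0] */
}
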
diff --git a/sys/uvm/uvm_pmemrange.h b/sys/uvm/uvm_pmemrange.h
index 43f662b77d5..31a2068d12e 100644
--- a/sys/uvm/uvm_pmemrange.h
+++ b/sys/uvm/uvm_pmemrange.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_pmemrange.h,v 1.12 2015/02/05 23:51:06 mpi Exp $ */
+/* $OpenBSD: uvm_pmemrange.h,v 1.13 2016/09/16 02:35:42 dlg Exp $ */
/*
* Copyright (c) 2009 Ariane van der Steldt <ariane@stack.nl>
@@ -23,8 +23,8 @@
#ifndef _UVM_UVM_PMEMRANGE_H_
#define _UVM_UVM_PMEMRANGE_H_
-RB_HEAD(uvm_pmr_addr, vm_page);
-RB_HEAD(uvm_pmr_size, vm_page);
+RBT_HEAD(uvm_pmr_addr, vm_page);
+RBT_HEAD(uvm_pmr_size, vm_page);
/*
* Page types available:
@@ -124,11 +124,11 @@ int uvm_pmr_isfree(struct vm_page *pg);
* Internal tree logic.
*/
-int uvm_pmr_addr_cmp(struct vm_page *, struct vm_page *);
-int uvm_pmr_size_cmp(struct vm_page *, struct vm_page *);
+int uvm_pmr_addr_cmp(const struct vm_page *, const struct vm_page *);
+int uvm_pmr_size_cmp(const struct vm_page *, const struct vm_page *);
-RB_PROTOTYPE(uvm_pmr_addr, vm_page, objt, uvm_pmr_addr_cmp);
-RB_PROTOTYPE(uvm_pmr_size, vm_page, objt, uvm_pmr_size_cmp);
+RBT_PROTOTYPE(uvm_pmr_addr, vm_page, objt, uvm_pmr_addr_cmp);
+RBT_PROTOTYPE(uvm_pmr_size, vm_page, objt, uvm_pmr_size_cmp);
RB_PROTOTYPE(uvm_pmemrange_addr, uvm_pmemrange, pmr_addr,
uvm_pmemrange_addr_cmp);
diff --git a/sys/uvm/uvm_vnode.c b/sys/uvm/uvm_vnode.c
index a34de75e723..2abfac2f14d 100644
--- a/sys/uvm/uvm_vnode.c
+++ b/sys/uvm/uvm_vnode.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_vnode.c,v 1.92 2016/03/19 12:04:16 natano Exp $ */
+/* $OpenBSD: uvm_vnode.c,v 1.93 2016/09/16 02:35:42 dlg Exp $ */
/* $NetBSD: uvm_vnode.c,v 1.36 2000/11/24 20:34:01 chs Exp $ */
/*
@@ -362,7 +362,7 @@ uvn_detach(struct uvm_object *uobj)
if (uvn->u_flags & UVM_VNODE_WRITEABLE) {
LIST_REMOVE(uvn, u_wlist);
}
- KASSERT(RB_EMPTY(&uobj->memt));
+ KASSERT(RBT_EMPTY(uvm_objtree, &uobj->memt));
oldflags = uvn->u_flags;
uvn->u_flags = 0;
@@ -462,7 +462,7 @@ uvm_vnp_terminate(struct vnode *vp)
while (uvn->u_obj.uo_npages) {
#ifdef DEBUG
struct vm_page *pp;
- RB_FOREACH(pp, uvm_objtree, &uvn->u_obj.memt) {
+ RBT_FOREACH(pp, uvm_objtree, &uvn->u_obj.memt) {
if ((pp->pg_flags & PG_BUSY) == 0)
panic("uvm_vnp_terminate: detected unbusy pg");
}