summaryrefslogtreecommitdiff
path: root/sys/uvm/uvm_device.c
diff options
context:
space:
mode:
authorOwain Ainsworth <oga@cvs.openbsd.org>2009-08-06 15:28:15 +0000
committerOwain Ainsworth <oga@cvs.openbsd.org>2009-08-06 15:28:15 +0000
commit6e7e8975626fe603529c0e60cefa188eeb5c890e (patch)
tree3b3d95f29dc62527a2b78ff502abe9eb379a1445 /sys/uvm/uvm_device.c
parentf32484ba9d7d14a17687020bdcc98d2e3d42a940 (diff)
reintroduce the uvm_tree commit.
Now instead of the global object hashtable, we have a per object tree. Testing shows no performance difference and a slight code shrink. OTOH when locking is more fine grained this should be faster since it avoids lock contention on the global uvm.hashlock. ok thib@, art@.
Diffstat (limited to 'sys/uvm/uvm_device.c')
-rw-r--r--sys/uvm/uvm_device.c6
1 file changed, 3 insertions, 3 deletions
diff --git a/sys/uvm/uvm_device.c b/sys/uvm/uvm_device.c
index 53b28c095e5..27e079934b3 100644
--- a/sys/uvm/uvm_device.c
+++ b/sys/uvm/uvm_device.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_device.c,v 1.35 2009/06/16 23:54:57 oga Exp $ */
+/* $OpenBSD: uvm_device.c,v 1.36 2009/08/06 15:28:14 oga Exp $ */
/* $NetBSD: uvm_device.c,v 1.30 2000/11/25 06:27:59 chs Exp $ */
/*
@@ -245,7 +245,7 @@ udv_attach(void *arg, vm_prot_t accessprot, voff_t off, vsize_t size)
simple_lock_init(&udv->u_obj.vmobjlock);
udv->u_obj.pgops = &uvm_deviceops;
- TAILQ_INIT(&udv->u_obj.memq);
+ RB_INIT(&udv->u_obj.memt);
udv->u_obj.uo_npages = 0;
udv->u_obj.uo_refs = 1;
udv->u_flags = 0;
@@ -305,7 +305,7 @@ again:
uobj,uobj->uo_refs,0,0);
return;
}
- KASSERT(uobj->uo_npages == 0 && TAILQ_EMPTY(&uobj->memq));
+ KASSERT(uobj->uo_npages == 0 && RB_EMPTY(&uobj->memt));
/*
* is it being held? if so, wait until others are done.