path: root/sys/uvm/uvm_device.c
author     Owain Ainsworth <oga@cvs.openbsd.org>    2009-06-02 23:00:20 +0000
committer  Owain Ainsworth <oga@cvs.openbsd.org>    2009-06-02 23:00:20 +0000
commit     6e1481035fcdd30fd7cc0e2650ad2e51cabb8d9d (patch)
tree       c0a17586063eb7ab1d1f02beb45592aa9705ab6c /sys/uvm/uvm_device.c
parent     a35c336f9ebdea1410b999c9869e68c40354c1e3 (diff)
Instead of the global hash table with the terrible hash function and a global lock, switch the uvm object pages to being kept in a per-object RB_TREE. Right now this is approximately the same speed, but cleaner. When biglock usage is reduced, this will improve concurrency by reducing lock contention.

ok beck@ art@. Thanks to jasper for the speed testing.
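
For context, the per-object tree this change introduces is built from the red-black tree macros in <sys/tree.h>. Below is a minimal sketch of that pattern, keying pages by their offset within the object. Apart from the memt member visible in the diff below, the names used here (page, objt, page_cmp, objtree, object) are placeholders for illustration, not the actual uvm identifiers.

#include <sys/types.h>
#include <sys/tree.h>

/*
 * Sketch only: a page carries its tree linkage and the key the
 * comparison function orders on (its offset within the object).
 */
struct page {
	RB_ENTRY(page)	objt;		/* linkage within the object's tree */
	off_t		offset;		/* page offset within the object */
};

static int
page_cmp(struct page *a, struct page *b)
{
	/* order pages by offset; equal offsets denote the same page */
	return (a->offset < b->offset ? -1 : a->offset > b->offset);
}

/* the object embeds a tree head where it previously embedded a TAILQ head */
RB_HEAD(objtree, page);
RB_PROTOTYPE(objtree, page, objt, page_cmp);
RB_GENERATE(objtree, page, objt, page_cmp);

struct object {
	struct objtree	memt;		/* per-object page tree */
	int		uo_npages;	/* number of pages in the tree */
};
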
Diffstat (limited to 'sys/uvm/uvm_device.c')
-rw-r--r--  sys/uvm/uvm_device.c  6
1 file changed, 3 insertions, 3 deletions
diff --git a/sys/uvm/uvm_device.c b/sys/uvm/uvm_device.c
index 1013f46d20a..a3743490068 100644
--- a/sys/uvm/uvm_device.c
+++ b/sys/uvm/uvm_device.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_device.c,v 1.32 2009/05/12 20:49:56 oga Exp $ */
+/* $OpenBSD: uvm_device.c,v 1.33 2009/06/02 23:00:19 oga Exp $ */
/* $NetBSD: uvm_device.c,v 1.30 2000/11/25 06:27:59 chs Exp $ */
/*
@@ -227,7 +227,7 @@ udv_attach(void *arg, vm_prot_t accessprot, voff_t off, vsize_t size)
simple_lock_init(&udv->u_obj.vmobjlock);
udv->u_obj.pgops = &uvm_deviceops;
- TAILQ_INIT(&udv->u_obj.memq);
+ RB_INIT(&udv->u_obj.memt);
udv->u_obj.uo_npages = 0;
udv->u_obj.uo_refs = 1;
udv->u_flags = 0;
@@ -287,7 +287,7 @@ again:
uobj,uobj->uo_refs,0,0);
return;
}
- KASSERT(uobj->uo_npages == 0 && TAILQ_EMPTY(&uobj->memq));
+ KASSERT(uobj->uo_npages == 0 && RB_EMPTY(&uobj->memt));
/*
* is it being held? if so, wait until others are done.
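
To round out the picture, here is a hedged usage sketch reusing the placeholder objtree/page declarations from the sketch above. RB_INIT and RB_EMPTY correspond to the TAILQ_INIT and TAILQ_EMPTY calls replaced in the hunks, while insertions and lookups go through RB_INSERT and RB_FIND on the object's own tree instead of a global hash table keyed on (object, offset).

#include <assert.h>

void
object_example(struct object *obj, struct page *pg, off_t off)
{
	struct page key, *found;

	RB_INIT(&obj->memt);			/* as in udv_attach() above */

	pg->offset = off;
	RB_INSERT(objtree, &obj->memt, pg);	/* link the page into the object */
	obj->uo_npages++;

	key.offset = off;
	found = RB_FIND(objtree, &obj->memt, &key);	/* per-object lookup by offset */
	if (found != NULL) {
		RB_REMOVE(objtree, &obj->memt, found);
		obj->uo_npages--;
	}

	/* detach-time invariant, as in the KASSERT in the second hunk */
	assert(obj->uo_npages == 0 && RB_EMPTY(&obj->memt));
}

Because each object owns its tree, page lookups only touch that object's lock once biglock usage shrinks, which is the concurrency gain the commit message anticipates.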