summaryrefslogtreecommitdiff
path: root/sys/uvm/uvm_object.h
diff options
context:
space:
mode:
authorOwain Ainsworth <oga@cvs.openbsd.org>2009-08-06 15:28:15 +0000
committerOwain Ainsworth <oga@cvs.openbsd.org>2009-08-06 15:28:15 +0000
commit6e7e8975626fe603529c0e60cefa188eeb5c890e (patch)
tree3b3d95f29dc62527a2b78ff502abe9eb379a1445 /sys/uvm/uvm_object.h
parentf32484ba9d7d14a17687020bdcc98d2e3d42a940 (diff)
reintroduce the uvm_tree commit.
Now instead of the global object hashtable, we have a per object tree. Testing shows no performance difference and a slight code shrink. OTOH when locking is more fine grained this should be faster due to the removal of lock contention on uvm.hashlock. ok thib@, art@.
Diffstat (limited to 'sys/uvm/uvm_object.h')
-rw-r--r--sys/uvm/uvm_object.h16
1 file changed, 10 insertions, 6 deletions
diff --git a/sys/uvm/uvm_object.h b/sys/uvm/uvm_object.h
index 44a16bfd7c1..b1b9366fe57 100644
--- a/sys/uvm/uvm_object.h
+++ b/sys/uvm/uvm_object.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_object.h,v 1.14 2009/06/17 00:13:59 oga Exp $ */
+/* $OpenBSD: uvm_object.h,v 1.15 2009/08/06 15:28:14 oga Exp $ */
/* $NetBSD: uvm_object.h,v 1.11 2001/03/09 01:02:12 chs Exp $ */
/*
@@ -47,11 +47,11 @@
*/
struct uvm_object {
- simple_lock_data_t vmobjlock; /* lock on memq */
- struct uvm_pagerops *pgops; /* pager ops */
- struct pglist memq; /* pages in this object */
- int uo_npages; /* # of pages in memq */
- int uo_refs; /* reference count */
+ simple_lock_data_t vmobjlock; /* lock on memt */
+ struct uvm_pagerops *pgops; /* pager ops */
+ RB_HEAD(uvm_objtree, vm_page) memt; /* pages in object */
+ int uo_npages; /* # of pages in memt */
+ int uo_refs; /* reference count */
};
/*
@@ -83,6 +83,10 @@ struct uvm_object {
extern struct uvm_pagerops uvm_vnodeops;
extern struct uvm_pagerops uvm_deviceops;
+/* For object trees */
+int uvm_pagecmp(struct vm_page *, struct vm_page *);
+RB_PROTOTYPE(uvm_objtree, vm_page, objt, uvm_pagecmp)
+
#define UVM_OBJ_IS_VNODE(uobj) \
((uobj)->pgops == &uvm_vnodeops)