path: root/sys/uvm/uvm_device.c
author     Owain Ainsworth <oga@cvs.openbsd.org>    2009-06-16 00:11:30 +0000
committer  Owain Ainsworth <oga@cvs.openbsd.org>    2009-06-16 00:11:30 +0000
commit     b20700966027364e7e2e3cf5ca4613cbb4e2a25b (patch)
tree       dac29c9a1582e023159a8aabe2282775b21cbdc2 /sys/uvm/uvm_device.c
parent     ab37797a62467132f94babf9bc9d57cef8402599 (diff)
Backout all changes to uvm after pmemrange (which will be backed out
separately).

a change at or just before the hackathon has either exposed or added a
very very nasty memory corruption bug that is giving us hell right now.
So in the interest of kernel stability these diffs are being backed out
until such a time as that corruption bug has been found and squashed,
then the ones that are proven good may slowly return.

a quick hitlist of the main commits this backs out:

mine:
	uvm_objwire
	the lock change in uvm_swap.c
	using trees for uvm objects instead of the hash
	removing the pgo_releasepg callback.

art@'s:
	putting pmap_page_protect(VM_PROT_NONE) in uvm_pagedeactivate()
	since all callers called that just prior anyway.

ok beck@, ariane@. prompted by deraadt@.
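For context, the art@ item in the hitlist refers to folding the
pmap_page_protect(VM_PROT_NONE) call into uvm_pagedeactivate() itself,
since every caller performed it immediately beforehand. A rough sketch
of that caller-side shape follows; it is a simplified illustration, not
the actual sys/uvm source, and the deactivate_caller_* helper names are
made up here:

/*
 * Hedged sketch; only pmap_page_protect() and uvm_pagedeactivate()
 * are real kernel functions, the helpers are hypothetical.
 */

/* Before art@'s change: every caller dropped the mappings itself. */
static void
deactivate_caller_old(struct vm_page *pg)
{
	pmap_page_protect(pg, VM_PROT_NONE);	/* remove all mappings */
	uvm_pagedeactivate(pg);			/* then move to inactive queue */
}

/*
 * After the change (the part this commit backs out): the
 * pmap_page_protect() call lived inside uvm_pagedeactivate(),
 * so callers needed only the single call.
 */
static void
deactivate_caller_new(struct vm_page *pg)
{
	uvm_pagedeactivate(pg);
}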
Diffstat (limited to 'sys/uvm/uvm_device.c')
-rw-r--r--	sys/uvm/uvm_device.c	6
1 files changed, 3 insertions, 3 deletions
diff --git a/sys/uvm/uvm_device.c b/sys/uvm/uvm_device.c
index a3743490068..26b6976b266 100644
--- a/sys/uvm/uvm_device.c
+++ b/sys/uvm/uvm_device.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_device.c,v 1.33 2009/06/02 23:00:19 oga Exp $ */
+/* $OpenBSD: uvm_device.c,v 1.34 2009/06/16 00:11:29 oga Exp $ */
/* $NetBSD: uvm_device.c,v 1.30 2000/11/25 06:27:59 chs Exp $ */
/*
@@ -227,7 +227,7 @@ udv_attach(void *arg, vm_prot_t accessprot, voff_t off, vsize_t size)
simple_lock_init(&udv->u_obj.vmobjlock);
udv->u_obj.pgops = &uvm_deviceops;
- RB_INIT(&udv->u_obj.memt);
+ TAILQ_INIT(&udv->u_obj.memq);
udv->u_obj.uo_npages = 0;
udv->u_obj.uo_refs = 1;
udv->u_flags = 0;
@@ -287,7 +287,7 @@ again:
uobj,uobj->uo_refs,0,0);
return;
}
- KASSERT(uobj->uo_npages == 0 && RB_EMPTY(&uobj->memt));
+ KASSERT(uobj->uo_npages == 0 && TAILQ_EMPTY(&uobj->memq));
/*
* is it being held? if so, wait until others are done.
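The two hunks above revert the device object's page bookkeeping from the
red-black tree (memt, RB_INIT/RB_EMPTY from <sys/tree.h>) back to the
tail queue (memq, TAILQ_INIT/TAILQ_EMPTY from <sys/queue.h>). A minimal
sketch of the restored tailq layout follows; the fake_* struct and
function names are stand-ins for the real struct uvm_object and struct
vm_page, not the actual kernel definitions:

#include <sys/queue.h>

struct fake_page {				/* stand-in for struct vm_page */
	TAILQ_ENTRY(fake_page) listq;		/* linkage on the object's memq */
	long offset;
};

struct fake_uobj {				/* stand-in for struct uvm_object */
	TAILQ_HEAD(, fake_page) memq;		/* pages owned by this object */
	int uo_npages;
};

static void
fake_uobj_init(struct fake_uobj *uobj)
{
	TAILQ_INIT(&uobj->memq);		/* what udv_attach() now does again */
	uobj->uo_npages = 0;
}

static int
fake_uobj_is_empty(struct fake_uobj *uobj)
{
	/* mirrors the KASSERT in the second hunk after the revert */
	return (uobj->uo_npages == 0 && TAILQ_EMPTY(&uobj->memq));
}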