path: root/sys/arch/hppa
author		Owain Ainsworth <oga@cvs.openbsd.org>	2009-06-16 00:11:30 +0000
committer	Owain Ainsworth <oga@cvs.openbsd.org>	2009-06-16 00:11:30 +0000
commit		b20700966027364e7e2e3cf5ca4613cbb4e2a25b (patch)
tree		dac29c9a1582e023159a8aabe2282775b21cbdc2 /sys/arch/hppa
parent		ab37797a62467132f94babf9bc9d57cef8402599 (diff)
Backout all changes to uvm after pmemrange (which will be backed out
separately).

a change at or just before the hackathon has either exposed or added a
very very nasty memory corruption bug that is giving us hell right now.
So in the interest of kernel stability these diffs are being backed out
until such a time as that corruption bug has been found and squashed,
then the ones that are proven good may slowly return.

a quick hitlist of the main commits this backs out:

mine:
	uvm_objwire
	the lock change in uvm_swap.c
	using trees for uvm objects instead of the hash
	removing the pgo_releasepg callback.

art@'s:
	putting pmap_page_protect(VM_PROT_NONE) in uvm_pagedeactivate()
	since all callers called that just prior anyway.

ok beck@, ariane@. prompted by deraadt@.
Diffstat (limited to 'sys/arch/hppa')
-rw-r--r--	sys/arch/hppa/hppa/pmap.c	10
1 file changed, 5 insertions, 5 deletions
diff --git a/sys/arch/hppa/hppa/pmap.c b/sys/arch/hppa/hppa/pmap.c
index 5f9d72ab8b9..34e5652adae 100644
--- a/sys/arch/hppa/hppa/pmap.c
+++ b/sys/arch/hppa/hppa/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.136 2009/06/11 20:10:51 kettenis Exp $ */
+/* $OpenBSD: pmap.c,v 1.137 2009/06/16 00:11:29 oga Exp $ */
/*
* Copyright (c) 1998-2004 Michael Shalayeff
@@ -235,7 +235,7 @@ pmap_pde_release(struct pmap *pmap, vaddr_t va, struct vm_page *ptp)
pmap_pde_set(pmap, va, 0);
pmap->pm_stats.resident_count--;
if (pmap->pm_ptphint == ptp)
- pmap->pm_ptphint = RB_ROOT(&pmap->pm_obj.memt);
+ pmap->pm_ptphint = TAILQ_FIRST(&pmap->pm_obj.memq);
ptp->wire_count = 0;
#ifdef DIAGNOSTIC
if (ptp->pg_flags & PG_BUSY)
@@ -470,7 +470,7 @@ pmap_bootstrap(vstart)
bzero(kpm, sizeof(*kpm));
simple_lock_init(&kpm->pm_lock);
kpm->pm_obj.pgops = NULL;
- RB_INIT(&kpm->pm_obj.memt);
+ TAILQ_INIT(&kpm->pm_obj.memq);
kpm->pm_obj.uo_npages = 0;
kpm->pm_obj.uo_refs = 1;
kpm->pm_space = HPPA_SID_KERNEL;
@@ -656,7 +656,7 @@ pmap_create()
simple_lock_init(&pmap->pm_lock);
pmap->pm_obj.pgops = NULL; /* currently not a mappable object */
- RB_INIT(&pmap->pm_obj.memt);
+ TAILQ_INIT(&pmap->pm_obj.memq);
pmap->pm_obj.uo_npages = 0;
pmap->pm_obj.uo_refs = 1;
@@ -698,7 +698,7 @@ pmap_destroy(pmap)
return;
#ifdef DIAGNOSTIC
- while ((pg = RB_ROOT(&pmap->pm_obj.memt))) {
+ while ((pg = TAILQ_FIRST(&pmap->pm_obj.memq))) {
pt_entry_t *pde, *epde;
struct vm_page *sheep;
struct pv_entry *haggis;
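For contrast, a similarly hedged sketch of the tree(3) RB idiom that the
removed '-' lines above relied on. The *_sketch names are again invented
stand-ins, and keying the tree on a page offset is only an assumption for
this sketch, not taken from the backed-out code.

/*
 * Sketch of an RB-tree page collection, the structure this commit backs out
 * in favour of the TAILQ above.  Simplified stand-in types.
 */
#include <sys/tree.h>
#include <stdio.h>

struct vm_page_sketch {
	RB_ENTRY(vm_page_sketch) objt;	/* linkage in the object's tree */
	unsigned long offset;		/* sort key; an assumption for this sketch */
};

static int
vm_page_cmp(struct vm_page_sketch *a, struct vm_page_sketch *b)
{
	return (a->offset < b->offset ? -1 : a->offset > b->offset);
}

RB_HEAD(memt_sketch, vm_page_sketch);
RB_PROTOTYPE(memt_sketch, vm_page_sketch, objt, vm_page_cmp)
RB_GENERATE(memt_sketch, vm_page_sketch, objt, vm_page_cmp)

int
main(void)
{
	struct memt_sketch memt;
	struct vm_page_sketch pages[3], *pg;
	int i;

	RB_INIT(&memt);			/* what the '+' lines replace with TAILQ_INIT */
	for (i = 0; i < 3; i++) {
		pages[i].offset = i;
		RB_INSERT(memt_sketch, &memt, &pages[i]);
	}

	/* RB_ROOT() hands back some resident page, much like TAILQ_FIRST() */
	while ((pg = RB_ROOT(&memt)) != NULL) {
		RB_REMOVE(memt_sketch, &memt, pg);
		printf("removed page at offset %lu\n", pg->offset);
	}
	return (0);
}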