path: root/sys/arch/i386
author     Ariane van der Steldt <ariane@cvs.openbsd.org>  2011-06-06 17:10:24 +0000
committer  Ariane van der Steldt <ariane@cvs.openbsd.org>  2011-06-06 17:10:24 +0000
commit     100bcabc80107e37b8efd45890258b7852cfc054 (patch)
tree       e8b5e2a61d1db84e157951ce84c29f939b88c036 /sys/arch/i386
parent     d2a285457d0c89bdbfc900ff091099f892e0e4f4 (diff)
Backout vmmap in order to repair virtual address selection algorithms
outside the tree.
Diffstat (limited to 'sys/arch/i386')
-rw-r--r--  sys/arch/i386/i386/pmap.c  25
1 file changed, 12 insertions(+), 13 deletions(-)
diff --git a/sys/arch/i386/i386/pmap.c b/sys/arch/i386/i386/pmap.c
index 37a85c977e8..2d08e0bd239 100644
--- a/sys/arch/i386/i386/pmap.c
+++ b/sys/arch/i386/i386/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.153 2011/05/24 15:27:36 ariane Exp $ */
+/* $OpenBSD: pmap.c,v 1.154 2011/06/06 17:10:23 ariane Exp $ */
/* $NetBSD: pmap.c,v 1.91 2000/06/02 17:46:37 thorpej Exp $ */
/*
@@ -604,16 +604,14 @@ pmap_exec_fixup(struct vm_map *map, struct trapframe *tf, struct pcb *pcb)
vaddr_t va = 0;
vm_map_lock(map);
- RB_FOREACH_REVERSE(ent, uvm_map_addr, &map->addr) {
+ for (ent = (&map->header)->next; ent != &map->header; ent = ent->next) {
+ /*
+ * This entry has greater va than the entries before.
+ * We need to make it point to the last page, not past it.
+ */
if (ent->protection & VM_PROT_EXECUTE)
- break;
+ va = trunc_page(ent->end - 1);
}
- /*
- * This entry has greater va than the entries before.
- * We need to make it point to the last page, not past it.
- */
- if (ent)
- va = trunc_page(ent->end - 1);
vm_map_unlock(map);
if (va <= pm->pm_hiexec) {
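
The hunk above restores the pre-vmmap way of finding the highest executable mapping in pmap_exec_fixup(): instead of walking the address RB tree in reverse and stopping at the first executable entry, it walks the map's entry list front to back and keeps overwriting va, so the last (and therefore highest) executable entry wins. The following is a minimal, self-contained sketch of that pattern only; struct entry, PROT_EXEC_BIT and highest_exec_page() are illustrative stand-ins, not the real UVM types or functions.

/*
 * Sketch of the restored scan: walk the sorted entry list behind a
 * header sentinel and remember the last page of the highest entry
 * that is mapped executable.  (Stand-in types, not the UVM API.)
 */
#include <stdio.h>

#define PAGE_MASK	0xfffUL
#define PROT_EXEC_BIT	0x4

struct entry {
	unsigned long	 start, end;	/* VA range, sorted ascending */
	int		 protection;
	struct entry	*next;
};

static unsigned long
highest_exec_page(struct entry *header)
{
	struct entry *ent;
	unsigned long va = 0;

	/* Entries are sorted by address, so the last match wins. */
	for (ent = header->next; ent != header; ent = ent->next) {
		if (ent->protection & PROT_EXEC_BIT)
			va = (ent->end - 1) & ~PAGE_MASK;	/* trunc_page() */
	}
	return va;
}

int
main(void)
{
	struct entry header, text, data;

	text = (struct entry){ 0x1000, 0x5000, PROT_EXEC_BIT, &data };
	data = (struct entry){ 0x6000, 0x9000, 0, &header };
	header = (struct entry){ 0, 0, 0, &text };

	/* Prints 0x4000: the last page of the executable mapping. */
	printf("highest exec page: %#lx\n", highest_exec_page(&header));
	return 0;
}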
@@ -1248,7 +1246,7 @@ pmap_free_pvpage(void)
{
int s;
struct vm_map *map;
- struct uvm_map_deadq dead_entries;
+ struct vm_map_entry *dead_entries;
struct pv_page *pvp;
s = splvm(); /* protect kmem_map */
@@ -1269,12 +1267,13 @@ pmap_free_pvpage(void)
TAILQ_REMOVE(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
/* unmap the page */
- TAILQ_INIT(&dead_entries);
+ dead_entries = NULL;
uvm_unmap_remove(map, (vaddr_t)pvp, ((vaddr_t)pvp) + PAGE_SIZE,
- &dead_entries, FALSE, TRUE);
+ &dead_entries, NULL, FALSE);
vm_map_unlock(map);
- uvm_unmap_detach(&dead_entries, 0);
+ if (dead_entries != NULL)
+ uvm_unmap_detach(dead_entries, 0);
pv_nfpvents -= PVE_PER_PVPAGE; /* update free count */
}
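
The last two hunks back out the uvm_map_deadq TAILQ interface in pmap_free_pvpage(): dead map entries once again come back from uvm_unmap_remove() as a chain reached through a pointer that starts out NULL, and are handed to uvm_unmap_detach() only when something was actually removed, after the map lock has been dropped. Below is a rough sketch of that calling convention under made-up stand-in names (struct dead_entry, unmap_remove, unmap_detach), not the real UVM API.

/*
 * Sketch of the restored convention: the caller passes a pointer
 * initialised to NULL, the unmap step chains removed entries onto it,
 * and the chain is freed only once the (pretend) map lock is released.
 */
#include <stdlib.h>

struct dead_entry {
	struct dead_entry *next;
	/* per-entry state would live here */
};

/* Pretend-unmap: push one removed entry onto the caller's chain. */
static void
unmap_remove(struct dead_entry **dead)
{
	struct dead_entry *e = calloc(1, sizeof(*e));

	if (e != NULL) {
		e->next = *dead;
		*dead = e;
	}
}

/* Pretend-detach: free the whole chain after the lock is dropped. */
static void
unmap_detach(struct dead_entry *dead)
{
	struct dead_entry *next;

	for (; dead != NULL; dead = next) {
		next = dead->next;
		free(dead);
	}
}

int
main(void)
{
	struct dead_entry *dead_entries = NULL;

	/* lock map ... */
	unmap_remove(&dead_entries);
	/* ... unlock map */
	if (dead_entries != NULL)
		unmap_detach(dead_entries);
	return 0;
}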