| author | Ariane van der Steldt <ariane@cvs.openbsd.org> | 2012-03-09 13:01:30 +0000 |
|---|---|---|
| committer | Ariane van der Steldt <ariane@cvs.openbsd.org> | 2012-03-09 13:01:30 +0000 |
| commit | 193e3efb70083a72f3d299ea5f129cf83d547115 (patch) | |
| tree | e7c9b8d210fe2b29062f1cf3a40c093bdf14800d /sys/arch | |
| parent | dbaaf4ad89f61a154abf6b48600210ec50ecb62c (diff) | |
New vmmap implementation.
No OKs (it is really a pain to review properly).
Extensively tested; I'm confident it'll be stable.
"Now is the time", from several ICB inhabitants.
The diff provides:
- the ability to specify different allocators for different regions/maps (see the sketch after this list)
- a simpler implementation of the current allocator
- a compatibility mode, for now: it generates addresses similar to those the old allocator produced
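Conceptually, per-region allocator selection means each map, or each address range within a map, carries a pointer to its own address-pick strategy. Below is a minimal userland sketch of that idea in C; every name in it (struct addr_allocator, struct region, pick_linear, pick_topdown) is invented for illustration and is not the actual uvm_map/uvm_addr interface:

```c
/*
 * Illustrative sketch only: models "different allocators for different
 * regions" as a function-pointer table per region.  Names are
 * hypothetical, not the real uvm interfaces.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t vaddr_sim_t;

struct addr_allocator {
	const char *name;
	/* pick an address for a request of `sz` bytes inside [lo, hi) */
	int (*pick)(vaddr_sim_t lo, vaddr_sim_t hi, size_t sz,
	    vaddr_sim_t *out);
};

/* bottom-up: first fit from the low end of the region */
static int
pick_linear(vaddr_sim_t lo, vaddr_sim_t hi, size_t sz, vaddr_sim_t *out)
{
	if (hi - lo < sz)
		return -1;
	*out = lo;
	return 0;
}

/* top-down: allocate from the high end of the region */
static int
pick_topdown(vaddr_sim_t lo, vaddr_sim_t hi, size_t sz, vaddr_sim_t *out)
{
	if (hi - lo < sz)
		return -1;
	*out = hi - sz;
	return 0;
}

struct region {
	vaddr_sim_t lo, hi;
	const struct addr_allocator *alloc;	/* per-region strategy */
};

int
main(void)
{
	const struct addr_allocator lin = { "linear", pick_linear };
	const struct addr_allocator top = { "topdown", pick_topdown };
	struct region regions[] = {
		{ 0x1000,  0x10000,  &lin },	/* e.g. text/data range */
		{ 0x10000, 0x100000, &top },	/* e.g. stack range */
	};
	vaddr_sim_t va;
	size_t i;

	for (i = 0; i < sizeof(regions) / sizeof(regions[0]); i++) {
		if (regions[i].alloc->pick(regions[i].lo, regions[i].hi,
		    0x2000, &va) == 0)
			printf("%s: 0x%llx\n", regions[i].alloc->name,
			    (unsigned long long)va);
	}
	return 0;
}
```

The point of the indirection is that the map walk and bookkeeping stay generic while the address-selection policy becomes a per-region plug-in, which is also what makes a bug-compatible "compatibility mode" allocator possible alongside new ones.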
Diffstat (limited to 'sys/arch')
-rw-r--r-- | sys/arch/i386/i386/pmap.c | 25 |
1 file changed, 13 insertions(+), 12 deletions(-)
```diff
diff --git a/sys/arch/i386/i386/pmap.c b/sys/arch/i386/i386/pmap.c
index 2cdfba314d7..f8f05cb8b88 100644
--- a/sys/arch/i386/i386/pmap.c
+++ b/sys/arch/i386/i386/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.156 2012/02/19 17:14:28 kettenis Exp $ */
+/* $OpenBSD: pmap.c,v 1.157 2012/03/09 13:01:28 ariane Exp $ */
 /* $NetBSD: pmap.c,v 1.91 2000/06/02 17:46:37 thorpej Exp $ */
 
 /*
@@ -602,14 +602,16 @@ pmap_exec_fixup(struct vm_map *map, struct trapframe *tf, struct pcb *pcb)
 	vaddr_t va = 0;
 
 	vm_map_lock(map);
-	for (ent = (&map->header)->next; ent != &map->header; ent = ent->next) {
-		/*
-		 * This entry has greater va than the entries before.
-		 * We need to make it point to the last page, not past it.
-		 */
+	RB_FOREACH_REVERSE(ent, uvm_map_addr, &map->addr) {
 		if (ent->protection & VM_PROT_EXECUTE)
-			va = trunc_page(ent->end - 1);
+			break;
 	}
+	/*
+	 * This entry has greater va than the entries before.
+	 * We need to make it point to the last page, not past it.
+	 */
+	if (ent)
+		va = trunc_page(ent->end - 1);
 	vm_map_unlock(map);
 
 	if (va <= pm->pm_hiexec) {
@@ -1244,7 +1246,7 @@ pmap_free_pvpage(void)
 {
 	int s;
 	struct vm_map *map;
-	struct vm_map_entry *dead_entries;
+	struct uvm_map_deadq dead_entries;
 	struct pv_page *pvp;
 
 	s = splvm(); /* protect kmem_map */
@@ -1265,13 +1267,12 @@
 		TAILQ_REMOVE(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
 
 		/* unmap the page */
-		dead_entries = NULL;
+		TAILQ_INIT(&dead_entries);
 		uvm_unmap_remove(map, (vaddr_t)pvp, ((vaddr_t)pvp) + PAGE_SIZE,
-		    &dead_entries, NULL, FALSE);
+		    &dead_entries, FALSE, TRUE);
 		vm_map_unlock(map);
 
-		if (dead_entries != NULL)
-			uvm_unmap_detach(dead_entries, 0);
+		uvm_unmap_detach(&dead_entries, 0);
 
 		pv_nfpvents -= PVE_PER_PVPAGE;	/* update free count */
 	}
```
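In the pmap_exec_fixup() hunk, the linear walk of the map's entry list becomes RB_FOREACH_REVERSE over the map's address-sorted red-black tree: starting from the highest-addressed entry, the first executable entry found is the one whose last page matters, so the loop can break immediately instead of scanning everything. Here is a self-contained sketch of that traversal pattern using the RB macros from <sys/tree.h> (available on BSD systems, and via libbsd elsewhere); struct node and exectree are names invented for the example, not the kernel's types:

```c
/*
 * Sketch of the RB_FOREACH_REVERSE pattern from the pmap_exec_fixup()
 * hunk: walk a red-black tree from highest key to lowest and stop at
 * the first node matching a predicate.
 */
#include <sys/tree.h>
#include <stdio.h>

struct node {
	RB_ENTRY(node) entry;
	unsigned long start, end;	/* [start, end) address range */
	int executable;
};

static int
node_cmp(struct node *a, struct node *b)
{
	if (a->start < b->start)
		return -1;
	return a->start > b->start;
}

RB_HEAD(exectree, node);
RB_PROTOTYPE(exectree, node, entry, node_cmp);
RB_GENERATE(exectree, node, entry, node_cmp)

int
main(void)
{
	struct exectree head = RB_INITIALIZER(&head);
	struct node n[] = {
		{ .start = 0x1000, .end = 0x2000, .executable = 1 },
		{ .start = 0x3000, .end = 0x4000, .executable = 0 },
		{ .start = 0x5000, .end = 0x6000, .executable = 0 },
	};
	struct node *ent;
	size_t i;

	for (i = 0; i < sizeof(n) / sizeof(n[0]); i++)
		RB_INSERT(exectree, &head, &n[i]);

	/*
	 * Reverse traversal: highest start address first.  Break at the
	 * first executable entry; `ent` ends up NULL if none exists.
	 */
	RB_FOREACH_REVERSE(ent, exectree, &head) {
		if (ent->executable)
			break;
	}
	if (ent != NULL)
		printf("highest exec entry ends at 0x%lx\n", ent->end);
	return 0;
}
```

Note that RB_FOREACH_REVERSE leaves the iterator NULL when the loop runs off the tree without breaking, which is exactly what the new `if (ent)` test in the kernel hunk relies on.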
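The pmap_free_pvpage() hunks change how dead map entries are handed back: instead of a NULL-initialized entry pointer, the caller now owns a TAILQ (struct uvm_map_deadq) that it initializes, uvm_unmap_remove() fills while the map is locked, and uvm_unmap_detach() drains after the lock is dropped. Since draining an empty queue is a no-op, the old `if (dead_entries != NULL)` branch disappears. A minimal sketch of that caller-owned dead-queue pattern with <sys/queue.h> follows; collect_dead() and detach_dead() are invented stand-ins for the uvm calls, not the real API:

```c
/*
 * Sketch of the caller-owned "dead queue" pattern: the caller
 * initializes a TAILQ, a helper appends entries to it (in the kernel,
 * under the map lock), and the caller drains it afterwards.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct dead_item {
	TAILQ_ENTRY(dead_item) dfree;
	int id;
};
TAILQ_HEAD(deadq, dead_item);

/* stand-in for uvm_unmap_remove(): unlink entries onto the dead queue */
static void
collect_dead(struct deadq *dq, int nitems)
{
	int i;

	for (i = 0; i < nitems; i++) {
		struct dead_item *it = malloc(sizeof(*it));
		if (it == NULL)
			abort();
		it->id = i;
		TAILQ_INSERT_TAIL(dq, it, dfree);
	}
}

/* stand-in for uvm_unmap_detach(): free everything on the queue */
static void
detach_dead(struct deadq *dq)
{
	struct dead_item *it;

	while ((it = TAILQ_FIRST(dq)) != NULL) {
		TAILQ_REMOVE(dq, it, dfree);
		printf("freeing dead entry %d\n", it->id);
		free(it);
	}
}

int
main(void)
{
	struct deadq dq;

	TAILQ_INIT(&dq);	/* like TAILQ_INIT(&dead_entries) */
	collect_dead(&dq, 3);	/* done with the "lock" held */
	detach_dead(&dq);	/* safe after the lock is dropped */
	return 0;
}
```

The design win is that the expensive teardown work can always be deferred until after the map lock is released, with one unconditional drain call on every path.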