diff options
author | Ariane van der Steldt <ariane@cvs.openbsd.org> | 2012-04-19 12:42:04 +0000 |
---|---|---|
committer | Ariane van der Steldt <ariane@cvs.openbsd.org> | 2012-04-19 12:42:04 +0000 |
commit | f9a925e83ff03511e6dc240e9854918d18691220 (patch) | |
tree | edcaecffd1633a2247373586d72033ffb92240be | |
parent | 6d759891dc056714906e392aaf305d2423b93977 (diff) |
Backout misplaced optimization in vmmap.
(That means the misplaced optimization is back in.) It broke mips and
possibly other architectures.
-rw-r--r-- | sys/uvm/uvm_map.c | 111 |
1 file changed, 99 insertions, 12 deletions
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c index 1081a6dfe0c..6ffe270b394 100644 --- a/sys/uvm/uvm_map.c +++ b/sys/uvm/uvm_map.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_map.c,v 1.152 2012/04/17 20:22:52 ariane Exp $ */ +/* $OpenBSD: uvm_map.c,v 1.153 2012/04/19 12:42:03 ariane Exp $ */ /* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */ /* @@ -3118,21 +3118,108 @@ uvmspace_share(p1, p2) void uvmspace_exec(struct proc *p, vaddr_t start, vaddr_t end) { - int pageable = (p->p_vmspace->vm_map.flags & VM_MAP_PAGEABLE); - struct vmspace *old, *new; + struct vmspace *nvm, *ovm = p->p_vmspace; + struct vm_map *map = &ovm->vm_map; + struct uvm_map_deadq dead_entries; - /* Create new vmspace. */ - new = uvmspace_alloc(start, end, (pageable ? TRUE : FALSE), - TRUE); - old = p->p_vmspace; + KASSERT((start & (vaddr_t)PAGE_MASK) == 0); + KASSERT((end & (vaddr_t)PAGE_MASK) == 0 || + (end & (vaddr_t)PAGE_MASK) == (vaddr_t)PAGE_MASK); pmap_unuse_final(p); /* before stack addresses go away */ - pmap_deactivate(p); - p->p_vmspace = new; - pmap_activate(p); + TAILQ_INIT(&dead_entries); + + /* + * see if more than one process is using this vmspace... + */ + + if (ovm->vm_refcnt == 1) { + /* + * if p is the only process using its vmspace then we can safely + * recycle that vmspace for the program that is being exec'd. + */ + +#ifdef SYSVSHM + /* + * SYSV SHM semantics require us to kill all segments on an exec + */ + if (ovm->vm_shm) + shmexit(ovm); +#endif + + /* + * POSIX 1003.1b -- "lock future mappings" is revoked + * when a process execs another program image. + */ + vm_map_lock(map); + vm_map_modflags(map, 0, VM_MAP_WIREFUTURE); + + /* + * now unmap the old program + * + * Instead of attempting to keep the map valid, we simply + * nuke all entries and ask uvm_map_setup to reinitialize + * the map to the new boundaries. + * + * uvm_unmap_remove will actually nuke all entries for us + * (as in, not replace them with free-memory entries). 
+ */ + uvm_unmap_remove(map, map->min_offset, map->max_offset, + &dead_entries, TRUE, FALSE); + + KDASSERT(RB_EMPTY(&map->addr)); + + /* + * Nuke statistics and boundaries. + */ + bzero(&ovm->vm_startcopy, + (caddr_t) (ovm + 1) - (caddr_t) &ovm->vm_startcopy); - /* Throw away the old vmspace. */ - uvmspace_free(old); + + if (end & (vaddr_t)PAGE_MASK) { + end += 1; + if (end == 0) /* overflow */ + end -= PAGE_SIZE; + } + + /* + * Setup new boundaries and populate map with entries. + */ + map->min_offset = start; + map->max_offset = end; + uvm_map_setup_entries(map); + vm_map_unlock(map); + + /* + * but keep MMU holes unavailable + */ + pmap_remove_holes(map); + + } else { + + /* + * p's vmspace is being shared, so we can't reuse it for p since + * it is still being used for others. allocate a new vmspace + * for p + */ + nvm = uvmspace_alloc(start, end, + (map->flags & VM_MAP_PAGEABLE) ? TRUE : FALSE, TRUE); + + /* + * install new vmspace and drop our ref to the old one. + */ + + pmap_deactivate(p); + p->p_vmspace = nvm; + pmap_activate(p); + + uvmspace_free(ovm); + } + + /* + * Release dead entries + */ + uvm_unmap_detach(&dead_entries, 0); } /* |