author		Mark Kettenis <kettenis@cvs.openbsd.org>	2015-07-17 21:56:15 +0000
committer	Mark Kettenis <kettenis@cvs.openbsd.org>	2015-07-17 21:56:15 +0000
commit		3114589742ba2fbc0026aa0f689b7a0e11286875
tree		f2cf9be71695849c7663fc098867c5ab2e430551
parent		c4b97a90058f163c6e6f211c11708ac31db54221
Release the kernel lock while tearing down the uvm map in the reaper.  Speeds
up workloads that fork a lot of processes and, more importantly, reduces
latency, because it makes sure the reaper doesn't hold on to the kernel lock
for long periods of time.  This almost certainly breaks MP kernels on alpha,
macppc, m88k and sgi; hppa might work, but probably doesn't.

ok deraadt@, beck@
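
In outline, the change makes the reaper's teardown path drop the kernel lock
for the bulk of the work and only re-take it around pmap_destroy().  A
condensed sketch of the pattern in the diff below, with the entry-freeing
loop and debug checks elided, looks roughly like this:

	void
	uvm_map_teardown(struct vm_map *map)
	{
		/* The reaper calls this with the kernel lock held. */
		KERNEL_ASSERT_LOCKED();
		KERNEL_UNLOCK();

		/* Interrupt-safe maps are never torn down this way. */
		KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);

		/* ... free the map entries under the map's own rwlock ... */

		/*
		 * Re-acquire the kernel lock before pmap_destroy(); the
		 * pmap layer on several platforms still expects to run
		 * with it held.
		 */
		KERNEL_LOCK();
		pmap_destroy(map->pmap);
		map->pmap = NULL;
	}
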
-rw-r--r--	sys/uvm/uvm_addr.c	7
-rw-r--r--	sys/uvm/uvm_map.c	33
2 files changed, 26 insertions(+), 14 deletions(-)
diff --git a/sys/uvm/uvm_addr.c b/sys/uvm/uvm_addr.c
index 5b153ed5ba0..1a7377c0d69 100644
--- a/sys/uvm/uvm_addr.c
+++ b/sys/uvm/uvm_addr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_addr.c,v 1.13 2015/03/30 21:08:40 miod Exp $ */
+/* $OpenBSD: uvm_addr.c,v 1.14 2015/07/17 21:56:14 kettenis Exp $ */
/*
* Copyright (c) 2011 Ariane van der Steldt <ariane@stack.nl>
@@ -287,14 +287,19 @@ uvm_addr_init(void)
{
pool_init(&uaddr_pool, sizeof(struct uvm_addr_state),
0, 0, PR_WAITOK, "uaddr", NULL);
+ pool_setipl(&uaddr_pool, IPL_VM);
pool_init(&uaddr_hint_pool, sizeof(struct uaddr_hint_state),
0, 0, PR_WAITOK, "uaddrhint", NULL);
+ pool_setipl(&uaddr_hint_pool, IPL_VM);
pool_init(&uaddr_bestfit_pool, sizeof(struct uaddr_bestfit_state),
0, 0, PR_WAITOK, "uaddrbest", NULL);
+ pool_setipl(&uaddr_bestfit_pool, IPL_VM);
pool_init(&uaddr_pivot_pool, sizeof(struct uaddr_pivot_state),
0, 0, PR_WAITOK, "uaddrpivot", NULL);
+ pool_setipl(&uaddr_pivot_pool, IPL_VM);
pool_init(&uaddr_rnd_pool, sizeof(struct uaddr_rnd_state),
0, 0, PR_WAITOK, "uaddrrnd", NULL);
+ pool_setipl(&uaddr_rnd_pool, IPL_VM);
uaddr_kbootstrap.uaddr_minaddr = PAGE_SIZE;
uaddr_kbootstrap.uaddr_maxaddr = -(vaddr_t)PAGE_SIZE;
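
The hunk above gives each address-selector pool an interrupt protection
level.  The likely intent (an assumption, not stated in the commit message)
is that the pools get their own IPL_VM-protected mutex instead of relying on
the kernel lock, so they stay safe once uvm_map_teardown() runs unlocked.
The pattern repeated for each pool is simply:

	pool_init(&uaddr_pool, sizeof(struct uvm_addr_state),
	    0, 0, PR_WAITOK, "uaddr", NULL);
	/* Serialize the pool at IPL_VM rather than via the kernel lock. */
	pool_setipl(&uaddr_pool, IPL_VM);
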
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index d136a50f493..2dfd9d5a690 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map.c,v 1.191 2015/04/23 00:49:37 dlg Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.192 2015/07/17 21:56:14 kettenis Exp $ */
/* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
/*
@@ -1842,8 +1842,10 @@ uvm_unmap_kill_entry(struct vm_map *map, struct vm_map_entry *entry)
{
/* Unwire removed map entry. */
if (VM_MAPENT_ISWIRED(entry)) {
+ KERNEL_LOCK();
entry->wired_count = 0;
uvm_fault_unwire_locked(map, entry->start, entry->end);
+ KERNEL_UNLOCK();
}
/* Entry-type specific code. */
@@ -2422,18 +2424,20 @@ void
uvm_map_teardown(struct vm_map *map)
{
struct uvm_map_deadq dead_entries;
- int i, waitok = 0;
struct vm_map_entry *entry, *tmp;
#ifdef VMMAP_DEBUG
size_t numq, numt;
#endif
+ int i;
- if ((map->flags & VM_MAP_INTRSAFE) == 0)
- waitok = 1;
- if (waitok) {
- if (rw_enter(&map->lock, RW_NOSLEEP | RW_WRITE) != 0)
- panic("uvm_map_teardown: rw_enter failed on free map");
- }
+ KERNEL_ASSERT_LOCKED();
+ KERNEL_UNLOCK();
+ KERNEL_ASSERT_UNLOCKED();
+
+ KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
+
+ if (rw_enter(&map->lock, RW_NOSLEEP | RW_WRITE) != 0)
+ panic("uvm_map_teardown: rw_enter failed on free map");
/* Remove address selectors. */
uvm_addr_destroy(map->uaddr_exe);
@@ -2466,8 +2470,7 @@ uvm_map_teardown(struct vm_map *map)
if ((entry = RB_ROOT(&map->addr)) != NULL)
DEAD_ENTRY_PUSH(&dead_entries, entry);
while (entry != NULL) {
- if (waitok)
- uvm_pause();
+ sched_pause();
uvm_unmap_kill_entry(map, entry);
if ((tmp = RB_LEFT(entry, daddrs.addr_entry)) != NULL)
DEAD_ENTRY_PUSH(&dead_entries, tmp);
@@ -2477,8 +2480,7 @@ uvm_map_teardown(struct vm_map *map)
entry = TAILQ_NEXT(entry, dfree.deadq);
}
- if (waitok)
- rw_exit(&map->lock);
+ rw_exit(&map->lock);
#ifdef VMMAP_DEBUG
numt = numq = 0;
@@ -2488,7 +2490,10 @@ uvm_map_teardown(struct vm_map *map)
numq++;
KASSERT(numt == numq);
#endif
- uvm_unmap_detach(&dead_entries, waitok ? UVM_PLA_WAITOK : 0);
+ uvm_unmap_detach(&dead_entries, UVM_PLA_WAITOK);
+
+ KERNEL_LOCK();
+
pmap_destroy(map->pmap);
map->pmap = NULL;
}
@@ -3185,6 +3190,8 @@ void
uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t min, vaddr_t max,
boolean_t pageable, boolean_t remove_holes)
{
+ KASSERT(pmap == NULL || pmap == pmap_kernel());
+
if (pmap)
pmap_reference(pmap);
else