author     Pedro Martelletto <pedro@cvs.openbsd.org>     2005-09-28 00:24:05 +0000
committer  Pedro Martelletto <pedro@cvs.openbsd.org>     2005-09-28 00:24:05 +0000
commit     9f368ce71c70a77488981071f2622c3cdbd650a5 (patch)
tree       5e496fcd52f3f9e43e24550d9bf04d11a0489468 /sys
parent     bd960b2b5472bc650b8b4b0db04b5c76af1b7452 (diff)
- when we run out of static kernel map entries, grab a fresh page using
  the uvm_km_page allocator and use it instead of calling panic()

- add a counter to uvmexp so we can keep track of how many map entries
  we have in use

idea from tedu@, long ago, okay deraadt@
Diffstat (limited to 'sys')
-rw-r--r--  sys/uvm/uvm_extern.h  |  3
-rw-r--r--  sys/uvm/uvm_map.c     | 34
2 files changed, 27 insertions, 10 deletions
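
The heart of the change summarized above is the refill path in uvm_mapent_alloc(): when the static free list (uvm.kentry_free) runs dry, a page is obtained from the uvm_km_page allocator, carved into vm_map_entry-sized slots that are chained together, and used as a fresh batch of entries, with a ratecheck()-throttled warning instead of the old panic(); the new uvmexp.kmapent field counts how many of these entries are in use. What follows is a minimal userland sketch of that carve-a-page-into-a-free-list pattern, not kernel code: struct entry, get_page(), entry_free and nentries_inuse are hypothetical malloc-backed stand-ins for struct vm_map_entry, uvm_km_getpage(), uvm.kentry_free and uvmexp.kmapent, PAGE_SIZE is assumed to be 4096, and the spl/simple_lock handling of the real code is omitted.

/*
 * Minimal userland sketch of the refill pattern in the patch: when the
 * free list is empty, grab one "page", slice it into entry-sized slots,
 * link them together, then pop from the list as usual.  All names here
 * are illustrative stand-ins, not the kernel's own.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096                  /* assumed page size */

struct entry {                          /* stand-in for struct vm_map_entry */
        struct entry *next;
        char payload[56];               /* pretend map-entry fields */
};

static struct entry *entry_free;        /* stand-in for uvm.kentry_free */
static int nentries_inuse;              /* stand-in for uvmexp.kmapent */

/* Stand-in for uvm_km_getpage(): one page of backing memory, or NULL. */
static void *
get_page(void)
{
        return malloc(PAGE_SIZE);
}

static struct entry *
entry_alloc(void)
{
        struct entry *me, *ne;
        size_t i, n;

        me = entry_free;
        if (me == NULL) {
                /* Free list is empty: refill it from a fresh page. */
                ne = get_page();
                if (ne == NULL)
                        return (NULL);  /* the patched kernel still panics here */

                /*
                 * Chain every slot on the page to the one after it,
                 * terminate the last slot, and hand out the first.  The
                 * kernel also prints a ratecheck()-throttled warning here.
                 */
                n = PAGE_SIZE / sizeof(struct entry);
                for (i = 0; i < n - 1; i++)
                        ne[i].next = &ne[i + 1];
                ne[i].next = NULL;
                me = ne;
        }
        entry_free = me->next;          /* pop the head of the free list */
        nentries_inuse++;
        return (me);
}

static void
entry_release(struct entry *me)
{
        me->next = entry_free;          /* push back onto the free list */
        entry_free = me;
        nentries_inuse--;
}

int
main(void)
{
        struct entry *e = entry_alloc();

        if (e == NULL)
                return (1);
        printf("got %p, %d entries in use\n", (void *)e, nentries_inuse);
        entry_release(e);
        printf("after release: %d entries in use\n", nentries_inuse);
        return (0);
}

In the patch itself the whole refill runs with uvm.kentry_lock held at splvm(), so the interrupt-safe allocation path never sees a partially built chain, and uvmexp.kmapent is incremented and decremented under that same lock.
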
diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h
index a65dff88d1f..f9f52056fee 100644
--- a/sys/uvm/uvm_extern.h
+++ b/sys/uvm/uvm_extern.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_extern.h,v 1.57 2005/09/12 23:05:06 miod Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.58 2005/09/28 00:24:03 pedro Exp $ */
/* $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $ */
/*
@@ -360,6 +360,7 @@ struct uvmexp {
        struct uvm_object *kmem_object;
        int fpswtch; /* FPU context switches */
+        int kmapent; /* number of kernel map entries */
};
#ifdef _KERNEL
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index e0424a2cc32..e8db18b361d 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map.c,v 1.72 2005/06/29 06:07:32 deraadt Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.73 2005/09/28 00:24:03 pedro Exp $ */
/* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
/*
@@ -94,6 +94,9 @@
#include <uvm/uvm_ddb.h>
#endif
+static struct timeval uvm_kmapent_last_warn_time;
+static struct timeval uvm_kmapent_warn_rate = { 10, 0 };
+
struct uvm_cnt uvm_map_call, map_backmerge, map_forwmerge;
struct uvm_cnt uvm_mlk_call, uvm_mlk_hint;
const char vmmapbsy[] = "vmmapbsy";
@@ -378,22 +381,34 @@ _uvm_tree_sanity(vm_map_t map, const char *name)
struct vm_map_entry *
uvm_mapent_alloc(struct vm_map *map)
{
-        struct vm_map_entry *me;
-        int s;
+        struct vm_map_entry *me, *ne;
+        int s, i;
        UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);

        if (map->flags & VM_MAP_INTRSAFE || cold) {
                s = splvm();
                simple_lock(&uvm.kentry_lock);
                me = uvm.kentry_free;
-                if (me) uvm.kentry_free = me->next;
-                simple_unlock(&uvm.kentry_lock);
-                splx(s);
                if (me == NULL) {
-                        panic("uvm_mapent_alloc: out of static map entries, "
-                            "check MAX_KMAPENT (currently %d)",
-                            MAX_KMAPENT);
+                        ne = uvm_km_getpage(0);
+                        if (ne == NULL)
+                                panic("uvm_mapent_alloc: cannot allocate map "
+                                    "entry");
+                        for (i = 0;
+                            i < PAGE_SIZE / sizeof(struct vm_map_entry) - 1;
+                            i++)
+                                ne[i].next = &ne[i + 1];
+                        ne[i].next = NULL;
+                        me = ne;
+                        if (ratecheck(&uvm_kmapent_last_warn_time,
+                            &uvm_kmapent_warn_rate))
+                                printf("uvm_mapent_alloc: out of static "
+                                    "map entries\n");
                }
+                uvm.kentry_free = me->next;
+                uvmexp.kmapent++;
+                simple_unlock(&uvm.kentry_lock);
+                splx(s);
                me->flags = UVM_MAP_STATIC;
        } else if (map == kernel_map) {
                splassert(IPL_NONE);
@@ -429,6 +444,7 @@ uvm_mapent_free(struct vm_map_entry *me)
                simple_lock(&uvm.kentry_lock);
                me->next = uvm.kentry_free;
                uvm.kentry_free = me;
+                uvmexp.kmapent--;
                simple_unlock(&uvm.kentry_lock);
                splx(s);
        } else if (me->flags & UVM_MAP_KMEM) {
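
The warning added in uvm_mapent_alloc() is throttled through ratecheck(9): uvm_kmapent_warn_rate is { 10, 0 }, i.e. at most one "out of static map entries" message every ten seconds no matter how often the refill path is hit. Below is a small userland sketch of that throttling pattern; rate_check() is a hypothetical stand-in for the kernel's ratecheck(), built on time(3) with one-second granularity.

/*
 * Userland sketch of the ratecheck()-style throttling used for the
 * "out of static map entries" warning: report at most once per interval.
 * rate_check() is an illustrative stand-in, not the kernel interface.
 */
#include <stdio.h>
#include <time.h>

/* Return 1 (and reset *last) if at least "interval" seconds have passed. */
static int
rate_check(time_t *last, time_t interval)
{
        time_t now = time(NULL);

        if (now - *last >= interval) {
                *last = now;
                return (1);
        }
        return (0);
}

int
main(void)
{
        static time_t last_warn;        /* zero, so the first check fires */
        int i, printed = 0;

        /* Simulate a burst of refills; at most one warning per 10 seconds. */
        for (i = 0; i < 1000000; i++) {
                if (rate_check(&last_warn, 10)) {
                        printf("out of static map entries\n");
                        printed++;
                }
        }
        printf("%d warning(s) for %d simulated refills\n", printed, i);
        return (0);
}

This mirrors the intent of uvm_kmapent_last_warn_time and uvm_kmapent_warn_rate in the patch: the allocation keeps working while the console is not flooded with repeated messages.
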