Diffstat (limited to 'sys/uvm/uvm_map.c')
-rw-r--r--  sys/uvm/uvm_map.c  40
1 file changed, 39 insertions, 1 deletion
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index e418e9bbeb6..938fb3edd16 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map.c,v 1.120 2009/08/06 15:28:14 oga Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.121 2009/08/13 20:40:13 ariane Exp $ */
/* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
/*
@@ -724,6 +724,38 @@ uvm_map_p(struct vm_map *map, vaddr_t *startp, vsize_t size,
 	    map, *startp, size, flags);
 	UVMHIST_LOG(maphist, " uobj/offset %p/%ld", uobj, (u_long)uoffset,0,0);
+#ifdef KVA_GUARDPAGES
+	if (map == kernel_map) {
+		/*
+		 * kva_guardstart is initialized to the start of the kernelmap
+		 * and cycles through the kva space.
+		 * This way we should have a long time between re-use of kva.
+		 */
+		static vaddr_t kva_guardstart = 0;
+		if (kva_guardstart == 0) {
+			kva_guardstart = vm_map_min(map);
+			printf("uvm_map: kva guard pages enabled: %p\n",
+			    kva_guardstart);
+		}
+		size += PAGE_SIZE; /* Add guard page at the end. */
+		/*
+		 * Try to fully exhaust kva prior to wrap-around.
+		 * (This may eat your ram!)
+		 */
+		if (VM_MAX_KERNEL_ADDRESS < (kva_guardstart + size)) {
+			static int wrap_counter = 0;
+			printf("uvm_map: kva guard page wrap-around %d\n",
+			    ++wrap_counter);
+			kva_guardstart = vm_map_min(map);
+		}
+		*startp = kva_guardstart;
+		/*
+		 * Prepare for next round.
+		 */
+		kva_guardstart += size;
+	}
+#endif
+
 	uvm_tree_sanity(map, "map entry");
 	if ((map->flags & VM_MAP_INTRSAFE) == 0)
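The hunk above changes how kernel_map start addresses are chosen when KVA_GUARDPAGES is defined: every request is grown by one page so a guard sits behind the mapping, and the start hint comes from a cursor (kva_guardstart) that only moves forward, wrapping back to vm_map_min(kernel_map) once it would run past VM_MAX_KERNEL_ADDRESS. Per the in-line comments, the point is to delay reuse of freed kva as long as possible. Below is a minimal userland sketch of that cursor logic; guard_hint, PAGE_SZ, MAP_MIN and MAP_MAX are made-up stand-ins for the kernel's names, and the sketch ignores the map locking and the wrap-around diagnostics of the real code.

#include <stdio.h>

#define PAGE_SZ	4096UL				/* stand-in for PAGE_SIZE */
#define MAP_MIN	0x1000UL			/* stand-in for vm_map_min(kernel_map) */
#define MAP_MAX	(MAP_MIN + 64UL * PAGE_SZ)	/* stand-in for VM_MAX_KERNEL_ADDRESS */

/*
 * Return a start address for a request of *size bytes and grow the
 * request by one trailing guard page.  The cursor only advances,
 * wrapping to MAP_MIN when the request no longer fits, so addresses
 * are handed out again as late as possible.
 */
static unsigned long
guard_hint(unsigned long *size)
{
	static unsigned long cursor = MAP_MIN;
	unsigned long start;

	*size += PAGE_SZ;		/* guard page at the end */
	if (MAP_MAX < cursor + *size)
		cursor = MAP_MIN;	/* range exhausted: wrap around */
	start = cursor;
	cursor += *size;		/* next request starts past this guard */
	return start;
}

int
main(void)
{
	unsigned long sz = 3UL * PAGE_SZ;
	unsigned long va = guard_hint(&sz);

	printf("map %lu bytes (guard included) at 0x%lx\n", sz, va);
	return 0;
}

With this placement, an overrun off the end of a mapping runs into its guard page rather than the next allocation, and a stale pointer into recently freed kva is unlikely to hit a fresh mapping soon, so both tend to fault instead of silently corrupting other kernel memory.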
@@ -1424,6 +1456,12 @@ uvm_unmap_p(vm_map_t map, vaddr_t start, vaddr_t end, struct proc *p)
 	UVMHIST_LOG(maphist, " (map=%p, start=0x%lx, end=0x%lx)",
 	    map, start, end, 0);
+
+#ifdef KVA_GUARDPAGES
+	if (map == kernel_map)
+		end += PAGE_SIZE; /* Add guardpage. */
+#endif
+
 	/*
 	 * work now done by helper functions. wipe the pmap's and then
 	 * detach from the dead entries...
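The second hunk keeps the unmap path symmetric with the map path: every kernel_map mapping was grown by one guard page above, so the range handed to the helper functions here is widened by the same PAGE_SIZE and the guard is torn down together with its mapping; otherwise each map/unmap cycle would leave the trailing guard page behind.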