path: root/sys/uvm/uvm_km.c
author     Artur Grabowski <art@cvs.openbsd.org>  2007-08-03 22:49:08 +0000
committer  Artur Grabowski <art@cvs.openbsd.org>  2007-08-03 22:49:08 +0000
commit     51a49e6860424bd6c7f18ab82b34db1198e4da19 (patch)
tree       55d4c54f4f0fa5a93ab3907162cca6854ff88273 /sys/uvm/uvm_km.c
parent     766cb6d8e8aa862760d6933a15efdf26e46ca367 (diff)
Don't let the pagedaemon wait for pages here. We could trigger this easily
when we hit swap before actually fully populating the buffer cache, which would lead to deadlocks. From pedro, tested by many, deraadt@ ok
Diffstat (limited to 'sys/uvm/uvm_km.c')
-rw-r--r--  sys/uvm/uvm_km.c  |  18 ++++++++++++++----
1 file changed, 14 insertions(+), 4 deletions(-)
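The change below only makes sense given how the page daemon interacts with this allocator: uvm_wait() blocks until the page daemon has reclaimed memory, so if the page daemon itself calls into uvm_km_alloc1() and sleeps there, it waits on work that only it can do. The following is a simplified userland model of that pattern, not UVM code; every name in it (try_alloc_page, wait_for_pages, undo_partial_mapping, running_as_pagedaemon) is a hypothetical stand-in for the corresponding kernel primitive (uvm_pagealloc, uvm_wait, uvm_unmap, the curproc == uvm.pagedaemon_proc check).

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Opaque stand-in for struct vm_page. */
struct page { int unused; };

/* Simulated state: no free pages right now, and we pretend to be the daemon. */
static int free_pages = 0;
static bool running_as_pagedaemon = true;

/* Stand-in for uvm_pagealloc(): returns NULL when no page is available. */
static struct page *
try_alloc_page(void)
{
	if (free_pages <= 0)
		return NULL;
	free_pages--;
	return malloc(sizeof(struct page));
}

/*
 * Stand-in for uvm_wait(): in the kernel this sleeps until the page
 * daemon has freed memory; here we simply pretend a page showed up.
 */
static void
wait_for_pages(void)
{
	free_pages++;
}

/* Stand-in for uvm_unmap(): back out whatever was mapped so far. */
static void
undo_partial_mapping(void)
{
	printf("backing out the partially built mapping\n");
}

/*
 * The pattern the commit introduces: any ordinary thread may sleep
 * until memory is reclaimed, but the page daemon itself must fail
 * instead of sleeping, because it is the only thread that would ever
 * satisfy that wait.
 */
static struct page *
alloc_one_page_or_fail(void)
{
	struct page *pg;

	for (;;) {
		pg = try_alloc_page();
		if (pg != NULL)
			return pg;

		if (running_as_pagedaemon) {
			undo_partial_mapping();
			return NULL;		/* fail rather than deadlock */
		}
		wait_for_pages();		/* safe: the reclaimer will run */
	}
}

int
main(void)
{
	if (alloc_one_page_or_fail() == NULL)
		printf("allocation failed in page-daemon context, as expected\n");
	return 0;
}

The kernel version of this branch is exactly what the hunk below adds: curproc == uvm.pagedaemon_proc identifies the reclaimer, uvm_unmap(map, kva, loopva - kva) backs out the partial allocation, and uvm_wait() remains the sleep that every other thread is still allowed to take.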
diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c
index f854f418faf..02f0eb0aa4e 100644
--- a/sys/uvm/uvm_km.c
+++ b/sys/uvm/uvm_km.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_km.c,v 1.63 2007/04/29 15:46:42 art Exp $ */
+/* $OpenBSD: uvm_km.c,v 1.64 2007/08/03 22:49:07 art Exp $ */
/* $NetBSD: uvm_km.c,v 1.42 2001/01/14 02:10:01 thorpej Exp $ */
/*
@@ -534,10 +534,20 @@ uvm_km_alloc1(struct vm_map *map, vsize_t size, vsize_t align, boolean_t zeroit)
}
simple_unlock(&uvm.kernel_object->vmobjlock);
if (__predict_false(pg == NULL)) {
- uvm_wait("km_alloc1w"); /* wait for memory */
- continue;
+ if (curproc == uvm.pagedaemon_proc) {
+ /*
+ * It is unfeasible for the page daemon to
+ * sleep for memory, so free what we have
+ * allocated and fail.
+ */
+ uvm_unmap(map, kva, loopva - kva);
+ return (NULL);
+ } else {
+ uvm_wait("km_alloc1w"); /* wait for memory */
+ continue;
+ }
}
-
+
/*
* map it in; note we're never called with an intrsafe
* object, so we always use regular old pmap_enter().
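For context, the mapping step that this comment introduces is a pmap_enter(9) call; the hunk shown here is cut off before it. The fragment below is only a hedged sketch of what such a wired kernel mapping typically looks like, not a quote of the file, and the exact protection and flag bits used in uvm_km_alloc1() may differ. (For intrsafe objects the lighter-weight pmap_kenter_pa() would be used instead, which is what the comment alludes to.)

	/* Sketch only: install a wired kernel mapping for the fresh page. */
	pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);

PMAP_WIRED marks the mapping as wired, so the pmap layer never expects it to fault; the physical address comes from the vm_page allocated in the loop above.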