summaryrefslogtreecommitdiff
path: root/sys/uvm
diff options
context:
space:
mode:
authorTed Unangst <tedu@cvs.openbsd.org>2004-05-27 04:55:29 +0000
committerTed Unangst <tedu@cvs.openbsd.org>2004-05-27 04:55:29 +0000
commit6b7157229e2c9d0e8a932b56aae6383da00f02c9 (patch)
tree39d1dc9d055be3b70591e08db11940e74b8e122d /sys/uvm
parent94536f73ec48910533ea84f9bbbc1922764cb755 (diff)
change uvm_km_getpage to take a waitok argument and sleep if appropriate.
change both the nointr and default pool allocators to use uvm_km_getpage. change pools to default to a maxpages value of 8, so they hoard less memory. change mbuf pools to use the default pool allocator. pools are now more efficient, use less of kmem_map, and are a bit faster. tested by mcbride, deraadt, pedro, drahn, miod to work everywhere
Diffstat (limited to 'sys/uvm')
-rw-r--r--sys/uvm/uvm_extern.h4
-rw-r--r--sys/uvm/uvm_km.c33
2 files changed, 20 insertions, 17 deletions
diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h
index 09a5f357cc9..3efb087c537 100644
--- a/sys/uvm/uvm_extern.h
+++ b/sys/uvm/uvm_extern.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_extern.h,v 1.52 2004/04/28 02:20:58 markus Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.53 2004/05/27 04:55:28 tedu Exp $ */
/* $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $ */
/*
@@ -510,7 +510,7 @@ void uvm_km_free_poolpage1(vm_map_t, vaddr_t);
#define uvm_km_alloc_poolpage(waitok) uvm_km_alloc_poolpage1(kmem_map, \
uvmexp.kmem_object, (waitok))
#define uvm_km_free_poolpage(addr) uvm_km_free_poolpage1(kmem_map, (addr))
-void *uvm_km_getpage(void);
+void *uvm_km_getpage(boolean_t);
void uvm_km_putpage(void *);
/* uvm_map.c */
diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c
index 9d4ffa0d081..aa9bfce1cfd 100644
--- a/sys/uvm/uvm_km.c
+++ b/sys/uvm/uvm_km.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_km.c,v 1.38 2004/04/28 02:20:58 markus Exp $ */
+/* $OpenBSD: uvm_km.c,v 1.39 2004/05/27 04:55:28 tedu Exp $ */
/* $NetBSD: uvm_km.c,v 1.42 2001/01/14 02:10:01 thorpej Exp $ */
/*
@@ -915,11 +915,10 @@ void uvm_km_thread(void *);
void
uvm_km_page_init(void)
{
- struct km_page *head, *page;
+ struct km_page *page;
int i;
- head = NULL;
for (i = 0; i < uvm_km_pages_lowat * 4; i++) {
#if defined(PMAP_MAP_POOLPAGE)
struct vm_page *pg;
@@ -938,10 +937,9 @@ uvm_km_page_init(void)
#else
page = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE);
#endif
- page->next = head;
- head = page;
+ page->next = uvm_km_pages_head;
+ uvm_km_pages_head = page;
}
- uvm_km_pages_head = head;
uvm_km_pages_free = i;
kthread_create_deferred(uvm_km_createthread, NULL);
@@ -962,9 +960,7 @@ uvm_km_thread(void *arg)
for (;;) {
if (uvm_km_pages_free >= uvm_km_pages_lowat)
tsleep(&uvm_km_pages_head, PVM, "kmalloc", 0);
- want = uvm_km_pages_lowat - uvm_km_pages_free;
- if (want < 16)
- want = 16;
+ want = 16;
for (i = 0; i < want; i++) {
#if defined(PMAP_MAP_POOLPAGE)
struct vm_page *pg;
@@ -992,21 +988,28 @@ uvm_km_thread(void *arg)
uvm_km_pages_head = head;
uvm_km_pages_free += i;
splx(s);
+ wakeup(&uvm_km_pages_free);
}
}
void *
-uvm_km_getpage(void)
+uvm_km_getpage(boolean_t waitok)
{
- struct km_page *page;
+ struct km_page *page = NULL;
int s;
s = splvm();
- page = uvm_km_pages_head;
- if (page) {
- uvm_km_pages_head = page->next;
- uvm_km_pages_free--;
+ for (;;) {
+ page = uvm_km_pages_head;
+ if (page) {
+ uvm_km_pages_head = page->next;
+ uvm_km_pages_free--;
+ break;
+ }
+ if (!waitok)
+ break;
+ tsleep(&uvm_km_pages_free, PVM, "getpage", 0);
}
splx(s);
if (uvm_km_pages_free < uvm_km_pages_lowat)