path: root/sys/uvm/uvm_anon.c
Diffstat (limited to 'sys/uvm/uvm_anon.c')
-rw-r--r--  sys/uvm/uvm_anon.c | 209
1 file changed, 184 insertions(+), 25 deletions(-)
diff --git a/sys/uvm/uvm_anon.c b/sys/uvm/uvm_anon.c
index b841a4f1e65..f24114aee9e 100644
--- a/sys/uvm/uvm_anon.c
+++ b/sys/uvm/uvm_anon.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_anon.c,v 1.22 2006/06/21 16:20:05 mickey Exp $ */
+/* $OpenBSD: uvm_anon.c,v 1.23 2006/07/13 22:51:26 deraadt Exp $ */
/* $NetBSD: uvm_anon.c,v 1.10 2000/11/25 06:27:59 chs Exp $ */
/*
@@ -47,7 +47,22 @@
#include <uvm/uvm.h>
#include <uvm/uvm_swap.h>
-struct pool uvm_anon_pool;
+/*
+ * anonblock_list: global list of anon blocks,
+ * locked by swap_syscall_lock (since we never remove
+ * anything from this list and we only add to it via swapctl(2)).
+ */
+
+struct uvm_anonblock {
+ LIST_ENTRY(uvm_anonblock) list;
+ int count;
+ struct vm_anon *anons;
+};
+static LIST_HEAD(anonlist, uvm_anonblock) anonblock_list;
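The block list uses the standard <sys/queue.h> LIST macros: the linkage is embedded in each element, head insertion is O(1), and (per the comment above) entries are never unlinked. A minimal userland sketch of the same pattern, with hypothetical names:

    #include <sys/queue.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct myblock {
            LIST_ENTRY(myblock) list;       /* linkage lives in the element */
            int count;
    };

    static LIST_HEAD(, myblock) myblock_list =
        LIST_HEAD_INITIALIZER(myblock_list);

    int
    main(void)
    {
            struct myblock *b, *it;

            b = malloc(sizeof(*b));
            b->count = 184;
            LIST_INSERT_HEAD(&myblock_list, b, list);       /* O(1) push */

            /* walk every block, as anon_swap_off() does below */
            for (it = LIST_FIRST(&myblock_list); it != NULL;
                it = LIST_NEXT(it, list))
                    printf("count = %d\n", it->count);
            return (0);
    }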
+
+
+static boolean_t anon_pagein(struct vm_anon *);
+
/*
* allocate anons
@@ -55,28 +70,104 @@ struct pool uvm_anon_pool;
void
uvm_anon_init()
{
- pool_init(&uvm_anon_pool, sizeof(struct vm_anon), 0, 0, 0, "anonpl",
- &pool_allocator_nointr);
- pool_sethiwat(&uvm_anon_pool, uvmexp.free / 16);
+ int nanon = uvmexp.free - (uvmexp.free / 16); /* XXXCDC ??? */
+
+ simple_lock_init(&uvm.afreelock);
+ LIST_INIT(&anonblock_list);
+
+ /*
+ * Allocate the initial anons.
+ */
+ uvm_anon_add(nanon);
}
/*
- * allocate an anon
+ * add some more anons to the free pool. called when we add
+ * more swap space.
+ *
+ * => swap_syscall_lock should be held (protects anonblock_list).
*/
-struct vm_anon *
-uvm_analloc()
+int
+uvm_anon_add(count)
+ int count;
{
+ struct uvm_anonblock *anonblock;
struct vm_anon *anon;
+ int lcv, needed;
+
+ simple_lock(&uvm.afreelock);
+ uvmexp.nanonneeded += count;
+ needed = uvmexp.nanonneeded - uvmexp.nanon;
+ simple_unlock(&uvm.afreelock);
- anon = pool_get(&uvm_anon_pool, PR_NOWAIT);
- if (anon) {
+ if (needed <= 0) {
+ return 0;
+ }
+
+ anon = (void *)uvm_km_alloc(kernel_map, sizeof(*anon) * needed);
+
+ /* XXX Should wait for VM to free up. */
+ if (anon == NULL) {
+ printf("uvm_anon_add: can not allocate %d anons\n", needed);
+ panic("uvm_anon_add");
+ }
+
+ MALLOC(anonblock, void *, sizeof(*anonblock), M_UVMAMAP, M_WAITOK);
+
+ anonblock->count = needed;
+ anonblock->anons = anon;
+ LIST_INSERT_HEAD(&anonblock_list, anonblock, list);
+ memset(anon, 0, sizeof(*anon) * needed);
+
+ simple_lock(&uvm.afreelock);
+ uvmexp.nanon += needed;
+ uvmexp.nfreeanon += needed;
+ for (lcv = 0; lcv < needed; lcv++) {
simple_lock_init(&anon->an_lock);
- anon->an_ref = 1;
- anon->an_page = NULL;
- anon->an_swslot = 0;
- simple_lock(&anon->an_lock);
+ anon[lcv].u.an_nxt = uvm.afree;
+ uvm.afree = &anon[lcv];
+ simple_lock_init(&uvm.afree->an_lock);
}
- return(anon);
+ simple_unlock(&uvm.afreelock);
+ return 0;
+}
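The nanonneeded/nanon pair makes the function allocate only the shortfall against a running target. With illustrative numbers: starting from nanon = nanonneeded = 30720, adding swap worth 8192 anons yields needed = (30720 + 8192) - 30720 = 8192; had uvm_anon_remove() previously lowered nanonneeded by 1000, the same call would compute needed = 7192 and reuse the surplus. Note the XXX above: the allocation panics rather than waiting for memory to free up.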
+
+/*
+ * remove anons from the free pool.
+ */
+void
+uvm_anon_remove(count)
+ int count;
+{
+ /*
+ * we never actually free any anons, to avoid allocation overhead.
+ * XXX someday we might want to try to free anons.
+ */
+
+ simple_lock(&uvm.afreelock);
+ uvmexp.nanonneeded -= count;
+ simple_unlock(&uvm.afreelock);
+}
+
+/*
+ * allocate an anon
+ */
+struct vm_anon *
+uvm_analloc()
+{
+ struct vm_anon *a;
+
+ simple_lock(&uvm.afreelock);
+ a = uvm.afree;
+ if (a) {
+ uvm.afree = a->u.an_nxt;
+ uvmexp.nfreeanon--;
+ a->an_ref = 1;
+ a->an_swslot = 0;
+ a->u.an_page = NULL; /* so we can free quickly */
+ }
+ simple_unlock(&uvm.afreelock);
+ return(a);
}
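uvm_anon_add() threads each new anon onto a single LIFO free list through u.an_nxt; uvm_analloc() pops from that list under uvm.afreelock, and uvm_anfree() below pushes freed anons back instead of ever returning memory. The list discipline, stripped of locking and with hypothetical standalone types:

    struct a {                      /* stand-in for struct vm_anon */
            struct a *nxt;          /* stand-in for u.an_nxt */
    };

    static struct a *afree;         /* head of the LIFO free list */

    /* thread a freshly allocated block of n anons (the uvm_anon_add() loop) */
    static void
    freelist_add(struct a *block, int n)
    {
            int i;

            for (i = 0; i < n; i++) {
                    block[i].nxt = afree;
                    afree = &block[i];
            }
    }

    /* pop one anon; NULL when the list is exhausted (uvm_analloc()) */
    static struct a *
    freelist_get(void)
    {
            struct a *p = afree;

            if (p != NULL)
                    afree = p->nxt;
            return (p);
    }

    /* push a freed anon back (the tail of uvm_anfree()) */
    static void
    freelist_put(struct a *p)
    {
            p->nxt = afree;
            afree = p;
    }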
/*
@@ -99,7 +190,7 @@ uvm_anfree(anon)
* get page
*/
- pg = anon->an_page;
+ pg = anon->u.an_page;
/*
* if there is a resident page and it is loaned, then anon may not
@@ -166,10 +257,11 @@ uvm_anfree(anon)
* now that we've stripped the data areas from the anon, free the anon
* itself!
*/
- KASSERT(anon->an_page == NULL);
- KASSERT(anon->an_swslot == 0);
-
- pool_put(&uvm_anon_pool, anon);
+ simple_lock(&uvm.afreelock);
+ anon->u.an_nxt = uvm.afree;
+ uvm.afree = anon;
+ uvmexp.nfreeanon++;
+ simple_unlock(&uvm.afreelock);
UVMHIST_LOG(maphist,"<- done!",0,0,0,0);
}
@@ -192,7 +284,7 @@ uvm_anon_dropswap(anon)
uvm_swap_free(anon->an_swslot, 1);
anon->an_swslot = 0;
- if (anon->an_page == NULL) {
+ if (anon->u.an_page == NULL) {
/* this page is no longer only in swap. */
simple_lock(&uvm.swap_data_lock);
uvmexp.swpgonly--;
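uvmexp.swpgonly counts pages whose only copy lives in swap; freeing the slot of an anon with no resident page destroys that last copy, so the counter is decremented here under swap_data_lock.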
@@ -233,7 +325,7 @@ uvm_anon_lockloanpg(anon)
* not produce an incorrect result.
*/
- while (((pg = anon->an_page) != NULL) && pg->loan_count != 0) {
+ while (((pg = anon->u.an_page) != NULL) && pg->loan_count != 0) {
/*
* quickly check to see if the page has an object before
@@ -295,6 +387,73 @@ uvm_anon_lockloanpg(anon)
return(pg);
}
+
+
+/*
+ * page in every anon that is paged out to a range of swslots.
+ *
+ * swap_syscall_lock should be held (protects anonblock_list).
+ */
+
+boolean_t
+anon_swap_off(startslot, endslot)
+ int startslot, endslot;
+{
+ struct uvm_anonblock *anonblock;
+
+ for (anonblock = LIST_FIRST(&anonblock_list);
+ anonblock != NULL;
+ anonblock = LIST_NEXT(anonblock, list)) {
+ int i;
+
+ /*
+ * loop thru all the anons in the anonblock,
+ * paging in where needed.
+ */
+
+ for (i = 0; i < anonblock->count; i++) {
+ struct vm_anon *anon = &anonblock->anons[i];
+ int slot;
+
+ /*
+ * lock anon to work on it.
+ */
+
+ simple_lock(&anon->an_lock);
+
+ /*
+ * is this anon's swap slot in range?
+ */
+
+ slot = anon->an_swslot;
+ if (slot >= startslot && slot < endslot) {
+ boolean_t rv;
+
+ /*
+ * yup, page it in.
+ */
+
+ /* locked: anon */
+ rv = anon_pagein(anon);
+ /* unlocked: anon */
+
+ if (rv) {
+ return rv;
+ }
+ } else {
+
+ /*
+ * nope, unlock and proceed.
+ */
+
+ simple_unlock(&anon->an_lock);
+ }
+ }
+ }
+ return FALSE;
+}
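anon_swap_off() returns TRUE as soon as a single pagein fails for lack of memory, and FALSE once every anon slot in the range has been paged in. An illustrative caller, not runnable outside the kernel (the real call site is the swapoff path of swapctl(2) in uvm_swap.c; drain_range() and its error mapping are hypothetical):

    /*
     * Drain every anon-owned swap slot in [startslot, endslot)
     * before the corresponding swap device is removed.  The
     * caller must hold swap_syscall_lock, which also protects
     * anonblock_list above.
     */
    static int
    drain_range(int startslot, int endslot)
    {
            if (anon_swap_off(startslot, endslot))
                    return (ENOMEM);        /* abort the swapoff */
            return (0);
    }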
+
+
/*
* fetch an anon's page.
*
@@ -302,8 +461,8 @@ uvm_anon_lockloanpg(anon)
* => returns TRUE if pagein was aborted due to lack of memory.
*/
-boolean_t
-uvm_anon_pagein(anon)
+static boolean_t
+anon_pagein(anon)
struct vm_anon *anon;
{
struct vm_page *pg;
@@ -345,7 +504,7 @@ uvm_anon_pagein(anon)
* mark it as dirty, clear its swslot and un-busy it.
*/
- pg = anon->an_page;
+ pg = anon->u.an_page;
uobj = pg->uobject;
uvm_swap_free(anon->an_swslot, 1);
anon->an_swslot = 0;