path: root/sys/uvm/uvm_amap.c
author    Michael Shalayeff <mickey@cvs.openbsd.org>  2006-06-21 16:20:06 +0000
committer Michael Shalayeff <mickey@cvs.openbsd.org>  2006-06-21 16:20:06 +0000
commit    9a8f5eb89d71175984d0bf3fa601941bcb8bbc1b (patch)
tree      c9aa284236c49f3e822b33c425cf48b4ccf8a712 /sys/uvm/uvm_amap.c
parent    7e13fccd703da0bc68c2560a3c6687efdafc9273 (diff)
from netbsd: make anons dynamically allocated from pool.
this results in less kva waste, since anons no longer need to be statically preallocated for every phys page and also every swap page. tested by beck krw miod
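
For context, the pool(9) pattern this commit moves anon allocation towards looks roughly like the sketch below. This is an illustrative reconstruction only: the pool name, wait channel string and helper functions are assumptions, not text taken from this diff (the real pool setup lives in uvm_anon.c, outside this file).

    #include <sys/param.h>
    #include <sys/pool.h>
    #include <uvm/uvm.h>

    /* one kernel-wide pool of struct vm_anon (name assumed for illustration) */
    struct pool uvm_anon_pool;

    void
    anon_pool_setup(void)                   /* hypothetical helper */
    {
            pool_init(&uvm_anon_pool, sizeof(struct vm_anon), 0, 0, 0,
                "anonpl", NULL);             /* NULL: default back-end allocator */
    }

    struct vm_anon *
    anon_alloc_one(void)                    /* hypothetical helper */
    {
            /* allocated on demand; no static array sized for every page */
            return (pool_get(&uvm_anon_pool, PR_NOWAIT));
    }

    void
    anon_free_one(struct vm_anon *anon)     /* hypothetical helper */
    {
            pool_put(&uvm_anon_pool, anon);
    }

The point of the change is in the last two helpers: anons come and go through pool_get()/pool_put() instead of being carved out of a statically preallocated array sized for every physical and swap page.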
Diffstat (limited to 'sys/uvm/uvm_amap.c')
-rw-r--r--  sys/uvm/uvm_amap.c  168
1 file changed, 159 insertions, 9 deletions
diff --git a/sys/uvm/uvm_amap.c b/sys/uvm/uvm_amap.c
index 6853ec54725..061e8f82113 100644
--- a/sys/uvm/uvm_amap.c
+++ b/sys/uvm/uvm_amap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_amap.c,v 1.29 2005/12/10 11:45:43 miod Exp $ */
+/* $OpenBSD: uvm_amap.c,v 1.30 2006/06/21 16:20:05 mickey Exp $ */
/* $NetBSD: uvm_amap.c,v 1.27 2000/11/25 06:27:59 chs Exp $ */
/*
@@ -65,11 +65,34 @@
struct pool uvm_amap_pool;
+struct simplelock amap_list_lock;
+LIST_HEAD(, vm_amap) amap_list;
+
/*
* local functions
*/
static struct vm_amap *amap_alloc1(int, int, int);
+static __inline void amap_list_insert(struct vm_amap *);
+static __inline void amap_list_remove(struct vm_amap *);
+
+static __inline void
+amap_list_insert(struct vm_amap *amap)
+{
+
+ simple_lock(&amap_list_lock);
+ LIST_INSERT_HEAD(&amap_list, amap, am_list);
+ simple_unlock(&amap_list_lock);
+}
+
+static __inline void
+amap_list_remove(struct vm_amap *amap)
+{
+
+ simple_lock(&amap_list_lock);
+ LIST_REMOVE(amap, am_list);
+ simple_unlock(&amap_list_lock);
+}
#ifdef UVM_AMAP_PPREF
/*
@@ -153,6 +176,9 @@ void
amap_init()
{
+
+ simple_lock_init(&amap_list_lock);
+
/*
* Initialize the vm_amap pool.
*/
@@ -235,10 +261,13 @@ amap_alloc(sz, padsz, waitf)
AMAP_B2SLOT(padslots, padsz);
amap = amap_alloc1(slots, padslots, waitf);
- if (amap)
+ if (amap) {
memset(amap->am_anon, 0,
amap->am_maxslot * sizeof(struct vm_anon *));
+ amap_list_insert(amap);
+ }
+
UVMHIST_LOG(maphist,"<- done, amap = 0x%x, sz=%d", amap, sz, 0, 0);
return(amap);
}
@@ -257,6 +286,7 @@ amap_free(amap)
UVMHIST_FUNC("amap_free"); UVMHIST_CALLED(maphist);
KASSERT(amap->am_ref == 0 && amap->am_nused == 0);
+ KASSERT((amap->am_flags & AMAP_SWAPOFF) == 0);
free(amap->am_slots, M_UVMAMAP);
free(amap->am_bckptr, M_UVMAMAP);
@@ -486,8 +516,8 @@ amap_share_protect(entry, prot)
for (lcv = entry->aref.ar_pageoff ; lcv < stop ; lcv++) {
if (amap->am_anon[lcv] == NULL)
continue;
- if (amap->am_anon[lcv]->u.an_page != NULL)
- pmap_page_protect(amap->am_anon[lcv]->u.an_page,
+ if (amap->am_anon[lcv]->an_page != NULL)
+ pmap_page_protect(amap->am_anon[lcv]->an_page,
prot);
}
return;
@@ -498,8 +528,8 @@ amap_share_protect(entry, prot)
slot = amap->am_slots[lcv];
if (slot < entry->aref.ar_pageoff || slot >= stop)
continue;
- if (amap->am_anon[slot]->u.an_page != NULL)
- pmap_page_protect(amap->am_anon[slot]->u.an_page, prot);
+ if (amap->am_anon[slot]->an_page != NULL)
+ pmap_page_protect(amap->am_anon[slot]->an_page, prot);
}
return;
}
@@ -507,7 +537,7 @@ amap_share_protect(entry, prot)
/*
* amap_wipeout: wipeout all anon's in an amap; then free the amap!
*
- * => called from amap_unref when the final reference to an amap is
+ * => called from amap_unref when the final reference to an amap is
* discarded (i.e. when reference count == 1)
* => the amap should be locked (by the caller)
*/
@@ -521,13 +551,24 @@ amap_wipeout(amap)
UVMHIST_FUNC("amap_wipeout"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist,"(amap=0x%x)", amap, 0,0,0);
+ KASSERT(amap->am_ref == 0);
+
+ if (__predict_false((amap->am_flags & AMAP_SWAPOFF) != 0)) {
+ /*
+ * amap_swap_off will call us again.
+ */
+ amap_unlock(amap);
+ return;
+ }
+ amap_list_remove(amap);
+
for (lcv = 0 ; lcv < amap->am_nused ; lcv++) {
int refs;
slot = amap->am_slots[lcv];
anon = amap->am_anon[slot];
- if (anon == NULL || anon->an_ref == 0)
+ if (anon == NULL || anon->an_ref == 0)
panic("amap_wipeout: corrupt amap");
simple_lock(&anon->an_lock); /* lock anon */
@@ -708,6 +749,8 @@ amap_copy(map, entry, waitf, canchunk, startva, endva)
amap_unlock(srcamap);
+ amap_list_insert(amap);
+
/*
* install new amap.
*/
@@ -771,7 +814,7 @@ ReStart:
slot = amap->am_slots[lcv];
anon = amap->am_anon[slot];
simple_lock(&anon->an_lock);
- pg = anon->u.an_page;
+ pg = anon->an_page;
/*
* page must be resident since parent is wired
@@ -1090,3 +1133,110 @@ amap_wiperange(amap, slotoff, slots)
}
#endif
+
+/*
+ * amap_swap_off: pagein anonymous pages in amaps and drop swap slots.
+ *
+ * => called with swap_syscall_lock held.
+ * => note that we don't always traverse all anons.
+ * eg. amaps being wiped out, released anons.
+ * => return TRUE if failed.
+ */
+
+boolean_t
+amap_swap_off(int startslot, int endslot)
+{
+ struct vm_amap *am;
+ struct vm_amap *am_next;
+ struct vm_amap marker_prev;
+ struct vm_amap marker_next;
+ struct proc *p = curproc;
+ boolean_t rv = FALSE;
+
+#if defined(DIAGNOSTIC)
+ memset(&marker_prev, 0, sizeof(marker_prev));
+ memset(&marker_next, 0, sizeof(marker_next));
+#endif /* defined(DIAGNOSTIC) */
+
+ PHOLD(p);
+ simple_lock(&amap_list_lock);
+ for (am = LIST_FIRST(&amap_list); am != NULL && !rv; am = am_next) {
+ int i;
+
+ LIST_INSERT_BEFORE(am, &marker_prev, am_list);
+ LIST_INSERT_AFTER(am, &marker_next, am_list);
+
+ if (!amap_lock_try(am)) {
+ simple_unlock(&amap_list_lock);
+ preempt(NULL);
+ simple_lock(&amap_list_lock);
+ am_next = LIST_NEXT(&marker_prev, am_list);
+ if (am_next == &marker_next) {
+ am_next = LIST_NEXT(am_next, am_list);
+ } else {
+ KASSERT(LIST_NEXT(am_next, am_list) ==
+ &marker_next);
+ }
+ LIST_REMOVE(&marker_prev, am_list);
+ LIST_REMOVE(&marker_next, am_list);
+ continue;
+ }
+
+ simple_unlock(&amap_list_lock);
+
+ if (am->am_nused <= 0) {
+ amap_unlock(am);
+ goto next;
+ }
+
+ for (i = 0; i < am->am_nused; i++) {
+ int slot;
+ int swslot;
+ struct vm_anon *anon;
+
+ slot = am->am_slots[i];
+ anon = am->am_anon[slot];
+ simple_lock(&anon->an_lock);
+
+ swslot = anon->an_swslot;
+ if (swslot < startslot || endslot <= swslot) {
+ simple_unlock(&anon->an_lock);
+ continue;
+ }
+
+ am->am_flags |= AMAP_SWAPOFF;
+ amap_unlock(am);
+
+ rv = uvm_anon_pagein(anon);
+
+ amap_lock(am);
+ am->am_flags &= ~AMAP_SWAPOFF;
+ if (amap_refs(am) == 0) {
+ amap_wipeout(am);
+ am = NULL;
+ break;
+ }
+ if (rv) {
+ break;
+ }
+ i = 0;
+ }
+
+ if (am) {
+ amap_unlock(am);
+ }
+
+next:
+ simple_lock(&amap_list_lock);
+ KASSERT(LIST_NEXT(&marker_prev, am_list) == &marker_next ||
+ LIST_NEXT(LIST_NEXT(&marker_prev, am_list), am_list) ==
+ &marker_next);
+ am_next = LIST_NEXT(&marker_next, am_list);
+ LIST_REMOVE(&marker_prev, am_list);
+ LIST_REMOVE(&marker_next, am_list);
+ }
+ simple_unlock(&amap_list_lock);
+ PRELE(p);
+
+ return rv;
+}
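
For readers of the new amap_swap_off() above, a hypothetical call site (sketch only; the real caller is on the swapctl/swap-off path in uvm_swap.c, which is not part of this diff) would look like:

    /*
     * Assumed caller sketch: with swap_syscall_lock held, page in every
     * anon whose swap slot lies in [startslot, endslot) before the swap
     * device goes away.  A TRUE return means some anon could not be
     * brought in, so the swapoff has to be aborted.
     */
    if (amap_swap_off(startslot, endslot)) {
            /* error value chosen for the sketch, not from this commit */
            return (ENOMEM);
    }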