summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTheo de Raadt <deraadt@cvs.openbsd.org>2006-07-13 22:51:27 +0000
committerTheo de Raadt <deraadt@cvs.openbsd.org>2006-07-13 22:51:27 +0000
commit6a76bccf8a0c4378de36313c8973c7ca1fc0f545 (patch)
tree430dacf96000212238bd82a6f64553a6c6917bee
parentbd9cebb8b30f438d9adee25b85f18aaa3550285b (diff)
Back out the anon change. Apparently it was tested by a few, but most of
us did not see it or get a chance to test it before it was committed. It broke cvs, in the ami driver, making it not succeed at seeing its devices.
-rw-r--r--lib/libkvm/kvm_proc.c6
-rw-r--r--sys/uvm/uvm.h6
-rw-r--r--sys/uvm/uvm_amap.c168
-rw-r--r--sys/uvm/uvm_amap.h6
-rw-r--r--sys/uvm/uvm_amap_i.h10
-rw-r--r--sys/uvm/uvm_anon.c209
-rw-r--r--sys/uvm/uvm_anon.h23
-rw-r--r--sys/uvm/uvm_fault.c40
-rw-r--r--sys/uvm/uvm_init.c10
-rw-r--r--sys/uvm/uvm_loan.c8
-rw-r--r--sys/uvm/uvm_map.c8
-rw-r--r--sys/uvm/uvm_mmap.c4
-rw-r--r--sys/uvm/uvm_page.c12
-rw-r--r--sys/uvm/uvm_pdaemon.c6
-rw-r--r--sys/uvm/uvm_swap.c22
15 files changed, 279 insertions, 259 deletions
diff --git a/lib/libkvm/kvm_proc.c b/lib/libkvm/kvm_proc.c
index b44ad1e16e6..5ede373d0d5 100644
--- a/lib/libkvm/kvm_proc.c
+++ b/lib/libkvm/kvm_proc.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kvm_proc.c,v 1.29 2006/06/21 16:20:05 mickey Exp $ */
+/* $OpenBSD: kvm_proc.c,v 1.30 2006/07/13 22:51:24 deraadt Exp $ */
/* $NetBSD: kvm_proc.c,v 1.30 1999/03/24 05:50:50 mrg Exp $ */
/*-
* Copyright (c) 1998 The NetBSD Foundation, Inc.
@@ -73,7 +73,7 @@
#if 0
static char sccsid[] = "@(#)kvm_proc.c 8.3 (Berkeley) 9/23/93";
#else
-static char *rcsid = "$OpenBSD: kvm_proc.c,v 1.29 2006/06/21 16:20:05 mickey Exp $";
+static char *rcsid = "$OpenBSD: kvm_proc.c,v 1.30 2006/07/13 22:51:24 deraadt Exp $";
#endif
#endif /* LIBC_SCCS and not lint */
@@ -228,7 +228,7 @@ _kvm_ureadm(kvm_t *kd, const struct miniproc *p, u_long va, u_long *cnt)
if (KREAD(kd, addr, &anon))
return (NULL);
- addr = (u_long)anon.an_page;
+ addr = (u_long)anon.u.an_page;
if (addr) {
if (KREAD(kd, addr, &pg))
return (NULL);
diff --git a/sys/uvm/uvm.h b/sys/uvm/uvm.h
index 95e2f3df9a3..87194aeee40 100644
--- a/sys/uvm/uvm.h
+++ b/sys/uvm/uvm.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm.h,v 1.19 2006/06/21 16:20:05 mickey Exp $ */
+/* $OpenBSD: uvm.h,v 1.20 2006/07/13 22:51:26 deraadt Exp $ */
/* $NetBSD: uvm.h,v 1.24 2000/11/27 08:40:02 chs Exp $ */
/*
@@ -102,6 +102,10 @@ struct uvm {
int page_hashmask; /* hash mask */
simple_lock_data_t hashlock; /* lock on page_hash array */
+ /* anon stuff */
+ struct vm_anon *afree; /* anon free list */
+ simple_lock_data_t afreelock; /* lock on anon free list */
+
/* static kernel map entry pool */
vm_map_entry_t kentry_free; /* free page pool */
simple_lock_data_t kentry_lock;
diff --git a/sys/uvm/uvm_amap.c b/sys/uvm/uvm_amap.c
index 061e8f82113..a9ae6253c7a 100644
--- a/sys/uvm/uvm_amap.c
+++ b/sys/uvm/uvm_amap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_amap.c,v 1.30 2006/06/21 16:20:05 mickey Exp $ */
+/* $OpenBSD: uvm_amap.c,v 1.31 2006/07/13 22:51:26 deraadt Exp $ */
/* $NetBSD: uvm_amap.c,v 1.27 2000/11/25 06:27:59 chs Exp $ */
/*
@@ -65,34 +65,11 @@
struct pool uvm_amap_pool;
-struct simplelock amap_list_lock;
-LIST_HEAD(, vm_amap) amap_list;
-
/*
* local functions
*/
static struct vm_amap *amap_alloc1(int, int, int);
-static __inline void amap_list_insert(struct vm_amap *);
-static __inline void amap_list_remove(struct vm_amap *);
-
-static __inline void
-amap_list_insert(struct vm_amap *amap)
-{
-
- simple_lock(&amap_list_lock);
- LIST_INSERT_HEAD(&amap_list, amap, am_list);
- simple_unlock(&amap_list_lock);
-}
-
-static __inline void
-amap_list_remove(struct vm_amap *amap)
-{
-
- simple_lock(&amap_list_lock);
- LIST_REMOVE(amap, am_list);
- simple_unlock(&amap_list_lock);
-}
#ifdef UVM_AMAP_PPREF
/*
@@ -176,9 +153,6 @@ void
amap_init()
{
-
- simple_lock_init(&amap_list_lock);
-
/*
* Initialize the vm_amap pool.
*/
@@ -261,13 +235,10 @@ amap_alloc(sz, padsz, waitf)
AMAP_B2SLOT(padslots, padsz);
amap = amap_alloc1(slots, padslots, waitf);
- if (amap) {
+ if (amap)
memset(amap->am_anon, 0,
amap->am_maxslot * sizeof(struct vm_anon *));
- amap_list_insert(amap);
- }
-
UVMHIST_LOG(maphist,"<- done, amap = 0x%x, sz=%d", amap, sz, 0, 0);
return(amap);
}
@@ -286,7 +257,6 @@ amap_free(amap)
UVMHIST_FUNC("amap_free"); UVMHIST_CALLED(maphist);
KASSERT(amap->am_ref == 0 && amap->am_nused == 0);
- KASSERT((amap->am_flags & AMAP_SWAPOFF) == 0);
free(amap->am_slots, M_UVMAMAP);
free(amap->am_bckptr, M_UVMAMAP);
@@ -516,8 +486,8 @@ amap_share_protect(entry, prot)
for (lcv = entry->aref.ar_pageoff ; lcv < stop ; lcv++) {
if (amap->am_anon[lcv] == NULL)
continue;
- if (amap->am_anon[lcv]->an_page != NULL)
- pmap_page_protect(amap->am_anon[lcv]->an_page,
+ if (amap->am_anon[lcv]->u.an_page != NULL)
+ pmap_page_protect(amap->am_anon[lcv]->u.an_page,
prot);
}
return;
@@ -528,8 +498,8 @@ amap_share_protect(entry, prot)
slot = amap->am_slots[lcv];
if (slot < entry->aref.ar_pageoff || slot >= stop)
continue;
- if (amap->am_anon[slot]->an_page != NULL)
- pmap_page_protect(amap->am_anon[slot]->an_page, prot);
+ if (amap->am_anon[slot]->u.an_page != NULL)
+ pmap_page_protect(amap->am_anon[slot]->u.an_page, prot);
}
return;
}
@@ -537,7 +507,7 @@ amap_share_protect(entry, prot)
/*
* amap_wipeout: wipeout all anon's in an amap; then free the amap!
*
- * => called from amap_unref when the final reference to an amap is
+ * => called from amap_unref when the final reference to an amap is
* discarded (i.e. when reference count == 1)
* => the amap should be locked (by the caller)
*/
@@ -551,24 +521,13 @@ amap_wipeout(amap)
UVMHIST_FUNC("amap_wipeout"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist,"(amap=0x%x)", amap, 0,0,0);
- KASSERT(amap->am_ref == 0);
-
- if (__predict_false((amap->am_flags & AMAP_SWAPOFF) != 0)) {
- /*
- * amap_swap_off will call us again.
- */
- amap_unlock(amap);
- return;
- }
- amap_list_remove(amap);
-
for (lcv = 0 ; lcv < amap->am_nused ; lcv++) {
int refs;
slot = amap->am_slots[lcv];
anon = amap->am_anon[slot];
- if (anon == NULL || anon->an_ref == 0)
+ if (anon == NULL || anon->an_ref == 0)
panic("amap_wipeout: corrupt amap");
simple_lock(&anon->an_lock); /* lock anon */
@@ -749,8 +708,6 @@ amap_copy(map, entry, waitf, canchunk, startva, endva)
amap_unlock(srcamap);
- amap_list_insert(amap);
-
/*
* install new amap.
*/
@@ -814,7 +771,7 @@ ReStart:
slot = amap->am_slots[lcv];
anon = amap->am_anon[slot];
simple_lock(&anon->an_lock);
- pg = anon->an_page;
+ pg = anon->u.an_page;
/*
* page must be resident since parent is wired
@@ -1133,110 +1090,3 @@ amap_wiperange(amap, slotoff, slots)
}
#endif
-
-/*
- * amap_swap_off: pagein anonymous pages in amaps and drop swap slots.
- *
- * => called with swap_syscall_lock held.
- * => note that we don't always traverse all anons.
- * eg. amaps being wiped out, released anons.
- * => return TRUE if failed.
- */
-
-boolean_t
-amap_swap_off(int startslot, int endslot)
-{
- struct vm_amap *am;
- struct vm_amap *am_next;
- struct vm_amap marker_prev;
- struct vm_amap marker_next;
- struct proc *p = curproc;
- boolean_t rv = FALSE;
-
-#if defined(DIAGNOSTIC)
- memset(&marker_prev, 0, sizeof(marker_prev));
- memset(&marker_next, 0, sizeof(marker_next));
-#endif /* defined(DIAGNOSTIC) */
-
- PHOLD(p);
- simple_lock(&amap_list_lock);
- for (am = LIST_FIRST(&amap_list); am != NULL && !rv; am = am_next) {
- int i;
-
- LIST_INSERT_BEFORE(am, &marker_prev, am_list);
- LIST_INSERT_AFTER(am, &marker_next, am_list);
-
- if (!amap_lock_try(am)) {
- simple_unlock(&amap_list_lock);
- preempt(NULL);
- simple_lock(&amap_list_lock);
- am_next = LIST_NEXT(&marker_prev, am_list);
- if (am_next == &marker_next) {
- am_next = LIST_NEXT(am_next, am_list);
- } else {
- KASSERT(LIST_NEXT(am_next, am_list) ==
- &marker_next);
- }
- LIST_REMOVE(&marker_prev, am_list);
- LIST_REMOVE(&marker_next, am_list);
- continue;
- }
-
- simple_unlock(&amap_list_lock);
-
- if (am->am_nused <= 0) {
- amap_unlock(am);
- goto next;
- }
-
- for (i = 0; i < am->am_nused; i++) {
- int slot;
- int swslot;
- struct vm_anon *anon;
-
- slot = am->am_slots[i];
- anon = am->am_anon[slot];
- simple_lock(&anon->an_lock);
-
- swslot = anon->an_swslot;
- if (swslot < startslot || endslot <= swslot) {
- simple_unlock(&anon->an_lock);
- continue;
- }
-
- am->am_flags |= AMAP_SWAPOFF;
- amap_unlock(am);
-
- rv = uvm_anon_pagein(anon);
-
- amap_lock(am);
- am->am_flags &= ~AMAP_SWAPOFF;
- if (amap_refs(am) == 0) {
- amap_wipeout(am);
- am = NULL;
- break;
- }
- if (rv) {
- break;
- }
- i = 0;
- }
-
- if (am) {
- amap_unlock(am);
- }
-
-next:
- simple_lock(&amap_list_lock);
- KASSERT(LIST_NEXT(&marker_prev, am_list) == &marker_next ||
- LIST_NEXT(LIST_NEXT(&marker_prev, am_list), am_list) ==
- &marker_next);
- am_next = LIST_NEXT(&marker_next, am_list);
- LIST_REMOVE(&marker_prev, am_list);
- LIST_REMOVE(&marker_next, am_list);
- }
- simple_unlock(&amap_list_lock);
- PRELE(p);
-
- return rv;
-}
diff --git a/sys/uvm/uvm_amap.h b/sys/uvm/uvm_amap.h
index a2750c67a44..88e45beb01a 100644
--- a/sys/uvm/uvm_amap.h
+++ b/sys/uvm/uvm_amap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_amap.h,v 1.14 2006/06/21 16:20:05 mickey Exp $ */
+/* $OpenBSD: uvm_amap.h,v 1.15 2006/07/13 22:51:26 deraadt Exp $ */
/* $NetBSD: uvm_amap.h,v 1.14 2001/02/18 21:19:08 chs Exp $ */
/*
@@ -120,7 +120,6 @@ AMAP_INLINE /* drop reference to an amap */
void amap_unref(struct vm_amap *, vaddr_t, vsize_t, int);
/* remove all anons from amap */
void amap_wipeout(struct vm_amap *);
-boolean_t amap_swap_off(int, int);
/*
* amap flag values
@@ -128,7 +127,6 @@ boolean_t amap_swap_off(int, int);
#define AMAP_SHARED 0x1 /* amap is shared */
#define AMAP_REFALL 0x2 /* amap_ref: reference entire amap */
-#define AMAP_SWAPOFF 0x4 /* amap_swap_off() is in progress */
#endif /* _KERNEL */
@@ -164,7 +162,6 @@ struct vm_amap {
#ifdef UVM_AMAP_PPREF
int *am_ppref; /* per page reference count (if !NULL) */
#endif
- LIST_ENTRY(vm_amap) am_list;
};
/*
@@ -253,7 +250,6 @@ struct vm_amap {
#define amap_flags(AMAP) ((AMAP)->am_flags)
#define amap_lock(AMAP) simple_lock(&(AMAP)->am_l)
-#define amap_lock_try(AMAP) simple_lock_try(&(AMAP)->am_l)
#define amap_refs(AMAP) ((AMAP)->am_ref)
#define amap_unlock(AMAP) simple_unlock(&(AMAP)->am_l)
diff --git a/sys/uvm/uvm_amap_i.h b/sys/uvm/uvm_amap_i.h
index 68cdc428b62..a290527b378 100644
--- a/sys/uvm/uvm_amap_i.h
+++ b/sys/uvm/uvm_amap_i.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_amap_i.h,v 1.13 2006/06/21 16:20:05 mickey Exp $ */
+/* $OpenBSD: uvm_amap_i.h,v 1.14 2006/07/13 22:51:26 deraadt Exp $ */
/* $NetBSD: uvm_amap_i.h,v 1.15 2000/11/25 06:27:59 chs Exp $ */
/*
@@ -135,9 +135,9 @@ amap_add(aref, offset, anon, replace)
if (amap->am_anon[slot] == NULL)
panic("amap_add: replacing null anon");
- if (amap->am_anon[slot]->an_page != NULL &&
+ if (amap->am_anon[slot]->u.an_page != NULL &&
(amap->am_flags & AMAP_SHARED) != 0) {
- pmap_page_protect(amap->am_anon[slot]->an_page,
+ pmap_page_protect(amap->am_anon[slot]->u.an_page,
VM_PROT_NONE);
/*
* XXX: suppose page is supposed to be wired somewhere?
@@ -255,7 +255,7 @@ amap_unref(amap, offset, len, all)
* if we are the last reference, free the amap and return.
*/
- if (amap->am_ref-- == 1) {
+ if (amap->am_ref == 1) {
amap_wipeout(amap); /* drops final ref and frees */
UVMHIST_LOG(maphist,"<- done (was last ref)!", 0, 0, 0, 0);
return; /* no need to unlock */
@@ -264,6 +264,8 @@ amap_unref(amap, offset, len, all)
/*
* otherwise just drop the reference count(s)
*/
+
+ amap->am_ref--;
if (amap->am_ref == 1 && (amap->am_flags & AMAP_SHARED) != 0)
amap->am_flags &= ~AMAP_SHARED; /* clear shared flag */
#ifdef UVM_AMAP_PPREF
diff --git a/sys/uvm/uvm_anon.c b/sys/uvm/uvm_anon.c
index b841a4f1e65..f24114aee9e 100644
--- a/sys/uvm/uvm_anon.c
+++ b/sys/uvm/uvm_anon.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_anon.c,v 1.22 2006/06/21 16:20:05 mickey Exp $ */
+/* $OpenBSD: uvm_anon.c,v 1.23 2006/07/13 22:51:26 deraadt Exp $ */
/* $NetBSD: uvm_anon.c,v 1.10 2000/11/25 06:27:59 chs Exp $ */
/*
@@ -47,7 +47,22 @@
#include <uvm/uvm.h>
#include <uvm/uvm_swap.h>
-struct pool uvm_anon_pool;
+/*
+ * anonblock_list: global list of anon blocks,
+ * locked by swap_syscall_lock (since we never remove
+ * anything from this list and we only add to it via swapctl(2)).
+ */
+
+struct uvm_anonblock {
+ LIST_ENTRY(uvm_anonblock) list;
+ int count;
+ struct vm_anon *anons;
+};
+static LIST_HEAD(anonlist, uvm_anonblock) anonblock_list;
+
+
+static boolean_t anon_pagein(struct vm_anon *);
+
/*
* allocate anons
@@ -55,28 +70,104 @@ struct pool uvm_anon_pool;
void
uvm_anon_init()
{
- pool_init(&uvm_anon_pool, sizeof(struct vm_anon), 0, 0, 0, "anonpl",
- &pool_allocator_nointr);
- pool_sethiwat(&uvm_anon_pool, uvmexp.free / 16);
+ int nanon = uvmexp.free - (uvmexp.free / 16); /* XXXCDC ??? */
+
+ simple_lock_init(&uvm.afreelock);
+ LIST_INIT(&anonblock_list);
+
+ /*
+ * Allocate the initial anons.
+ */
+ uvm_anon_add(nanon);
}
/*
- * allocate an anon
+ * add some more anons to the free pool. called when we add
+ * more swap space.
+ *
+ * => swap_syscall_lock should be held (protects anonblock_list).
*/
-struct vm_anon *
-uvm_analloc()
+int
+uvm_anon_add(count)
+ int count;
{
+ struct uvm_anonblock *anonblock;
struct vm_anon *anon;
+ int lcv, needed;
+
+ simple_lock(&uvm.afreelock);
+ uvmexp.nanonneeded += count;
+ needed = uvmexp.nanonneeded - uvmexp.nanon;
+ simple_unlock(&uvm.afreelock);
- anon = pool_get(&uvm_anon_pool, PR_NOWAIT);
- if (anon) {
+ if (needed <= 0) {
+ return 0;
+ }
+
+ anon = (void *)uvm_km_alloc(kernel_map, sizeof(*anon) * needed);
+
+ /* XXX Should wait for VM to free up. */
+ if (anon == NULL) {
+ printf("uvm_anon_add: can not allocate %d anons\n", needed);
+ panic("uvm_anon_add");
+ }
+
+ MALLOC(anonblock, void *, sizeof(*anonblock), M_UVMAMAP, M_WAITOK);
+
+ anonblock->count = needed;
+ anonblock->anons = anon;
+ LIST_INSERT_HEAD(&anonblock_list, anonblock, list);
+ memset(anon, 0, sizeof(*anon) * needed);
+
+ simple_lock(&uvm.afreelock);
+ uvmexp.nanon += needed;
+ uvmexp.nfreeanon += needed;
+ for (lcv = 0; lcv < needed; lcv++) {
simple_lock_init(&anon->an_lock);
- anon->an_ref = 1;
- anon->an_page = NULL;
- anon->an_swslot = 0;
- simple_lock(&anon->an_lock);
+ anon[lcv].u.an_nxt = uvm.afree;
+ uvm.afree = &anon[lcv];
+ simple_lock_init(&uvm.afree->an_lock);
}
- return(anon);
+ simple_unlock(&uvm.afreelock);
+ return 0;
+}
+
+/*
+ * remove anons from the free pool.
+ */
+void
+uvm_anon_remove(count)
+ int count;
+{
+ /*
+ * we never actually free any anons, to avoid allocation overhead.
+ * XXX someday we might want to try to free anons.
+ */
+
+ simple_lock(&uvm.afreelock);
+ uvmexp.nanonneeded -= count;
+ simple_unlock(&uvm.afreelock);
+}
+
+/*
+ * allocate an anon
+ */
+struct vm_anon *
+uvm_analloc()
+{
+ struct vm_anon *a;
+
+ simple_lock(&uvm.afreelock);
+ a = uvm.afree;
+ if (a) {
+ uvm.afree = a->u.an_nxt;
+ uvmexp.nfreeanon--;
+ a->an_ref = 1;
+ a->an_swslot = 0;
+ a->u.an_page = NULL; /* so we can free quickly */
+ }
+ simple_unlock(&uvm.afreelock);
+ return(a);
}
/*
@@ -99,7 +190,7 @@ uvm_anfree(anon)
* get page
*/
- pg = anon->an_page;
+ pg = anon->u.an_page;
/*
* if there is a resident page and it is loaned, then anon may not
@@ -166,10 +257,11 @@ uvm_anfree(anon)
* now that we've stripped the data areas from the anon, free the anon
* itself!
*/
- KASSERT(anon->an_page == NULL);
- KASSERT(anon->an_swslot == 0);
-
- pool_put(&uvm_anon_pool, anon);
+ simple_lock(&uvm.afreelock);
+ anon->u.an_nxt = uvm.afree;
+ uvm.afree = anon;
+ uvmexp.nfreeanon++;
+ simple_unlock(&uvm.afreelock);
UVMHIST_LOG(maphist,"<- done!",0,0,0,0);
}
@@ -192,7 +284,7 @@ uvm_anon_dropswap(anon)
uvm_swap_free(anon->an_swslot, 1);
anon->an_swslot = 0;
- if (anon->an_page == NULL) {
+ if (anon->u.an_page == NULL) {
/* this page is no longer only in swap. */
simple_lock(&uvm.swap_data_lock);
uvmexp.swpgonly--;
@@ -233,7 +325,7 @@ uvm_anon_lockloanpg(anon)
* not produce an incorrect result.
*/
- while (((pg = anon->an_page) != NULL) && pg->loan_count != 0) {
+ while (((pg = anon->u.an_page) != NULL) && pg->loan_count != 0) {
/*
* quickly check to see if the page has an object before
@@ -295,6 +387,73 @@ uvm_anon_lockloanpg(anon)
return(pg);
}
+
+
+/*
+ * page in every anon that is paged out to a range of swslots.
+ *
+ * swap_syscall_lock should be held (protects anonblock_list).
+ */
+
+boolean_t
+anon_swap_off(startslot, endslot)
+ int startslot, endslot;
+{
+ struct uvm_anonblock *anonblock;
+
+ for (anonblock = LIST_FIRST(&anonblock_list);
+ anonblock != NULL;
+ anonblock = LIST_NEXT(anonblock, list)) {
+ int i;
+
+ /*
+ * loop thru all the anons in the anonblock,
+ * paging in where needed.
+ */
+
+ for (i = 0; i < anonblock->count; i++) {
+ struct vm_anon *anon = &anonblock->anons[i];
+ int slot;
+
+ /*
+ * lock anon to work on it.
+ */
+
+ simple_lock(&anon->an_lock);
+
+ /*
+ * is this anon's swap slot in range?
+ */
+
+ slot = anon->an_swslot;
+ if (slot >= startslot && slot < endslot) {
+ boolean_t rv;
+
+ /*
+ * yup, page it in.
+ */
+
+ /* locked: anon */
+ rv = anon_pagein(anon);
+ /* unlocked: anon */
+
+ if (rv) {
+ return rv;
+ }
+ } else {
+
+ /*
+ * nope, unlock and proceed.
+ */
+
+ simple_unlock(&anon->an_lock);
+ }
+ }
+ }
+ return FALSE;
+}
+
+
/*
* fetch an anon's page.
*
@@ -302,8 +461,8 @@ uvm_anon_lockloanpg(anon)
* => returns TRUE if pagein was aborted due to lack of memory.
*/
-boolean_t
-uvm_anon_pagein(anon)
+static boolean_t
+anon_pagein(anon)
struct vm_anon *anon;
{
struct vm_page *pg;
@@ -345,7 +504,7 @@ uvm_anon_pagein(anon)
* mark it as dirty, clear its swslot and un-busy it.
*/
- pg = anon->an_page;
+ pg = anon->u.an_page;
uobj = pg->uobject;
uvm_swap_free(anon->an_swslot, 1);
anon->an_swslot = 0;
diff --git a/sys/uvm/uvm_anon.h b/sys/uvm/uvm_anon.h
index 2ecb8e42c3a..551802048bf 100644
--- a/sys/uvm/uvm_anon.h
+++ b/sys/uvm/uvm_anon.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_anon.h,v 1.12 2006/06/21 16:20:05 mickey Exp $ */
+/* $OpenBSD: uvm_anon.h,v 1.13 2006/07/13 22:51:26 deraadt Exp $ */
/* $NetBSD: uvm_anon.h,v 1.13 2000/12/27 09:17:04 chs Exp $ */
/*
@@ -49,19 +49,24 @@
*/
struct vm_anon {
- struct vm_page *an_page; /* if in RAM [an_lock] */
int an_ref; /* reference count [an_lock] */
+ simple_lock_data_t an_lock; /* lock for an_ref */
+ union {
+ struct vm_anon *an_nxt; /* if on free list [afreelock] */
+ struct vm_page *an_page;/* if in RAM [an_lock] */
+ } u;
int an_swslot; /* drum swap slot # (if != 0)
[an_lock. also, it is ok to read
an_swslot if we hold an_page PG_BUSY] */
- simple_lock_data_t an_lock; /* lock for an_ref */
};
/*
- * for active vm_anon's the data can be in one of the following state:
- * [1] in a vm_page with no backing store allocated yet, [2] in a vm_page
- * with backing store allocated, or [3] paged out to backing store
- * (no vm_page).
+ * a pool of vm_anon data structures is allocated and put on a global
+ * free list at boot time. vm_anon's on the free list use "an_nxt" as
+ * a pointer to the next item on the free list. for active vm_anon's
+ * the data can be in one of the following state: [1] in a vm_page
+ * with no backing store allocated yet, [2] in a vm_page with backing
+ * store allocated, or [3] paged out to backing store (no vm_page).
*
* for pageout in case [2]: if the page has been modified then we must
* flush it out to backing store, otherwise we can just dump the
@@ -97,9 +102,11 @@ struct vm_aref {
struct vm_anon *uvm_analloc(void);
void uvm_anfree(struct vm_anon *);
void uvm_anon_init(void);
+int uvm_anon_add(int);
+void uvm_anon_remove(int);
struct vm_page *uvm_anon_lockloanpg(struct vm_anon *);
void uvm_anon_dropswap(struct vm_anon *);
-boolean_t uvm_anon_pagein(struct vm_anon *);
+boolean_t anon_swap_off(int, int);
#endif /* _KERNEL */
#endif /* _UVM_UVM_ANON_H_ */
diff --git a/sys/uvm/uvm_fault.c b/sys/uvm/uvm_fault.c
index 1401962e34f..f40c692c50d 100644
--- a/sys/uvm/uvm_fault.c
+++ b/sys/uvm/uvm_fault.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_fault.c,v 1.38 2006/06/21 16:20:05 mickey Exp $ */
+/* $OpenBSD: uvm_fault.c,v 1.39 2006/07/13 22:51:26 deraadt Exp $ */
/* $NetBSD: uvm_fault.c,v 1.51 2000/08/06 00:22:53 thorpej Exp $ */
/*
@@ -200,7 +200,7 @@ uvmfault_anonflush(anons, n)
if (anons[lcv] == NULL)
continue;
simple_lock(&anons[lcv]->an_lock);
- pg = anons[lcv]->an_page;
+ pg = anons[lcv]->u.an_page;
if (pg && (pg->flags & PG_BUSY) == 0 && pg->loan_count == 0) {
uvm_lock_pageq();
if (pg->wire_count == 0) {
@@ -306,7 +306,7 @@ uvmfault_anonget(ufi, amap, anon)
result = 0; /* XXX shut up gcc */
uvmexp.fltanget++;
/* bump rusage counters */
- if (anon->an_page)
+ if (anon->u.an_page)
curproc->p_addr->u_stats.p_ru.ru_minflt++;
else
curproc->p_addr->u_stats.p_ru.ru_majflt++;
@@ -318,7 +318,7 @@ uvmfault_anonget(ufi, amap, anon)
while (1) {
we_own = FALSE; /* TRUE if we set PG_BUSY on a page */
- pg = anon->an_page;
+ pg = anon->u.an_page;
/*
* if there is a resident page and it is loaned, then anon
@@ -464,7 +464,7 @@ uvmfault_anonget(ufi, amap, anon)
KASSERT(result != VM_PAGER_PEND);
/* remove page from anon */
- anon->an_page = NULL;
+ anon->u.an_page = NULL;
/*
* remove the swap slot from the anon
@@ -838,14 +838,14 @@ ReFault:
anon = anons[lcv];
simple_lock(&anon->an_lock);
/* ignore loaned pages */
- if (anon->an_page && anon->an_page->loan_count == 0 &&
- (anon->an_page->flags & (PG_RELEASED|PG_BUSY)) == 0) {
+ if (anon->u.an_page && anon->u.an_page->loan_count == 0 &&
+ (anon->u.an_page->flags & (PG_RELEASED|PG_BUSY)) == 0) {
uvm_lock_pageq();
- uvm_pageactivate(anon->an_page); /* reactivate */
+ uvm_pageactivate(anon->u.an_page); /* reactivate */
uvm_unlock_pageq();
UVMHIST_LOG(maphist,
" MAPPING: n anon: pm=0x%x, va=0x%x, pg=0x%x",
- ufi.orig_map->pmap, currva, anon->an_page, 0);
+ ufi.orig_map->pmap, currva, anon->u.an_page, 0);
uvmexp.fltnamap++;
/*
@@ -855,7 +855,7 @@ ReFault:
*/
(void) pmap_enter(ufi.orig_map->pmap, currva,
- VM_PAGE_TO_PHYS(anon->an_page),
+ VM_PAGE_TO_PHYS(anon->u.an_page),
(anon->an_ref > 1) ? (enter_prot & ~VM_PROT_WRITE) :
enter_prot,
PMAP_CANFAIL |
@@ -1089,7 +1089,7 @@ ReFault:
* uobj is non null if the page is on loan from an object (i.e. uobj)
*/
- uobj = anon->an_page->uobject; /* locked by anonget if !NULL */
+ uobj = anon->u.an_page->uobject; /* locked by anonget if !NULL */
/* locked: maps(read), amap, anon, uobj(if one) */
@@ -1097,7 +1097,7 @@ ReFault:
* special handling for loaned pages
*/
- if (anon->an_page->loan_count) {
+ if (anon->u.an_page->loan_count) {
if ((access_type & VM_PROT_WRITE) == 0) {
@@ -1139,18 +1139,18 @@ ReFault:
* (if any)
*/
/* copy old -> new */
- uvm_pagecopy(anon->an_page, pg);
+ uvm_pagecopy(anon->u.an_page, pg);
/* force reload */
- pmap_page_protect(anon->an_page,
+ pmap_page_protect(anon->u.an_page,
VM_PROT_NONE);
uvm_lock_pageq(); /* KILL loan */
if (uobj)
/* if we were loaning */
- anon->an_page->loan_count--;
- anon->an_page->uanon = NULL;
+ anon->u.an_page->loan_count--;
+ anon->u.an_page->uanon = NULL;
/* in case we owned */
- anon->an_page->pqflags &= ~PQ_ANON;
+ anon->u.an_page->pqflags &= ~PQ_ANON;
uvm_pageactivate(pg);
uvm_unlock_pageq();
if (uobj) {
@@ -1159,7 +1159,7 @@ ReFault:
}
/* install new page in anon */
- anon->an_page = pg;
+ anon->u.an_page = pg;
pg->uanon = anon;
pg->pqflags |= PQ_ANON;
pg->flags &= ~(PG_BUSY|PG_FAKE);
@@ -1213,7 +1213,7 @@ ReFault:
/* got all resources, replace anon with nanon */
- uvm_pagecopy(oanon->an_page, pg); /* pg now !PG_CLEAN */
+ uvm_pagecopy(oanon->u.an_page, pg); /* pg now !PG_CLEAN */
pg->flags &= ~(PG_BUSY|PG_FAKE); /* un-busy! new page */
UVM_PAGE_OWN(pg, NULL);
amap_add(&ufi.entry->aref, ufi.orig_rvaddr - ufi.entry->start,
@@ -1232,7 +1232,7 @@ ReFault:
uvmexp.flt_anon++;
oanon = anon; /* old, locked anon is same as anon */
- pg = anon->an_page;
+ pg = anon->u.an_page;
if (anon->an_ref > 1) /* disallow writes to ref > 1 anons */
enter_prot = enter_prot & ~VM_PROT_WRITE;
diff --git a/sys/uvm/uvm_init.c b/sys/uvm/uvm_init.c
index 3dd313b8da1..92f14bcccbc 100644
--- a/sys/uvm/uvm_init.c
+++ b/sys/uvm/uvm_init.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_init.c,v 1.12 2006/06/21 16:20:05 mickey Exp $ */
+/* $OpenBSD: uvm_init.c,v 1.13 2006/07/13 22:51:26 deraadt Exp $ */
/* $NetBSD: uvm_init.c,v 1.14 2000/06/27 17:29:23 mrg Exp $ */
/*
@@ -134,10 +134,11 @@ uvm_init()
uvm_pager_init();
/*
- * step 8: init anonymous memory system
+ * step 8: init anonymous memory systems (both amap and anons)
*/
amap_init(); /* init amap module */
+ uvm_anon_init(); /* allocate initial anons */
/*
* the VM system is now up! now that malloc is up we can resize the
@@ -152,7 +153,8 @@ uvm_init()
uvm_km_page_init();
/*
- * init anonymous memory systems
+ * done!
*/
- uvm_anon_init();
+
+ return;
}
diff --git a/sys/uvm/uvm_loan.c b/sys/uvm/uvm_loan.c
index cadc0fdd151..16fa98729ba 100644
--- a/sys/uvm/uvm_loan.c
+++ b/sys/uvm/uvm_loan.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_loan.c,v 1.21 2006/06/21 16:20:05 mickey Exp $ */
+/* $OpenBSD: uvm_loan.c,v 1.22 2006/07/13 22:51:26 deraadt Exp $ */
/* $NetBSD: uvm_loan.c,v 1.22 2000/06/27 17:29:25 mrg Exp $ */
/*
@@ -334,7 +334,7 @@ uvm_loananon(ufi, output, flags, anon)
*/
if (flags & UVM_LOAN_TOANON) {
simple_lock(&anon->an_lock);
- pg = anon->an_page;
+ pg = anon->u.an_page;
if (pg && (pg->pqflags & PQ_ANON) != 0 && anon->an_ref == 1)
/* read protect it */
pmap_page_protect(pg, VM_PROT_READ);
@@ -379,7 +379,7 @@ uvm_loananon(ufi, output, flags, anon)
* we have the page and its owner locked: do the loan now.
*/
- pg = anon->an_page;
+ pg = anon->u.an_page;
uvm_lock_pageq();
if (pg->loan_count == 0)
pmap_page_protect(pg, VM_PROT_READ);
@@ -584,7 +584,7 @@ uvm_loanuobj(ufi, output, flags, va)
uvmfault_unlockall(ufi, amap, uobj, NULL);
return(-1);
}
- anon->an_page = pg;
+ anon->u.an_page = pg;
pg->uanon = anon;
uvm_lock_pageq();
if (pg->loan_count == 0)
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index a7a5230d9de..bb7bedfda20 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map.c,v 1.79 2006/06/21 16:20:05 mickey Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.80 2006/07/13 22:51:26 deraadt Exp $ */
/* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
/*
@@ -2997,7 +2997,7 @@ uvm_map_clean(map, start, end, flags)
simple_lock(&anon->an_lock);
- pg = anon->an_page;
+ pg = anon->u.an_page;
if (pg == NULL) {
simple_unlock(&anon->an_lock);
continue;
@@ -3811,9 +3811,9 @@ uvm_page_printit(pg, full, pr)
/* cross-verify object/anon */
if ((pg->pqflags & PQ_FREE) == 0) {
if (pg->pqflags & PQ_ANON) {
- if (pg->uanon == NULL || pg->uanon->an_page != pg)
+ if (pg->uanon == NULL || pg->uanon->u.an_page != pg)
(*pr)(" >>> ANON DOES NOT POINT HERE <<< (%p)\n",
- (pg->uanon) ? pg->uanon->an_page : NULL);
+ (pg->uanon) ? pg->uanon->u.an_page : NULL);
else
(*pr)(" anon backpointer is OK\n");
} else {
diff --git a/sys/uvm/uvm_mmap.c b/sys/uvm/uvm_mmap.c
index 1728c3b5409..ce094e8f5ea 100644
--- a/sys/uvm/uvm_mmap.c
+++ b/sys/uvm/uvm_mmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_mmap.c,v 1.62 2006/06/29 17:02:16 mickey Exp $ */
+/* $OpenBSD: uvm_mmap.c,v 1.63 2006/07/13 22:51:26 deraadt Exp $ */
/* $NetBSD: uvm_mmap.c,v 1.49 2001/02/18 21:19:08 chs Exp $ */
/*
@@ -316,7 +316,7 @@ sys_mincore(p, v, retval)
anon = amap_lookup(&entry->aref,
start - entry->start);
/* Don't need to lock anon here. */
- if (anon != NULL && anon->an_page != NULL) {
+ if (anon != NULL && anon->u.an_page != NULL) {
/*
* Anon has the page for this entry
* offset.
diff --git a/sys/uvm/uvm_page.c b/sys/uvm/uvm_page.c
index 974ec929a2b..c38afca8159 100644
--- a/sys/uvm/uvm_page.c
+++ b/sys/uvm/uvm_page.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_page.c,v 1.53 2006/06/21 16:20:05 mickey Exp $ */
+/* $OpenBSD: uvm_page.c,v 1.54 2006/07/13 22:51:26 deraadt Exp $ */
/* $NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $ */
/*
@@ -1042,7 +1042,7 @@ uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
pg->version++;
if (anon) {
- anon->an_page = pg;
+ anon->u.an_page = pg;
pg->pqflags = PQ_ANON;
#ifdef UBC
uvm_pgcnt_anon++;
@@ -1171,7 +1171,7 @@ uvm_pagefree(pg)
if (saved_loan_count)
return;
- } else if (saved_loan_count && pg->uanon) {
+ } else if (saved_loan_count && (pg->pqflags & PQ_ANON)) {
/*
* if our page is owned by an anon and is loaned out to the
@@ -1182,7 +1182,6 @@ uvm_pagefree(pg)
*/
pg->pqflags &= ~PQ_ANON;
- pg->uanon->an_page = NULL;
pg->uanon = NULL;
return;
}
@@ -1214,12 +1213,11 @@ uvm_pagefree(pg)
pg->wire_count = 0;
uvmexp.wired--;
}
- if (pg->uanon) {
- pg->uanon->an_page = NULL;
#ifdef UBC
+ if (pg->uanon) {
uvm_pgcnt_anon--;
-#endif
}
+#endif
/*
* and put on free queue
diff --git a/sys/uvm/uvm_pdaemon.c b/sys/uvm/uvm_pdaemon.c
index 3eeb36796a9..1f6b60edd13 100644
--- a/sys/uvm/uvm_pdaemon.c
+++ b/sys/uvm/uvm_pdaemon.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_pdaemon.c,v 1.28 2006/06/21 16:20:05 mickey Exp $ */
+/* $OpenBSD: uvm_pdaemon.c,v 1.29 2006/07/13 22:51:26 deraadt Exp $ */
/* $NetBSD: uvm_pdaemon.c,v 1.23 2000/08/20 10:24:14 bjh21 Exp $ */
/*
@@ -544,7 +544,7 @@ uvmpd_scan_inactive(pglst)
KASSERT(anon->an_swslot != 0);
/* remove from object */
- anon->an_page = NULL;
+ anon->u.an_page = NULL;
simple_unlock(&anon->an_lock);
} else {
/* pagefree has already removed the
@@ -857,7 +857,7 @@ uvmpd_scan_inactive(pglst)
if (p->flags & PG_RELEASED) {
if (anon) {
/* remove page so we can get nextpg */
- anon->an_page = NULL;
+ anon->u.an_page = NULL;
simple_unlock(&anon->an_lock);
uvm_anfree(anon); /* kills anon */
diff --git a/sys/uvm/uvm_swap.c b/sys/uvm/uvm_swap.c
index dbb1572e18b..71b1a7b9481 100644
--- a/sys/uvm/uvm_swap.c
+++ b/sys/uvm/uvm_swap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_swap.c,v 1.62 2006/06/21 16:20:05 mickey Exp $ */
+/* $OpenBSD: uvm_swap.c,v 1.63 2006/07/13 22:51:26 deraadt Exp $ */
/* $NetBSD: uvm_swap.c,v 1.40 2000/11/17 11:39:39 mrg Exp $ */
/*
@@ -1061,6 +1061,11 @@ swap_on(p, sdp)
*/
vref(vp);
+ /*
+ * add anons to reflect the new swap space
+ */
+ uvm_anon_add(size);
+
#ifdef UVM_SWAP_ENCRYPT
if (uvm_doswapencrypt)
uvm_swap_initcrypt(sdp, npages);
@@ -1096,7 +1101,6 @@ swap_off(p, sdp)
struct proc *p;
struct swapdev *sdp;
{
- int error;
UVMHIST_FUNC("swap_off"); UVMHIST_CALLED(pdhist);
UVMHIST_LOG(pdhist, " dev=%x", sdp->swd_dev,0,0,0);
@@ -1113,20 +1117,15 @@ swap_off(p, sdp)
if (uao_swap_off(sdp->swd_drumoffset,
sdp->swd_drumoffset + sdp->swd_drumsize) ||
- amap_swap_off(sdp->swd_drumoffset,
+ anon_swap_off(sdp->swd_drumoffset,
sdp->swd_drumoffset + sdp->swd_drumsize)) {
- error = ENOMEM;
- } else if (sdp->swd_npginuse > sdp->swd_npgbad) {
- error = EBUSY;
- }
-
- if (error) {
simple_lock(&uvm.swap_data_lock);
sdp->swd_flags |= SWF_ENABLE;
simple_unlock(&uvm.swap_data_lock);
- return (error);
+ return ENOMEM;
}
+ KASSERT(sdp->swd_npginuse == sdp->swd_npgbad);
/*
* done with the vnode and saved creds.
@@ -1141,6 +1140,9 @@ swap_off(p, sdp)
(void) VOP_CLOSE(sdp->swd_vp, FREAD|FWRITE, p->p_ucred, p);
}
+ /* remove anons from the system */
+ uvm_anon_remove(sdp->swd_npages);
+
simple_lock(&uvm.swap_data_lock);
uvmexp.swpages -= sdp->swd_npages;