author     Artur Grabowski <art@cvs.openbsd.org>   2001-11-11 01:16:57 +0000
committer  Artur Grabowski <art@cvs.openbsd.org>   2001-11-11 01:16:57 +0000
commit     31841b464a8e7abd8403d541b0496303b288f9e8 (patch)
tree       c5fbafd0a9c0cda0c556bc03fe98bdcd6d087e5c /sys/uvm
parent     affcd7b9c20647deae01d4bd097aec4b5ec81a96 (diff)
Sync in more stuff from NetBSD.
Diffstat (limited to 'sys/uvm')
-rw-r--r--  sys/uvm/uvm_amap.c  |  29
-rw-r--r--  sys/uvm/uvm_amap.h  |  15
-rw-r--r--  sys/uvm/uvm_anon.c  |  47
-rw-r--r--  sys/uvm/uvm_anon.h  |   6
-rw-r--r--  sys/uvm/uvm_aobj.c  |  45
-rw-r--r--  sys/uvm/uvm_km.c    |  22
-rw-r--r--  sys/uvm/uvm_loan.c  |  19
7 files changed, 90 insertions(+), 93 deletions(-)
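
The recurring change in this sync is the replacement of open-coded "#ifdef DIAGNOSTIC ... panic()" blocks with single KASSERT() checks, plus LOCK_ASSERT() annotations at lock-protocol boundaries. A minimal user-space sketch of the before/after shape of that conversion follows; assert() stands in for the kernel's KASSERT(), and struct amap_stub is a stub, not the real vm_amap.

	#include <assert.h>
	#include <stdlib.h>

	struct amap_stub {
		int am_ref;
		int am_nused;
		void *am_slots;
		void *am_bckptr;
	};

	/* old style: the sanity check is open-coded with a hand-written panic */
	static void
	amap_free_diag(struct amap_stub *amap)
	{
	#ifdef DIAGNOSTIC
		if (amap->am_ref || amap->am_nused)
			abort();		/* panic("amap_free") in-kernel */
	#endif
		free(amap->am_slots);
		free(amap->am_bckptr);
		free(amap);
	}

	/* new style: the same check expressed as a single assertion */
	static void
	amap_free_kassert(struct amap_stub *amap)
	{
		assert(amap->am_ref == 0 && amap->am_nused == 0);
		free(amap->am_slots);
		free(amap->am_bckptr);
		free(amap);
	}

	int
	main(void)
	{
		struct amap_stub *a = calloc(1, sizeof(*a));
		struct amap_stub *b = calloc(1, sizeof(*b));

		assert(a != NULL && b != NULL);
		amap_free_diag(a);
		amap_free_kassert(b);
		return 0;
	}

In the kernel both forms are compiled out without DIAGNOSTIC; the KASSERT form is shorter and reports the failed expression rather than a hand-written message.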
diff --git a/sys/uvm/uvm_amap.c b/sys/uvm/uvm_amap.c
index b0e1e040488..42350cf9e0e 100644
--- a/sys/uvm/uvm_amap.c
+++ b/sys/uvm/uvm_amap.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_amap.c,v 1.14 2001/11/07 02:55:50 art Exp $ */
-/* $NetBSD: uvm_amap.c,v 1.27 2000/11/25 06:27:59 chs Exp $ */
+/* $OpenBSD: uvm_amap.c,v 1.15 2001/11/11 01:16:56 art Exp $ */
+/* $NetBSD: uvm_amap.c,v 1.30 2001/02/18 21:19:09 chs Exp $ */
/*
*
@@ -254,10 +254,8 @@ amap_free(amap)
{
UVMHIST_FUNC("amap_free"); UVMHIST_CALLED(maphist);
-#ifdef DIAGNOSTIC
- if (amap->am_ref || amap->am_nused)
- panic("amap_free");
-#endif
+ KASSERT(amap->am_ref == 0 && amap->am_nused == 0);
+ LOCK_ASSERT(simple_lock_held(&amap->am_l));
free(amap->am_slots, M_UVMAMAP);
free(amap->am_bckptr, M_UVMAMAP);
@@ -381,11 +379,7 @@ amap_extend(entry, addsize)
newover = malloc(slotneed * sizeof(struct vm_anon *),
M_UVMAMAP, M_WAITOK);
amap_lock(amap); /* re-lock! */
-
-#ifdef DIAGNOSTIC
- if (amap->am_maxslot >= slotneed)
- panic("amap_extend: amap changed during malloc");
-#endif
+ KASSERT(amap->am_maxslot < slotneed);
/*
* now copy everything over to new malloc'd areas...
@@ -464,6 +458,8 @@ amap_share_protect(entry, prot)
struct vm_amap *amap = entry->aref.ar_amap;
int slots, lcv, slot, stop;
+ LOCK_ASSERT(simple_lock_held(&amap->am_l));
+
AMAP_B2SLOT(slots, (entry->end - entry->start));
stop = entry->aref.ar_pageoff + slots;
@@ -507,6 +503,8 @@ amap_wipeout(amap)
UVMHIST_FUNC("amap_wipeout"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist,"(amap=0x%x)", amap, 0,0,0);
+ LOCK_ASSERT(simple_lock_held(&amap->am_l));
+
for (lcv = 0 ; lcv < amap->am_nused ; lcv++) {
int refs;
@@ -793,9 +791,10 @@ ReStart:
* ok, time to do a copy-on-write to a new anon
*/
nanon = uvm_analloc();
- if (nanon)
+ if (nanon) {
+ /* nanon is locked! */
npg = uvm_pagealloc(NULL, 0, nanon, 0);
- else
+ } else
npg = NULL; /* XXX: quiet gcc warning */
if (nanon == NULL || npg == NULL) {
@@ -805,7 +804,8 @@ ReStart:
* we can't ...
*/
if (nanon) {
- simple_lock(&nanon->an_lock);
+ nanon->an_ref--;
+ simple_unlock(&nanon->an_lock);
uvm_anfree(nanon);
}
simple_unlock(&anon->an_lock);
@@ -832,6 +832,7 @@ ReStart:
uvm_lock_pageq();
uvm_pageactivate(npg);
uvm_unlock_pageq();
+ simple_unlock(&nanon->an_lock);
}
simple_unlock(&anon->an_lock);
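
With this change uvm_analloc() returns the new anon locked, so the out-of-memory path in amap_cow_now() must drop the reference and unlock before calling uvm_anfree(), and the success path unlocks only after the new page has been activated. A user-space sketch of that protocol, with pthread_mutex_t standing in for simple_lock and stub types rather than the kernel structures:

	#include <pthread.h>
	#include <stdlib.h>

	struct anon_stub {
		pthread_mutex_t an_lock;
		int an_ref;
	};

	static struct anon_stub *
	analloc_locked(void)
	{
		struct anon_stub *a = malloc(sizeof(*a));

		if (a == NULL)
			return NULL;
		pthread_mutex_init(&a->an_lock, NULL);
		a->an_ref = 1;
		pthread_mutex_lock(&a->an_lock);	/* returned locked! */
		return a;
	}

	static void
	anfree(struct anon_stub *a)
	{
		/* caller has already dropped its reference and the lock */
		pthread_mutex_destroy(&a->an_lock);
		free(a);
	}

	int
	main(void)
	{
		struct anon_stub *nanon = analloc_locked();
		int page_alloc_failed = 1;	/* pretend uvm_pagealloc() failed */

		if (nanon != NULL && page_alloc_failed) {
			/* error path mirrors amap_cow_now(): drop ref, unlock, free */
			nanon->an_ref--;
			pthread_mutex_unlock(&nanon->an_lock);
			anfree(nanon);
		}
		return 0;
	}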
diff --git a/sys/uvm/uvm_amap.h b/sys/uvm/uvm_amap.h
index 2aff5399dcf..4bdf119d927 100644
--- a/sys/uvm/uvm_amap.h
+++ b/sys/uvm/uvm_amap.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_amap.h,v 1.7 2001/11/07 02:55:50 art Exp $ */
-/* $NetBSD: uvm_amap.h,v 1.13 2000/11/25 06:27:59 chs Exp $ */
+/* $OpenBSD: uvm_amap.h,v 1.8 2001/11/11 01:16:56 art Exp $ */
+/* $NetBSD: uvm_amap.h,v 1.14 2001/02/18 21:19:08 chs Exp $ */
/*
*
@@ -248,15 +248,10 @@ struct vm_amap {
*/
/* AMAP_B2SLOT: convert byte offset to slot */
-#ifdef DIAGNOSTIC
-#define AMAP_B2SLOT(S,B) { \
- if ((B) & (PAGE_SIZE - 1)) \
- panic("AMAP_B2SLOT: invalid byte count"); \
- (S) = (B) >> PAGE_SHIFT; \
+#define AMAP_B2SLOT(S,B) { \
+ KASSERT(((B) & (PAGE_SIZE - 1)) == 0); \
+ (S) = (B) >> PAGE_SHIFT; \
}
-#else
-#define AMAP_B2SLOT(S,B) (S) = (B) >> PAGE_SHIFT
-#endif
/*
* lock/unlock/refs/flags macros
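
AMAP_B2SLOT now has one definition that always carries the page-alignment check instead of a DIAGNOSTIC-only variant and an unchecked one. A stand-alone sketch of the macro and its use; assert() replaces KASSERT and the PAGE_SHIFT value is illustrative, not taken from any particular platform's param.h.

	#include <assert.h>
	#include <stddef.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1 << PAGE_SHIFT)

	/* same shape as the kernel macro: assert alignment, then shift */
	#define AMAP_B2SLOT(S, B) {					\
		assert(((B) & (PAGE_SIZE - 1)) == 0);			\
		(S) = (B) >> PAGE_SHIFT;				\
	}

	int
	main(void)
	{
		size_t slots;

		AMAP_B2SLOT(slots, 8 * (size_t)PAGE_SIZE);	/* 8 pages -> 8 slots */
		printf("%zu slots\n", slots);
		return 0;
	}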
diff --git a/sys/uvm/uvm_anon.c b/sys/uvm/uvm_anon.c
index c474db7ffef..347867e47b8 100644
--- a/sys/uvm/uvm_anon.c
+++ b/sys/uvm/uvm_anon.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_anon.c,v 1.14 2001/11/07 02:55:50 art Exp $ */
-/* $NetBSD: uvm_anon.c,v 1.10 2000/11/25 06:27:59 chs Exp $ */
+/* $OpenBSD: uvm_anon.c,v 1.15 2001/11/11 01:16:56 art Exp $ */
+/* $NetBSD: uvm_anon.c,v 1.15 2001/02/18 21:19:08 chs Exp $ */
/*
*
@@ -87,7 +87,7 @@ uvm_anon_init()
*
* => swap_syscall_lock should be held (protects anonblock_list).
*/
-void
+int
uvm_anon_add(count)
int count;
{
@@ -101,17 +101,16 @@ uvm_anon_add(count)
simple_unlock(&uvm.afreelock);
if (needed <= 0) {
- return;
+ return 0;
}
-
- MALLOC(anonblock, void *, sizeof(*anonblock), M_UVMAMAP, M_WAITOK);
anon = (void *)uvm_km_alloc(kernel_map, sizeof(*anon) * needed);
-
- /* XXX Should wait for VM to free up. */
- if (anonblock == NULL || anon == NULL) {
- printf("uvm_anon_add: can not allocate %d anons\n", needed);
- panic("uvm_anon_add");
+ if (anon == NULL) {
+ simple_lock(&uvm.afreelock);
+ uvmexp.nanonneeded -= count;
+ simple_unlock(&uvm.afreelock);
+ return ENOMEM;
}
+ MALLOC(anonblock, void *, sizeof(*anonblock), M_UVMAMAP, M_WAITOK);
anonblock->count = needed;
anonblock->anons = anon;
@@ -128,6 +127,7 @@ uvm_anon_add(count)
simple_lock_init(&uvm.afree->an_lock);
}
simple_unlock(&uvm.afreelock);
+ return 0;
}
/*
@@ -149,6 +149,8 @@ uvm_anon_remove(count)
/*
* allocate an anon
+ *
+ * => new anon is returned locked!
*/
struct vm_anon *
uvm_analloc()
@@ -163,6 +165,8 @@ uvm_analloc()
a->an_ref = 1;
a->an_swslot = 0;
a->u.an_page = NULL; /* so we can free quickly */
+ LOCK_ASSERT(simple_lock_held(&a->an_lock) == 0);
+ simple_lock(&a->an_lock);
}
simple_unlock(&uvm.afreelock);
return(a);
@@ -184,6 +188,9 @@ uvm_anfree(anon)
UVMHIST_FUNC("uvm_anfree"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist,"(anon=0x%x)", anon, 0,0,0);
+ KASSERT(anon->an_ref == 0);
+ LOCK_ASSERT(simple_lock_held(&anon->an_lock) == 0);
+
/*
* get page
*/
@@ -273,9 +280,9 @@ uvm_anon_dropswap(anon)
struct vm_anon *anon;
{
UVMHIST_FUNC("uvm_anon_dropswap"); UVMHIST_CALLED(maphist);
- if (anon->an_swslot == 0) {
+
+ if (anon->an_swslot == 0)
return;
- }
UVMHIST_LOG(maphist,"freeing swap for anon %p, paged to swslot 0x%x",
anon, anon->an_swslot, 0, 0);
@@ -314,6 +321,8 @@ uvm_anon_lockloanpg(anon)
struct vm_page *pg;
boolean_t locked = FALSE;
+ LOCK_ASSERT(simple_lock_held(&anon->an_lock));
+
/*
* loop while we have a resident page that has a non-zero loan count.
* if we successfully get our lock, we will "break" the loop.
@@ -468,7 +477,10 @@ anon_pagein(anon)
int rv;
/* locked: anon */
+ LOCK_ASSERT(simple_lock_held(&anon->an_lock));
+
rv = uvmfault_anonget(NULL, NULL, anon);
+
/*
* if rv == VM_PAGER_OK, anon is still locked, else anon
* is unlocked
@@ -488,13 +500,6 @@ anon_pagein(anon)
*/
return FALSE;
-
- default:
-#ifdef DIAGNOSTIC
- panic("anon_pagein: uvmfault_anonget -> %d", rv);
-#else
- return FALSE;
-#endif
}
/*
@@ -513,7 +518,9 @@ anon_pagein(anon)
*/
pmap_clear_reference(pg);
+#ifndef UBC
pmap_page_protect(pg, VM_PROT_NONE);
+#endif
uvm_lock_pageq();
uvm_pagedeactivate(pg);
uvm_unlock_pageq();
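
uvm_anon_add() now reports allocation failure with ENOMEM instead of panicking. A hedged sketch of how a caller might check the new return value; uvm_anon_add_stub() and grow_anon_pool() below are hypothetical stand-ins for illustration, not functions from the tree.

	#include <errno.h>
	#include <stdio.h>

	/* stand-in for the kernel routine; the real one lives in uvm_anon.c */
	static int
	uvm_anon_add_stub(int count)
	{
		return count > 1024 ? ENOMEM : 0;	/* pretend big requests fail */
	}

	static int
	grow_anon_pool(int count)
	{
		int error = uvm_anon_add_stub(count);

		if (error) {
			/* before this diff the kernel panicked here; now the
			 * failure is reported to the caller instead */
			fprintf(stderr, "anon pool grow failed: %d\n", error);
			return error;
		}
		return 0;
	}

	int
	main(void)
	{
		grow_anon_pool(64);	/* ok */
		grow_anon_pool(4096);	/* reports ENOMEM */
		return 0;
	}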
diff --git a/sys/uvm/uvm_anon.h b/sys/uvm/uvm_anon.h
index c7a743f2d07..77173d8014a 100644
--- a/sys/uvm/uvm_anon.h
+++ b/sys/uvm/uvm_anon.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_anon.h,v 1.7 2001/07/26 19:37:13 art Exp $ */
-/* $NetBSD: uvm_anon.h,v 1.12 2000/01/11 06:57:49 chs Exp $ */
+/* $OpenBSD: uvm_anon.h,v 1.8 2001/11/11 01:16:56 art Exp $ */
+/* $NetBSD: uvm_anon.h,v 1.13 2000/12/27 09:17:04 chs Exp $ */
/*
*
@@ -102,7 +102,7 @@ struct vm_aref {
struct vm_anon *uvm_analloc __P((void));
void uvm_anfree __P((struct vm_anon *));
void uvm_anon_init __P((void));
-void uvm_anon_add __P((int));
+int uvm_anon_add __P((int));
void uvm_anon_remove __P((int));
struct vm_page *uvm_anon_lockloanpg __P((struct vm_anon *));
void uvm_anon_dropswap __P((struct vm_anon *));
diff --git a/sys/uvm/uvm_aobj.c b/sys/uvm/uvm_aobj.c
index d13e8cf9c2b..85ce0a495f6 100644
--- a/sys/uvm/uvm_aobj.c
+++ b/sys/uvm/uvm_aobj.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_aobj.c,v 1.19 2001/11/07 02:55:50 art Exp $ */
-/* $NetBSD: uvm_aobj.c,v 1.37 2000/11/25 06:27:59 chs Exp $ */
+/* $OpenBSD: uvm_aobj.c,v 1.20 2001/11/11 01:16:56 art Exp $ */
+/* $NetBSD: uvm_aobj.c,v 1.39 2001/02/18 21:19:08 chs Exp $ */
/*
* Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
@@ -335,18 +335,17 @@ uao_set_swslot(uobj, pageidx, slot)
*/
if (UAO_USES_SWHASH(aobj)) {
+
/*
* Avoid allocating an entry just to free it again if
* the page had not swap slot in the first place, and
* we are freeing.
*/
+
struct uao_swhash_elt *elt =
uao_find_swhash_elt(aobj, pageidx, slot ? TRUE : FALSE);
if (elt == NULL) {
-#ifdef DIAGNOSTIC
- if (slot)
- panic("uao_set_swslot: didn't create elt");
-#endif
+ KASSERT(slot == 0);
return (0);
}
@@ -879,10 +878,15 @@ uao_flush(uobj, start, stop, flags)
pp->wire_count != 0)
continue;
+#ifdef UBC
+ /* ...and deactivate the page. */
+ pmap_clear_reference(pp);
+#else
/* zap all mappings for the page. */
pmap_page_protect(pp, VM_PROT_NONE);
/* ...and deactivate the page. */
+#endif
uvm_pagedeactivate(pp);
continue;
@@ -919,9 +923,6 @@ uao_flush(uobj, start, stop, flags)
default:
panic("uao_flush: weird flags");
}
-#ifdef DIAGNOSTIC
- panic("uao_flush: unreachable code");
-#endif
}
uvm_unlock_pageq();
@@ -1260,10 +1261,7 @@ uao_releasepg(pg, nextpgp)
{
struct uvm_aobj *aobj = (struct uvm_aobj *) pg->uobject;
-#ifdef DIAGNOSTIC
- if ((pg->flags & PG_RELEASED) == 0)
- panic("uao_releasepg: page not released!");
-#endif
+ KASSERT(pg->flags & PG_RELEASED);
/*
* dispose of the page [caller handles PG_WANTED] and swap slot.
@@ -1290,10 +1288,7 @@ uao_releasepg(pg, nextpgp)
if (aobj->u_obj.uo_npages != 0)
return TRUE;
-#ifdef DIAGNOSTIC
- if (TAILQ_FIRST(&aobj->u_obj.memq))
- panic("uvn_releasepg: pages in object with npages == 0");
-#endif
+ KASSERT(TAILQ_EMPTY(&aobj->u_obj.memq));
/*
* finally, free the rest.
@@ -1512,20 +1507,8 @@ uao_pagein_page(aobj, pageidx)
*/
return FALSE;
-#ifdef DIAGNOSTIC
- default:
- panic("uao_pagein_page: uao_get -> %d\n", rv);
-#endif
}
-
-#ifdef DIAGNOSTIC
- /*
- * this should never happen, since we have a reference on the aobj.
- */
- if (pg->flags & PG_RELEASED) {
- panic("uao_pagein_page: found PG_RELEASED page?\n");
- }
-#endif
+ KASSERT((pg->flags & PG_RELEASED) == 0);
/*
* ok, we've got the page now.
@@ -1540,7 +1523,9 @@ uao_pagein_page(aobj, pageidx)
* deactivate the page (to put it on a page queue).
*/
pmap_clear_reference(pg);
+#ifndef UBC
pmap_page_protect(pg, VM_PROT_NONE);
+#endif
uvm_lock_pageq();
uvm_pagedeactivate(pg);
uvm_unlock_pageq();
diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c
index c9f9609104f..b15ba7cdbef 100644
--- a/sys/uvm/uvm_km.c
+++ b/sys/uvm/uvm_km.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_km.c,v 1.21 2001/11/09 03:32:23 art Exp $ */
-/* $NetBSD: uvm_km.c,v 1.41 2000/11/27 04:36:40 nisimura Exp $ */
+/* $OpenBSD: uvm_km.c,v 1.22 2001/11/11 01:16:56 art Exp $ */
+/* $NetBSD: uvm_km.c,v 1.42 2001/01/14 02:10:01 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -92,9 +92,9 @@
* the vm system has several standard kernel submaps, including:
* kmem_map => contains only wired kernel memory for the kernel
* malloc. *** access to kmem_map must be protected
- * by splimp() because we are allowed to call malloc()
+ * by splvm() because we are allowed to call malloc()
* at interrupt time ***
- * mb_map => memory for large mbufs, *** protected by splimp ***
+ * mb_map => memory for large mbufs, *** protected by splvm ***
* pager_map => used to map "buf" structures into kernel space
* exec_map => used during exec to handle exec args
* etc...
@@ -108,7 +108,7 @@
*
* most kernel private memory lives in kernel_object. the only exception
* to this is for memory that belongs to submaps that must be protected
- * by splimp(). each of these submaps has their own private kernel
+ * by splvm(). each of these submaps has their own private kernel
* object (e.g. kmem_object, mb_object).
*
* note that just because a kernel object spans the entire kernel virutal
@@ -864,16 +864,16 @@ uvm_km_alloc_poolpage1(map, obj, waitok)
int s;
/*
- * NOTE: We may be called with a map that doens't require splimp
+ * NOTE: We may be called with a map that doens't require splvm
* protection (e.g. kernel_map). However, it does not hurt to
- * go to splimp in this case (since unprocted maps will never be
+ * go to splvm in this case (since unprocted maps will never be
* accessed in interrupt context).
*
* XXX We may want to consider changing the interface to this
* XXX function.
*/
- s = splimp();
+ s = splvm();
va = uvm_km_kmemalloc(map, obj, PAGE_SIZE, waitok ? 0 : UVM_KMF_NOWAIT);
splx(s);
return (va);
@@ -901,16 +901,16 @@ uvm_km_free_poolpage1(map, addr)
int s;
/*
- * NOTE: We may be called with a map that doens't require splimp
+ * NOTE: We may be called with a map that doens't require splvm
* protection (e.g. kernel_map). However, it does not hurt to
- * go to splimp in this case (since unprocted maps will never be
+ * go to splvm in this case (since unprocted maps will never be
* accessed in interrupt context).
*
* XXX We may want to consider changing the interface to this
* XXX function.
*/
- s = splimp();
+ s = splvm();
uvm_km_free(map, addr, PAGE_SIZE);
splx(s);
#endif /* PMAP_UNMAP_POOLPAGE */
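
The kmem and mbuf submaps are now documented as protected by splvm() rather than splimp(), and uvm_km_{alloc,free}_poolpage1() raise to splvm() around the map operation because malloc() may run at interrupt time. A stand-alone sketch of that save/raise/restore bracketing; the spl stubs and the level value are illustrative, not the kernel's implementation.

	#include <stdio.h>

	static int current_ipl;

	static int
	splvm_stub(void)		/* raise so interrupt-time malloc() is blocked */
	{
		int s = current_ipl;

		current_ipl = 4;	/* illustrative level, not a real value */
		return s;
	}

	static void
	splx_stub(int s)		/* restore the previously saved level */
	{
		current_ipl = s;
	}

	static void
	alloc_poolpage(void)
	{
		int s = splvm_stub();	/* protect the map across the allocation */

		printf("allocate one page at ipl %d\n", current_ipl);
		splx_stub(s);
	}

	int
	main(void)
	{
		alloc_poolpage();
		return 0;
	}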
diff --git a/sys/uvm/uvm_loan.c b/sys/uvm/uvm_loan.c
index 5c20fbe2f60..481c6bec810 100644
--- a/sys/uvm/uvm_loan.c
+++ b/sys/uvm/uvm_loan.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_loan.c,v 1.11 2001/11/06 01:35:04 art Exp $ */
-/* $NetBSD: uvm_loan.c,v 1.22 2000/06/27 17:29:25 mrg Exp $ */
+/* $OpenBSD: uvm_loan.c,v 1.12 2001/11/11 01:16:56 art Exp $ */
+/* $NetBSD: uvm_loan.c,v 1.23 2001/01/23 02:27:39 thorpej Exp $ */
/*
*
@@ -579,6 +579,7 @@ uvm_loanuobj(ufi, output, flags, va)
uvmfault_unlockall(ufi, amap, uobj, NULL);
return(-1);
}
+ /* anon is locked! */
anon->u.an_page = pg;
pg->uanon = anon;
uvm_lock_pageq();
@@ -593,6 +594,7 @@ uvm_loanuobj(ufi, output, flags, va)
wakeup(pg);
pg->flags &= ~(PG_WANTED|PG_BUSY);
UVM_PAGE_OWN(pg, NULL);
+ simple_unlock(&anon->an_lock);
return(1);
}
@@ -648,15 +650,22 @@ uvm_loanzero(ufi, output, flags)
/* loaning to an anon */
while ((anon = uvm_analloc()) == NULL ||
(pg = uvm_pagealloc(NULL, 0, anon, UVM_PGA_ZERO)) == NULL) {
-
+
/* unlock everything */
uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
- ufi->entry->object.uvm_obj, NULL);
-
+ ufi->entry->object.uvm_obj, anon);
+
/* out of swap causes us to fail */
if (anon == NULL)
return(-1);
+ /*
+ * drop our reference; we're the only one,
+ * so it's okay that the anon isn't locked
+ * here.
+ */
+ anon->an_ref--;
+
uvm_anfree(anon);
uvm_wait("loanzero2"); /* wait for pagedaemon */