author     Artur Grabowski <art@cvs.openbsd.org>    2001-11-07 02:55:52 +0000
committer  Artur Grabowski <art@cvs.openbsd.org>    2001-11-07 02:55:52 +0000
commit     cc211db56384036a7e588d635b51f11a921dd54e
tree       901408843f90b295b4287da701ff27afbb5c876c
parent     253f31ab10d5b90c1d64de9c7dec0385fa0c9f7c
Another sync of uvm to NetBSD. Just minor fiddling, no major changes.
Diffstat (limited to 'sys/uvm')
-rw-r--r--  sys/uvm/uvm_amap.c      34
-rw-r--r--  sys/uvm/uvm_amap.h      10
-rw-r--r--  sys/uvm/uvm_amap_i.h    43
-rw-r--r--  sys/uvm/uvm_anon.c      28
-rw-r--r--  sys/uvm/uvm_aobj.c      48
-rw-r--r--  sys/uvm/uvm_ddb.h       21
-rw-r--r--  sys/uvm/uvm_device.c   114
-rw-r--r--  sys/uvm/uvm_extern.h    36
-rw-r--r--  sys/uvm/uvm_glue.c      54
-rw-r--r--  sys/uvm/uvm_km.c        92
-rw-r--r--  sys/uvm/uvm_map.c      380
-rw-r--r--  sys/uvm/uvm_map.h       17
-rw-r--r--  sys/uvm/uvm_meter.c     13
-rw-r--r--  sys/uvm/uvm_mmap.c       6
-rw-r--r--  sys/uvm/uvm_page.c      31
-rw-r--r--  sys/uvm/uvm_page.h       7
-rw-r--r--  sys/uvm/uvm_pager.c      5
-rw-r--r--  sys/uvm/uvm_pager.h     16
-rw-r--r--  sys/uvm/uvm_pager_i.h   14
-rw-r--r--  sys/uvm/uvm_pglist.c    43
-rw-r--r--  sys/uvm/uvm_pglist.h     4
-rw-r--r--  sys/uvm/uvm_stat.c      56
-rw-r--r--  sys/uvm/uvm_swap.c      30
-rw-r--r--  sys/uvm/uvm_swap.h       4
-rw-r--r--  sys/uvm/uvm_unix.c      10
-rw-r--r--  sys/uvm/uvm_vnode.c     42
26 files changed, 478 insertions, 680 deletions
diff --git a/sys/uvm/uvm_amap.c b/sys/uvm/uvm_amap.c
index 800a0dec20c..b0e1e040488 100644
--- a/sys/uvm/uvm_amap.c
+++ b/sys/uvm/uvm_amap.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_amap.c,v 1.13 2001/11/06 13:36:52 art Exp $ */
-/* $NetBSD: uvm_amap.c,v 1.26 2000/08/03 00:47:02 thorpej Exp $ */
+/* $OpenBSD: uvm_amap.c,v 1.14 2001/11/07 02:55:50 art Exp $ */
+/* $NetBSD: uvm_amap.c,v 1.27 2000/11/25 06:27:59 chs Exp $ */
/*
*
@@ -48,6 +48,7 @@
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
+#include <sys/kernel.h>
#include <sys/pool.h>
#define UVM_AMAP_C /* ensure disabled inlines are in */
@@ -321,7 +322,7 @@ amap_extend(entry, addsize)
if (amap->am_nslot >= slotneed) {
#ifdef UVM_AMAP_PPREF
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
- amap_pp_adjref(amap, slotoff + slotmapped, addsize, 1);
+ amap_pp_adjref(amap, slotoff + slotmapped, slotadd, 1);
}
#endif
amap_unlock(amap);
@@ -339,8 +340,8 @@ amap_extend(entry, addsize)
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
if ((slotoff + slotmapped) < amap->am_nslot)
amap_pp_adjref(amap, slotoff + slotmapped,
- (amap->am_nslot - (slotoff + slotmapped)) <<
- PAGE_SHIFT, 1);
+ (amap->am_nslot - (slotoff + slotmapped)),
+ 1);
pp_setreflen(amap->am_ppref, amap->am_nslot, 1,
slotneed - amap->am_nslot);
}
@@ -418,8 +419,7 @@ amap_extend(entry, addsize)
amap->am_ppref = newppref;
if ((slotoff + slotmapped) < amap->am_nslot)
amap_pp_adjref(amap, slotoff + slotmapped,
- (amap->am_nslot - (slotoff + slotmapped)) <<
- PAGE_SHIFT, 1);
+ (amap->am_nslot - (slotoff + slotmapped)), 1);
pp_setreflen(newppref, amap->am_nslot, 1, slotadded);
}
#endif
@@ -567,7 +567,8 @@ amap_copy(map, entry, waitf, canchunk, startva, endva)
int slots, lcv;
vaddr_t chunksize;
UVMHIST_FUNC("amap_copy"); UVMHIST_CALLED(maphist);
- UVMHIST_LOG(maphist, " (map=%p, entry=%p, waitf=%d)", map, entry, waitf, 0);
+ UVMHIST_LOG(maphist, " (map=%p, entry=%p, waitf=%d)",
+ map, entry, waitf, 0);
/*
* is there a map to copy? if not, create one from scratch.
@@ -685,7 +686,7 @@ amap_copy(map, entry, waitf, canchunk, startva, endva)
#ifdef UVM_AMAP_PPREF
if (srcamap->am_ppref && srcamap->am_ppref != PPREF_NONE) {
amap_pp_adjref(srcamap, entry->aref.ar_pageoff,
- entry->end - entry->start, -1);
+ (entry->end - entry->start) >> PAGE_SHIFT, -1);
}
#endif
@@ -803,8 +804,10 @@ ReStart:
* XXXCDC: we should cause fork to fail, but
* we can't ...
*/
- if (nanon)
+ if (nanon) {
+ simple_lock(&nanon->an_lock);
uvm_anfree(nanon);
+ }
simple_unlock(&anon->an_lock);
amap_unlock(amap);
uvm_wait("cownowpage");
@@ -854,7 +857,6 @@ amap_splitref(origref, splitref, offset)
vaddr_t offset;
{
int leftslots;
- UVMHIST_FUNC("amap_splitref"); UVMHIST_CALLED(maphist);
AMAP_B2SLOT(leftslots, offset);
if (leftslots == 0)
@@ -926,21 +928,20 @@ amap_pp_establish(amap)
* => caller must check that ppref != PPREF_NONE before calling
*/
void
-amap_pp_adjref(amap, curslot, bytelen, adjval)
+amap_pp_adjref(amap, curslot, slotlen, adjval)
struct vm_amap *amap;
int curslot;
- vsize_t bytelen;
+ vsize_t slotlen;
int adjval;
{
- int slots, stopslot, *ppref, lcv;
+ int stopslot, *ppref, lcv;
int ref, len;
/*
* get init values
*/
- AMAP_B2SLOT(slots, bytelen);
- stopslot = curslot + slots;
+ stopslot = curslot + slotlen;
ppref = amap->am_ppref;
/*
@@ -995,7 +996,6 @@ amap_wiperange(amap, slotoff, slots)
{
int byanon, lcv, stop, curslot, ptr;
struct vm_anon *anon;
- UVMHIST_FUNC("amap_wiperange"); UVMHIST_CALLED(maphist);
/*
* we can either traverse the amap by am_anon or by am_slots depending
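
The substantive change in uvm_amap.c is a unit fix: amap_pp_adjref() now takes
its length in slots (pages) rather than bytes, so callers convert once with
AMAP_B2SLOT instead of shifting by PAGE_SHIFT at every call site. Below is a
minimal user-space sketch of the new convention; ppref[] and pp_adjref() here
are toy stand-ins for the kernel code, not the real thing.

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  ((1 << PAGE_SHIFT) - 1)

/* same contract as the real macro: the byte length must be page-aligned */
#define AMAP_B2SLOT(S, B) do {                                          \
        assert(((B) & PAGE_MASK) == 0);                                 \
        (S) = (B) >> PAGE_SHIFT;                                        \
} while (0)

/* toy per-page reference array standing in for amap->am_ppref */
static int ppref[64];

static void
pp_adjref(int curslot, int slotlen, int adjval)         /* slot units now */
{
        int lcv;

        for (lcv = curslot; lcv < curslot + slotlen; lcv++)
                ppref[lcv] += adjval;
}

int
main(void)
{
        int slots;

        AMAP_B2SLOT(slots, 8 << PAGE_SHIFT);    /* 8 pages worth of bytes */
        pp_adjref(4, slots, 1);         /* old style: pp_adjref(4, 8 << PAGE_SHIFT, 1) */
        printf("ppref[4] = %d\n", ppref[4]);
        return 0;
}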
diff --git a/sys/uvm/uvm_amap.h b/sys/uvm/uvm_amap.h
index 22eecd6f120..2aff5399dcf 100644
--- a/sys/uvm/uvm_amap.h
+++ b/sys/uvm/uvm_amap.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_amap.h,v 1.6 2001/05/10 14:51:21 art Exp $ */
-/* $NetBSD: uvm_amap.h,v 1.12 1999/07/07 05:31:40 thorpej Exp $ */
+/* $OpenBSD: uvm_amap.h,v 1.7 2001/11/07 02:55:50 art Exp $ */
+/* $NetBSD: uvm_amap.h,v 1.13 2000/11/25 06:27:59 chs Exp $ */
/*
*
@@ -84,7 +84,7 @@ struct vm_amap;
AMAP_INLINE
void amap_add /* add an anon to an amap */
__P((struct vm_aref *, vaddr_t,
- struct vm_anon *, int));
+ struct vm_anon *, boolean_t));
struct vm_amap *amap_alloc /* allocate a new amap */
__P((vaddr_t, vaddr_t, int));
void amap_copy /* clear amap needs-copy flag */
@@ -111,7 +111,7 @@ void amap_lookups /* lookup multiple anons */
struct vm_anon **, int));
AMAP_INLINE
void amap_ref /* add a reference to an amap */
- __P((vm_map_entry_t, int));
+ __P((struct vm_amap *, vaddr_t, vsize_t, int));
int amap_refs /* get number of references of amap */
__P((struct vm_amap *));
void amap_share_protect /* protect pages in a shared amap */
@@ -126,7 +126,7 @@ void amap_unlock /* unlock amap */
__P((struct vm_amap *));
AMAP_INLINE
void amap_unref /* drop reference to an amap */
- __P((vm_map_entry_t, int));
+ __P((struct vm_amap *, vaddr_t, vsize_t, int));
void amap_wipeout /* remove all anons from amap */
__P((struct vm_amap *));
diff --git a/sys/uvm/uvm_amap_i.h b/sys/uvm/uvm_amap_i.h
index 8b6d76848ec..adcdc76072b 100644
--- a/sys/uvm/uvm_amap_i.h
+++ b/sys/uvm/uvm_amap_i.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_amap_i.h,v 1.9 2001/07/18 10:47:05 art Exp $ */
-/* $NetBSD: uvm_amap_i.h,v 1.14 1999/09/12 01:17:34 chs Exp $ */
+/* $OpenBSD: uvm_amap_i.h,v 1.10 2001/11/07 02:55:50 art Exp $ */
+/* $NetBSD: uvm_amap_i.h,v 1.15 2000/11/25 06:27:59 chs Exp $ */
/*
*
@@ -119,7 +119,7 @@ amap_add(aref, offset, anon, replace)
struct vm_aref *aref;
vaddr_t offset;
struct vm_anon *anon;
- int replace;
+ boolean_t replace;
{
int slot;
struct vm_amap *amap = aref->ar_amap;
@@ -195,14 +195,16 @@ amap_unadd(aref, offset)
* amap_ref: gain a reference to an amap
*
* => amap must not be locked (we will lock)
+ * => "offset" and "len" are in units of pages
* => called at fork time to gain the child's reference
*/
AMAP_INLINE void
-amap_ref(entry, flags)
- vm_map_entry_t entry;
+amap_ref(amap, offset, len, flags)
+ struct vm_amap *amap;
+ vaddr_t offset;
+ vsize_t len;
int flags;
{
- struct vm_amap *amap = entry->aref.ar_amap;
UVMHIST_FUNC("amap_ref"); UVMHIST_CALLED(maphist);
amap_lock(amap);
@@ -211,14 +213,13 @@ amap_ref(entry, flags)
amap->am_flags |= AMAP_SHARED;
#ifdef UVM_AMAP_PPREF
if (amap->am_ppref == NULL && (flags & AMAP_REFALL) == 0 &&
- (entry->start - entry->end) >> PAGE_SHIFT != amap->am_nslot)
+ len != amap->am_nslot)
amap_pp_establish(amap);
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
if (flags & AMAP_REFALL)
- amap_pp_adjref(amap, 0, amap->am_nslot << PAGE_SHIFT, 1);
+ amap_pp_adjref(amap, 0, amap->am_nslot, 1);
else
- amap_pp_adjref(amap, entry->aref.ar_pageoff,
- entry->end - entry->start, 1);
+ amap_pp_adjref(amap, offset, len, 1);
}
#endif
amap_unlock(amap);
@@ -235,20 +236,20 @@ amap_ref(entry, flags)
* => amap must be unlocked (we will lock it).
*/
AMAP_INLINE void
-amap_unref(entry, all)
- vm_map_entry_t entry;
- int all;
+amap_unref(amap, offset, len, all)
+ struct vm_amap *amap;
+ vaddr_t offset;
+ vsize_t len;
+ boolean_t all;
{
- struct vm_amap *amap = entry->aref.ar_amap;
UVMHIST_FUNC("amap_unref"); UVMHIST_CALLED(maphist);
/*
* lock it
*/
amap_lock(amap);
-
- UVMHIST_LOG(maphist,"(entry=0x%x) amap=0x%x refs=%d, nused=%d",
- entry, amap, amap->am_ref, amap->am_nused);
+ UVMHIST_LOG(maphist," amap=0x%x refs=%d, nused=%d",
+ amap, amap->am_ref, amap->am_nused, 0);
/*
* if we are the last reference, free the amap and return.
@@ -268,15 +269,13 @@ amap_unref(entry, all)
if (amap->am_ref == 1 && (amap->am_flags & AMAP_SHARED) != 0)
amap->am_flags &= ~AMAP_SHARED; /* clear shared flag */
#ifdef UVM_AMAP_PPREF
- if (amap->am_ppref == NULL && all == 0 &&
- (entry->start - entry->end) >> PAGE_SHIFT != amap->am_nslot)
+ if (amap->am_ppref == NULL && all == 0 && len != amap->am_nslot)
amap_pp_establish(amap);
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
if (all)
- amap_pp_adjref(amap, 0, amap->am_nslot << PAGE_SHIFT, -1);
+ amap_pp_adjref(amap, 0, amap->am_nslot, -1);
else
- amap_pp_adjref(amap, entry->aref.ar_pageoff,
- entry->end - entry->start, -1);
+ amap_pp_adjref(amap, offset, len, -1);
}
#endif
amap_unlock(amap);
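
Note what the old prototypes allowed: the removed lines in amap_ref() and
amap_unref() compute the length as (entry->start - entry->end) >> PAGE_SHIFT,
with the operands reversed. Having callers pass the page count explicitly
removes that class of error. A tiny sketch of the arithmetic, with
hypothetical addresses:

#include <stdio.h>

#define PAGE_SHIFT 12

int
main(void)
{
        unsigned long start = 0x1000, end = 0x5000;

        /* the reversed expression from the removed lines: wraps around */
        printf("start - end: %lu pages\n", (start - end) >> PAGE_SHIFT);
        /* the correct length the new callers compute explicitly */
        printf("end - start: %lu pages\n", (end - start) >> PAGE_SHIFT);
        return 0;
}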
diff --git a/sys/uvm/uvm_anon.c b/sys/uvm/uvm_anon.c
index 84ddfaf8235..c474db7ffef 100644
--- a/sys/uvm/uvm_anon.c
+++ b/sys/uvm/uvm_anon.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_anon.c,v 1.13 2001/11/06 13:36:52 art Exp $ */
-/* $NetBSD: uvm_anon.c,v 1.9 2000/08/06 00:21:57 thorpej Exp $ */
+/* $OpenBSD: uvm_anon.c,v 1.14 2001/11/07 02:55:50 art Exp $ */
+/* $NetBSD: uvm_anon.c,v 1.10 2000/11/25 06:27:59 chs Exp $ */
/*
*
@@ -212,19 +212,12 @@ uvm_anfree(anon)
*/
if (pg->uobject) {
-
- /* kill loan */
uvm_lock_pageq();
-#ifdef DIAGNOSTIC
- if (pg->loan_count < 1)
- panic("uvm_anfree: obj owned page "
- "with no loan count");
-#endif
+ KASSERT(pg->loan_count > 0);
pg->loan_count--;
pg->uanon = NULL;
uvm_unlock_pageq();
simple_unlock(&pg->uobject->vmobjlock);
-
} else {
/*
@@ -244,13 +237,11 @@ uvm_anfree(anon)
anon, pg, 0, 0);
return;
}
-
pmap_page_protect(pg, VM_PROT_NONE);
uvm_lock_pageq(); /* lock out pagedaemon */
uvm_pagefree(pg); /* bye bye */
uvm_unlock_pageq(); /* free the daemon */
-
- UVMHIST_LOG(maphist," anon 0x%x, page 0x%x: freed now!",
+ UVMHIST_LOG(maphist,"anon 0x%x, page 0x%x: freed now!",
anon, pg, 0, 0);
}
}
@@ -362,12 +353,14 @@ uvm_anon_lockloanpg(anon)
if (!locked) {
simple_unlock(&anon->an_lock);
+
/*
* someone locking the object has a chance to
* lock us right now
*/
+
simple_lock(&anon->an_lock);
- continue; /* start over */
+ continue;
}
}
@@ -386,13 +379,9 @@ uvm_anon_lockloanpg(anon)
/*
* we did it! break the loop
*/
+
break;
}
-
- /*
- * done!
- */
-
return(pg);
}
@@ -477,7 +466,6 @@ anon_pagein(anon)
struct vm_page *pg;
struct uvm_object *uobj;
int rv;
- UVMHIST_FUNC("anon_pagein"); UVMHIST_CALLED(pdhist);
/* locked: anon */
rv = uvmfault_anonget(NULL, NULL, anon);
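
A pattern repeated throughout this commit: multi-line #ifdef DIAGNOSTIC ...
panic() blocks collapse into single KASSERT() calls. A user-space model of the
macro follows; the kernel's version panics rather than aborting, and this
sketch is an illustration, not the kernel definition.

#include <stdio.h>
#include <stdlib.h>

#ifdef DIAGNOSTIC
#define KASSERT(e) do {                                                 \
        if (!(e)) {                                                     \
                fprintf(stderr, "diagnostic assertion \"%s\" failed: "  \
                    "file \"%s\", line %d\n", #e, __FILE__, __LINE__);  \
                abort();        /* the kernel panics here */            \
        }                                                               \
} while (0)
#else
#define KASSERT(e) ((void)0)
#endif

int
main(void)
{
        int loan_count = 1;

        KASSERT(loan_count > 0);        /* replaces the DIAGNOSTIC panic */
        printf("ok\n");
        return 0;
}

Compiled without -DDIAGNOSTIC the macro reduces to a no-op, just as the old
#ifdef blocks did, but each check is now a single line at the call site.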
diff --git a/sys/uvm/uvm_aobj.c b/sys/uvm/uvm_aobj.c
index bae91b851e3..d13e8cf9c2b 100644
--- a/sys/uvm/uvm_aobj.c
+++ b/sys/uvm/uvm_aobj.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_aobj.c,v 1.18 2001/11/06 13:36:52 art Exp $ */
-/* $NetBSD: uvm_aobj.c,v 1.34 2000/08/02 20:23:23 thorpej Exp $ */
+/* $OpenBSD: uvm_aobj.c,v 1.19 2001/11/07 02:55:50 art Exp $ */
+/* $NetBSD: uvm_aobj.c,v 1.37 2000/11/25 06:27:59 chs Exp $ */
/*
* Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
@@ -47,6 +47,7 @@
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
+#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/kernel.h>
@@ -180,8 +181,6 @@ static boolean_t uao_releasepg __P((struct vm_page *,
static boolean_t uao_pagein __P((struct uvm_aobj *, int, int));
static boolean_t uao_pagein_page __P((struct uvm_aobj *, int));
-
-
/*
* aobj_pager
*
@@ -195,11 +194,9 @@ struct uvm_pagerops aobj_pager = {
NULL, /* fault */
uao_flush, /* flush */
uao_get, /* get */
- NULL, /* asyncget */
NULL, /* put (done by pagedaemon) */
NULL, /* cluster */
NULL, /* mk_pcluster */
- NULL, /* aiodone */
uao_releasepg /* releasepg */
};
@@ -242,7 +239,7 @@ uao_find_swhash_elt(aobj, pageidx, create)
/*
* now search the bucket for the requested tag
*/
- for (elt = swhash->lh_first; elt != NULL; elt = elt->list.le_next) {
+ LIST_FOREACH(elt, swhash, list) {
if (elt->tag == page_tag)
return(elt);
}
@@ -374,7 +371,6 @@ uao_set_swslot(uobj, pageidx, slot)
pool_put(&uao_swhash_elt_pool, elt);
}
}
-
} else {
/* we are using an array */
oldslot = aobj->u_swslots[pageidx];
@@ -417,17 +413,18 @@ uao_free(aobj)
for (j = 0; j < UAO_SWHASH_CLUSTER_SIZE; j++) {
int slot = elt->slots[j];
- if (slot) {
- uvm_swap_free(slot, 1);
-
- /*
- * this page is no longer
- * only in swap.
- */
- simple_lock(&uvm.swap_data_lock);
- uvmexp.swpgonly--;
- simple_unlock(&uvm.swap_data_lock);
+ if (slot == 0) {
+ continue;
}
+ uvm_swap_free(slot, 1);
+
+ /*
+ * this page is no longer
+ * only in swap.
+ */
+ simple_lock(&uvm.swap_data_lock);
+ uvmexp.swpgonly--;
+ simple_unlock(&uvm.swap_data_lock);
}
next = LIST_NEXT(elt, list);
@@ -851,7 +848,7 @@ uao_flush(uobj, start, stop, flags)
for ( ; (by_list && pp != NULL) ||
(!by_list && curoff < stop) ; pp = ppnext) {
if (by_list) {
- ppnext = pp->listq.tqe_next;
+ ppnext = TAILQ_NEXT(pp, listq);
/* range check */
if (pp->offset < start || pp->offset >= stop)
@@ -971,7 +968,7 @@ uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d",
aobj, offset, flags,0);
-
+
/*
* get number of pages
*/
@@ -1250,7 +1247,7 @@ uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
* => returns TRUE if page's object is still alive, FALSE if we
* killed the page's object. if we return TRUE, then we
* return with the object locked.
- * => if (nextpgp != NULL) => we return pageq.tqe_next here, and return
+ * => if (nextpgp != NULL) => we return the next page on the queue, and return
* with the page queues locked [for pagedaemon]
* => if (nextpgp == NULL) => we return with page queues unlocked [normal case]
* => we kill the aobj if it is not referenced and we are supposed to

@@ -1275,7 +1272,7 @@ uao_releasepg(pg, nextpgp)
uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
uvm_lock_pageq();
if (nextpgp)
- *nextpgp = pg->pageq.tqe_next; /* next page for daemon */
+ *nextpgp = TAILQ_NEXT(pg, pageq); /* next page for daemon */
uvm_pagefree(pg);
if (!nextpgp)
uvm_unlock_pageq(); /* keep locked for daemon */
@@ -1285,11 +1282,7 @@ uao_releasepg(pg, nextpgp)
*/
if ((aobj->u_flags & UAO_FLAG_KILLME) == 0)
return TRUE;
-
-#ifdef DIAGNOSTIC
- if (aobj->u_obj.uo_refs)
- panic("uvm_km_releasepg: kill flag set on referenced object!");
-#endif
+ KASSERT(aobj->u_obj.uo_refs == 0);
/*
* if there are still pages in the object, we're done for now.
@@ -1493,7 +1486,6 @@ uao_pagein_page(aobj, pageidx)
{
struct vm_page *pg;
int rv, slot, npages;
- UVMHIST_FUNC("uao_pagein_page"); UVMHIST_CALLED(pdhist);
pg = NULL;
npages = 1;
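
Several loops above also switch from poking at tqe_next/le_next fields to the
<sys/queue.h> traversal macros (LIST_FOREACH, TAILQ_NEXT). A self-contained
sketch of the TAILQ idiom used in uao_flush(); the struct is a toy stand-in
for struct vm_page.

#include <stdio.h>
#include <sys/queue.h>

struct vm_page_toy {
        int offset;
        TAILQ_ENTRY(vm_page_toy) listq;
};
TAILQ_HEAD(pglist_toy, vm_page_toy);

int
main(void)
{
        struct pglist_toy memq = TAILQ_HEAD_INITIALIZER(memq);
        struct vm_page_toy pages[3], *pp, *ppnext;
        int i;

        for (i = 0; i < 3; i++) {
                pages[i].offset = i << 12;
                TAILQ_INSERT_TAIL(&memq, &pages[i], listq);
        }

        /* old style: pp = pp->listq.tqe_next; the macro hides the field */
        for (pp = TAILQ_FIRST(&memq); pp != NULL; pp = ppnext) {
                /* grab the next pointer first, as the real loops do
                   before possibly freeing pp */
                ppnext = TAILQ_NEXT(pp, listq);
                printf("offset 0x%x\n", pp->offset);
        }
        return 0;
}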
diff --git a/sys/uvm/uvm_ddb.h b/sys/uvm/uvm_ddb.h
index 43d6d7be22a..e80d8cf3db6 100644
--- a/sys/uvm/uvm_ddb.h
+++ b/sys/uvm/uvm_ddb.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_ddb.h,v 1.6 2001/03/09 05:34:38 smart Exp $ */
-/* $NetBSD: uvm_ddb.h,v 1.3 1999/06/21 17:25:11 thorpej Exp $ */
+/* $OpenBSD: uvm_ddb.h,v 1.7 2001/11/07 02:55:50 art Exp $ */
+/* $NetBSD: uvm_ddb.h,v 1.5 2000/11/25 06:27:59 chs Exp $ */
/*
*
@@ -41,16 +41,13 @@
#ifdef _KERNEL
#ifdef DDB
-void uvm_map_print __P((vm_map_t, boolean_t));
-void uvm_map_printit __P((vm_map_t, boolean_t,
- int (*) __P((const char *, ...))));
-
-void uvm_object_print __P((struct uvm_object *, boolean_t));
-void uvm_object_printit __P((struct uvm_object *, boolean_t,
- int (*) __P((const char *, ...))));
-void uvm_page_print __P((struct vm_page *, boolean_t));
-void uvm_page_printit __P((struct vm_page *, boolean_t,
- int (*) __P((const char *, ...))));
+void uvm_map_printit __P((vm_map_t, boolean_t,
+ int (*) __P((const char *, ...))));
+void uvm_object_printit __P((struct uvm_object *, boolean_t,
+ int (*) __P((const char *, ...))));
+void uvm_page_printit __P((struct vm_page *, boolean_t,
+ int (*) __P((const char *, ...))));
+void uvmexp_print(void (*)(const char *, ...));
#endif /* DDB */
#endif /* _KERNEL */
diff --git a/sys/uvm/uvm_device.c b/sys/uvm/uvm_device.c
index a2542a7de68..932fdfd5ec3 100644
--- a/sys/uvm/uvm_device.c
+++ b/sys/uvm/uvm_device.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_device.c,v 1.16 2001/11/06 01:35:04 art Exp $ */
-/* $NetBSD: uvm_device.c,v 1.28 2000/06/27 17:29:20 mrg Exp $ */
+/* $OpenBSD: uvm_device.c,v 1.17 2001/11/07 02:55:50 art Exp $ */
+/* $NetBSD: uvm_device.c,v 1.30 2000/11/25 06:27:59 chs Exp $ */
/*
*
@@ -69,12 +69,8 @@ static void udv_detach __P((struct uvm_object *));
static int udv_fault __P((struct uvm_faultinfo *, vaddr_t,
vm_page_t *, int, int, vm_fault_t,
vm_prot_t, int));
-static int udv_asyncget __P((struct uvm_object *, voff_t,
- int));
static boolean_t udv_flush __P((struct uvm_object *, voff_t, voff_t,
int));
-static int udv_put __P((struct uvm_object *, vm_page_t *,
- int, boolean_t));
/*
* master pager structure
@@ -86,13 +82,6 @@ struct uvm_pagerops uvm_deviceops = {
udv_detach,
udv_fault,
udv_flush,
- NULL, /* no get function since we have udv_fault */
- udv_asyncget,
- udv_put,
- NULL, /* no cluster function */
- NULL, /* no put cluster function */
- NULL, /* no AIO-DONE function since no async i/o */
- NULL, /* no releasepg function since no normal pages */
};
/*
@@ -129,7 +118,7 @@ udv_attach(arg, accessprot, off, size)
voff_t off; /* used only for access check */
vsize_t size; /* used only for access check */
{
- dev_t device = *((dev_t *) arg);
+ dev_t device = *((dev_t *)arg);
struct uvm_device *udv, *lcv;
paddr_t (*mapfn) __P((dev_t, off_t, int));
UVMHIST_FUNC("udv_attach"); UVMHIST_CALLED(maphist);
@@ -142,13 +131,14 @@ udv_attach(arg, accessprot, off, size)
mapfn = cdevsw[major(device)].d_mmap;
if (mapfn == NULL ||
- mapfn == (paddr_t (*) __P((dev_t, off_t, int))) enodev ||
- mapfn == (paddr_t (*) __P((dev_t, off_t, int))) nullop)
+ mapfn == (paddr_t (*) __P((dev_t, off_t, int))) enodev ||
+ mapfn == (paddr_t (*) __P((dev_t, off_t, int))) nullop)
return(NULL);
/*
* Negative offsets on the object are not allowed.
*/
+
if (off < 0)
return(NULL);
@@ -170,14 +160,14 @@ udv_attach(arg, accessprot, off, size)
* keep looping until we get it
*/
- while (1) {
+ for (;;) {
/*
* first, attempt to find it on the main list
*/
simple_lock(&udv_lock);
- for (lcv = udv_list.lh_first ; lcv != NULL ; lcv = lcv->u_list.le_next) {
+ LIST_FOREACH(lcv, &udv_list, u_list) {
if (device == lcv->u_device)
break;
}
@@ -211,7 +201,7 @@ udv_attach(arg, accessprot, off, size)
simple_lock(&lcv->u_obj.vmobjlock);
lcv->u_obj.uo_refs++;
simple_unlock(&lcv->u_obj.vmobjlock);
-
+
simple_lock(&udv_lock);
if (lcv->u_flags & UVM_DEVICE_WANTED)
wakeup(lcv);
@@ -226,7 +216,8 @@ udv_attach(arg, accessprot, off, size)
simple_unlock(&udv_lock);
/* NOTE: we could sleep in the following malloc() */
- MALLOC(udv, struct uvm_device *, sizeof(*udv), M_TEMP, M_WAITOK);
+ MALLOC(udv, struct uvm_device *, sizeof(*udv), M_TEMP,
+ M_WAITOK);
simple_lock(&udv_lock);
/*
@@ -234,14 +225,14 @@ udv_attach(arg, accessprot, off, size)
* to the list while we were sleeping...
*/
- for (lcv = udv_list.lh_first ; lcv != NULL ;
- lcv = lcv->u_list.le_next) {
+ LIST_FOREACH(lcv, &udv_list, u_list) {
if (device == lcv->u_device)
break;
}
/*
- * did we lose a race to someone else? free our memory and retry.
+ * did we lose a race to someone else?
+ * free our memory and retry.
*/
if (lcv) {
@@ -257,18 +248,15 @@ udv_attach(arg, accessprot, off, size)
simple_lock_init(&udv->u_obj.vmobjlock);
udv->u_obj.pgops = &uvm_deviceops;
- TAILQ_INIT(&udv->u_obj.memq); /* not used, but be safe */
+ TAILQ_INIT(&udv->u_obj.memq);
udv->u_obj.uo_npages = 0;
udv->u_obj.uo_refs = 1;
udv->u_flags = 0;
udv->u_device = device;
LIST_INSERT_HEAD(&udv_list, udv, u_list);
simple_unlock(&udv_lock);
-
return(&udv->u_obj);
-
- } /* while(1) loop */
-
+ }
/*NOTREACHED*/
}
@@ -291,7 +279,7 @@ udv_reference(uobj)
simple_lock(&uobj->vmobjlock);
uobj->uo_refs++;
UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
- uobj, uobj->uo_refs,0,0);
+ uobj, uobj->uo_refs,0,0);
simple_unlock(&uobj->vmobjlock);
}
@@ -307,37 +295,28 @@ static void
udv_detach(uobj)
struct uvm_object *uobj;
{
- struct uvm_device *udv = (struct uvm_device *) uobj;
+ struct uvm_device *udv = (struct uvm_device *)uobj;
UVMHIST_FUNC("udv_detach"); UVMHIST_CALLED(maphist);
-
/*
* loop until done
*/
again:
simple_lock(&uobj->vmobjlock);
-
if (uobj->uo_refs > 1) {
- uobj->uo_refs--; /* drop ref! */
+ uobj->uo_refs--;
simple_unlock(&uobj->vmobjlock);
UVMHIST_LOG(maphist," <- done, uobj=0x%x, ref=%d",
uobj,uobj->uo_refs,0,0);
return;
}
-
-#ifdef DIAGNOSTIC
- if (uobj->uo_npages || !TAILQ_EMPTY(&uobj->memq))
- panic("udv_detach: pages in a device object?");
-#endif
-
- /*
- * now lock udv_lock
- */
- simple_lock(&udv_lock);
+ KASSERT(uobj->uo_npages == 0 && TAILQ_EMPTY(&uobj->memq));
/*
* is it being held? if so, wait until others are done.
*/
+
+ simple_lock(&udv_lock);
if (udv->u_flags & UVM_DEVICE_HOLD) {
udv->u_flags |= UVM_DEVICE_WANTED;
simple_unlock(&uobj->vmobjlock);
@@ -348,15 +327,14 @@ again:
/*
* got it! nuke it now.
*/
+
LIST_REMOVE(udv, u_list);
if (udv->u_flags & UVM_DEVICE_WANTED)
wakeup(udv);
simple_unlock(&udv_lock);
simple_unlock(&uobj->vmobjlock);
FREE(udv, M_TEMP);
-
UVMHIST_LOG(maphist," <- done, freed uobj=0x%x", uobj,0,0,0);
- return;
}
@@ -366,7 +344,8 @@ again:
* flush pages out of a uvm object. a no-op for devices.
*/
-static boolean_t udv_flush(uobj, start, stop, flags)
+static boolean_t
+udv_flush(uobj, start, stop, flags)
struct uvm_object *uobj;
voff_t start, stop;
int flags;
@@ -414,13 +393,6 @@ udv_fault(ufi, vaddr, pps, npages, centeridx, fault_type, access_type, flags)
UVMHIST_LOG(maphist," flags=%d", flags,0,0,0);
/*
- * XXX: !PGO_LOCKED calls are currently not allowed (or used)
- */
-
- if ((flags & PGO_LOCKED) == 0)
- panic("udv_fault: !PGO_LOCKED fault");
-
- /*
* we do not allow device mappings to be mapped copy-on-write
* so we kill any attempt to do so here.
*/
@@ -435,6 +407,7 @@ udv_fault(ufi, vaddr, pps, npages, centeridx, fault_type, access_type, flags)
/*
* get device map function.
*/
+
device = udv->u_device;
mapfn = cdevsw[major(device)].d_mmap;
@@ -444,6 +417,7 @@ udv_fault(ufi, vaddr, pps, npages, centeridx, fault_type, access_type, flags)
* for pmap_enter (even if we have a submap). since virtual
* addresses in a submap must match the main map, this is ok.
*/
+
/* udv offset = (offset from start of entry) + entry's offset */
curr_offset = entry->offset + (vaddr - entry->start);
/* pmap va = vaddr (virtual address of pps[0]) */
@@ -494,37 +468,3 @@ udv_fault(ufi, vaddr, pps, npages, centeridx, fault_type, access_type, flags)
uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
return (retval);
}
-
-/*
- * udv_asyncget: start async I/O to bring pages into ram
- *
- * => caller must lock object(???XXX: see if this is best)
- * => a no-op for devices
- */
-
-static int
-udv_asyncget(uobj, offset, npages)
- struct uvm_object *uobj;
- voff_t offset;
- int npages;
-{
-
- return(KERN_SUCCESS);
-}
-
-/*
- * udv_put: flush page data to backing store.
- *
- * => this function should never be called (since we never have any
- * page structures to "put")
- */
-
-static int
-udv_put(uobj, pps, npages, flags)
- struct uvm_object *uobj;
- struct vm_page **pps;
- int npages, flags;
-{
-
- panic("udv_put: trying to page out to a device!");
-}
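
With udv_asyncget() and udv_put() gone, uvm_deviceops only initializes the
hooks a device object can actually service; the trailing members of a
partially initialized static struct are implicitly NULL, which the pager
layer reads as "not supported". A toy illustration of the pattern (all names
here are hypothetical):

#include <stdio.h>

struct pagerops_toy {
        void (*init)(void);
        int  (*fault)(int);
        int  (*get)(int);       /* left NULL: handled entirely by fault */
        int  (*put)(int);       /* left NULL: no pageout to devices */
};

static void dev_init(void) { printf("init\n"); }
static int  dev_fault(int va) { printf("fault at %d\n", va); return 0; }

/* trailing members are implicitly zeroed, i.e. NULL */
static const struct pagerops_toy deviceops = {
        dev_init,
        dev_fault,
};

int
main(void)
{
        deviceops.init();
        if (deviceops.put == NULL)
                printf("no put hook: skip pageout for this object\n");
        return deviceops.fault(42);
}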
diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h
index 3f6eaa5aeb6..3ea0d031190 100644
--- a/sys/uvm/uvm_extern.h
+++ b/sys/uvm/uvm_extern.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_extern.h,v 1.29 2001/11/07 01:18:01 art Exp $ */
-/* $NetBSD: uvm_extern.h,v 1.49 2000/09/13 15:00:25 thorpej Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.30 2001/11/07 02:55:50 art Exp $ */
+/* $NetBSD: uvm_extern.h,v 1.51 2000/09/28 19:05:06 eeh Exp $ */
/*
*
@@ -307,6 +307,8 @@ struct uvmexp {
was available */
int pga_zeromiss; /* pagealloc where zero wanted and zero
not available */
+ int zeroaborts; /* number of times page zeroing was
+ aborted */
/* fault subcounters */
int fltnoram; /* number of times fault was out of ram */
@@ -383,6 +385,7 @@ struct vmspace {
caddr_t vm_taddr; /* user virtual address of text XXX */
caddr_t vm_daddr; /* user virtual address of data XXX */
caddr_t vm_maxsaddr; /* user VA at max stack growth */
+ caddr_t vm_minsaddr; /* user VA at top of stack */
};
#ifdef _KERNEL
@@ -426,6 +429,19 @@ struct core;
#ifdef _KERNEL
+/* vm_machdep.c */
+void vmapbuf __P((struct buf *, vsize_t));
+void vunmapbuf __P((struct buf *, vsize_t));
+void pagemove __P((caddr_t, caddr_t, size_t));
+#ifndef cpu_swapin
+void cpu_swapin __P((struct proc *));
+#endif
+#ifndef cpu_swapout
+void cpu_swapout __P((struct proc *));
+#endif
+void cpu_fork __P((struct proc *, struct proc *, void *, size_t,
+ void (*)(void *), void *));
+
/* uvm_aobj.c */
struct uvm_object *uao_create __P((vsize_t, int));
void uao_detach __P((struct uvm_object *));
@@ -565,20 +581,6 @@ void kmeminit_nkmempages __P((void));
void kmeminit __P((void));
extern int nkmempages;
-void swstrategy __P((struct buf *));
-
-/* Machine dependent portion */
-void vmapbuf __P((struct buf *, vsize_t));
-void vunmapbuf __P((struct buf *, vsize_t));
-void pagemove __P((caddr_t, caddr_t, size_t));
-void cpu_fork __P((struct proc *, struct proc *, void *, size_t,
- void (*)(void *), void *));
-#ifndef cpu_swapin
-void cpu_swapin __P((struct proc *));
-#endif
-#ifndef cpu_swapout
-void cpu_swapout __P((struct proc *));
-#endif
-
#endif /* _KERNEL */
+
#endif /* _UVM_UVM_EXTERN_H_ */
diff --git a/sys/uvm/uvm_glue.c b/sys/uvm/uvm_glue.c
index 95f24367cc1..809350d603f 100644
--- a/sys/uvm/uvm_glue.c
+++ b/sys/uvm/uvm_glue.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_glue.c,v 1.24 2001/11/06 18:41:10 art Exp $ */
-/* $NetBSD: uvm_glue.c,v 1.40 2000/08/21 02:29:32 thorpej Exp $ */
+/* $OpenBSD: uvm_glue.c,v 1.25 2001/11/07 02:55:50 art Exp $ */
+/* $NetBSD: uvm_glue.c,v 1.43 2000/11/25 06:27:59 chs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -119,7 +119,7 @@ uvm_kernacc(addr, len, rw)
vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;
saddr = trunc_page((vaddr_t)addr);
- eaddr = round_page((vaddr_t)addr+len);
+ eaddr = round_page((vaddr_t)addr + len);
vm_map_lock_read(kernel_map);
rv = uvm_map_checkprot(kernel_map, saddr, eaddr, prot);
vm_map_unlock_read(kernel_map);
@@ -160,7 +160,7 @@ uvm_useracc(addr, len, rw)
vm_map_lock_read(map);
rv = uvm_map_checkprot(map, trunc_page((vaddr_t)addr),
- round_page((vaddr_t)addr+len), prot);
+ round_page((vaddr_t)addr + len), prot);
vm_map_unlock_read(map);
return(rv);
@@ -246,7 +246,7 @@ uvm_vsunlock(p, addr, len)
size_t len;
{
uvm_fault_unwire(&p->p_vmspace->vm_map, trunc_page((vaddr_t)addr),
- round_page((vaddr_t)addr+len));
+ round_page((vaddr_t)addr + len));
}
/*
@@ -276,9 +276,10 @@ uvm_fork(p1, p2, shared, stack, stacksize, func, arg)
struct user *up = p2->p_addr;
int rv;
- if (shared == TRUE)
+ if (shared == TRUE) {
+ p2->p_vmspace = NULL;
uvmspace_share(p1, p2); /* share vmspace */
- else
+ } else
p2->p_vmspace = uvmspace_fork(p1->p_vmspace); /* fork vmspace */
/*
@@ -301,11 +302,11 @@ uvm_fork(p1, p2, shared, stack, stacksize, func, arg)
*/
p2->p_stats = &up->u_stats;
memset(&up->u_stats.pstat_startzero, 0,
- (unsigned) ((caddr_t)&up->u_stats.pstat_endzero -
- (caddr_t)&up->u_stats.pstat_startzero));
+ ((caddr_t)&up->u_stats.pstat_endzero -
+ (caddr_t)&up->u_stats.pstat_startzero));
memcpy(&up->u_stats.pstat_startcopy, &p1->p_stats->pstat_startcopy,
- ((caddr_t)&up->u_stats.pstat_endcopy -
- (caddr_t)&up->u_stats.pstat_startcopy));
+ ((caddr_t)&up->u_stats.pstat_endcopy -
+ (caddr_t)&up->u_stats.pstat_startcopy));
/*
* cpu_fork() copy and update the pcb, and make the child ready
@@ -329,9 +330,12 @@ void
uvm_exit(p)
struct proc *p;
{
+ vaddr_t va = (vaddr_t)p->p_addr;
uvmspace_free(p->p_vmspace);
- uvm_km_free(kernel_map, (vaddr_t)p->p_addr, USPACE);
+ p->p_flag &= ~P_INMEM;
+ uvm_fault_unwire(kernel_map, va, va + USPACE);
+ uvm_km_free(kernel_map, va, USPACE);
p->p_addr = NULL;
}
@@ -412,16 +416,15 @@ uvm_scheduler()
int pri;
struct proc *pp;
int ppri;
- UVMHIST_FUNC("uvm_scheduler"); UVMHIST_CALLED(maphist);
loop:
#ifdef DEBUG
while (!enableswap)
- tsleep((caddr_t)&proc0, PVM, "noswap", 0);
+ tsleep(&proc0, PVM, "noswap", 0);
#endif
pp = NULL; /* process to choose */
ppri = INT_MIN; /* its priority */
- for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
+ LIST_FOREACH(p, &allproc, p_list) {
/* is it a runnable swapped out process? */
if (p->p_stat == SRUN && (p->p_flag & P_INMEM) == 0) {
@@ -442,7 +445,7 @@ loop:
* Nothing to do, back to sleep
*/
if ((p = pp) == NULL) {
- tsleep((caddr_t)&proc0, PVM, "scheduler", 0);
+ tsleep(&proc0, PVM, "scheduler", 0);
goto loop;
}
@@ -518,7 +521,7 @@ uvm_swapout_threads()
*/
outp = outp2 = NULL;
outpri = outpri2 = 0;
- for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
+ LIST_FOREACH(p, &allproc, p_list) {
if (!swappable(p))
continue;
switch (p->p_stat) {
@@ -532,7 +535,7 @@ uvm_swapout_threads()
case SSLEEP:
case SSTOP:
if (p->p_slptime >= maxslp) {
- uvm_swapout(p); /* zap! */
+ uvm_swapout(p);
didswap++;
} else if (p->p_slptime > outpri) {
outp = p;
@@ -558,6 +561,7 @@ uvm_swapout_threads()
if (p)
uvm_swapout(p);
}
+ pmap_update();
}
/*
@@ -589,13 +593,6 @@ uvm_swapout(p)
cpu_swapout(p);
/*
- * Unwire the to-be-swapped process's user struct and kernel stack.
- */
- addr = (vaddr_t)p->p_addr;
- uvm_fault_unwire(kernel_map, addr, addr + USPACE); /* !P_INMEM */
- pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map));
-
- /*
* Mark it as (potentially) swapped out.
*/
s = splstatclock();
@@ -605,5 +602,12 @@ uvm_swapout(p)
splx(s);
p->p_swtime = 0;
++uvmexp.swapouts;
+
+ /*
+ * Unwire the to-be-swapped process's user struct and kernel stack.
+ */
+ addr = (vaddr_t)p->p_addr;
+ uvm_fault_unwire(kernel_map, addr, addr + USPACE); /* !P_INMEM */
+ pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map));
}
diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c
index 08f373e54de..aa5895fb37c 100644
--- a/sys/uvm/uvm_km.c
+++ b/sys/uvm/uvm_km.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_km.c,v 1.19 2001/11/07 01:18:01 art Exp $ */
-/* $NetBSD: uvm_km.c,v 1.39 2000/09/13 15:00:25 thorpej Exp $ */
+/* $OpenBSD: uvm_km.c,v 1.20 2001/11/07 02:55:50 art Exp $ */
+/* $NetBSD: uvm_km.c,v 1.40 2000/11/24 07:07:27 chs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -328,12 +328,8 @@ uvm_km_pgremove(uobj, start, end)
vaddr_t curoff;
UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);
- simple_lock(&uobj->vmobjlock); /* lock object */
-
-#ifdef DIAGNOSTIC
- if (__predict_false(uobj->pgops != &aobj_pager))
- panic("uvm_km_pgremove: object %p not an aobj", uobj);
-#endif
+ KASSERT(uobj->pgops == &aobj_pager);
+ simple_lock(&uobj->vmobjlock);
/* choose cheapest traversal */
by_list = (uobj->uo_npages <=
@@ -368,15 +364,14 @@ uvm_km_pgremove(uobj, start, end)
uvm_pagefree(pp);
uvm_unlock_pageq();
}
- /* done */
}
simple_unlock(&uobj->vmobjlock);
return;
loop_by_list:
- for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = ppnext) {
- ppnext = pp->listq.tqe_next;
+ for (pp = TAILQ_FIRST(&uobj->memq); pp != NULL; pp = ppnext) {
+ ppnext = TAILQ_NEXT(pp, listq);
if (pp->offset < start || pp->offset >= end) {
continue;
}
@@ -384,7 +379,6 @@ loop_by_list:
UVMHIST_LOG(maphist," page 0x%x, busy=%d", pp,
pp->flags & PG_BUSY, 0, 0);
- /* now do the actual work */
if (pp->flags & PG_BUSY) {
/* owner must check for this when done */
pp->flags |= PG_RELEASED;
@@ -400,10 +394,8 @@ loop_by_list:
uvm_pagefree(pp);
uvm_unlock_pageq();
}
- /* done */
}
simple_unlock(&uobj->vmobjlock);
- return;
}
@@ -428,13 +420,9 @@ uvm_km_pgremove_intrsafe(uobj, start, end)
vaddr_t curoff;
UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);
+ KASSERT(UVM_OBJ_IS_INTRSAFE_OBJECT(uobj));
simple_lock(&uobj->vmobjlock); /* lock object */
-#ifdef DIAGNOSTIC
- if (__predict_false(UVM_OBJ_IS_INTRSAFE_OBJECT(uobj) == 0))
- panic("uvm_km_pgremove_intrsafe: object %p not intrsafe", uobj);
-#endif
-
/* choose cheapest traversal */
by_list = (uobj->uo_npages <=
((end - start) >> PAGE_SHIFT) * UKM_HASH_PENALTY);
@@ -446,21 +434,15 @@ uvm_km_pgremove_intrsafe(uobj, start, end)
for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
pp = uvm_pagelookup(uobj, curoff);
- if (pp == NULL)
+ if (pp == NULL) {
continue;
+ }
UVMHIST_LOG(maphist," page 0x%x, busy=%d", pp,
pp->flags & PG_BUSY, 0, 0);
-#ifdef DIAGNOSTIC
- if (__predict_false(pp->flags & PG_BUSY))
- panic("uvm_km_pgremove_intrsafe: busy page");
- if (__predict_false(pp->pqflags & PQ_ACTIVE))
- panic("uvm_km_pgremove_intrsafe: active page");
- if (__predict_false(pp->pqflags & PQ_INACTIVE))
- panic("uvm_km_pgremove_intrsafe: inactive page");
-#endif
-
- /* free the page */
+ KASSERT((pp->flags & PG_BUSY) == 0);
+ KASSERT((pp->pqflags & PQ_ACTIVE) == 0);
+ KASSERT((pp->pqflags & PQ_INACTIVE) == 0);
uvm_pagefree(pp);
}
simple_unlock(&uobj->vmobjlock);
@@ -468,29 +450,20 @@ uvm_km_pgremove_intrsafe(uobj, start, end)
loop_by_list:
- for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = ppnext) {
- ppnext = pp->listq.tqe_next;
+ for (pp = TAILQ_FIRST(&uobj->memq); pp != NULL; pp = ppnext) {
+ ppnext = TAILQ_NEXT(pp, listq);
if (pp->offset < start || pp->offset >= end) {
continue;
}
UVMHIST_LOG(maphist," page 0x%x, busy=%d", pp,
pp->flags & PG_BUSY, 0, 0);
-
-#ifdef DIAGNOSTIC
- if (__predict_false(pp->flags & PG_BUSY))
- panic("uvm_km_pgremove_intrsafe: busy page");
- if (__predict_false(pp->pqflags & PQ_ACTIVE))
- panic("uvm_km_pgremove_intrsafe: active page");
- if (__predict_false(pp->pqflags & PQ_INACTIVE))
- panic("uvm_km_pgremove_intrsafe: inactive page");
-#endif
-
- /* free the page */
+ KASSERT((pp->flags & PG_BUSY) == 0);
+ KASSERT((pp->pqflags & PQ_ACTIVE) == 0);
+ KASSERT((pp->pqflags & PQ_INACTIVE) == 0);
uvm_pagefree(pp);
}
simple_unlock(&uobj->vmobjlock);
- return;
}
@@ -518,14 +491,9 @@ uvm_km_kmemalloc(map, obj, size, flags)
struct vm_page *pg;
UVMHIST_FUNC("uvm_km_kmemalloc"); UVMHIST_CALLED(maphist);
-
UVMHIST_LOG(maphist," (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
- map, obj, size, flags);
-#ifdef DIAGNOSTIC
- /* sanity check */
- if (__predict_false(vm_map_pmap(map) != pmap_kernel()))
- panic("uvm_km_kmemalloc: invalid map");
-#endif
+ map, obj, size, flags);
+ KASSERT(vm_map_pmap(map) == pmap_kernel());
/*
* setup for call
@@ -554,6 +522,7 @@ uvm_km_kmemalloc(map, obj, size, flags)
UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
return(kva);
}
+
/*
* recover object offset from virtual address
*/
@@ -597,6 +566,7 @@ uvm_km_kmemalloc(map, obj, size, flags)
* (because if pmap_enter wants to allocate out of kmem_object
* it will need to lock it itself!)
*/
+
if (UVM_OBJ_IS_INTRSAFE_OBJECT(obj)) {
pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
VM_PROT_ALL);
@@ -609,7 +579,6 @@ uvm_km_kmemalloc(map, obj, size, flags)
offset += PAGE_SIZE;
size -= PAGE_SIZE;
}
-
UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
return(kva);
}
@@ -624,7 +593,6 @@ uvm_km_free(map, addr, size)
vaddr_t addr;
vsize_t size;
{
-
uvm_unmap(map, trunc_page(addr), round_page(addr+size));
}
@@ -670,11 +638,7 @@ uvm_km_alloc1(map, size, zeroit)
UVMHIST_FUNC("uvm_km_alloc1"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist,"(map=0x%x, size=0x%x)", map, size,0,0);
-
-#ifdef DIAGNOSTIC
- if (vm_map_pmap(map) != pmap_kernel())
- panic("uvm_km_alloc1");
-#endif
+ KASSERT(vm_map_pmap(map) == pmap_kernel());
size = round_page(size);
kva = vm_map_min(map); /* hint */
@@ -771,11 +735,7 @@ uvm_km_valloc(map, size)
UVMHIST_FUNC("uvm_km_valloc"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);
-
-#ifdef DIAGNOSTIC
- if (__predict_false(vm_map_pmap(map) != pmap_kernel()))
- panic("uvm_km_valloc");
-#endif
+ KASSERT(vm_map_pmap(map) == pmap_kernel());
size = round_page(size);
kva = vm_map_min(map); /* hint */
@@ -814,11 +774,7 @@ uvm_km_valloc_prefer_wait(map, size, prefer)
UVMHIST_FUNC("uvm_km_valloc_prefer_wait"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);
-
-#ifdef DIAGNOSTIC
- if (__predict_false(vm_map_pmap(map) != pmap_kernel()))
- panic("uvm_km_valloc_wait");
-#endif
+ KASSERT(vm_map_pmap(map) == pmap_kernel());
size = round_page(size);
if (size > vm_map_max(map) - vm_map_min(map))
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index d3d00972295..46293b3b882 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_map.c,v 1.28 2001/11/07 01:18:01 art Exp $ */
-/* $NetBSD: uvm_map.c,v 1.81 2000/09/13 15:00:25 thorpej Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.29 2001/11/07 02:55:50 art Exp $ */
+/* $NetBSD: uvm_map.c,v 1.85 2000/11/25 06:27:59 chs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -149,9 +149,10 @@ vaddr_t uvm_maxkaddr;
*
* => map need not be locked (protected by hint_lock).
*/
-#define SAVE_HINT(map,value) do { \
+#define SAVE_HINT(map,check,value) do { \
simple_lock(&(map)->hint_lock); \
- (map)->hint = (value); \
+ if ((map)->hint == (check)) \
+ (map)->hint = (value); \
simple_unlock(&(map)->hint_lock); \
} while (0)
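
SAVE_HINT() grows a third argument: the new hint is installed only if the
current hint still equals the value the caller last observed, so a fresher
hint set concurrently is not clobbered. This also replaces the open-coded
"XXX: need SAVE_HINT with three parms" sequence removed further down. A
sketch of that compare-and-set shape, with a pthread mutex standing in for
simple_lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hint_lock = PTHREAD_MUTEX_INITIALIZER;
static void *hint;

/* compare-and-set under the lock, as in SAVE_HINT(map, check, value) */
static void
save_hint(void *check, void *value)
{
        pthread_mutex_lock(&hint_lock);
        if (hint == check)
                hint = value;
        pthread_mutex_unlock(&hint_lock);
}

int
main(void)
{
        int a, b;

        hint = &a;
        save_hint(&b, NULL);    /* stale check value: hint unchanged */
        printf("%s\n", hint == &a ? "kept" : "clobbered");
        save_hint(&a, &b);      /* check matches: hint advances */
        printf("%s\n", hint == &b ? "updated" : "unchanged");
        return 0;
}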
@@ -178,6 +179,8 @@ static vm_map_entry_t uvm_mapent_alloc __P((vm_map_t));
static void uvm_mapent_copy __P((vm_map_entry_t,vm_map_entry_t));
static void uvm_mapent_free __P((vm_map_entry_t));
static void uvm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
+static void uvm_map_reference_amap __P((vm_map_entry_t, int));
+static void uvm_map_unreference_amap __P((vm_map_entry_t, int));
/*
* local inlines
@@ -277,6 +280,33 @@ uvm_map_entry_unwire(map, entry)
uvm_fault_unwire_locked(map, entry->start, entry->end);
}
+
+/*
+ * wrapper for calling amap_ref()
+ */
+static __inline void
+uvm_map_reference_amap(entry, flags)
+ vm_map_entry_t entry;
+ int flags;
+{
+ amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
+ (entry->end - entry->start) >> PAGE_SHIFT, flags);
+}
+
+
+/*
+ * wrapper for calling amap_unref()
+ */
+static __inline void
+uvm_map_unreference_amap(entry, flags)
+ vm_map_entry_t entry;
+ int flags;
+{
+ amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff,
+ (entry->end - entry->start) >> PAGE_SHIFT, flags);
+}
+
+
/*
* uvm_map_init: init mapping system at boot time. note that we allocate
* and init the static pool of vm_map_entry_t's for the kernel here.
@@ -362,7 +392,7 @@ void uvm_map_clip_start(map, entry, start)
new_entry = uvm_mapent_alloc(map);
uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
-
+
new_entry->end = start;
new_adj = start - new_entry->start;
if (entry->object.uvm_obj)
@@ -374,7 +404,7 @@ void uvm_map_clip_start(map, entry, start)
}
uvm_map_entry_link(map, entry->prev, new_entry);
-
+
if (UVM_ET_ISSUBMAP(entry)) {
/* ... unlikely to happen, but play it safe */
uvm_map_reference(new_entry->object.sub_map);
@@ -546,11 +576,7 @@ uvm_map(map, startp, size, uobj, uoffset, align, flags)
uoffset = 0;
} else {
if (uoffset == UVM_UNKNOWN_OFFSET) {
-#ifdef DIAGNOSTIC
- if (UVM_OBJ_IS_KERN_OBJECT(uobj) == 0)
- panic("uvm_map: unknown offset with "
- "non-kernel object");
-#endif
+ KASSERT(UVM_OBJ_IS_KERN_OBJECT(uobj));
uoffset = *startp - vm_map_min(kernel_map);
}
}
@@ -590,12 +616,12 @@ uvm_map(map, startp, size, uobj, uoffset, align, flags)
* look at refs since we don't care about its exact value.
* if it is one (i.e. we have only reference) it will stay there
*/
-
+
if (prev_entry->aref.ar_amap &&
amap_refs(prev_entry->aref.ar_amap) != 1) {
goto step3;
}
-
+
/* got it! */
UVMCNT_INCR(map_backmerge);
@@ -767,7 +793,7 @@ uvm_map_lookup_entry(map, address, entry)
*/
*entry = cur;
- SAVE_HINT(map, cur);
+ SAVE_HINT(map, map->hint, cur);
UVMHIST_LOG(maphist,"<- search got it (0x%x)",
cur, 0, 0, 0);
return (TRUE);
@@ -777,7 +803,7 @@ uvm_map_lookup_entry(map, address, entry)
cur = cur->next;
}
*entry = cur->prev;
- SAVE_HINT(map, *entry);
+ SAVE_HINT(map, map->hint, *entry);
UVMHIST_LOG(maphist,"<- failed!",0,0,0,0);
return (FALSE);
}
@@ -812,22 +838,17 @@ uvm_map_findspace(map, hint, length, result, uobj, uoffset, align, flags)
UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist, "(map=0x%x, hint=0x%x, len=%d, flags=0x%x)",
- map, hint, length, flags);
-
-#ifdef DIAGNOSTIC
- if ((align & (align - 1)) != 0)
- panic("uvm_map_findspace: alignment not power of 2");
- if ((flags & UVM_FLAG_FIXED) != 0 && align != 0)
- panic("uvm_map_findslace: fixed and alignment both specified");
-#endif
+ map, hint, length, flags);
+ KASSERT((align & (align - 1)) == 0);
+ KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);
/*
* remember the original hint. if we are aligning, then we
* may have to try again with no alignment constraint if
* we fail the first time.
*/
- orig_hint = hint;
+ orig_hint = hint;
if (hint < map->min_offset) { /* check ranges ... */
if (flags & UVM_FLAG_FIXED) {
UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0);
@@ -913,7 +934,7 @@ uvm_map_findspace(map, hint, length, result, uobj, uoffset, align, flags)
return(NULL); /* only one shot at it ... */
}
}
- SAVE_HINT(map, entry);
+ SAVE_HINT(map, map->hint, entry);
*result = hint;
UVMHIST_LOG(maphist,"<- got it! (result=0x%x)", hint, 0,0,0);
return (entry);
@@ -956,7 +977,7 @@ uvm_unmap_remove(map, start, end, entry_list)
entry = first_entry;
UVM_MAP_CLIP_START(map, entry, start);
/* critical! prevents stale hint */
- SAVE_HINT(map, entry->prev);
+ SAVE_HINT(map, entry, entry->prev);
} else {
entry = first_entry->next;
@@ -1013,11 +1034,7 @@ uvm_unmap_remove(map, start, end, entry_list)
*/
if (UVM_ET_ISOBJ(entry) &&
UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
-#ifdef DIAGNOSTIC
- if (vm_map_pmap(map) != pmap_kernel())
- panic("uvm_unmap_remove: kernel object "
- "mapped by non-kernel map");
-#endif
+ KASSERT(vm_map_pmap(map) == pmap_kernel());
/*
* note: kernel object mappings are currently used in
@@ -1088,11 +1105,7 @@ uvm_unmap_remove(map, start, end, entry_list)
UVMHIST_LOG(maphist, " removed map entry 0x%x", entry, 0, 0,0);
/* critical! prevents stale hint */
- /* XXX: need SAVE_HINT with three parms */
- simple_lock(&map->hint_lock);
- if (map->hint == entry)
- map->hint = entry->prev;
- simple_unlock(&map->hint_lock);
+ SAVE_HINT(map, entry, entry->prev);
uvm_map_entry_unlink(map, entry);
map->size -= len;
@@ -1118,24 +1131,15 @@ uvm_unmap_remove(map, start, end, entry_list)
*/
void
-uvm_unmap_detach(first_entry, amap_unref_flags)
+uvm_unmap_detach(first_entry, flags)
vm_map_entry_t first_entry;
- int amap_unref_flags;
+ int flags;
{
vm_map_entry_t next_entry;
UVMHIST_FUNC("uvm_unmap_detach"); UVMHIST_CALLED(maphist);
while (first_entry) {
-
-#ifdef DIAGNOSTIC
- /*
- * sanity check
- */
- /* was part of vm_map_entry_delete() */
- if (VM_MAPENT_ISWIRED(first_entry))
- panic("unmap: still wired!");
-#endif
-
+ KASSERT(!VM_MAPENT_ISWIRED(first_entry));
UVMHIST_LOG(maphist,
" detach 0x%x: amap=0x%x, obj=0x%x, submap?=%d",
first_entry, first_entry->aref.ar_amap,
@@ -1147,12 +1151,12 @@ uvm_unmap_detach(first_entry, amap_unref_flags)
*/
if (first_entry->aref.ar_amap)
- amap_unref(first_entry, amap_unref_flags);
+ uvm_map_unreference_amap(first_entry, flags);
/*
* drop reference to our backing object, if we've got one
*/
-
+
if (UVM_ET_ISSUBMAP(first_entry)) {
/* ... unlikely to happen, but play it safe */
uvm_map_deallocate(first_entry->object.sub_map);
@@ -1163,19 +1167,11 @@ uvm_unmap_detach(first_entry, amap_unref_flags)
pgo_detach(first_entry->object.uvm_obj);
}
- /*
- * next entry
- */
next_entry = first_entry->next;
uvm_mapent_free(first_entry);
first_entry = next_entry;
}
-
- /*
- * done!
- */
UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
- return;
}
/*
@@ -1201,25 +1197,25 @@ uvm_map_reserve(map, size, offset, align, raddr)
vaddr_t *raddr; /* IN:hint, OUT: reserved VA */
{
UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);
-
+
UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, offset=0x%x,addr=0x%x)",
map,size,offset,raddr);
-
+
size = round_page(size);
if (*raddr < vm_map_min(map))
*raddr = vm_map_min(map); /* hint */
-
+
/*
* reserve some virtual space.
*/
-
+
if (uvm_map(map, raddr, size, NULL, offset, 0,
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != KERN_SUCCESS) {
UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
return (FALSE);
}
-
+
UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr,0,0,0);
return (TRUE);
}
@@ -1243,17 +1239,15 @@ uvm_map_replace(map, start, end, newents, nnewents)
int nnewents;
{
vm_map_entry_t oldent, last;
- UVMHIST_FUNC("uvm_map_replace");
- UVMHIST_CALLED(maphist);
/*
* first find the blank map entry at the specified address
*/
-
+
if (!uvm_map_lookup_entry(map, start, &oldent)) {
return(FALSE);
}
-
+
/*
* check to make sure we have a proper blank entry
*/
@@ -1306,7 +1300,7 @@ uvm_map_replace(map, start, end, newents, nnewents)
last = newents->prev; /* we expect this */
/* critical: flush stale hints out of map */
- SAVE_HINT(map, newents);
+ SAVE_HINT(map, map->hint, newents);
if (map->first_free == oldent)
map->first_free = last;
@@ -1319,7 +1313,7 @@ uvm_map_replace(map, start, end, newents, nnewents)
} else {
/* critical: flush stale hints out of map */
- SAVE_HINT(map, oldent->prev);
+ SAVE_HINT(map, map->hint, oldent->prev);
if (map->first_free == oldent)
map->first_free = oldent->prev;
@@ -1368,23 +1362,20 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
vsize_t elen;
int nchain, error, copy_ok;
UVMHIST_FUNC("uvm_map_extract"); UVMHIST_CALLED(maphist);
+
UVMHIST_LOG(maphist,"(srcmap=0x%x,start=0x%x, len=0x%x", srcmap, start,
len,0);
UVMHIST_LOG(maphist," ...,dstmap=0x%x, flags=0x%x)", dstmap,flags,0,0);
-#ifdef DIAGNOSTIC
/*
* step 0: sanity check: start must be on a page boundary, length
* must be page sized. can't ask for CONTIG/QREF if you asked for
* REMOVE.
*/
- if ((start & PAGE_MASK) || (len & PAGE_MASK))
- panic("uvm_map_extract1");
- if (flags & UVM_EXTRACT_REMOVE)
- if (flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF))
- panic("uvm_map_extract2");
-#endif
+ KASSERT((start & PAGE_MASK) == 0 && (len & PAGE_MASK) == 0);
+ KASSERT((flags & UVM_EXTRACT_REMOVE) == 0 ||
+ (flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF)) == 0);
/*
* step 1: reserve space in the target map for the extracted area
@@ -1396,7 +1387,6 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
*dstaddrp = dstaddr; /* pass address back to caller */
UVMHIST_LOG(maphist, " dstaddr=0x%x", dstaddr,0,0,0);
-
/*
* step 2: setup for the extraction process loop by init'ing the
* map entry chain, locking src map, and looking up the first useful
@@ -1413,6 +1403,7 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
/* "start" is within an entry */
if (flags & UVM_EXTRACT_QREF) {
+
/*
* for quick references we don't clip the entry, so
* the entry may map space "before" the starting
@@ -1420,19 +1411,21 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
* (which can be non-zero only the first time
* through the "while" loop in step 3).
*/
+
fudge = start - entry->start;
} else {
+
/*
* normal reference: we clip the map to fit (thus
* fudge is zero)
*/
+
UVM_MAP_CLIP_START(srcmap, entry, start);
- SAVE_HINT(srcmap, entry->prev);
+ SAVE_HINT(srcmap, srcmap->hint, entry->prev);
fudge = 0;
}
-
} else {
-
+
/* "start" is not within an entry ... skip to next entry */
if (flags & UVM_EXTRACT_CONTIG) {
error = EINVAL;
@@ -1442,18 +1435,18 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
entry = entry->next;
fudge = 0;
}
+
/* save values from srcmap for step 6 */
orig_entry = entry;
orig_fudge = fudge;
-
/*
* step 3: now start looping through the map entries, extracting
* as we go.
*/
while (entry->start < end && entry != &srcmap->header) {
-
+
/* if we are not doing a quick reference, clip it */
if ((flags & UVM_EXTRACT_QREF) == 0)
UVM_MAP_CLIP_END(srcmap, entry, end);
@@ -1469,6 +1462,7 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
error = ENOMEM;
goto bad;
}
+
/* amap_copy could clip (during chunk)! update fudge */
if (fudge) {
fudge = fudge - (entry->start - oldstart);
@@ -1513,7 +1507,7 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
if (newentry->aref.ar_amap) {
newentry->aref.ar_pageoff =
entry->aref.ar_pageoff + (fudge >> PAGE_SHIFT);
- amap_ref(newentry, AMAP_SHARED |
+ uvm_map_reference_amap(newentry, AMAP_SHARED |
((flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0));
} else {
newentry->aref.ar_pageoff = 0;
@@ -1540,7 +1534,6 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
fudge = 0;
}
-
/*
* step 4: close off chain (in format expected by uvm_map_replace)
*/
@@ -1548,16 +1541,14 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
if (chain)
chain->prev = endchain;
-
/*
* step 5: attempt to lock the dest map so we can pmap_copy.
* note usage of copy_ok:
* 1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5)
* 0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7
*/
-
- if (srcmap == dstmap || vm_map_lock_try(dstmap) == TRUE) {
+ if (srcmap == dstmap || vm_map_lock_try(dstmap) == TRUE) {
copy_ok = 1;
if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
nchain)) {
@@ -1566,15 +1557,11 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
error = EIO;
goto bad;
}
-
} else {
-
copy_ok = 0;
/* replace deferred until step 7 */
-
}
-
/*
* step 6: traverse the srcmap a second time to do the following:
* - if we got a lock on the dstmap do pmap_copy
@@ -1586,7 +1573,7 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
/* purge possible stale hints from srcmap */
if (flags & UVM_EXTRACT_REMOVE) {
- SAVE_HINT(srcmap, orig_entry->prev);
+ SAVE_HINT(srcmap, srcmap->hint, orig_entry->prev);
if (srcmap->first_free->start >= start)
srcmap->first_free = orig_entry->prev;
}
@@ -1596,7 +1583,6 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
deadentry = NULL; /* for UVM_EXTRACT_REMOVE */
while (entry->start < end && entry != &srcmap->header) {
-
if (copy_ok) {
oldoffset = (entry->start + fudge) - start;
elen = min(end, entry->end) -
@@ -1628,6 +1614,7 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
* unlock dstmap. we will dispose of deadentry in
* step 7 if needed
*/
+
if (copy_ok && srcmap != dstmap)
vm_map_unlock(dstmap);
@@ -1657,10 +1644,6 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
goto bad2;
}
}
-
- /*
- * done!
- */
return(0);
/*
@@ -1694,7 +1677,7 @@ bad2: /* src already unlocked */
* => submap must have been init'd and have a zero reference count.
* [need not be locked as we don't actually reference it]
*/
-
+
int
uvm_map_submap(map, start, end, submap)
vm_map_t map, submap;
@@ -1702,17 +1685,15 @@ uvm_map_submap(map, start, end, submap)
{
vm_map_entry_t entry;
int result;
- UVMHIST_FUNC("uvm_map_submap"); UVMHIST_CALLED(maphist);
vm_map_lock(map);
VM_MAP_RANGE_CHECK(map, start, end);
-
+
if (uvm_map_lookup_entry(map, start, &entry)) {
UVM_MAP_CLIP_START(map, entry, start);
UVM_MAP_CLIP_END(map, entry, end); /* to be safe */
- }
- else {
+ } else {
entry = NULL;
}
@@ -1720,10 +1701,6 @@ uvm_map_submap(map, start, end, submap)
entry->start == start && entry->end == end &&
entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
!UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {
-
- /*
- * doit!
- */
entry->etype |= UVM_ET_SUBMAP;
entry->object.sub_map = submap;
entry->offset = 0;
@@ -1733,7 +1710,6 @@ uvm_map_submap(map, start, end, submap)
result = KERN_INVALID_ARGUMENT;
}
vm_map_unlock(map);
-
return(result);
}
@@ -1760,12 +1736,12 @@ uvm_map_protect(map, start, end, new_prot, set_max)
int rv = KERN_SUCCESS;
UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_prot=0x%x)",
- map, start, end, new_prot);
-
+ map, start, end, new_prot);
+
vm_map_lock(map);
VM_MAP_RANGE_CHECK(map, start, end);
-
+
if (uvm_map_lookup_entry(map, start, &entry)) {
UVM_MAP_CLIP_START(map, entry, start);
} else {
@@ -1795,7 +1771,7 @@ uvm_map_protect(map, start, end, new_prot, set_max)
while ((current != &map->header) && (current->start < end)) {
vm_prot_t old_prot;
-
+
UVM_MAP_CLIP_END(map, current, end);
old_prot = current->protection;
@@ -1848,7 +1824,7 @@ uvm_map_protect(map, start, end, new_prot, set_max)
current = current->next;
}
-
+
out:
vm_map_unlock(map);
UVMHIST_LOG(maphist, "<- done, rv=%d",rv,0,0,0);
@@ -1895,15 +1871,13 @@ uvm_map_inherit(map, start, end, new_inheritance)
if (uvm_map_lookup_entry(map, start, &temp_entry)) {
entry = temp_entry;
UVM_MAP_CLIP_START(map, entry, start);
- } else {
+ } else {
entry = temp_entry->next;
}
while ((entry != &map->header) && (entry->start < end)) {
UVM_MAP_CLIP_END(map, entry, end);
-
entry->inheritance = new_inheritance;
-
entry = entry->next;
}
@@ -1931,9 +1905,7 @@ uvm_map_advice(map, start, end, new_advice)
map, start, end, new_advice);
vm_map_lock(map);
-
VM_MAP_RANGE_CHECK(map, start, end);
-
if (uvm_map_lookup_entry(map, start, &temp_entry)) {
entry = temp_entry;
UVM_MAP_CLIP_START(map, entry, start);
@@ -1960,10 +1932,7 @@ uvm_map_advice(map, start, end, new_advice)
UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
return (KERN_INVALID_ARGUMENT);
}
-
-
entry->advice = new_advice;
-
entry = entry->next;
}
@@ -2000,12 +1969,8 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
#endif
UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_pageable=0x%x)",
- map, start, end, new_pageable);
-
-#ifdef DIAGNOSTIC
- if ((map->flags & VM_MAP_PAGEABLE) == 0)
- panic("uvm_map_pageable: map %p not pageable", map);
-#endif
+ map, start, end, new_pageable);
+ KASSERT(map->flags & VM_MAP_PAGEABLE);
if ((lockflags & UVM_LK_ENTER) == 0)
vm_map_lock(map);
@@ -2023,7 +1988,7 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
if (uvm_map_lookup_entry(map, start, &start_entry) == FALSE) {
if ((lockflags & UVM_LK_EXIT) == 0)
vm_map_unlock(map);
-
+
UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
return (KERN_INVALID_ADDRESS);
}
@@ -2035,10 +2000,12 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
if (new_pageable) { /* unwire */
UVM_MAP_CLIP_START(map, entry, start);
+
/*
* unwiring. first ensure that the range to be unwired is
* really wired down and that there are no holes.
*/
+
while ((entry != &map->header) && (entry->start < end)) {
if (entry->wired_count == 0 ||
(entry->end < end &&
@@ -2058,6 +2025,7 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
* regardless of the number of mlock calls made on that
* region.
*/
+
entry = start_entry;
while ((entry != &map->header) && (entry->start < end)) {
UVM_MAP_CLIP_END(map, entry, end);
@@ -2069,10 +2037,6 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
vm_map_unlock(map);
UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
return(KERN_SUCCESS);
-
- /*
- * end of unwire case!
- */
}
/*
@@ -2098,13 +2062,15 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
while ((entry != &map->header) && (entry->start < end)) {
if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
- /*
+
+ /*
* perform actions of vm_map_lookup that need the
* write lock on the map: create an anonymous map
* for a copy-on-write region, or an anonymous map
* for a zero-fill region. (XXXCDC: submap case
* ok?)
*/
+
if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
if (UVM_ET_ISNEEDSCOPY(entry) &&
((entry->protection & VM_PROT_WRITE) ||
@@ -2122,14 +2088,17 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
/*
* Check for holes
*/
+
if (entry->protection == VM_PROT_NONE ||
(entry->end < end &&
(entry->next == &map->header ||
entry->next->start > entry->end))) {
+
/*
* found one. amap creation actions do not need to
* be undone, but the wired counts need to be restored.
*/
+
while (entry != &map->header && entry->end > start) {
entry->wired_count--;
entry = entry->prev;
@@ -2171,9 +2140,11 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
}
if (rv) { /* failed? */
+
/*
* Get back to an exclusive (write) lock.
*/
+
vm_map_upgrade(map);
vm_map_unbusy(map);
@@ -2186,6 +2157,7 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
* first drop the wiring count on all the entries
* which haven't actually been wired yet.
*/
+
failed_entry = entry;
while (entry != &map->header && entry->start < end) {
entry->wired_count--;
@@ -2196,6 +2168,7 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
* now, unwire all the entries that were successfully
* wired above.
*/
+
entry = start_entry;
while (entry != failed_entry) {
entry->wired_count--;
@@ -2214,9 +2187,11 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
vm_map_unbusy(map);
vm_map_unlock_read(map);
} else {
+
/*
* Get back to an exclusive (write) lock.
*/
+
vm_map_upgrade(map);
vm_map_unbusy(map);
}
@@ -2249,10 +2224,7 @@ uvm_map_pageable_all(map, flags, limit)
UVMHIST_FUNC("uvm_map_pageable_all"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist,"(map=0x%x,flags=0x%x)", map, flags, 0, 0);
-#ifdef DIAGNOSTIC
- if ((map->flags & VM_MAP_PAGEABLE) == 0)
- panic("uvm_map_pageable_all: map %p not pageable", map);
-#endif
+ KASSERT(map->flags & VM_MAP_PAGEABLE);
vm_map_lock(map);
@@ -2483,13 +2455,11 @@ uvm_map_clean(map, start, end, flags)
vsize_t size;
int rv, error, refs;
UVMHIST_FUNC("uvm_map_clean"); UVMHIST_CALLED(maphist);
- UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,flags=0x%x)",
- map, start, end, flags);
-#ifdef DIAGNOSTIC
- if ((flags & (PGO_FREE|PGO_DEACTIVATE)) == (PGO_FREE|PGO_DEACTIVATE))
- panic("uvm_map_clean: FREE and DEACTIVATE");
-#endif
+ UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,flags=0x%x)",
+ map, start, end, flags);
+ KASSERT((flags & (PGO_FREE|PGO_DEACTIVATE)) !=
+ (PGO_FREE|PGO_DEACTIVATE));
vm_map_lock_read(map);
VM_MAP_RANGE_CHECK(map, start, end);
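
The KASSERT above rejects exactly the one illegal flag combination; either
flag alone is permitted. Spelled out:

	/* flags & (PGO_FREE|PGO_DEACTIVATE)	check result		*/
	/* neither set				!= (both): passes	*/
	/* PGO_FREE only			!= (both): passes	*/
	/* PGO_DEACTIVATE only			!= (both): passes	*/
	/* both set				== (both): panics	*/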
@@ -2501,6 +2471,7 @@ uvm_map_clean(map, start, end, flags)
/*
* Make a first pass to check for holes.
*/
+
for (current = entry; current->start < end; current = current->next) {
if (UVM_ET_ISSUBMAP(current)) {
vm_map_unlock_read(map);
@@ -2518,11 +2489,7 @@ uvm_map_clean(map, start, end, flags)
for (current = entry; current->start < end; current = current->next) {
amap = current->aref.ar_amap; /* top layer */
uobj = current->object.uvm_obj; /* bottom layer */
-
-#ifdef DIAGNOSTIC
- if (start < current->start)
- panic("uvm_map_clean: hole");
-#endif
+ KASSERT(start >= current->start);
/*
* No amap cleaning necessary if:
@@ -2531,6 +2498,7 @@ uvm_map_clean(map, start, end, flags)
*
* (2) We're not deactivating or freeing pages.
*/
+
if (amap == NULL ||
(flags & (PGO_DEACTIVATE|PGO_FREE)) == 0)
goto flush_object;
@@ -2560,12 +2528,14 @@ uvm_map_clean(map, start, end, flags)
}
switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
+
/*
* XXX In these first 3 cases, we always just
* XXX deactivate the page. We may want to
* XXX handle the different cases more
* XXX specifically, in the future.
*/
+
case PGO_CLEANIT|PGO_FREE:
case PGO_CLEANIT|PGO_DEACTIVATE:
case PGO_DEACTIVATE:
@@ -2584,23 +2554,14 @@ uvm_map_clean(map, start, end, flags)
* by the anon (may simply be loaned to the
* anon).
*/
+
if ((pg->pqflags & PQ_ANON) == 0) {
-#ifdef DIAGNOSTIC
- if (pg->uobject != NULL)
- panic("uvm_map_clean: "
- "page anon vs. object "
- "inconsistency");
-#endif
+ KASSERT(pg->uobject == NULL);
uvm_unlock_pageq();
simple_unlock(&anon->an_lock);
continue;
}
-
-#ifdef DIAGNOSTIC
- if (pg->uanon != anon)
- panic("uvm_map_clean: anon "
- "inconsistency");
-#endif
+ KASSERT(pg->uanon == anon);
/* zap all mappings for the page. */
pmap_page_protect(pg, VM_PROT_NONE);
@@ -2613,10 +2574,12 @@ uvm_map_clean(map, start, end, flags)
continue;
case PGO_FREE:
+
/*
* If there are multiple references to
* the amap, just deactivate the page.
*/
+
if (amap_refs(amap) > 1)
goto deactivate_it;
@@ -2661,7 +2624,6 @@ uvm_map_clean(map, start, end, flags)
}
start += size;
}
-
vm_map_unlock_read(map);
return (error);
}
@@ -2686,14 +2648,12 @@ uvm_map_checkprot(map, start, end, protection)
if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
return(FALSE);
}
-
entry = tmp_entry;
-
while (start < end) {
if (entry == &map->header) {
return(FALSE);
}
-
+
/*
* no holes allowed
*/
@@ -2711,7 +2671,7 @@ uvm_map_checkprot(map, start, end, protection)
}
/* go to next entry */
-
+
start = entry->end;
entry = entry->next;
}
@@ -2794,11 +2754,11 @@ uvmspace_unshare(p)
struct proc *p;
{
struct vmspace *nvm, *ovm = p->p_vmspace;
-
+
if (ovm->vm_refcnt == 1)
/* nothing to do: vmspace isn't shared in the first place */
return;
-
+
/* make a new vmspace, still holding old one */
nvm = uvmspace_fork(ovm);
@@ -3007,7 +2967,7 @@ uvmspace_fork(vm1)
*/
if (new_entry->aref.ar_amap)
/* share reference */
- amap_ref(new_entry, AMAP_SHARED);
+ uvm_map_reference_amap(new_entry, AMAP_SHARED);
if (new_entry->object.uvm_obj &&
new_entry->object.uvm_obj->pgops->pgo_reference)
@@ -3046,7 +3006,7 @@ uvmspace_fork(vm1)
uvm_mapent_copy(old_entry, new_entry);
if (new_entry->aref.ar_amap)
- amap_ref(new_entry, 0);
+ uvm_map_reference_amap(new_entry, 0);
if (new_entry->object.uvm_obj &&
new_entry->object.uvm_obj->pgops->pgo_reference)
@@ -3060,7 +3020,7 @@ uvmspace_fork(vm1)
(UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
uvm_map_entry_link(new_map, new_map->header.prev,
new_entry);
-
+
/*
* the new entry will need an amap. it will either
* need to be copied from the old entry or created
@@ -3104,7 +3064,7 @@ uvmspace_fork(vm1)
/* XXXCDC: M_WAITOK ... ok? */
}
}
-
+
/*
* if the parent's entry is wired down, then the
* parent process does not want page faults on
@@ -3186,7 +3146,7 @@ uvmspace_fork(vm1)
new_entry->start,
(old_entry->end - old_entry->start),
old_entry->start);
-
+
/*
* protect the child's mappings if necessary
*/
@@ -3227,19 +3187,6 @@ uvmspace_fork(vm1)
*/
/*
- * uvm_map_print: print out a map
- */
-
-void
-uvm_map_print(map, full)
- vm_map_t map;
- boolean_t full;
-{
-
- uvm_map_printit(map, full, printf);
-}
-
-/*
* uvm_map_printit: actually prints the map
*/
@@ -3268,9 +3215,11 @@ uvm_map_printit(map, full, pr)
entry = entry->next) {
(*pr)(" - %p: 0x%lx->0x%lx: obj=%p/0x%llx, amap=%p/%d\n",
entry, entry->start, entry->end, entry->object.uvm_obj,
- (long long)entry->offset, entry->aref.ar_amap, entry->aref.ar_pageoff);
+ (long long)entry->offset, entry->aref.ar_amap,
+ entry->aref.ar_pageoff);
(*pr)(
-"\tsubmap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, wc=%d, adv=%d\n",
+ "\tsubmap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, "
+ "wc=%d, adv=%d\n",
(entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
(entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
(entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
@@ -3280,19 +3229,6 @@ uvm_map_printit(map, full, pr)
}
/*
- * uvm_object_print: print out an object
- */
-
-void
-uvm_object_print(uobj, full)
- struct uvm_object *uobj;
- boolean_t full;
-{
-
- uvm_object_printit(uobj, full, printf);
-}
-
-/*
* uvm_object_printit: actually prints the object
*/
@@ -3319,7 +3255,7 @@ uvm_object_printit(uobj, full, pr)
for (pg = TAILQ_FIRST(&uobj->memq);
pg != NULL;
pg = TAILQ_NEXT(pg, listq), cnt++) {
- (*pr)("<%p,0x%lx> ", pg, pg->offset);
+ (*pr)("<%p,0x%llx> ", pg, (long long)pg->offset);
if ((cnt % 3) == 2) {
(*pr)("\n ");
}
@@ -3336,19 +3272,6 @@ const char page_pqflagbits[] =
"\20\1FREE\2INACTIVE\3ACTIVE\4LAUNDRY\5ANON\6AOBJ";
/*
- * uvm_page_print: print out a page
- */
-
-void
-uvm_page_print(pg, full)
- struct vm_page *pg;
- boolean_t full;
-{
-
- uvm_page_printit(pg, full, printf);
-}
-
-/*
* uvm_page_printit: actually print the page
*/
@@ -3358,7 +3281,7 @@ uvm_page_printit(pg, full, pr)
boolean_t full;
int (*pr) __P((const char *, ...));
{
- struct vm_page *lcv;
+ struct vm_page *tpg;
struct uvm_object *uobj;
struct pglist *pgl;
char pgbuf[128];
@@ -3388,7 +3311,7 @@ uvm_page_printit(pg, full, pr)
if ((pg->pqflags & PQ_FREE) == 0) {
if (pg->pqflags & PQ_ANON) {
if (pg->uanon == NULL || pg->uanon->u.an_page != pg)
- (*pr)(" >>> ANON DOES NOT POINT HERE <<< (%p)\n",
+ (*pr)(" >>> ANON DOES NOT POINT HERE <<< (%p)\n",
(pg->uanon) ? pg->uanon->u.an_page : NULL);
else
(*pr)(" anon backpointer is OK\n");
@@ -3396,11 +3319,12 @@ uvm_page_printit(pg, full, pr)
uobj = pg->uobject;
if (uobj) {
(*pr)(" checking object list\n");
- for (lcv = uobj->memq.tqh_first ; lcv ;
- lcv = lcv->listq.tqe_next) {
- if (lcv == pg) break;
+ TAILQ_FOREACH(tpg, &uobj->memq, listq) {
+ if (tpg == pg) {
+ break;
+ }
}
- if (lcv)
+ if (tpg)
(*pr)(" page found on object list\n");
else
(*pr)(" >>> PAGE NOT FOUND ON OBJECT LIST! <<<\n");
@@ -3413,21 +3337,23 @@ uvm_page_printit(pg, full, pr)
int fl = uvm_page_lookup_freelist(pg);
pgl = &uvm.page_free[fl].pgfl_queues[((pg)->flags & PG_ZERO) ?
PGFL_ZEROS : PGFL_UNKNOWN];
- }
- else if (pg->pqflags & PQ_INACTIVE)
- pgl = (pg->pqflags & PQ_SWAPBACKED) ?
+ } else if (pg->pqflags & PQ_INACTIVE) {
+ pgl = (pg->pqflags & PQ_SWAPBACKED) ?
&uvm.page_inactive_swp : &uvm.page_inactive_obj;
- else if (pg->pqflags & PQ_ACTIVE)
+ } else if (pg->pqflags & PQ_ACTIVE) {
pgl = &uvm.page_active;
- else
+ } else {
pgl = NULL;
+ }
if (pgl) {
(*pr)(" checking pageq list\n");
- for (lcv = pgl->tqh_first ; lcv ; lcv = lcv->pageq.tqe_next) {
- if (lcv == pg) break;
+ TAILQ_FOREACH(tpg, pgl, pageq) {
+ if (tpg == pg) {
+ break;
+ }
}
- if (lcv)
+ if (tpg)
(*pr)(" page found on pageq list\n");
else
(*pr)(" >>> PAGE NOT FOUND ON PAGEQ LIST! <<<\n");
diff --git a/sys/uvm/uvm_map.h b/sys/uvm/uvm_map.h
index f3ac92f0cdb..31e4e7c6cc2 100644
--- a/sys/uvm/uvm_map.h
+++ b/sys/uvm/uvm_map.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map.h,v 1.13 2001/11/07 01:18:01 art Exp $ */
+/* $OpenBSD: uvm_map.h,v 1.14 2001/11/07 02:55:50 art Exp $ */
/* $NetBSD: uvm_map.h,v 1.22 2000/09/13 15:00:25 thorpej Exp $ */
/*
@@ -113,7 +113,7 @@
#include <uvm/uvm_anon.h>
/*
- * Types defined:
+ * types defined:
*
* vm_map_t the high-level address map data structure.
* vm_map_entry_t an entry in an address map.
@@ -121,9 +121,8 @@
*/
/*
- * Objects which live in maps may be either VM objects, or
- * another map (called a "sharing map") which denotes read-write
- * sharing with other maps.
+ * Objects which live in maps may be either VM objects, or another map
+ * (called a "sharing map") which denotes read-write sharing with other maps.
*
* XXXCDC: private pager data goes here now
*/
@@ -134,10 +133,10 @@ union vm_map_object {
};
/*
- * Address map entries consist of start and end addresses,
- * a VM object (or sharing map) and offset into that object,
- * and user-exported inheritance and protection information.
- * Also included is control information for virtual copy operations.
+ * Address map entries consist of start and end addresses,
+ * a VM object (or sharing map) and offset into that object,
+ * and user-exported inheritance and protection information.
+ * Also included is control information for virtual copy operations.
*/
struct vm_map_entry {
struct vm_map_entry *prev; /* previous entry */
diff --git a/sys/uvm/uvm_meter.c b/sys/uvm/uvm_meter.c
index 25af71e70f1..e98bae74f60 100644
--- a/sys/uvm/uvm_meter.c
+++ b/sys/uvm/uvm_meter.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_meter.c,v 1.13 2001/11/06 01:35:04 art Exp $ */
-/* $NetBSD: uvm_meter.c,v 1.13 2000/06/27 17:29:27 mrg Exp $ */
+/* $OpenBSD: uvm_meter.c,v 1.14 2001/11/07 02:55:50 art Exp $ */
+/* $NetBSD: uvm_meter.c,v 1.14 2000/11/24 18:54:31 chs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -59,7 +59,7 @@
*/
int maxslp = MAXSLP; /* patchable ... */
-struct loadavg averunnable; /* decl. */
+struct loadavg averunnable;
/*
* constants for averages over 1, 5, and 15 minutes when sampling at
@@ -87,7 +87,7 @@ uvm_meter()
if ((time.tv_sec % 5) == 0)
uvm_loadav(&averunnable);
if (proc0.p_slptime > (maxslp / 2))
- wakeup((caddr_t)&proc0);
+ wakeup(&proc0);
}
/*
@@ -101,7 +101,8 @@ uvm_loadav(avg)
int i, nrun;
struct proc *p;
- for (nrun = 0, p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
+ nrun = 0;
+ LIST_FOREACH(p, &allproc, p_list) {
switch (p->p_stat) {
case SSLEEP:
if (p->p_priority > PZERO || p->p_slptime > 1)
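
Once LIST_FOREACH has counted nrun, the body of uvm_loadav() (unchanged by
this hunk) feeds it into the classic BSD exponential decay, one step per
5-second sample; fixed point scaled by FSCALE, with cexp[i] holding
FSCALE * exp(-5/60), exp(-5/300), exp(-5/900) for the 1-, 5- and 15-minute
averages:

	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;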
@@ -194,7 +195,7 @@ uvm_total(totalp)
* calculate process statistics
*/
- for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
+ LIST_FOREACH(p, &allproc, p_list) {
if (p->p_flag & P_SYSTEM)
continue;
switch (p->p_stat) {
diff --git a/sys/uvm/uvm_mmap.c b/sys/uvm/uvm_mmap.c
index 2aba0283ac7..22f0ceb9d3f 100644
--- a/sys/uvm/uvm_mmap.c
+++ b/sys/uvm/uvm_mmap.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_mmap.c,v 1.24 2001/11/07 01:18:01 art Exp $ */
-/* $NetBSD: uvm_mmap.c,v 1.44 2000/09/13 15:00:25 thorpej Exp $ */
+/* $OpenBSD: uvm_mmap.c,v 1.25 2001/11/07 02:55:50 art Exp $ */
+/* $NetBSD: uvm_mmap.c,v 1.45 2000/11/24 23:30:01 soren Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -880,7 +880,7 @@ sys_madvise(p, v, retval)
case MADV_FREE:
/*
* These pages contain no valid data, and may be
- * grbage-collected. Toss all resources, including
+ * garbage-collected. Toss all resources, including
* any swap space in use.
*/
rv = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
diff --git a/sys/uvm/uvm_page.c b/sys/uvm/uvm_page.c
index bc80f041f57..4e46822894a 100644
--- a/sys/uvm/uvm_page.c
+++ b/sys/uvm/uvm_page.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_page.c,v 1.28 2001/11/07 01:18:01 art Exp $ */
-/* $NetBSD: uvm_page.c,v 1.40 2000/08/02 20:25:11 thorpej Exp $ */
+/* $OpenBSD: uvm_page.c,v 1.29 2001/11/07 02:55:50 art Exp $ */
+/* $NetBSD: uvm_page.c,v 1.43 2000/11/09 19:15:28 christos Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -256,7 +256,7 @@ uvm_page_init(kvm_startp, kvm_endp)
*/
if (vm_nphysseg == 0)
- panic("vm_page_bootstrap: no memory pre-allocated");
+ panic("uvm_page_bootstrap: no memory pre-allocated");
/*
* first calculate the number of free pages...
@@ -495,7 +495,7 @@ uvm_page_physget_freelist(paddrp, freelist)
{
if (uvm.page_init_done == TRUE)
- panic("vm_page_physget: called _after_ bootstrap");
+ panic("uvm_page_physget: called _after_ bootstrap");
if (vm_physmem[lcv].free_list != freelist)
continue;
@@ -510,7 +510,7 @@ uvm_page_physget_freelist(paddrp, freelist)
if (vm_physmem[lcv].avail_start ==
vm_physmem[lcv].end) {
if (vm_nphysseg == 1)
- panic("vm_page_physget: out of memory!");
+ panic("vum_page_physget: out of memory!");

vm_nphysseg--;
for (x = lcv ; x < vm_nphysseg ; x++)
/* structure copy */
@@ -529,7 +529,7 @@ uvm_page_physget_freelist(paddrp, freelist)
if (vm_physmem[lcv].avail_end ==
vm_physmem[lcv].start) {
if (vm_nphysseg == 1)
- panic("vm_page_physget: out of memory!");
+ panic("uvm_page_physget: out of memory!");
vm_nphysseg--;
for (x = lcv ; x < vm_nphysseg ; x++)
/* structure copy */
@@ -560,7 +560,7 @@ uvm_page_physget_freelist(paddrp, freelist)
/* nothing left? nuke it */
if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) {
if (vm_nphysseg == 1)
- panic("vm_page_physget: out of memory!");
+ panic("uvm_page_physget: out of memory!");
vm_nphysseg--;
for (x = lcv ; x < vm_nphysseg ; x++)
/* structure copy */
@@ -622,6 +622,7 @@ uvm_page_physload(start, end, avail_start, avail_end, free_list)
"segment\n");
printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
VM_PHYSSEG_MAX, (long long)start, (long long)end);
+ printf("\tincrease VM_PHYSSEG_MAX\n");
return;
}
@@ -1330,7 +1331,21 @@ uvm_pageidlezero()
uvm_unlock_fpageq(s);
#ifdef PMAP_PAGEIDLEZERO
- PMAP_PAGEIDLEZERO(VM_PAGE_TO_PHYS(pg));
+ if (PMAP_PAGEIDLEZERO(VM_PAGE_TO_PHYS(pg)) == FALSE) {
+ /*
+ * The machine-dependent code detected some
+ * reason for us to abort zeroing pages,
+ * probably because there is a process now
+ * ready to run.
+ */
+ s = uvm_lock_fpageq();
+ TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_UNKNOWN],
+ pg, pageq);
+ uvmexp.free++;
+ uvmexp.zeroaborts++;
+ uvm_unlock_fpageq(s);
+ return;
+ }
#else
/*
* XXX This will toast the cache unless the pmap_zero_page()
diff --git a/sys/uvm/uvm_page.h b/sys/uvm/uvm_page.h
index cd8105d010d..564504a205f 100644
--- a/sys/uvm/uvm_page.h
+++ b/sys/uvm/uvm_page.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_page.h,v 1.10 2001/11/06 01:35:04 art Exp $ */
-/* $NetBSD: uvm_page.h,v 1.16 2000/06/27 09:00:14 mrg Exp $ */
+/* $OpenBSD: uvm_page.h,v 1.11 2001/11/07 02:55:50 art Exp $ */
+/* $NetBSD: uvm_page.h,v 1.17 2000/10/03 20:50:49 mrg Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -195,8 +195,7 @@ struct vm_page {
* - VM_PSTRAT_BSEARCH: binary search (sorted by address)
* - VM_PSTRAT_BIGFIRST: linear search (sorted by largest segment first)
* - others?
- * XXXCDC: eventually we should remove contig and old non-contig cases
- * and purge all left-over global variables...
+ * XXXCDC: eventually we should purge all left-over global variables...
*/
#define VM_PSTRAT_RANDOM 1
#define VM_PSTRAT_BSEARCH 2
diff --git a/sys/uvm/uvm_pager.c b/sys/uvm/uvm_pager.c
index 1ac5457b1fa..29e305c57b3 100644
--- a/sys/uvm/uvm_pager.c
+++ b/sys/uvm/uvm_pager.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_pager.c,v 1.19 2001/11/07 01:18:01 art Exp $ */
-/* $NetBSD: uvm_pager.c,v 1.33 2000/09/13 15:00:25 thorpej Exp $ */
+/* $OpenBSD: uvm_pager.c,v 1.20 2001/11/07 02:55:50 art Exp $ */
+/* $NetBSD: uvm_pager.c,v 1.34 2000/11/24 22:41:39 chs Exp $ */
/*
*
@@ -64,7 +64,6 @@ struct uvm_pagerops *uvmpagerops[] = {
* the pager map: provides KVA for I/O
*/
-#define PAGER_MAP_SIZE (4 * 1024 * 1024)
vm_map_t pager_map; /* XXX */
simple_lock_data_t pager_map_wanted_lock;
boolean_t pager_map_wanted; /* locked by pager map */
diff --git a/sys/uvm/uvm_pager.h b/sys/uvm/uvm_pager.h
index 0644ac93969..208693227d3 100644
--- a/sys/uvm/uvm_pager.h
+++ b/sys/uvm/uvm_pager.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_pager.h,v 1.12 2001/11/06 00:20:22 art Exp $ */
-/* $NetBSD: uvm_pager.h,v 1.16 2000/06/26 14:21:18 mrg Exp $ */
+/* $OpenBSD: uvm_pager.h,v 1.13 2001/11/07 02:55:50 art Exp $ */
+/* $NetBSD: uvm_pager.h,v 1.18 2000/11/24 22:41:39 chs Exp $ */
/*
*
@@ -116,8 +116,6 @@ struct uvm_pagerops {
int (*pgo_get) /* get/read page */
__P((struct uvm_object *, voff_t,
vm_page_t *, int *, int, vm_prot_t, int, int));
- int (*pgo_asyncget) /* start async get */
- __P((struct uvm_object *, voff_t, int));
int (*pgo_put) /* put/write page */
__P((struct uvm_object *, vm_page_t *,
int, boolean_t));
@@ -128,8 +126,6 @@ struct uvm_pagerops {
__P((struct uvm_object *, struct vm_page **,
int *, struct vm_page *, int, voff_t,
voff_t));
- void (*pgo_aiodone) /* async iodone */
- __P((struct uvm_aiodesc *));
boolean_t (*pgo_releasepg) /* release page */
__P((struct vm_page *, struct vm_page **));
};
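
Callers never reach these hooks directly; everything dispatches through the
object's ops pointer, so removing pgo_asyncget and pgo_aiodone is just a
matter of deleting their slots and their initializers in each pagerops table.
An illustrative dispatch (arguments per the pgo_get prototype above):

	error = uobj->pgops->pgo_get(uobj, offset, pps, &npages,
	    centeridx, access_type, advice, flags);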
@@ -213,6 +209,14 @@ struct vm_page **uvm_mk_pcluster __P((struct uvm_object *, struct vm_page **,
#define VM_PAGER_UNLOCK 6
#define VM_PAGER_REFAULT 7
+/*
+ * XXX
+ * this is needed until the device strategy interface
+ * is changed to do physically-addressed i/o.
+ */
+
+#define PAGER_MAP_SIZE (16 * 1024 * 1024)
+
#endif /* _KERNEL */
#endif /* _UVM_UVM_PAGER_H_ */
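
PAGER_MAP_SIZE bounds the KVA window through which pageout I/O sees its
pages. A rough sketch of the map-in/map-out cycle that window supports,
assuming the standard pmap_kenter_pa()/pmap_kremove() interfaces (the real
versions are uvm_pagermapin()/uvm_pagermapout() in uvm_pager.c; pps is a
hypothetical page array here):

	/* make npages physical pages visible at kva for the strategy code */
	for (i = 0; i < npages; i++)
		pmap_kenter_pa(kva + ((vaddr_t)i << PAGE_SHIFT),
		    VM_PAGE_TO_PHYS(pps[i]), VM_PROT_READ | VM_PROT_WRITE);
	/* ... start the I/O on kva ... */
	pmap_kremove(kva, (vsize_t)npages << PAGE_SHIFT);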
diff --git a/sys/uvm/uvm_pager_i.h b/sys/uvm/uvm_pager_i.h
index c7776c42bdd..397c0e544cd 100644
--- a/sys/uvm/uvm_pager_i.h
+++ b/sys/uvm/uvm_pager_i.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_pager_i.h,v 1.7 2001/08/11 10:57:22 art Exp $ */
-/* $NetBSD: uvm_pager_i.h,v 1.9 2000/05/08 23:13:42 thorpej Exp $ */
+/* $OpenBSD: uvm_pager_i.h,v 1.8 2001/11/07 02:55:50 art Exp $ */
+/* $NetBSD: uvm_pager_i.h,v 1.10 2000/11/25 06:28:00 chs Exp $ */
/*
*
@@ -57,11 +57,15 @@ PAGER_INLINE struct vm_page *
uvm_pageratop(kva)
vaddr_t kva;
{
+ struct vm_page *pg;
paddr_t pa;
+ boolean_t rv;
- if (__predict_false(pmap_extract(pmap_kernel(), kva, &pa) == FALSE))
- panic("uvm_pageratop");
- return (PHYS_TO_VM_PAGE(pa));
+ rv = pmap_extract(pmap_kernel(), kva, &pa);
+ KASSERT(rv);
+ pg = PHYS_TO_VM_PAGE(pa);
+ KASSERT(pg != NULL);
+ return (pg);
}
#endif /* defined(UVM_PAGER_INLINE) || defined(UVM_PAGER) */
diff --git a/sys/uvm/uvm_pglist.c b/sys/uvm/uvm_pglist.c
index 94b81596eeb..70a3c9f7904 100644
--- a/sys/uvm/uvm_pglist.c
+++ b/sys/uvm/uvm_pglist.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_pglist.c,v 1.8 2001/11/06 01:35:04 art Exp $ */
-/* $NetBSD: uvm_pglist.c,v 1.11 2000/06/27 17:29:34 mrg Exp $ */
+/* $OpenBSD: uvm_pglist.c,v 1.9 2001/11/07 02:55:50 art Exp $ */
+/* $NetBSD: uvm_pglist.c,v 1.12 2000/11/25 06:28:00 chs Exp $ */
/*-
* Copyright (c) 1997 The NetBSD Foundation, Inc.
@@ -128,18 +128,19 @@ uvm_pglistalloc(size, low, high, alignment, boundary, rlist, nsegs, waitok)
/*
* Block all memory allocation and lock the free list.
*/
- s = uvm_lock_fpageq(); /* lock free page queue */
+ s = uvm_lock_fpageq();
/* Are there even any free pages? */
- if (uvmexp.free <= (uvmexp.reserve_pagedaemon +
- uvmexp.reserve_kernel))
+ if (uvmexp.free <= (uvmexp.reserve_pagedaemon + uvmexp.reserve_kernel))
goto out;
for (;; try += alignment) {
if (try + size > high) {
+
/*
* We've run past the allowable range.
*/
+
goto out;
}
@@ -159,39 +160,34 @@ uvm_pglistalloc(size, low, high, alignment, boundary, rlist, nsegs, waitok)
/*
	 * Found a suitable starting page. See if the range is free.
*/
+
for (; idx < end; idx++) {
if (VM_PAGE_IS_FREE(&pgs[idx]) == 0) {
- /*
- * Page not available.
- */
break;
}
-
idxpa = VM_PAGE_TO_PHYS(&pgs[idx]);
-
if (idx > tryidx) {
lastidxpa = VM_PAGE_TO_PHYS(&pgs[idx - 1]);
-
if ((lastidxpa + PAGE_SIZE) != idxpa) {
+
/*
* Region not contiguous.
*/
+
break;
}
if (boundary != 0 &&
((lastidxpa ^ idxpa) & pagemask) != 0) {
+
/*
* Region crosses boundary.
*/
+
break;
}
}
}
-
if (idx == end) {
- /*
- * Woo hoo! Found one.
- */
break;
}
}
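
The boundary check a few lines up is the usual power-of-two trick: with
pagemask = ~(boundary - 1), two physical addresses XOR to a nonzero value
under the mask exactly when they sit in different boundary-sized chunks.
Worked example with a 16MB boundary:

	paddr_t boundary = 0x01000000;		/* 16MB */
	paddr_t pagemask = ~(boundary - 1);	/* 0xff000000 on 32-bit paddr_t */

	/* straddles:  (0x00fff000 ^ 0x01000000) & pagemask == 0x01000000 != 0 */
	/* same chunk: (0x01000000 ^ 0x01001000) & pagemask == 0, contiguous */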
@@ -210,7 +206,7 @@ uvm_pglistalloc(size, low, high, alignment, boundary, rlist, nsegs, waitok)
pgflidx = (m->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN;
#ifdef DEBUG
for (tp = TAILQ_FIRST(&uvm.page_free[
- free_list].pgfl_queues[pgflidx]);
+ free_list].pgfl_queues[pgflidx]);
tp != NULL;
tp = TAILQ_NEXT(tp, pageq)) {
if (tp == m)
@@ -228,8 +224,7 @@ uvm_pglistalloc(size, low, high, alignment, boundary, rlist, nsegs, waitok)
m->pqflags = 0;
m->uobject = NULL;
m->uanon = NULL;
- m->wire_count = 0;
- m->loan_count = 0;
+ m->version++;
TAILQ_INSERT_TAIL(rlist, m, pageq);
idx++;
STAT_INCR(uvm_pglistalloc_npages);
@@ -237,18 +232,18 @@ uvm_pglistalloc(size, low, high, alignment, boundary, rlist, nsegs, waitok)
error = 0;
out:
- uvm_unlock_fpageq(s);
-
/*
* check to see if we need to generate some free pages waking
* the pagedaemon.
- * XXX: we read uvm.free without locking
*/
- if (uvmexp.free < uvmexp.freemin ||
- (uvmexp.free < uvmexp.freetarg &&
- uvmexp.inactive < uvmexp.inactarg))
+ if (uvmexp.free + uvmexp.paging < uvmexp.freemin ||
+ (uvmexp.free + uvmexp.paging < uvmexp.freetarg &&
+ uvmexp.inactive < uvmexp.inactarg)) {
wakeup(&uvm.pagedaemon);
+ }
+
+ uvm_unlock_fpageq(s);
return (error);
}
diff --git a/sys/uvm/uvm_pglist.h b/sys/uvm/uvm_pglist.h
index 7012031d055..665a20f994e 100644
--- a/sys/uvm/uvm_pglist.h
+++ b/sys/uvm/uvm_pglist.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_pglist.h,v 1.1 2001/08/12 17:55:56 mickey Exp $ */
+/* $OpenBSD: uvm_pglist.h,v 1.2 2001/11/07 02:55:50 art Exp $ */
/* $NetBSD: uvm_pglist.h,v 1.3 2001/05/02 01:22:20 thorpej Exp $ */
/*-
@@ -58,4 +58,4 @@ struct pgfreelist {
struct pglist pgfl_queues[PGFL_NQUEUES];
};
-#endif /* _PGLIST_H_ */
+#endif
diff --git a/sys/uvm/uvm_stat.c b/sys/uvm/uvm_stat.c
index 34a0921a818..d6f90081760 100644
--- a/sys/uvm/uvm_stat.c
+++ b/sys/uvm/uvm_stat.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_stat.c,v 1.7 2001/11/06 01:35:04 art Exp $ */
-/* $NetBSD: uvm_stat.c,v 1.14 2000/06/27 17:29:35 mrg Exp $ */
+/* $OpenBSD: uvm_stat.c,v 1.8 2001/11/07 02:55:50 art Exp $ */
+/* $NetBSD: uvm_stat.c,v 1.15 2000/11/24 07:25:52 chs Exp $ */
/*
*
@@ -43,6 +43,7 @@
#include <sys/systm.h>
#include <uvm/uvm.h>
+#include <uvm/uvm_ddb.h>
/*
* globals
@@ -58,6 +59,8 @@ struct uvm_history_head uvm_histories;
int uvmhist_print_enabled = 1;
#endif
+#ifdef DDB
+
/*
* prototypes
*/
@@ -68,7 +71,6 @@ void uvm_hist __P((u_int32_t));
static void uvmhist_dump_histories __P((struct uvm_history *[]));
#endif
void uvmcnt_dump __P((void));
-void uvm_dump __P((void));
#ifdef UVMHIST
@@ -198,54 +200,56 @@ uvmcnt_dump()
}
/*
- * uvm_dump: ddb hook to dump interesting uvm counters
+ * uvmexp_print: ddb hook to print interesting uvm counters
*/
-void
-uvm_dump()
+void
+uvmexp_print(void (*pr)(const char *, ...))
{
- printf("Current UVM status:\n");
- printf(" pagesize=%d (0x%x), pagemask=0x%x, pageshift=%d\n",
+ (*pr)("Current UVM status:\n");
+ (*pr)(" pagesize=%d (0x%x), pagemask=0x%x, pageshift=%d\n",
uvmexp.pagesize, uvmexp.pagesize, uvmexp.pagemask,
uvmexp.pageshift);
- printf(" %d VM pages: %d active, %d inactive, %d wired, %d free\n",
+ (*pr)(" %d VM pages: %d active, %d inactive, %d wired, %d free\n",
uvmexp.npages, uvmexp.active, uvmexp.inactive, uvmexp.wired,
uvmexp.free);
- printf(" freemin=%d, free-target=%d, inactive-target=%d, "
+ (*pr)(" freemin=%d, free-target=%d, inactive-target=%d, "
"wired-max=%d\n", uvmexp.freemin, uvmexp.freetarg, uvmexp.inactarg,
uvmexp.wiredmax);
- printf(" faults=%d, traps=%d, intrs=%d, ctxswitch=%d\n",
+ (*pr)(" faults=%d, traps=%d, intrs=%d, ctxswitch=%d\n",
uvmexp.faults, uvmexp.traps, uvmexp.intrs, uvmexp.swtch);
- printf(" softint=%d, syscalls=%d, swapins=%d, swapouts=%d\n",
+ (*pr)(" softint=%d, syscalls=%d, swapins=%d, swapouts=%d\n",
uvmexp.softs, uvmexp.syscalls, uvmexp.swapins, uvmexp.swapouts);
- printf(" fault counts:\n");
- printf(" noram=%d, noanon=%d, pgwait=%d, pgrele=%d\n",
+ (*pr)(" fault counts:\n");
+ (*pr)(" noram=%d, noanon=%d, pgwait=%d, pgrele=%d\n",
uvmexp.fltnoram, uvmexp.fltnoanon, uvmexp.fltpgwait,
uvmexp.fltpgrele);
- printf(" ok relocks(total)=%d(%d), anget(retrys)=%d(%d), "
+ (*pr)(" ok relocks(total)=%d(%d), anget(retrys)=%d(%d), "
"amapcopy=%d\n", uvmexp.fltrelckok, uvmexp.fltrelck,
uvmexp.fltanget, uvmexp.fltanretry, uvmexp.fltamcopy);
- printf(" neighbor anon/obj pg=%d/%d, gets(lock/unlock)=%d/%d\n",
+ (*pr)(" neighbor anon/obj pg=%d/%d, gets(lock/unlock)=%d/%d\n",
uvmexp.fltnamap, uvmexp.fltnomap, uvmexp.fltlget, uvmexp.fltget);
- printf(" cases: anon=%d, anoncow=%d, obj=%d, prcopy=%d, przero=%d\n",
+ (*pr)(" cases: anon=%d, anoncow=%d, obj=%d, prcopy=%d, przero=%d\n",
uvmexp.flt_anon, uvmexp.flt_acow, uvmexp.flt_obj, uvmexp.flt_prcopy,
uvmexp.flt_przero);
- printf(" daemon and swap counts:\n");
- printf(" woke=%d, revs=%d, scans=%d, swout=%d\n", uvmexp.pdwoke,
- uvmexp.pdrevs, uvmexp.pdscans, uvmexp.pdswout);
- printf(" busy=%d, freed=%d, reactivate=%d, deactivate=%d\n",
+ (*pr)(" daemon and swap counts:\n");
+ (*pr)(" woke=%d, revs=%d, scans=%d, obscans=%d, anscans=%d\n",
+ uvmexp.pdwoke, uvmexp.pdrevs, uvmexp.pdscans, uvmexp.pdobscan,
+ uvmexp.pdanscan);
+ (*pr)(" busy=%d, freed=%d, reactivate=%d, deactivate=%d\n",
uvmexp.pdbusy, uvmexp.pdfreed, uvmexp.pdreact, uvmexp.pddeact);
- printf(" pageouts=%d, pending=%d, nswget=%d\n", uvmexp.pdpageouts,
+ (*pr)(" pageouts=%d, pending=%d, nswget=%d\n", uvmexp.pdpageouts,
uvmexp.pdpending, uvmexp.nswget);
- printf(" nswapdev=%d, nanon=%d, nanonneeded=%d nfreeanon=%d\n",
+ (*pr)(" nswapdev=%d, nanon=%d, nanonneeded=%d nfreeanon=%d\n",
uvmexp.nswapdev, uvmexp.nanon, uvmexp.nanonneeded,
uvmexp.nfreeanon);
- printf(" swpages=%d, swpginuse=%d, swpgonly=%d paging=%d\n",
+ (*pr)(" swpages=%d, swpginuse=%d, swpgonly=%d paging=%d\n",
uvmexp.swpages, uvmexp.swpginuse, uvmexp.swpgonly, uvmexp.paging);
- printf(" kernel pointers:\n");
- printf(" objs(kern/kmem/mb)=%p/%p/%p\n", uvm.kernel_object,
+ (*pr)(" kernel pointers:\n");
+ (*pr)(" objs(kern/kmem/mb)=%p/%p/%p\n", uvm.kernel_object,
uvmexp.kmem_object, uvmexp.mb_object);
}
+#endif
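
The printf-to-(*pr) conversion is what lets one dump routine serve both the
console and ddb: the caller hands in either printf or db_printf. The shape of
the pattern in miniature (self-contained userland stand-in; the real hook is
declared in uvm_ddb.h):

	#include <stdarg.h>
	#include <stdio.h>

	static void
	my_pr(const char *fmt, ...)	/* stand-in for db_printf */
	{
		va_list ap;

		va_start(ap, fmt);
		vprintf(fmt, ap);
		va_end(ap);
	}

	static void
	dump(void (*pr)(const char *, ...))
	{
		(*pr)("Current status: %d items\n", 42);
	}

	int
	main(void)
	{
		dump(my_pr);
		return (0);
	}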
diff --git a/sys/uvm/uvm_swap.c b/sys/uvm/uvm_swap.c
index ada8eee8464..e4bd678b122 100644
--- a/sys/uvm/uvm_swap.c
+++ b/sys/uvm/uvm_swap.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_swap.c,v 1.37 2001/11/06 01:35:04 art Exp $ */
-/* $NetBSD: uvm_swap.c,v 1.38 2000/06/27 17:29:35 mrg Exp $ */
+/* $OpenBSD: uvm_swap.c,v 1.38 2001/11/07 02:55:50 art Exp $ */
+/* $NetBSD: uvm_swap.c,v 1.40 2000/11/17 11:39:39 mrg Exp $ */
/*
* Copyright (c) 1995, 1996, 1997 Matthew R. Green
@@ -783,6 +783,7 @@ sys_swapctl(p, v, retval)
error = 0; /* assume no error */
switch(SCARG(uap, cmd)) {
+
case SWAP_DUMPDEV:
if (vp->v_type != VBLK) {
error = ENOTBLK;
@@ -869,12 +870,6 @@ sys_swapctl(p, v, retval)
free(sdp, M_VMSWAP);
break;
}
-
- /*
- * got it! now add a second reference to vp so that
- * we keep a reference to the vnode after we return.
- */
- vref(vp);
break;
case SWAP_OFF:
@@ -908,9 +903,10 @@ sys_swapctl(p, v, retval)
}
/*
- * done! use vput to drop our reference and unlock
+ * done! release the ref gained by namei() and unlock.
*/
vput(vp);
+
out:
lockmgr(&swap_syscall_lock, LK_RELEASE, NULL, p);
@@ -1092,6 +1088,11 @@ swap_on(p, sdp)
printf("leaving %d pages of swap\n", size);
}
+ /*
+ * add a ref to vp to reflect usage as a swap device.
+ */
+ vref(vp);
+
/*
* add anons to reflect the new swap space
*/
@@ -1109,9 +1110,8 @@ swap_on(p, sdp)
sdp->swd_npages = size;
sdp->swd_flags &= ~SWF_FAKE; /* going live */
sdp->swd_flags |= (SWF_INUSE|SWF_ENABLE);
- simple_unlock(&uvm.swap_data_lock);
uvmexp.swpages += size;
-
+ simple_unlock(&uvm.swap_data_lock);
return (0);
bad:
@@ -1167,17 +1167,17 @@ swap_off(p, sdp)
#endif
/*
- * done with the vnode.
+ * done with the vnode and saved creds.
+ * drop our ref on the vnode before calling VOP_CLOSE()
+ * so that spec_close() can tell if this is the last close.
*/
if (sdp->swd_vp->v_type == VREG) {
crfree(sdp->swd_cred);
}
+ vrele(sdp->swd_vp);
if (sdp->swd_vp != rootvp) {
(void) VOP_CLOSE(sdp->swd_vp, FREAD|FWRITE, p->p_ucred, p);
}
- if (sdp->swd_vp) {
- vrele(sdp->swd_vp);
- }
/* remove anons from the system */
uvm_anon_remove(sdp->swd_npages);
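
Net effect of the three swap hunks: vnode reference management becomes
symmetric. sys_swapctl() keeps only the namei() reference and vput()s it on
exit; swap_on() takes its own vref() for the device's lifetime; swap_off()
vrele()s that reference before VOP_CLOSE() so spec_close() can recognize the
last close. In outline (calls as in the code above):

	/* sys_swapctl() */	/* namei ref in, vput(vp) on the way out */
	/* swap_on()     */	vref(vp);		/* + swap-use ref */
	/* swap_off()    */	vrele(sdp->swd_vp);	/* - swap-use ref */
				(void) VOP_CLOSE(sdp->swd_vp, FREAD|FWRITE,
				    p->p_ucred, p);	/* sees last close */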
diff --git a/sys/uvm/uvm_swap.h b/sys/uvm/uvm_swap.h
index 3c9e9ad0311..3108dd10194 100644
--- a/sys/uvm/uvm_swap.h
+++ b/sys/uvm/uvm_swap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_swap.h,v 1.8 2001/07/26 19:37:13 art Exp $ */
+/* $OpenBSD: uvm_swap.h,v 1.9 2001/11/07 02:55:51 art Exp $ */
/* $NetBSD: uvm_swap.h,v 1.5 2000/01/11 06:57:51 chs Exp $ */
/*
@@ -34,7 +34,7 @@
#ifndef _UVM_UVM_SWAP_H_
#define _UVM_UVM_SWAP_H_
-#define SWSLOT_BAD (-1)
+#define SWSLOT_BAD (-1)
#ifdef _KERNEL
diff --git a/sys/uvm/uvm_unix.c b/sys/uvm/uvm_unix.c
index 294b28b0c98..44863db3e2d 100644
--- a/sys/uvm/uvm_unix.c
+++ b/sys/uvm/uvm_unix.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_unix.c,v 1.16 2001/11/07 01:18:01 art Exp $ */
+/* $OpenBSD: uvm_unix.c,v 1.17 2001/11/07 02:55:51 art Exp $ */
/* $NetBSD: uvm_unix.c,v 1.18 2000/09/13 15:00:25 thorpej Exp $ */
/*
@@ -83,7 +83,7 @@ sys_obreak(p, v, retval)
old = (vaddr_t)vm->vm_daddr;
new = round_page((vaddr_t)SCARG(uap, nsize));
if ((new - old) > p->p_rlimit[RLIMIT_DATA].rlim_cur)
- return(ENOMEM);
+ return (ENOMEM);
old = round_page(old + ptoa(vm->vm_dsize));
diff = new - old;
@@ -112,9 +112,9 @@ sys_obreak(p, v, retval)
}
uprintf("sbrk: %s %ld failed, return = %d\n",
- diff > 0 ? "grow" : "shrink",
- (long)(diff > 0 ? diff : -diff), rv);
- return(ENOMEM);
+ diff > 0 ? "grow" : "shrink",
+ (long)(diff > 0 ? diff : -diff), rv);
+ return (ENOMEM);
}
/*
diff --git a/sys/uvm/uvm_vnode.c b/sys/uvm/uvm_vnode.c
index 019e71e8a00..e5e954eb9df 100644
--- a/sys/uvm/uvm_vnode.c
+++ b/sys/uvm/uvm_vnode.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_vnode.c,v 1.22 2001/11/06 01:35:04 art Exp $ */
-/* $NetBSD: uvm_vnode.c,v 1.35 2000/06/27 17:29:37 mrg Exp $ */
+/* $OpenBSD: uvm_vnode.c,v 1.23 2001/11/07 02:55:51 art Exp $ */
+/* $NetBSD: uvm_vnode.c,v 1.36 2000/11/24 20:34:01 chs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -84,8 +84,6 @@ lock_data_t uvn_sync_lock; /* locks sync operation */
* functions
*/
-static int uvn_asyncget __P((struct uvm_object *, voff_t,
- int));
static void uvn_cluster __P((struct uvm_object *, voff_t,
voff_t *, voff_t *));
static void uvn_detach __P((struct uvm_object *));
@@ -114,11 +112,9 @@ struct uvm_pagerops uvm_vnodeops = {
NULL, /* no specialized fault routine required */
uvn_flush,
uvn_get,
- uvn_asyncget,
uvn_put,
uvn_cluster,
uvm_mk_pcluster, /* use generic version of this: see uvm_pager.c */
- NULL, /* AIO-DONE function (not until we have asyncio) */
uvn_releasepg,
};
@@ -1546,28 +1542,6 @@ uvn_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
}
/*
- * uvn_asyncget: start async I/O to bring pages into ram
- *
- * => caller must lock object(???XXX: see if this is best)
- * => could be called from uvn_get or a madvise() fault-ahead.
- * => if it fails, it doesn't matter.
- */
-
-static int
-uvn_asyncget(uobj, offset, npages)
- struct uvm_object *uobj;
- voff_t offset;
- int npages;
-{
-
- /*
- * XXXCDC: we can't do async I/O yet
- */
- printf("uvn_asyncget called\n");
- return (KERN_SUCCESS);
-}
-
-/*
* uvn_io: do I/O to a vnode
*
* => prefer map unlocked (not required)
@@ -1695,7 +1669,7 @@ uvn_io(uvn, pps, npages, flags, rw)
*/
result = 0;
if ((uvn->u_flags & UVM_VNODE_VNISLOCKED) == 0)
- result = vn_lock(vn, LK_EXCLUSIVE | LK_RETRY, curproc /*XXX*/);
+ result = vn_lock(vn, LK_EXCLUSIVE | LK_RETRY | LK_RECURSEFAIL, curproc);
if (result == 0) {
/* NOTE: vnode now locked! */
@@ -1706,7 +1680,7 @@ uvn_io(uvn, pps, npages, flags, rw)
result = VOP_WRITE(vn, &uio, 0, curproc->p_ucred);
if ((uvn->u_flags & UVM_VNODE_VNISLOCKED) == 0)
- VOP_UNLOCK(vn, 0, curproc /*XXX*/);
+ VOP_UNLOCK(vn, 0, curproc);
}
/* NOTE: vnode now unlocked (unless vnislocked) */
@@ -1870,9 +1844,9 @@ uvm_vnp_uncache(vp)
* unlocked causing us to return TRUE when we should not. we ignore
* this as a false-positive return value doesn't hurt us.
*/
- VOP_UNLOCK(vp, 0, curproc /*XXX*/);
+ VOP_UNLOCK(vp, 0, curproc);
uvn_detach(&uvn->u_obj);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curproc/*XXX*/);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
/*
* and return...
@@ -1954,7 +1928,7 @@ uvm_vnp_sync(mp)
* step 1: ensure we are only ones using the uvn_sync_q by locking
* our lock...
*/
- lockmgr(&uvn_sync_lock, LK_EXCLUSIVE, (void *)0, curproc /*XXX*/);
+ lockmgr(&uvn_sync_lock, LK_EXCLUSIVE, NULL, curproc);
/*
* step 2: build up a simpleq of uvns of interest based on the
@@ -2050,5 +2024,5 @@ uvm_vnp_sync(mp)
/*
* done! release sync lock
*/
- lockmgr(&uvn_sync_lock, LK_RELEASE, (void *)0, curproc /*XXX*/);
+ lockmgr(&uvn_sync_lock, LK_RELEASE, (void *)0, curproc);
}