-rw-r--r--  sys/uvm/uvm.h          |  12
-rw-r--r--  sys/uvm/uvm_amap.c     |   4
-rw-r--r--  sys/uvm/uvm_amap_i.h   |   5
-rw-r--r--  sys/uvm/uvm_anon.c     |  44
-rw-r--r--  sys/uvm/uvm_anon.h     |   3
-rw-r--r--  sys/uvm/uvm_aobj.c     |  65
-rw-r--r--  sys/uvm/uvm_aobj.h     |   8
-rw-r--r--  sys/uvm/uvm_ddb.h      |   5
-rw-r--r--  sys/uvm/uvm_device.c   |  29
-rw-r--r--  sys/uvm/uvm_device.h   |   9
-rw-r--r--  sys/uvm/uvm_extern.h   |  23
-rw-r--r--  sys/uvm/uvm_fault.c    |  82
-rw-r--r--  sys/uvm/uvm_fault.h    |   5
-rw-r--r--  sys/uvm/uvm_fault_i.h  |   5
-rw-r--r--  sys/uvm/uvm_glue.c     |  11
-rw-r--r--  sys/uvm/uvm_glue.h     |   5
-rw-r--r--  sys/uvm/uvm_init.c     |   5
-rw-r--r--  sys/uvm/uvm_io.c       |   5
-rw-r--r--  sys/uvm/uvm_km.c       |  31
-rw-r--r--  sys/uvm/uvm_km.h       |   5
-rw-r--r--  sys/uvm/uvm_loan.c     |  11
-rw-r--r--  sys/uvm/uvm_loan.h     |   5
-rw-r--r--  sys/uvm/uvm_map.c      |  82
-rw-r--r--  sys/uvm/uvm_map.h      |  15
-rw-r--r--  sys/uvm/uvm_map_i.h    |  13
-rw-r--r--  sys/uvm/uvm_meter.c    |   5
-rw-r--r--  sys/uvm/uvm_mmap.c     |  46
-rw-r--r--  sys/uvm/uvm_object.h   |   5
-rw-r--r--  sys/uvm/uvm_page.c     |  43
-rw-r--r--  sys/uvm/uvm_page.h     |   5
-rw-r--r--  sys/uvm/uvm_page_i.h   |   5
-rw-r--r--  sys/uvm/uvm_pager.c    |   7
-rw-r--r--  sys/uvm/uvm_pager.h    |   9
-rw-r--r--  sys/uvm/uvm_pager_i.h  |   5
-rw-r--r--  sys/uvm/uvm_pdaemon.c  | 178
-rw-r--r--  sys/uvm/uvm_pdaemon.h  |   5
-rw-r--r--  sys/uvm/uvm_pglist.c   |   3
-rw-r--r--  sys/uvm/uvm_stat.c     |   9
-rw-r--r--  sys/uvm/uvm_stat.h     |   5
-rw-r--r--  sys/uvm/uvm_swap.c     | 124
-rw-r--r--  sys/uvm/uvm_unix.c     |   5
-rw-r--r--  sys/uvm/uvm_user.c     |   5
-rw-r--r--  sys/uvm/uvm_vnode.c    |  47
-rw-r--r--  sys/uvm/uvm_vnode.h    |   5
44 files changed, 521 insertions, 482 deletions
diff --git a/sys/uvm/uvm.h b/sys/uvm/uvm.h
index 3951affe90f..e2ee78db07b 100644
--- a/sys/uvm/uvm.h
+++ b/sys/uvm/uvm.h
@@ -1,10 +1,4 @@
-/* $OpenBSD: uvm.h,v 1.2 1999/02/26 05:32:05 art Exp $ */
-/* $NetBSD: uvm.h,v 1.13 1998/10/11 22:59:53 chuck Exp $ */
-
-/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
+/* $NetBSD: uvm.h,v 1.14 1999/03/25 18:48:49 mrg Exp $ */
/*
*
@@ -92,6 +86,7 @@ struct uvm {
int page_nhash; /* number of buckets */
int page_hashmask; /* hash mask */
simple_lock_data_t hashlock; /* lock on page_hash array */
+
/* anon stuff */
struct vm_anon *afree; /* anon free list */
simple_lock_data_t afreelock; /* lock on anon free list */
@@ -107,6 +102,9 @@ struct uvm {
vaddr_t pager_sva; /* start of pager VA area */
vaddr_t pager_eva; /* end of pager VA area */
+ /* swap-related items */
+ simple_lock_data_t swap_data_lock;
+
/* kernel object: to support anonymous pageable kernel memory */
struct uvm_object *kernel_object;
};
diff --git a/sys/uvm/uvm_amap.c b/sys/uvm/uvm_amap.c
index e45e5fbd2ca..0087a0396a1 100644
--- a/sys/uvm/uvm_amap.c
+++ b/sys/uvm/uvm_amap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_amap.c,v 1.2 1999/02/26 05:32:06 art Exp $ */
+/* $OpenBSD: uvm_amap.c,v 1.3 1999/08/23 08:13:22 art Exp $ */
/* $NetBSD: uvm_amap.c,v 1.19 1999/01/28 14:46:27 chuck Exp $ */
/*
@@ -795,7 +795,7 @@ ReStart:
*/
nanon = uvm_analloc();
if (nanon)
- npg = uvm_pagealloc(NULL, 0, nanon);
+ npg = uvm_pagealloc(NULL, 0, nanon, 0);
else
npg = NULL; /* XXX: quiet gcc warning */
diff --git a/sys/uvm/uvm_amap_i.h b/sys/uvm/uvm_amap_i.h
index 93e40429e1d..6c4b70d9af7 100644
--- a/sys/uvm/uvm_amap_i.h
+++ b/sys/uvm/uvm_amap_i.h
@@ -1,11 +1,6 @@
-/* $OpenBSD: uvm_amap_i.h,v 1.2 1999/02/26 05:32:06 art Exp $ */
/* $NetBSD: uvm_amap_i.h,v 1.11 1999/01/28 14:46:27 chuck Exp $ */
/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
-/*
*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
diff --git a/sys/uvm/uvm_anon.c b/sys/uvm/uvm_anon.c
index 5564cc03e1b..f55a1831c62 100644
--- a/sys/uvm/uvm_anon.c
+++ b/sys/uvm/uvm_anon.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_anon.c,v 1.2 1999/02/26 05:32:06 art Exp $ */
+/* $OpenBSD: uvm_anon.c,v 1.3 1999/08/23 08:13:22 art Exp $ */
/* $NetBSD: uvm_anon.c,v 1.1 1999/01/24 23:53:15 chuck Exp $ */
/*
@@ -204,7 +204,6 @@ uvm_anfree(anon)
if ((pg->flags & PG_BUSY) != 0) {
/* tell them to dump it when done */
pg->flags |= PG_RELEASED;
- simple_unlock(&anon->an_lock);
UVMHIST_LOG(maphist,
" anon 0x%x, page 0x%x: BUSY (released!)",
anon, pg, 0, 0);
@@ -222,19 +221,9 @@ uvm_anfree(anon)
}
/*
- * are we using any backing store resources? if so, free them.
+ * free any swap resources.
*/
- if (anon->an_swslot) {
- /*
- * on backing store: no I/O in progress. sole amap reference
- * is ours and we've got it locked down. thus we can free,
- * and be done.
- */
- UVMHIST_LOG(maphist," freeing anon 0x%x, paged to swslot 0x%x",
- anon, anon->an_swslot, 0, 0);
- uvm_swap_free(anon->an_swslot, 1);
- anon->an_swslot = 0;
- }
+ uvm_anon_dropswap(anon);
/*
* now that we've stripped the data areas from the anon, free the anon
@@ -249,6 +238,33 @@ uvm_anfree(anon)
}
/*
+ * uvm_anon_dropswap: release any swap resources from this anon.
+ *
+ * => anon must be locked or have a reference count of 0.
+ */
+void
+uvm_anon_dropswap(anon)
+ struct vm_anon *anon;
+{
+ UVMHIST_FUNC("uvm_anon_dropswap"); UVMHIST_CALLED(maphist);
+ if (anon->an_swslot == 0) {
+ return;
+ }
+
+ UVMHIST_LOG(maphist,"freeing swap for anon %p, paged to swslot 0x%x",
+ anon, anon->an_swslot, 0, 0);
+ uvm_swap_free(anon->an_swslot, 1);
+ anon->an_swslot = 0;
+
+ if (anon->u.an_page == NULL) {
+ /* this page is no longer only in swap. */
+ simple_lock(&uvm.swap_data_lock);
+ uvmexp.swpgonly--;
+ simple_unlock(&uvm.swap_data_lock);
+ }
+}
+
+/*
* uvm_anon_lockloanpg: given a locked anon, lock its resident page
*
* => anon is locked by caller
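The new uvm_anon_dropswap() above pairs freeing an anon's swap slot with the uvmexp.swpgonly bookkeeping this commit introduces. Below is a minimal userland sketch of that bookkeeping; the struct, mutex and counter are stand-ins for struct vm_anon, uvm.swap_data_lock and uvmexp.swpgonly, not kernel code.

/*
 * Illustrative model only: when an anon's swap slot is released and the
 * anon has no resident page, the "swap-only" page count drops, and that
 * counter is protected by the swap data lock.
 */
#include <pthread.h>
#include <stddef.h>

struct model_anon {
	int	an_swslot;	/* nonzero: backed by a swap slot */
	void	*an_page;	/* non-NULL: also resident in RAM */
};

static pthread_mutex_t swap_data_lock = PTHREAD_MUTEX_INITIALIZER;
static int swpgonly;		/* pages that live only in swap */

static void
model_anon_dropswap(struct model_anon *anon)
{
	if (anon->an_swslot == 0)
		return;
	anon->an_swslot = 0;	/* kernel: uvm_swap_free(slot, 1) */
	if (anon->an_page == NULL) {
		/* no RAM copy exists and the swap copy is gone, so this
		 * page no longer counts as "in swap only" */
		pthread_mutex_lock(&swap_data_lock);
		swpgonly--;
		pthread_mutex_unlock(&swap_data_lock);
	}
}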
diff --git a/sys/uvm/uvm_anon.h b/sys/uvm/uvm_anon.h
index 693d7c9498a..7b4c230e302 100644
--- a/sys/uvm/uvm_anon.h
+++ b/sys/uvm/uvm_anon.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_anon.h,v 1.2 1999/02/26 05:32:06 art Exp $ */
+/* $OpenBSD: uvm_anon.h,v 1.3 1999/08/23 08:13:23 art Exp $ */
/* $NetBSD: uvm_anon.h,v 1.9 1999/01/24 23:53:15 chuck Exp $ */
/*
@@ -102,5 +102,6 @@ void uvm_anfree __P((struct vm_anon *));
void uvm_anon_init __P((void));
void uvm_anon_add __P((int));
struct vm_page *uvm_anon_lockloanpg __P((struct vm_anon *));
+void uvm_anon_dropswap __P((struct vm_anon *));
#endif /* _UVM_UVM_ANON_H_ */
diff --git a/sys/uvm/uvm_aobj.c b/sys/uvm/uvm_aobj.c
index a0a50c7f72c..250662bece8 100644
--- a/sys/uvm/uvm_aobj.c
+++ b/sys/uvm/uvm_aobj.c
@@ -1,11 +1,6 @@
-/* $OpenBSD: uvm_aobj.c,v 1.3 1999/04/28 09:28:18 art Exp $ */
-/* $NetBSD: uvm_aobj.c,v 1.15 1998/10/18 23:49:59 chs Exp $ */
+/* $NetBSD: uvm_aobj.c,v 1.18 1999/03/26 17:34:15 chs Exp $ */
/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
-/*
* Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
* Washington University.
* All rights reserved.
@@ -198,7 +193,6 @@ static boolean_t uao_releasepg __P((struct vm_page *,
struct uvm_pagerops aobj_pager = {
uao_init, /* init */
- NULL, /* attach */
uao_reference, /* reference */
uao_detach, /* detach */
NULL, /* fault */
@@ -425,8 +419,17 @@ uao_free(aobj)
{
int slot = elt->slots[j];
- if (slot)
+ if (slot) {
uvm_swap_free(slot, 1);
+
+ /*
+ * this page is no longer
+ * only in swap.
+ */
+ simple_lock(&uvm.swap_data_lock);
+ uvmexp.swpgonly--;
+ simple_unlock(&uvm.swap_data_lock);
+ }
}
next = elt->list.le_next;
@@ -445,8 +448,14 @@ uao_free(aobj)
{
int slot = aobj->u_swslots[i];
- if (slot)
+ if (slot) {
uvm_swap_free(slot, 1);
+
+ /* this page is no longer only in swap. */
+ simple_lock(&uvm.swap_data_lock);
+ uvmexp.swpgonly--;
+ simple_unlock(&uvm.swap_data_lock);
+ }
}
FREE(aobj->u_swslots, M_UVMAOBJ);
}
@@ -663,7 +672,6 @@ uao_detach(uobj)
busybody = FALSE;
for (pg = uobj->memq.tqh_first ; pg != NULL ; pg = pg->listq.tqe_next) {
- int swslot;
if (pg->flags & PG_BUSY) {
pg->flags |= PG_RELEASED;
@@ -671,16 +679,9 @@ uao_detach(uobj)
continue;
}
-
/* zap the mappings, free the swap slot, free the page */
pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);
-
- swslot = uao_set_swslot(&aobj->u_obj,
- pg->offset >> PAGE_SHIFT, 0);
- if (swslot) {
- uvm_swap_free(swslot, 1);
- }
-
+ uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
uvm_lock_pageq();
uvm_pagefree(pg);
uvm_unlock_pageq();
@@ -796,7 +797,7 @@ uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
if (ptmp == NULL && uao_find_swslot(aobj,
current_offset >> PAGE_SHIFT) == 0) {
ptmp = uvm_pagealloc(uobj, current_offset,
- NULL);
+ NULL, 0);
if (ptmp) {
/* new page */
ptmp->flags &= ~(PG_BUSY|PG_FAKE);
@@ -886,7 +887,7 @@ uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
if (ptmp == NULL) {
ptmp = uvm_pagealloc(uobj, current_offset,
- NULL); /* alloc */
+ NULL, 0);
/* out of RAM? */
if (ptmp == NULL) {
@@ -1039,7 +1040,6 @@ static boolean_t uao_releasepg(pg, nextpgp)
struct vm_page **nextpgp; /* OUT */
{
struct uvm_aobj *aobj = (struct uvm_aobj *) pg->uobject;
- int slot;
#ifdef DIAGNOSTIC
if ((pg->flags & PG_RELEASED) == 0)
@@ -1050,9 +1050,7 @@ static boolean_t uao_releasepg(pg, nextpgp)
* dispose of the page [caller handles PG_WANTED] and swap slot.
*/
pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);
- slot = uao_set_swslot(&aobj->u_obj, pg->offset >> PAGE_SHIFT, 0);
- if (slot)
- uvm_swap_free(slot, 1);
+ uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
uvm_lock_pageq();
if (nextpgp)
*nextpgp = pg->pageq.tqe_next; /* next page for daemon */
@@ -1089,3 +1087,22 @@ static boolean_t uao_releasepg(pg, nextpgp)
return FALSE;
}
+
+/*
+ * uao_dropswap: release any swap resources from this aobj page.
+ *
+ * => aobj must be locked or have a reference count of 0.
+ */
+
+void
+uao_dropswap(uobj, pageidx)
+ struct uvm_object *uobj;
+ int pageidx;
+{
+ int slot;
+
+ slot = uao_set_swslot(uobj, pageidx, 0);
+ if (slot) {
+ uvm_swap_free(slot, 1);
+ }
+}
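uao_dropswap() centralizes a pattern that previously appeared inline in uao_detach(), uao_releasepg(), uvm_km_pgremove() and the pagedaemon: look up the page's swap slot, clear it, and free the slot if one was allocated. A self-contained sketch of that pattern follows; the array stands in for the aobj slot storage (the real object uses u_swslots[] or a hashed swhash) and a counter stands in for uvm_swap_free().

#define MODEL_NPAGES	64

static int model_swslots[MODEL_NPAGES];	/* 0 means "no swap slot" */
static int model_slots_freed;		/* counts slots given back */

static int
model_set_swslot(int pageidx, int slot)
{
	int oldslot = model_swslots[pageidx];

	model_swslots[pageidx] = slot;
	return (oldslot);		/* caller gets the previous slot */
}

static void
model_dropswap(int pageidx)
{
	int slot = model_set_swslot(pageidx, 0);

	if (slot)
		model_slots_freed++;	/* kernel: uvm_swap_free(slot, 1) */
}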
diff --git a/sys/uvm/uvm_aobj.h b/sys/uvm/uvm_aobj.h
index 72b171022ef..286922874fe 100644
--- a/sys/uvm/uvm_aobj.h
+++ b/sys/uvm/uvm_aobj.h
@@ -1,11 +1,6 @@
-/* $OpenBSD: uvm_aobj.h,v 1.2 1999/02/26 05:32:06 art Exp $ */
-/* $NetBSD: uvm_aobj.h,v 1.6 1998/02/12 07:36:45 chs Exp $ */
+/* $NetBSD: uvm_aobj.h,v 1.7 1999/03/25 18:48:50 mrg Exp $ */
/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
-/*
* Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
* Washington University.
* All rights reserved.
@@ -68,6 +63,7 @@
*/
int uao_set_swslot __P((struct uvm_object *, int, int));
+void uao_dropswap __P((struct uvm_object *, int));
/*
* globals
diff --git a/sys/uvm/uvm_ddb.h b/sys/uvm/uvm_ddb.h
index 80685b27de5..797003a0fa2 100644
--- a/sys/uvm/uvm_ddb.h
+++ b/sys/uvm/uvm_ddb.h
@@ -1,11 +1,6 @@
-/* $OpenBSD: uvm_ddb.h,v 1.2 1999/02/26 05:32:06 art Exp $ */
/* $NetBSD: uvm_ddb.h,v 1.1 1998/07/04 22:18:53 jonathan Exp $ */
/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
-/*
*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
diff --git a/sys/uvm/uvm_device.c b/sys/uvm/uvm_device.c
index d56cb3f7e53..b0fea4ff884 100644
--- a/sys/uvm/uvm_device.c
+++ b/sys/uvm/uvm_device.c
@@ -1,10 +1,4 @@
-/* $OpenBSD: uvm_device.c,v 1.2 1999/02/26 05:32:06 art Exp $ */
-/* $NetBSD: uvm_device.c,v 1.11 1998/11/19 05:23:26 mrg Exp $ */
-
-/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
+/* $NetBSD: uvm_device.c,v 1.15 1999/03/26 21:58:39 mycroft Exp $ */
/*
*
@@ -73,7 +67,7 @@ static simple_lock_data_t udv_lock;
*/
static void udv_init __P((void));
-struct uvm_object *udv_attach __P((void *, vm_prot_t));
+struct uvm_object *udv_attach __P((void *, vm_prot_t, vaddr_t, vsize_t));
static void udv_reference __P((struct uvm_object *));
static void udv_detach __P((struct uvm_object *));
static int udv_fault __P((struct uvm_faultinfo *, vaddr_t,
@@ -92,7 +86,6 @@ static int udv_put __P((struct uvm_object *, vm_page_t *,
struct uvm_pagerops uvm_deviceops = {
udv_init,
- udv_attach,
udv_reference,
udv_detach,
udv_fault,
@@ -135,9 +128,11 @@ udv_init()
* => in fact, nothing should be locked so that we can sleep here.
*/
struct uvm_object *
-udv_attach(arg, accessprot)
+udv_attach(arg, accessprot, off, size)
void *arg;
vm_prot_t accessprot;
+ vaddr_t off; /* used only for access check */
+ vsize_t size; /* used only for access check */
{
dev_t device = *((dev_t *) arg);
struct uvm_device *udv, *lcv;
@@ -157,6 +152,20 @@ udv_attach(arg, accessprot)
return(NULL);
/*
+ * Check that the specified range of the device allows the
+ * desired protection.
+ *
+ * XXX assumes VM_PROT_* == PROT_*
+ * XXX clobbers off and size, but nothing else here needs them.
+ */
+
+ while (size != 0) {
+ if ((*mapfn)(device, off, accessprot) == -1)
+ return (NULL);
+ off += PAGE_SIZE; size -= PAGE_SIZE;
+ }
+
+ /*
* keep looping until we get it
*/
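udv_attach() now refuses to create a device object unless the driver's d_mmap entry point accepts every page of the requested range at the requested protection. A hedged, self-contained sketch of that probe loop follows; the page size, typedefs and function names are local stand-ins, and the range is assumed page-aligned, as it is by the time uvm_mmap() calls udv_attach().

#include <sys/types.h>

#define MODEL_PAGE_SIZE	4096

typedef int (*model_mapfn_t)(dev_t, off_t, int);

static int
model_range_mappable(model_mapfn_t mapfn, dev_t dev, off_t off, size_t size,
    int prot)
{
	/* size is assumed to be a multiple of the page size */
	while (size != 0) {
		if ((*mapfn)(dev, off, prot) == -1)
			return (0);	/* driver refused this page */
		off += MODEL_PAGE_SIZE;
		size -= MODEL_PAGE_SIZE;
	}
	return (1);			/* entire range is mappable */
}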
diff --git a/sys/uvm/uvm_device.h b/sys/uvm/uvm_device.h
index 352314140de..6ac771b50f5 100644
--- a/sys/uvm/uvm_device.h
+++ b/sys/uvm/uvm_device.h
@@ -1,11 +1,6 @@
-/* $OpenBSD: uvm_device.h,v 1.2 1999/02/26 05:32:06 art Exp $ */
-/* $NetBSD: uvm_device.h,v 1.5 1998/03/09 00:58:56 mrg Exp $ */
+/* $NetBSD: uvm_device.h,v 1.6 1999/03/24 03:52:41 cgd Exp $ */
/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
-/*
*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
@@ -72,6 +67,6 @@ struct uvm_device {
* prototypes
*/
-struct uvm_object *udv_attach __P((void *, vm_prot_t));
+struct uvm_object *udv_attach __P((void *, vm_prot_t, vaddr_t, vsize_t));
#endif /* _UVM_UVM_DEVICE_H_ */
diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h
index a18c0ae1ec8..9782f0e0560 100644
--- a/sys/uvm/uvm_extern.h
+++ b/sys/uvm/uvm_extern.h
@@ -1,11 +1,6 @@
-/* $OpenBSD: uvm_extern.h,v 1.4 1999/08/17 10:32:19 niklas Exp $ */
-/* $NetBSD: uvm_extern.h,v 1.21 1998/09/08 23:44:21 thorpej Exp $ */
+/* $NetBSD: uvm_extern.h,v 1.24 1999/04/11 04:04:11 chs Exp $ */
/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
-/*
*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
@@ -123,7 +118,6 @@
/*
* the following defines are for uvm_km_kmemalloc's flags
*/
-
#define UVM_KMF_NOWAIT 0x1 /* matches M_NOWAIT */
#define UVM_KMF_VALLOC 0x2 /* allocate VA only */
#define UVM_KMF_TRYLOCK UVM_FLAG_TRYLOCK /* try locking only */
@@ -136,6 +130,11 @@
#define UVM_PGA_STRAT_FALLBACK 2 /* ONLY falls back on NORMAL */
/*
+ * flags for uvm_pagealloc_strat()
+ */
+#define UVM_PGA_USERESERVE 0x0001
+
+/*
* structures
*/
@@ -182,6 +181,7 @@ struct uvmexp {
int nswapdev; /* number of configured swap devices in system */
int swpages; /* number of PAGE_SIZE'ed swap pages */
int swpginuse; /* number of swap pages in use */
+ int swpgonly; /* number of swap pages in use, not also in RAM */
int nswget; /* number of times fault calls uvm_swap_get() */
int nanon; /* number total of anon's in system */
int nfreeanon; /* number of free anon's */
@@ -275,7 +275,7 @@ int uvm_fault __P((vm_map_t, vaddr_t,
void uvm_chgkprot __P((caddr_t, size_t, int));
#endif
void uvm_fork __P((struct proc *, struct proc *, boolean_t,
- void *, size_t));
+ void *, size_t));
void uvm_exit __P((struct proc *));
void uvm_init_limits __P((struct proc *));
boolean_t uvm_kernacc __P((caddr_t, size_t, int));
@@ -346,9 +346,10 @@ int uvm_mmap __P((vm_map_t, vaddr_t *, vsize_t,
/* uvm_page.c */
struct vm_page *uvm_pagealloc_strat __P((struct uvm_object *,
- vaddr_t, struct vm_anon *, int, int));
-#define uvm_pagealloc(obj, off, anon) \
- uvm_pagealloc_strat((obj), (off), (anon), UVM_PGA_STRAT_NORMAL, 0)
+ vaddr_t, struct vm_anon *, int, int, int));
+#define uvm_pagealloc(obj, off, anon, flags) \
+ uvm_pagealloc_strat((obj), (off), (anon), (flags), \
+ UVM_PGA_STRAT_NORMAL, 0)
vaddr_t uvm_pagealloc_contig __P((vaddr_t, vaddr_t,
vaddr_t, vaddr_t));
void uvm_pagerealloc __P((struct vm_page *,
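With the extra flags argument, the uvm_pagealloc() macro forwards to uvm_pagealloc_strat() as sketched below. These calls are illustrative only: every caller converted in this commit passes 0 for flags, and the UVM_PGA_USERESERVE caller is shown purely as a hypothetical privileged user of the new flag.

	struct vm_page *pg;

	/* ordinary allocation, no reserve privileges (what this diff's callers do) */
	pg = uvm_pagealloc(uobj, offset, anon, 0);

	/* a caller allowed to dip into the kernel reserve would instead pass */
	pg = uvm_pagealloc(uobj, offset, anon, UVM_PGA_USERESERVE);

	/* either form expands to the strategy interface with the NORMAL strategy */
	pg = uvm_pagealloc_strat(uobj, offset, anon, UVM_PGA_USERESERVE,
	    UVM_PGA_STRAT_NORMAL, 0);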
diff --git a/sys/uvm/uvm_fault.c b/sys/uvm/uvm_fault.c
index 338e9f0d1a3..f82f5cd0c5f 100644
--- a/sys/uvm/uvm_fault.c
+++ b/sys/uvm/uvm_fault.c
@@ -1,11 +1,6 @@
-/* $OpenBSD: uvm_fault.c,v 1.2 1999/02/26 05:32:06 art Exp $ */
-/* $NetBSD: uvm_fault.c,v 1.19 1999/01/24 23:53:15 chuck Exp $ */
+/* $NetBSD: uvm_fault.c,v 1.28 1999/04/11 04:04:11 chs Exp $ */
/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
-/*
*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
@@ -378,7 +373,7 @@ int uvmfault_anonget(ufi, amap, anon)
/*
* no page, we must try and bring it in.
*/
- pg = uvm_pagealloc(NULL, 0, anon);
+ pg = uvm_pagealloc(NULL, 0, anon, 0);
if (pg == NULL) { /* out of RAM. */
@@ -549,6 +544,9 @@ int uvmfault_anonget(ufi, amap, anon)
* the map locked off during I/O.
*/
+#define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \
+ ~VM_PROT_WRITE : VM_PROT_ALL)
+
int
uvm_fault(orig_map, vaddr, fault_type, access_type)
vm_map_t orig_map;
@@ -651,7 +649,7 @@ ReFault:
* ensure that we pmap_enter page R/O since
* needs_copy is still true
*/
- enter_prot = enter_prot & ~VM_PROT_WRITE;
+ enter_prot &= ~VM_PROT_WRITE;
}
}
@@ -821,7 +819,8 @@ ReFault:
uvmexp.fltnamap++;
pmap_enter(ufi.orig_map->pmap, currva,
VM_PAGE_TO_PHYS(anon->u.an_page),
- (anon->an_ref > 1) ? VM_PROT_READ : enter_prot,
+ (anon->an_ref > 1) ? (enter_prot & ~VM_PROT_WRITE) :
+ enter_prot,
(ufi.entry->wired_count != 0));
}
simple_unlock(&anon->an_lock);
@@ -887,8 +886,7 @@ ReFault:
result = uobj->pgops->pgo_get(uobj, ufi.entry->offset +
(startva - ufi.entry->start),
pages, &gotpages, centeridx,
- UVM_ET_ISCOPYONWRITE(ufi.entry) ?
- VM_PROT_READ : access_type,
+ access_type & MASK(ufi.entry),
ufi.entry->advice, PGO_LOCKED);
/*
@@ -949,8 +947,7 @@ ReFault:
uvmexp.fltnomap++;
pmap_enter(ufi.orig_map->pmap, currva,
VM_PAGE_TO_PHYS(pages[lcv]),
- UVM_ET_ISCOPYONWRITE(ufi.entry) ?
- VM_PROT_READ : enter_prot, wired);
+ enter_prot & MASK(ufi.entry), wired);
/*
* NOTE: page can't be PG_WANTED or PG_RELEASED
@@ -1075,7 +1072,7 @@ ReFault:
if (anon->an_ref == 1) {
/* get new un-owned replacement page */
- pg = uvm_pagealloc(NULL, 0, NULL);
+ pg = uvm_pagealloc(NULL, 0, NULL, 0);
if (pg == NULL) {
uvmfault_unlockall(&ufi, amap, uobj,
anon);
@@ -1138,7 +1135,7 @@ ReFault:
oanon = anon; /* oanon = old, locked anon */
anon = uvm_analloc();
if (anon)
- pg = uvm_pagealloc(NULL, 0, anon);
+ pg = uvm_pagealloc(NULL, 0, anon, 0);
#ifdef __GNUC__
else
pg = NULL; /* XXX: gcc */
@@ -1149,13 +1146,18 @@ ReFault:
if (anon)
uvm_anfree(anon);
uvmfault_unlockall(&ufi, amap, uobj, oanon);
- if (anon == NULL) {
+#ifdef DIAGNOSTIC
+ if (uvmexp.swpgonly > uvmexp.swpages) {
+ panic("uvmexp.swpgonly botch");
+ }
+#endif
+ if (anon == NULL || uvmexp.swpgonly == uvmexp.swpages) {
UVMHIST_LOG(maphist,
"<- failed. out of VM",0,0,0,0);
uvmexp.fltnoanon++;
- /* XXX: OUT OF VM, ??? */
return (KERN_RESOURCE_SHORTAGE);
}
+
uvmexp.fltnoram++;
uvm_wait("flt_noram3"); /* out of RAM, wait for more */
goto ReFault;
@@ -1210,10 +1212,19 @@ ReFault:
if (fault_type == VM_FAULT_WIRE) {
uvm_pagewire(pg);
+
+ /*
+ * since the now-wired page cannot be paged out,
+ * release its swap resources for others to use.
+ * since an anon with no swap cannot be PG_CLEAN,
+ * clear its clean flag now.
+ */
+
+ pg->flags &= ~(PG_CLEAN);
+ uvm_anon_dropswap(anon);
} else {
/* activate it */
uvm_pageactivate(pg);
-
}
uvm_unlock_pageq();
@@ -1279,8 +1290,7 @@ Case2:
result = uobj->pgops->pgo_get(uobj,
(ufi.orig_rvaddr - ufi.entry->start) + ufi.entry->offset,
&uobjpage, &gotpages, 0,
- UVM_ET_ISCOPYONWRITE(ufi.entry) ?
- VM_PROT_READ : access_type,
+ access_type & MASK(ufi.entry),
ufi.entry->advice, 0);
/* locked: uobjpage(if result OK) */
@@ -1411,7 +1421,7 @@ Case2:
uvmexp.flt_obj++;
if (UVM_ET_ISCOPYONWRITE(ufi.entry))
- enter_prot = enter_prot & ~VM_PROT_WRITE;
+ enter_prot &= ~VM_PROT_WRITE;
pg = uobjpage; /* map in the actual object */
/* assert(uobjpage != PGO_DONTCARE) */
@@ -1430,7 +1440,7 @@ Case2:
/* write fault: must break the loan here */
/* alloc new un-owned page */
- pg = uvm_pagealloc(NULL, 0, NULL);
+ pg = uvm_pagealloc(NULL, 0, NULL, 0);
if (pg == NULL) {
/*
@@ -1450,7 +1460,8 @@ Case2:
uvmfault_unlockall(&ufi, amap, uobj,
NULL);
UVMHIST_LOG(maphist,
- " out of RAM breaking loan, waiting", 0,0,0,0);
+ " out of RAM breaking loan, waiting",
+ 0,0,0,0);
uvmexp.fltnoram++;
uvm_wait("flt_noram4");
goto ReFault;
@@ -1510,7 +1521,7 @@ Case2:
anon = uvm_analloc();
if (anon)
- pg = uvm_pagealloc(NULL, 0, anon); /* BUSY+CLEAN+FAKE */
+ pg = uvm_pagealloc(NULL, 0, anon, 0);
#ifdef __GNUC__
else
pg = NULL; /* XXX: gcc */
@@ -1540,13 +1551,18 @@ Case2:
/* unlock and fail ... */
uvmfault_unlockall(&ufi, amap, uobj, NULL);
- if (anon == NULL) {
+#ifdef DIAGNOSTIC
+ if (uvmexp.swpgonly > uvmexp.swpages) {
+ panic("uvmexp.swpgonly botch");
+ }
+#endif
+ if (anon == NULL || uvmexp.swpgonly == uvmexp.swpages) {
UVMHIST_LOG(maphist, " promote: out of VM",
0,0,0,0);
uvmexp.fltnoanon++;
- /* XXX: out of VM */
return (KERN_RESOURCE_SHORTAGE);
}
+
UVMHIST_LOG(maphist, " out of RAM, waiting for more",
0,0,0,0);
uvm_anfree(anon);
@@ -1627,11 +1643,21 @@ Case2:
if (fault_type == VM_FAULT_WIRE) {
uvm_pagewire(pg);
+ if (pg->pqflags & PQ_AOBJ) {
+
+ /*
+ * since the now-wired page cannot be paged out,
+ * release its swap resources for others to use.
+ * since an aobj page with no swap cannot be PG_CLEAN,
+ * clear its clean flag now.
+ */
+
+ pg->flags &= ~(PG_CLEAN);
+ uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
+ }
} else {
-
/* activate it */
uvm_pageactivate(pg);
-
}
uvm_unlock_pageq();
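The MASK() macro introduced at the top of uvm_fault() captures the rule applied in the hunks above: for a copy-on-write map entry, whatever the faulting access was, the protection used against the backing object must never include write. A standalone restatement, with local constants standing in for the VM_PROT_* values:

#define M_PROT_READ	0x1
#define M_PROT_WRITE	0x2
#define M_PROT_EXECUTE	0x4
#define M_PROT_ALL	(M_PROT_READ | M_PROT_WRITE | M_PROT_EXECUTE)

static int
model_object_prot(int access_type, int is_copy_on_write)
{
	/* copy-on-write: strip the write bit; otherwise pass everything */
	int mask = is_copy_on_write ? ~M_PROT_WRITE : M_PROT_ALL;

	return (access_type & mask);
}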
diff --git a/sys/uvm/uvm_fault.h b/sys/uvm/uvm_fault.h
index 815ca70ee34..fd8958f96e5 100644
--- a/sys/uvm/uvm_fault.h
+++ b/sys/uvm/uvm_fault.h
@@ -1,11 +1,6 @@
-/* $OpenBSD: uvm_fault.h,v 1.2 1999/02/26 05:32:06 art Exp $ */
/* $NetBSD: uvm_fault.h,v 1.7 1998/10/11 23:07:42 chuck Exp $ */
/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
-/*
*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
diff --git a/sys/uvm/uvm_fault_i.h b/sys/uvm/uvm_fault_i.h
index 2302c85c34c..8a2c3ea0fb4 100644
--- a/sys/uvm/uvm_fault_i.h
+++ b/sys/uvm/uvm_fault_i.h
@@ -1,11 +1,6 @@
-/* $OpenBSD: uvm_fault_i.h,v 1.2 1999/02/26 05:32:06 art Exp $ */
/* $NetBSD: uvm_fault_i.h,v 1.7 1999/01/24 23:53:15 chuck Exp $ */
/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
-/*
*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
diff --git a/sys/uvm/uvm_glue.c b/sys/uvm/uvm_glue.c
index dfbf67de7f6..d32d222a7fa 100644
--- a/sys/uvm/uvm_glue.c
+++ b/sys/uvm/uvm_glue.c
@@ -1,10 +1,5 @@
-/* $OpenBSD: uvm_glue.c,v 1.3 1999/08/17 10:32:19 niklas Exp $ */
-/* $NetBSD: uvm_glue.c,v 1.15 1998/10/19 22:21:19 tron Exp $ */
+/* $NetBSD: uvm_glue.c,v 1.19 1999/04/30 21:23:50 thorpej Exp $ */
-/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* Copyright (c) 1991, 1993, The Regents of the University of California.
@@ -258,6 +253,8 @@ uvm_vsunlock(p, addr, len)
* - the address space is copied as per parent map's inherit values
* - a new "user" structure is allocated for the child process
* [filled in by MD layer...]
+ * - if specified, the child gets a new user stack described by
+ * stack and stacksize
* - NOTE: the kernel stack may be at a different location in the child
* process, and thus addresses of automatic variables may be invalid
* after cpu_fork returns in the child process. We do nothing here
@@ -466,8 +463,6 @@ loop:
printf("scheduler: no room for pid %d(%s), free %d\n",
p->p_pid, p->p_comm, uvmexp.free);
#endif
- printf("scheduler: no room for pid %d(%s), free %d\n",
- p->p_pid, p->p_comm, uvmexp.free);/*XXXCDC: HIGHLY BOGUS */
(void) splhigh();
uvm_wait("schedpwait");
(void) spl0();
diff --git a/sys/uvm/uvm_glue.h b/sys/uvm/uvm_glue.h
index a82cf085c83..4fdc0cb1742 100644
--- a/sys/uvm/uvm_glue.h
+++ b/sys/uvm/uvm_glue.h
@@ -1,11 +1,6 @@
-/* $OpenBSD: uvm_glue.h,v 1.2 1999/02/26 05:32:06 art Exp $ */
/* $NetBSD: uvm_glue.h,v 1.4 1998/02/10 02:34:37 perry Exp $ */
/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
-/*
*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
diff --git a/sys/uvm/uvm_init.c b/sys/uvm/uvm_init.c
index f4e53263b03..57e75cfcee0 100644
--- a/sys/uvm/uvm_init.c
+++ b/sys/uvm/uvm_init.c
@@ -1,11 +1,6 @@
-/* $OpenBSD: uvm_init.c,v 1.2 1999/02/26 05:32:07 art Exp $ */
/* $NetBSD: uvm_init.c,v 1.10 1999/01/24 23:53:15 chuck Exp $ */
/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
-/*
*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
diff --git a/sys/uvm/uvm_io.c b/sys/uvm/uvm_io.c
index 0fc8efd74a1..ddfdc1fd475 100644
--- a/sys/uvm/uvm_io.c
+++ b/sys/uvm/uvm_io.c
@@ -1,11 +1,6 @@
-/* $OpenBSD: uvm_io.c,v 1.2 1999/02/26 05:32:07 art Exp $ */
/* $NetBSD: uvm_io.c,v 1.7 1998/10/11 23:18:20 chuck Exp $ */
/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
-/*
*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c
index 9e90e2d9a7b..2e0ece879f7 100644
--- a/sys/uvm/uvm_km.c
+++ b/sys/uvm/uvm_km.c
@@ -1,10 +1,5 @@
-/* $OpenBSD: uvm_km.c,v 1.2 1999/02/26 05:32:07 art Exp $ */
-/* $NetBSD: uvm_km.c,v 1.18 1998/10/18 23:49:59 chs Exp $ */
+/* $NetBSD: uvm_km.c,v 1.22 1999/03/26 21:58:39 mycroft Exp $ */
-/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* Copyright (c) 1991, 1993, The Regents of the University of California.
@@ -177,7 +172,6 @@ static struct uvm_object mb_object_store;
static struct uvm_pagerops km_pager = {
NULL, /* init */
- NULL, /* attach */
NULL, /* reference */
NULL, /* detach */
NULL, /* fault */
@@ -247,7 +241,7 @@ uvm_km_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
/* null? attempt to allocate the page */
if (ptmp == NULL) {
ptmp = uvm_pagealloc(uobj, current_offset,
- NULL);
+ NULL, 0);
if (ptmp) {
/* new page */
ptmp->flags &= ~(PG_BUSY|PG_FAKE);
@@ -337,7 +331,7 @@ uvm_km_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
if (ptmp == NULL) {
ptmp = uvm_pagealloc(uobj, current_offset,
- NULL); /* alloc */
+ NULL, 0);
/* out of RAM? */
if (ptmp == NULL) {
@@ -578,12 +572,7 @@ uvm_km_pgremove(uobj, start, end)
* if this kernel object is an aobj, free the swap slot.
*/
if (is_aobj) {
- int slot = uao_set_swslot(uobj,
- curoff >> PAGE_SHIFT,
- 0);
-
- if (slot)
- uvm_swap_free(slot, 1);
+ uao_dropswap(uobj, curoff >> PAGE_SHIFT);
}
uvm_lock_pageq();
@@ -618,11 +607,7 @@ loop_by_list:
* if this kernel object is an aobj, free the swap slot.
*/
if (is_aobj) {
- int slot = uao_set_swslot(uobj,
- pp->offset >> PAGE_SHIFT, 0);
-
- if (slot)
- uvm_swap_free(slot, 1);
+ uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);
}
uvm_lock_pageq();
@@ -712,7 +697,7 @@ uvm_km_kmemalloc(map, obj, size, flags)
loopva = kva;
while (size) {
simple_lock(&obj->vmobjlock);
- pg = uvm_pagealloc(obj, offset, NULL);
+ pg = uvm_pagealloc(obj, offset, NULL, 0);
if (pg) {
pg->flags &= ~PG_BUSY; /* new page */
UVM_PAGE_OWN(pg, NULL);
@@ -861,7 +846,7 @@ uvm_km_alloc1(map, size, zeroit)
}
/* allocate ram */
- pg = uvm_pagealloc(uvm.kernel_object, offset, NULL);
+ pg = uvm_pagealloc(uvm.kernel_object, offset, NULL, 0);
if (pg) {
pg->flags &= ~PG_BUSY; /* new page */
UVM_PAGE_OWN(pg, NULL);
@@ -1012,7 +997,7 @@ uvm_km_alloc_poolpage1(map, obj, waitok)
vaddr_t va;
again:
- pg = uvm_pagealloc(NULL, 0, NULL);
+ pg = uvm_pagealloc(NULL, 0, NULL, 0);
if (pg == NULL) {
if (waitok) {
uvm_wait("plpg");
diff --git a/sys/uvm/uvm_km.h b/sys/uvm/uvm_km.h
index 0c7d6509f07..7b07b0d74d7 100644
--- a/sys/uvm/uvm_km.h
+++ b/sys/uvm/uvm_km.h
@@ -1,11 +1,6 @@
-/* $OpenBSD: uvm_km.h,v 1.2 1999/02/26 05:32:07 art Exp $ */
/* $NetBSD: uvm_km.h,v 1.6 1998/08/13 02:11:01 eeh Exp $ */
/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
-/*
*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
diff --git a/sys/uvm/uvm_loan.c b/sys/uvm/uvm_loan.c
index 9facf6f1677..66d81f15eb0 100644
--- a/sys/uvm/uvm_loan.c
+++ b/sys/uvm/uvm_loan.c
@@ -1,11 +1,6 @@
-/* $OpenBSD: uvm_loan.c,v 1.2 1999/02/26 05:32:07 art Exp $ */
-/* $NetBSD: uvm_loan.c,v 1.13 1999/01/24 23:53:15 chuck Exp $ */
+/* $NetBSD: uvm_loan.c,v 1.14 1999/03/25 18:48:52 mrg Exp $ */
/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
-/*
*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
@@ -615,7 +610,7 @@ uvm_loanzero(ufi, output, flags)
if ((flags & UVM_LOAN_TOANON) == 0) { /* loaning to kernel-page */
- while ((pg = uvm_pagealloc(NULL, 0, NULL)) == NULL) {
+ while ((pg = uvm_pagealloc(NULL, 0, NULL, 0)) == NULL) {
uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
ufi->entry->object.uvm_obj, NULL);
uvm_wait("loanzero1");
@@ -645,7 +640,7 @@ uvm_loanzero(ufi, output, flags)
/* loaning to an anon */
while ((anon = uvm_analloc()) == NULL ||
- (pg = uvm_pagealloc(NULL, 0, anon)) == NULL) {
+ (pg = uvm_pagealloc(NULL, 0, anon, 0)) == NULL) {
/* unlock everything */
uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
diff --git a/sys/uvm/uvm_loan.h b/sys/uvm/uvm_loan.h
index d9e2938cb0c..c36a60237dc 100644
--- a/sys/uvm/uvm_loan.h
+++ b/sys/uvm/uvm_loan.h
@@ -1,11 +1,6 @@
-/* $OpenBSD: uvm_loan.h,v 1.2 1999/02/26 05:32:07 art Exp $ */
/* $NetBSD: uvm_loan.h,v 1.5 1998/08/13 02:11:01 eeh Exp $ */
/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
-/*
*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index f5637f0cb58..68487e398e3 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,10 +1,5 @@
-/* $OpenBSD: uvm_map.c,v 1.2 1999/02/26 05:32:07 art Exp $ */
-/* $NetBSD: uvm_map.c,v 1.34 1999/01/24 23:53:15 chuck Exp $ */
+/* $NetBSD: uvm_map.c,v 1.39 1999/05/12 19:11:23 thorpej Exp $ */
-/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* Copyright (c) 1991, 1993, The Regents of the University of California.
@@ -82,9 +77,6 @@
#include <sys/malloc.h>
#include <sys/pool.h>
-#include <sys/user.h>
-#include <machine/pcb.h>
-
#ifdef SYSVSHM
#include <sys/shm.h>
#endif
@@ -116,6 +108,17 @@ struct pool uvm_vmspace_pool;
struct pool uvm_map_entry_pool;
+#ifdef PMAP_GROWKERNEL
+/*
+ * This global represents the end of the kernel virtual address
+ * space. If we want to exceed this, we must grow the kernel
+ * virtual address space dynamically.
+ *
+ * Note, this variable is locked by kernel_map's lock.
+ */
+vaddr_t uvm_maxkaddr;
+#endif
+
/*
* macros
*/
@@ -183,21 +186,13 @@ static void uvm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
* local inlines
*/
-#undef UVM_MAP_INLINES
-
-#ifdef UVM_MAP_INLINES
-#define UVM_INLINE __inline
-#else
-#define UVM_INLINE
-#endif
-
/*
* uvm_mapent_alloc: allocate a map entry
*
* => XXX: static pool for kernel map?
*/
-static UVM_INLINE vm_map_entry_t
+static __inline vm_map_entry_t
uvm_mapent_alloc(map)
vm_map_t map;
{
@@ -235,7 +230,7 @@ uvm_mapent_alloc(map)
* => XXX: static pool for kernel map?
*/
-static UVM_INLINE void
+static __inline void
uvm_mapent_free(me)
vm_map_entry_t me;
{
@@ -260,7 +255,7 @@ uvm_mapent_free(me)
* uvm_mapent_copy: copy a map entry, preserving flags
*/
-static UVM_INLINE void
+static __inline void
uvm_mapent_copy(src, dst)
vm_map_entry_t src;
vm_map_entry_t dst;
@@ -275,7 +270,7 @@ uvm_mapent_copy(src, dst)
* => map should be locked by caller
*/
-static UVM_INLINE void
+static __inline void
uvm_map_entry_unwire(map, entry)
vm_map_t map;
vm_map_entry_t entry;
@@ -514,18 +509,14 @@ uvm_map(map, startp, size, uobj, uoffset, flags)
return (KERN_NO_SPACE);
}
-#if defined(PMAP_GROWKERNEL) /* hack */
+#ifdef PMAP_GROWKERNEL
{
- /* locked by kernel_map lock */
- static vaddr_t maxkaddr = 0;
-
/*
- * hack: grow kernel PTPs in advance.
+ * If the kernel pmap can't map the requested space,
+ * then allocate more resources for it.
*/
- if (map == kernel_map && maxkaddr < (*startp + size)) {
- pmap_growkernel(*startp + size);
- maxkaddr = *startp + size;
- }
+ if (map == kernel_map && uvm_maxkaddr < (*startp + size))
+ uvm_maxkaddr = pmap_growkernel(*startp + size);
}
#endif
@@ -1466,7 +1457,7 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
newentry->start = dstaddr + oldoffset;
newentry->end =
newentry->start + (entry->end - (entry->start + fudge));
- if (newentry->end > newend)
+ if (newentry->end > newend || newentry->end < newentry->start)
newentry->end = newend;
newentry->object.uvm_obj = entry->object.uvm_obj;
if (newentry->object.uvm_obj) {
@@ -1715,11 +1706,10 @@ uvm_map_submap(map, start, end, submap)
*
* => set_max means set max_protection.
* => map must be unlocked.
- * => XXXCDC: does not work properly with share maps. rethink.
*/
-#define MASK(entry) ( UVM_ET_ISCOPYONWRITE(entry) ? \
- ~VM_PROT_WRITE : VM_PROT_ALL)
+#define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \
+ ~VM_PROT_WRITE : VM_PROT_ALL)
#define max(a,b) ((a) > (b) ? (a) : (b))
int
@@ -1751,10 +1741,10 @@ uvm_map_protect(map, start, end, new_prot, set_max)
current = entry;
while ((current != &map->header) && (current->start < end)) {
if (UVM_ET_ISSUBMAP(current))
- return(KERN_INVALID_ARGUMENT);
+ return (KERN_INVALID_ARGUMENT);
if ((new_prot & current->max_protection) != new_prot) {
vm_map_unlock(map);
- return(KERN_PROTECTION_FAILURE);
+ return (KERN_PROTECTION_FAILURE);
}
current = current->next;
}
@@ -1804,7 +1794,6 @@ uvm_map_protect(map, start, end, new_prot, set_max)
* => map must be unlocked
* => note that the inherit code is used during a "fork". see fork
* code for details.
- * => XXXCDC: currently only works in main map. what about share map?
*/
int
@@ -1826,7 +1815,7 @@ uvm_map_inherit(map, start, end, new_inheritance)
break;
default:
UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
- return(KERN_INVALID_ARGUMENT);
+ return (KERN_INVALID_ARGUMENT);
}
vm_map_lock(map);
@@ -2018,7 +2007,7 @@ uvm_map_pageable(map, start, end, new_pageable)
}
vm_map_unlock(map);
UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0);
- return(KERN_INVALID_ARGUMENT);
+ return (KERN_INVALID_ARGUMENT);
}
entry = entry->next;
}
@@ -2100,7 +2089,6 @@ uvm_map_pageable(map, start, end, new_pageable)
* => called from sys_msync()
* => caller must not write-lock map (read OK).
* => we may sleep while cleaning if SYNCIO [with map read-locked]
- * => XXX: does this handle share maps properly?
*/
int
@@ -2131,12 +2119,12 @@ uvm_map_clean(map, start, end, flags)
for (current = entry; current->start < end; current = current->next) {
if (UVM_ET_ISSUBMAP(current)) {
vm_map_unlock_read(map);
- return(KERN_INVALID_ARGUMENT);
+ return (KERN_INVALID_ARGUMENT);
}
if (end > current->end && (current->next == &map->header ||
current->end != current->next->start)) {
vm_map_unlock_read(map);
- return(KERN_INVALID_ADDRESS);
+ return (KERN_INVALID_ADDRESS);
}
}
@@ -2319,11 +2307,9 @@ uvmspace_unshare(p)
nvm = uvmspace_fork(ovm);
s = splhigh(); /* make this `atomic' */
- pmap_deactivate(p);
- /* unbind old vmspace */
+ pmap_deactivate(p); /* unbind old vmspace */
p->p_vmspace = nvm;
- pmap_activate(p);
- /* switch to new vmspace */
+ pmap_activate(p); /* switch to new vmspace */
splx(s); /* end of critical section */
uvmspace_free(ovm); /* drop reference to old vmspace */
@@ -2774,6 +2760,10 @@ uvmspace_fork(vm1)
shmfork(vm1, vm2);
#endif
+#ifdef PMAP_FORK
+ pmap_fork(vm1->vm_map.pmap, vm2->vm_map.pmap);
+#endif
+
UVMHIST_LOG(maphist,"<- done",0,0,0,0);
return(vm2);
}
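uvm_maxkaddr replaces the function-local static that uvm_map() previously kept, so other code (uvm_pageboot_alloc() later in this diff) can consult the same high-water mark. A toy model of the mechanism follows, with plain types and a trivial stand-in for pmap_growkernel(), which in the kernel allocates page tables up to the requested address and returns the new end of mapped kernel VA.

typedef unsigned long model_vaddr_t;

static model_vaddr_t model_maxkaddr;	/* end of usable kernel VA so far */

static model_vaddr_t
model_growkernel(model_vaddr_t needed)
{
	/* stand-in: the real pmap_growkernel() may round up */
	return (needed);
}

static void
model_reserve_kva(model_vaddr_t start, model_vaddr_t size)
{
	if (model_maxkaddr < start + size)
		model_maxkaddr = model_growkernel(start + size);
}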
diff --git a/sys/uvm/uvm_map.h b/sys/uvm/uvm_map.h
index bdd029f7aee..c4ee5711acb 100644
--- a/sys/uvm/uvm_map.h
+++ b/sys/uvm/uvm_map.h
@@ -1,10 +1,5 @@
-/* $OpenBSD: uvm_map.h,v 1.2 1999/02/26 05:32:07 art Exp $ */
-/* $NetBSD: uvm_map.h,v 1.10 1998/10/11 23:14:48 chuck Exp $ */
+/* $NetBSD: uvm_map.h,v 1.11 1999/03/25 18:48:52 mrg Exp $ */
-/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* Copyright (c) 1991, 1993, The Regents of the University of California.
@@ -122,6 +117,14 @@
#endif /* UVM_MAP_INLINE */
/*
+ * globals:
+ */
+
+#ifdef PMAP_GROWKERNEL
+extern vaddr_t uvm_maxkaddr;
+#endif
+
+/*
* protos: the following prototypes define the interface to vm_map
*/
diff --git a/sys/uvm/uvm_map_i.h b/sys/uvm/uvm_map_i.h
index 032547d4619..e56ba28e5e9 100644
--- a/sys/uvm/uvm_map_i.h
+++ b/sys/uvm/uvm_map_i.h
@@ -1,10 +1,5 @@
-/* $OpenBSD: uvm_map_i.h,v 1.2 1999/02/26 05:32:07 art Exp $ */
-/* $NetBSD: uvm_map_i.h,v 1.10 1998/10/11 23:14:48 chuck Exp $ */
+/* $NetBSD: uvm_map_i.h,v 1.11 1999/03/25 18:48:53 mrg Exp $ */
-/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* Copyright (c) 1991, 1993, The Regents of the University of California.
@@ -140,10 +135,6 @@ uvm_map_setup(map, min, max, pageable)
*
* => caller must check alignment and size
* => map must be unlocked (we will lock it)
- * => if the "start"/"stop" range lie within a mapping of a share map,
- * then the unmap takes place within the context of that share map
- * rather than in the main map, unless the "mainonly" flag is set.
- * (e.g. the "exit" system call would want to set "mainonly").
*/
MAP_INLINE int
@@ -156,7 +147,7 @@ uvm_unmap(map, start, end)
UVMHIST_FUNC("uvm_unmap"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist, " (map=0x%x, start=0x%x, end=0x%x)",
- map, start, end, 0);
+ map, start, end, 0);
/*
* work now done by helper functions. wipe the pmap's and then
* detach from the dead entries...
diff --git a/sys/uvm/uvm_meter.c b/sys/uvm/uvm_meter.c
index 07484df3ae1..32ee3f29eed 100644
--- a/sys/uvm/uvm_meter.c
+++ b/sys/uvm/uvm_meter.c
@@ -1,11 +1,6 @@
-/* $OpenBSD: uvm_meter.c,v 1.2 1999/02/26 05:32:07 art Exp $ */
/* $NetBSD: uvm_meter.c,v 1.7 1998/08/09 22:36:39 perry Exp $ */
/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
-/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* Copyright (c) 1982, 1986, 1989, 1993
* The Regents of the University of California.
diff --git a/sys/uvm/uvm_mmap.c b/sys/uvm/uvm_mmap.c
index ddf5ba432c9..4d78b3a3993 100644
--- a/sys/uvm/uvm_mmap.c
+++ b/sys/uvm/uvm_mmap.c
@@ -1,11 +1,6 @@
-/* $OpenBSD: uvm_mmap.c,v 1.3 1999/06/04 00:17:29 art Exp $ */
-/* $NetBSD: uvm_mmap.c,v 1.15 1998/10/11 23:18:20 chuck Exp $ */
+/* $NetBSD: uvm_mmap.c,v 1.19 1999/03/25 18:48:53 mrg Exp $ */
/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
-/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* Copyright (c) 1991, 1993 The Regents of the University of California.
* Copyright (c) 1988 University of Utah.
@@ -97,11 +92,11 @@ sys_sbrk(p, v, retval)
{
#if 0
struct sys_sbrk_args /* {
- syscallarg(int) incr;
- } */ *uap = v;
+ syscallarg(int) incr;
+ } */ *uap = v;
#endif
- return (EOPNOTSUPP);
+ return (ENOSYS);
}
/*
@@ -117,11 +112,11 @@ sys_sstk(p, v, retval)
{
#if 0
struct sys_sstk_args /* {
- syscallarg(int) incr;
- } */ *uap = v;
+ syscallarg(int) incr;
+ } */ *uap = v;
#endif
- return (EOPNOTSUPP);
+ return (ENOSYS);
}
/*
@@ -137,13 +132,13 @@ sys_madvise(p, v, retval)
{
#if 0
struct sys_madvise_args /* {
- syscallarg(caddr_t) addr;
- syscallarg(size_t) len;
- syscallarg(int) behav;
- } */ *uap = v;
+ syscallarg(caddr_t) addr;
+ syscallarg(size_t) len;
+ syscallarg(int) behav;
+ } */ *uap = v;
#endif
- return (EOPNOTSUPP);
+ return (ENOSYS);
}
/*
@@ -159,13 +154,13 @@ sys_mincore(p, v, retval)
{
#if 0
struct sys_mincore_args /* {
- syscallarg(caddr_t) addr;
- syscallarg(size_t) len;
- syscallarg(char *) vec;
- } */ *uap = v;
+ syscallarg(caddr_t) addr;
+ syscallarg(size_t) len;
+ syscallarg(char *) vec;
+ } */ *uap = v;
#endif
- return (EOPNOTSUPP);
+ return (ENOSYS);
}
#if 0
@@ -242,7 +237,8 @@ sys_mmap(p, v, retval)
*/
if (pos + size > (vaddr_t)-PAGE_SIZE) {
#ifdef DEBUG
- printf("mmap: pos=%qx, size=%x too big\n", pos, (int)size);
+ printf("mmap: pos=%qx, size=%lx too big\n", (long long)pos,
+ (long)size);
#endif
return (EINVAL);
}
@@ -418,7 +414,7 @@ is_anon: /* label for SunOS style /dev/zero */
*/
int
-sys_msync(p, v, retval) /* ART_UVM_XXX - is this correct msync? */
+sys_msync(p, v, retval)
struct proc *p;
void *v;
register_t *retval;
@@ -904,7 +900,7 @@ uvm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
} else {
uobj = udv_attach((void *) &vp->v_rdev,
(flags & MAP_SHARED) ?
- maxprot : (maxprot & ~VM_PROT_WRITE));
+ maxprot : (maxprot & ~VM_PROT_WRITE), foff, size);
advice = UVM_ADV_RANDOM;
}
diff --git a/sys/uvm/uvm_object.h b/sys/uvm/uvm_object.h
index 829cab968ef..c45dd262a6b 100644
--- a/sys/uvm/uvm_object.h
+++ b/sys/uvm/uvm_object.h
@@ -1,11 +1,6 @@
-/* $OpenBSD: uvm_object.h,v 1.2 1999/02/26 05:32:07 art Exp $ */
/* $NetBSD: uvm_object.h,v 1.5 1998/03/09 00:58:58 mrg Exp $ */
/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
-/*
*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
diff --git a/sys/uvm/uvm_page.c b/sys/uvm/uvm_page.c
index 1a59da29d07..6619e707fe5 100644
--- a/sys/uvm/uvm_page.c
+++ b/sys/uvm/uvm_page.c
@@ -1,10 +1,5 @@
-/* $OpenBSD: uvm_page.c,v 1.3 1999/07/23 14:47:06 ho Exp $ */
-/* $NetBSD: uvm_page.c,v 1.15 1998/10/18 23:50:00 chs Exp $ */
+/* $NetBSD: uvm_page.c,v 1.19 1999/05/20 20:07:55 thorpej Exp $ */
-/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* Copyright (c) 1991, 1993, The Regents of the University of California.
@@ -388,6 +383,7 @@ uvm_pageboot_alloc(size)
#else /* !PMAP_STEAL_MEMORY */
+ static boolean_t initialized = FALSE;
vaddr_t addr, vaddr;
paddr_t paddr;
@@ -395,23 +391,39 @@ uvm_pageboot_alloc(size)
size = round_page(size);
/*
- * on first call to this function init ourselves. we detect this
- * by checking virtual_space_start/end which are in the zero'd BSS area.
+ * on first call to this function, initialize ourselves.
*/
-
- if (virtual_space_start == virtual_space_end) {
+ if (initialized == FALSE) {
pmap_virtual_space(&virtual_space_start, &virtual_space_end);
/* round it the way we like it */
virtual_space_start = round_page(virtual_space_start);
virtual_space_end = trunc_page(virtual_space_end);
+
+ initialized = TRUE;
}
/*
* allocate virtual memory for this request
*/
+ if (virtual_space_start == virtual_space_end ||
+ (virtual_space_end - virtual_space_start) < size)
+ panic("uvm_pageboot_alloc: out of virtual space");
addr = virtual_space_start;
+
+#ifdef PMAP_GROWKERNEL
+ /*
+ * If the kernel pmap can't map the requested space,
+ * then allocate more resources for it.
+ */
+ if (uvm_maxkaddr < (addr + size)) {
+ uvm_maxkaddr = pmap_growkernel(addr + size);
+ if (uvm_maxkaddr < (addr + size))
+ panic("uvm_pageboot_alloc: pmap_growkernel() failed");
+ }
+#endif
+
virtual_space_start += size;
/*
@@ -812,15 +824,17 @@ uvm_page_physdump()
*/
struct vm_page *
-uvm_pagealloc_strat(obj, off, anon, strat, free_list)
+uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
struct uvm_object *obj;
vaddr_t off;
+ int flags;
struct vm_anon *anon;
int strat, free_list;
{
int lcv, s;
struct vm_page *pg;
struct pglist *freeq;
+ boolean_t use_reserve;
#ifdef DIAGNOSTIC
/* sanity check */
@@ -850,10 +864,11 @@ uvm_pagealloc_strat(obj, off, anon, strat, free_list)
* the requestor isn't the pagedaemon.
*/
- if ((uvmexp.free <= uvmexp.reserve_kernel &&
- !(obj && obj->uo_refs == UVM_OBJ_KERN)) ||
+ use_reserve = (flags & UVM_PGA_USERESERVE) ||
+ (obj && obj->uo_refs == UVM_OBJ_KERN);
+ if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) ||
(uvmexp.free <= uvmexp.reserve_pagedaemon &&
- !(obj == uvmexp.kmem_object && curproc == uvm.pagedaemon_proc)))
+ !(use_reserve && curproc == uvm.pagedaemon_proc)))
goto fail;
again:
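The use_reserve computation above changes which callers may allocate from the tail of the free list: kernel-object allocations and callers passing UVM_PGA_USERESERVE may go below reserve_kernel, and only the pagedaemon itself may go below reserve_pagedaemon. A self-contained restatement of that admission check, with plain ints standing in for the uvmexp fields and the curproc test:

static int
model_alloc_allowed(int free, int reserve_kernel, int reserve_pagedaemon,
    int use_reserve, int is_pagedaemon)
{
	if (free <= reserve_kernel && !use_reserve)
		return (0);	/* would eat into the kernel reserve */
	if (free <= reserve_pagedaemon && !(use_reserve && is_pagedaemon))
		return (0);	/* last pages are for the pagedaemon only */
	return (1);
}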
diff --git a/sys/uvm/uvm_page.h b/sys/uvm/uvm_page.h
index 302cfe6d4bc..621bb01d9a1 100644
--- a/sys/uvm/uvm_page.h
+++ b/sys/uvm/uvm_page.h
@@ -1,10 +1,5 @@
-/* $OpenBSD: uvm_page.h,v 1.2 1999/02/26 05:32:07 art Exp $ */
/* $NetBSD: uvm_page.h,v 1.10 1998/08/13 02:11:02 eeh Exp $ */
-/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* Copyright (c) 1991, 1993, The Regents of the University of California.
diff --git a/sys/uvm/uvm_page_i.h b/sys/uvm/uvm_page_i.h
index 3aa1c17955a..4691e0806f8 100644
--- a/sys/uvm/uvm_page_i.h
+++ b/sys/uvm/uvm_page_i.h
@@ -1,10 +1,5 @@
-/* $OpenBSD: uvm_page_i.h,v 1.2 1999/02/26 05:32:08 art Exp $ */
/* $NetBSD: uvm_page_i.h,v 1.8 1998/08/13 02:11:02 eeh Exp $ */
-/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* Copyright (c) 1991, 1993, The Regents of the University of California.
diff --git a/sys/uvm/uvm_pager.c b/sys/uvm/uvm_pager.c
index 65b4128beeb..be91fbf83e0 100644
--- a/sys/uvm/uvm_pager.c
+++ b/sys/uvm/uvm_pager.c
@@ -1,11 +1,6 @@
-/* $OpenBSD: uvm_pager.c,v 1.2 1999/02/26 05:32:08 art Exp $ */
-/* $NetBSD: uvm_pager.c,v 1.14 1999/01/22 08:00:35 chs Exp $ */
+/* $NetBSD: uvm_pager.c,v 1.15 1999/03/25 18:48:55 mrg Exp $ */
/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
-/*
*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
diff --git a/sys/uvm/uvm_pager.h b/sys/uvm/uvm_pager.h
index 93b2af0f0d2..888dee8e577 100644
--- a/sys/uvm/uvm_pager.h
+++ b/sys/uvm/uvm_pager.h
@@ -1,11 +1,6 @@
-/* $OpenBSD: uvm_pager.h,v 1.2 1999/02/26 05:32:08 art Exp $ */
-/* $NetBSD: uvm_pager.h,v 1.7 1998/08/13 02:11:03 eeh Exp $ */
+/* $NetBSD: uvm_pager.h,v 1.8 1999/03/24 03:45:28 cgd Exp $ */
/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
-/*
*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
@@ -67,8 +62,6 @@ struct uvm_aiodesc {
struct uvm_pagerops {
void (*pgo_init) __P((void));/* init pager */
- struct uvm_object * (*pgo_attach) /* get uvm_object */
- __P((void *, vm_prot_t));
void (*pgo_reference) /* add reference to obj */
__P((struct uvm_object *));
void (*pgo_detach) /* drop reference to obj */
diff --git a/sys/uvm/uvm_pager_i.h b/sys/uvm/uvm_pager_i.h
index 7749cd7949a..20843037580 100644
--- a/sys/uvm/uvm_pager_i.h
+++ b/sys/uvm/uvm_pager_i.h
@@ -1,11 +1,6 @@
-/* $OpenBSD: uvm_pager_i.h,v 1.2 1999/02/26 05:32:08 art Exp $ */
/* $NetBSD: uvm_pager_i.h,v 1.6 1998/08/13 02:11:03 eeh Exp $ */
/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
-/*
*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
diff --git a/sys/uvm/uvm_pdaemon.c b/sys/uvm/uvm_pdaemon.c
index 0e64585ad32..73416c26f4a 100644
--- a/sys/uvm/uvm_pdaemon.c
+++ b/sys/uvm/uvm_pdaemon.c
@@ -1,10 +1,5 @@
-/* $OpenBSD: uvm_pdaemon.c,v 1.2 1999/02/26 05:32:08 art Exp $ */
-/* $NetBSD: uvm_pdaemon.c,v 1.12 1998/11/04 07:06:05 chs Exp $ */
+/* $NetBSD: uvm_pdaemon.c,v 1.14 1999/03/26 17:33:30 chs Exp $ */
-/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* Copyright (c) 1991, 1993, The Regents of the University of California.
@@ -88,6 +83,16 @@
#include <uvm/uvm.h>
/*
+ * UVMPD_NUMDIRTYREACTS is how many dirty pages the pagedaemon will reactivate
+ * in a pass thru the inactive list when swap is full. the value should be
+ * "small"... if it's too large we'll cycle the active pages thru the inactive
+ * queue too quickly for them to be referenced and avoid being freed.
+ */
+
+#define UVMPD_NUMDIRTYREACTS 16
+
+
+/*
* local prototypes
*/
@@ -294,7 +299,9 @@ uvm_pageout()
/*
* done! restart loop.
*/
- thread_wakeup(&uvmexp.free);
+ if (uvmexp.free > uvmexp.reserve_kernel ||
+ uvmexp.paging == 0)
+ thread_wakeup(&uvmexp.free);
}
/*NOTREACHED*/
}
@@ -322,10 +329,11 @@ uvmpd_scan_inactive(pglst)
int npages;
struct vm_page *swpps[MAXBSIZE >> PAGE_SHIFT]; /* XXX: see below */
int swnpages, swcpages; /* XXX: see below */
- int swslot, oldslot;
+ int swslot;
struct vm_anon *anon;
boolean_t swap_backed;
vaddr_t start;
+ int dirtyreacts;
UVMHIST_FUNC("uvmpd_scan_inactive"); UVMHIST_CALLED(pdhist);
/*
@@ -344,6 +352,7 @@ uvmpd_scan_inactive(pglst)
swslot = 0;
swnpages = swcpages = 0;
free = 0;
+ dirtyreacts = 0;
for (p = pglst->tqh_first ; p != NULL || swslot != 0 ; p = nextpg) {
@@ -362,7 +371,8 @@ uvmpd_scan_inactive(pglst)
uvm_unlock_fpageq();
splx(s);
- if (free >= uvmexp.freetarg) {
+ if (free + uvmexp.paging >= uvmexp.freetarg << 2 ||
+ dirtyreacts == UVMPD_NUMDIRTYREACTS) {
UVMHIST_LOG(pdhist," met free target: "
"exit loop", 0, 0, 0, 0);
retval = TRUE; /* hit the target! */
@@ -480,6 +490,13 @@ uvmpd_scan_inactive(pglst)
*/
if (p->flags & PG_CLEAN) {
+ if (p->pqflags & PQ_SWAPBACKED) {
+ /* this page now lives only in swap */
+ simple_lock(&uvm.swap_data_lock);
+ uvmexp.swpgonly++;
+ simple_unlock(&uvm.swap_data_lock);
+ }
+
/* zap all mappings with pmap_page_protect... */
pmap_page_protect(PMAP_PGARG(p), VM_PROT_NONE);
uvm_pagefree(p);
@@ -510,8 +527,7 @@ uvmpd_scan_inactive(pglst)
* this page is dirty, skip it if we'll have met our
* free target when all the current pageouts complete.
*/
- if (free + uvmexp.paging > uvmexp.freetarg)
- {
+ if (free + uvmexp.paging > uvmexp.freetarg << 2) {
if (anon) {
simple_unlock(&anon->an_lock);
} else {
@@ -521,6 +537,53 @@ uvmpd_scan_inactive(pglst)
}
/*
+ * this page is dirty, but we can't page it out
+ * since all pages in swap are only in swap.
+ * reactivate it so that we eventually cycle
+ * all pages thru the inactive queue.
+ */
+#ifdef DIAGNOSTIC
+ if (uvmexp.swpgonly > uvmexp.swpages) {
+ panic("uvmexp.swpgonly botch");
+ }
+#endif
+ if ((p->pqflags & PQ_SWAPBACKED) &&
+ uvmexp.swpgonly == uvmexp.swpages) {
+ dirtyreacts++;
+ uvm_pageactivate(p);
+ if (anon) {
+ simple_unlock(&anon->an_lock);
+ } else {
+ simple_unlock(&uobj->vmobjlock);
+ }
+ continue;
+ }
+
+ /*
+ * if the page is swap-backed and dirty and swap space
+ * is full, free any swap allocated to the page
+ * so that other pages can be paged out.
+ */
+#ifdef DIAGNOSTIC
+ if (uvmexp.swpginuse > uvmexp.swpages) {
+ panic("uvmexp.swpginuse botch");
+ }
+#endif
+ if ((p->pqflags & PQ_SWAPBACKED) &&
+ uvmexp.swpginuse == uvmexp.swpages) {
+
+ if ((p->pqflags & PQ_ANON) &&
+ p->uanon->an_swslot) {
+ uvm_swap_free(p->uanon->an_swslot, 1);
+ p->uanon->an_swslot = 0;
+ }
+ if (p->pqflags & PQ_AOBJ) {
+ uao_dropswap(p->uobject,
+ p->offset >> PAGE_SHIFT);
+ }
+ }
+
+ /*
* the page we are looking at is dirty. we must
* clean it before it can be freed. to do this we
* first mark the page busy so that no one else will
@@ -551,11 +614,8 @@ uvmpd_scan_inactive(pglst)
anon->an_swslot = 0;
}
} else {
- oldslot = uao_set_swslot(uobj,
- p->offset >> PAGE_SHIFT, 0);
-
- if (oldslot)
- uvm_swap_free(oldslot, 1);
+ uao_dropswap(uobj,
+ p->offset >> PAGE_SHIFT);
}
/*
@@ -587,7 +647,6 @@ uvmpd_scan_inactive(pglst)
* add block to cluster
*/
swpps[swcpages] = p;
- uvmexp.pgswapout++;
if (anon)
anon->an_swslot = swslot + swcpages;
else
@@ -879,7 +938,7 @@ uvmpd_scan_inactive(pglst)
void
uvmpd_scan()
{
- int s, free, pages_freed, page_shortage;
+ int s, free, inactive_shortage, swap_shortage, pages_freed;
struct vm_page *p, *nextpg;
struct uvm_object *uobj;
boolean_t got_it;
@@ -925,7 +984,6 @@ uvmpd_scan()
*/
UVMHIST_LOG(pdhist, " starting 'free' loop",0,0,0,0);
- pages_freed = uvmexp.pdfreed; /* so far... */
/*
* do loop #1! alternate starting queue between swap and object based
@@ -933,33 +991,45 @@ uvmpd_scan()
*/
got_it = FALSE;
+ pages_freed = uvmexp.pdfreed;
if ((uvmexp.pdrevs & 1) != 0 && uvmexp.nswapdev != 0)
got_it = uvmpd_scan_inactive(&uvm.page_inactive_swp);
if (!got_it)
got_it = uvmpd_scan_inactive(&uvm.page_inactive_obj);
if (!got_it && (uvmexp.pdrevs & 1) == 0 && uvmexp.nswapdev != 0)
(void) uvmpd_scan_inactive(&uvm.page_inactive_swp);
+ pages_freed = uvmexp.pdfreed - pages_freed;
/*
* we have done the scan to get free pages. now we work on meeting
* our inactive target.
*/
- page_shortage = uvmexp.inactarg - uvmexp.inactive;
- pages_freed = uvmexp.pdfreed - pages_freed; /* # pages freed in loop */
- if (page_shortage <= 0 && pages_freed == 0)
- page_shortage = 1;
+ inactive_shortage = uvmexp.inactarg - uvmexp.inactive;
- UVMHIST_LOG(pdhist, " second loop: page_shortage=%d", page_shortage,
- 0, 0, 0);
- for (p = uvm.page_active.tqh_first ;
- p != NULL && page_shortage > 0 ; p = nextpg) {
+ /*
+ * detect if we're not going to be able to page anything out
+ * until we free some swap resources from active pages.
+ */
+ swap_shortage = 0;
+ if (uvmexp.free < uvmexp.freetarg &&
+ uvmexp.swpginuse == uvmexp.swpages &&
+ uvmexp.swpgonly < uvmexp.swpages &&
+ pages_freed == 0) {
+ swap_shortage = uvmexp.freetarg - uvmexp.free;
+ }
+
+ UVMHIST_LOG(pdhist, " loop 2: inactive_shortage=%d swap_shortage=%d",
+ inactive_shortage, swap_shortage,0,0);
+ for (p = TAILQ_FIRST(&uvm.page_active);
+ p != NULL && (inactive_shortage > 0 || swap_shortage > 0);
+ p = nextpg) {
nextpg = p->pageq.tqe_next;
if (p->flags & PG_BUSY)
continue; /* quick check before trying to lock */
/*
- * lock owner
+ * lock the page's owner.
*/
/* is page anon owned or ownerless? */
if ((p->pqflags & PQ_ANON) || p->uobject == NULL) {
@@ -969,36 +1039,66 @@ uvmpd_scan()
panic("pagedaemon: page with no anon or "
"object detected - loop 2");
#endif
-
if (!simple_lock_try(&p->uanon->an_lock))
continue;
/* take over the page? */
if ((p->pqflags & PQ_ANON) == 0) {
-
#ifdef DIAGNOSTIC
if (p->loan_count < 1)
panic("pagedaemon: non-loaned "
"ownerless page detected - loop 2");
#endif
-
p->loan_count--;
p->pqflags |= PQ_ANON;
}
-
} else {
-
if (!simple_lock_try(&p->uobject->vmobjlock))
continue;
-
}
-
- if ((p->flags & PG_BUSY) == 0) {
+ /*
+ * skip this page if it's busy.
+ */
+ if ((p->flags & PG_BUSY) != 0) {
+ if (p->pqflags & PQ_ANON)
+ simple_unlock(&p->uanon->an_lock);
+ else
+ simple_unlock(&p->uobject->vmobjlock);
+ continue;
+ }
+
+ /*
+ * if there's a shortage of swap, free any swap allocated
+ * to this page so that other pages can be paged out.
+ */
+ if (swap_shortage > 0) {
+ if ((p->pqflags & PQ_ANON) && p->uanon->an_swslot) {
+ uvm_swap_free(p->uanon->an_swslot, 1);
+ p->uanon->an_swslot = 0;
+ p->flags &= ~PG_CLEAN;
+ swap_shortage--;
+ }
+ if (p->pqflags & PQ_AOBJ) {
+ int slot = uao_set_swslot(p->uobject,
+ p->offset >> PAGE_SHIFT, 0);
+ if (slot) {
+ uvm_swap_free(slot, 1);
+ p->flags &= ~PG_CLEAN;
+ swap_shortage--;
+ }
+ }
+ }
+
+ /*
+ * deactivate this page if there's a shortage of
+ * inactive pages.
+ */
+ if (inactive_shortage > 0) {
pmap_page_protect(PMAP_PGARG(p), VM_PROT_NONE);
/* no need to check wire_count as pg is "active" */
uvm_pagedeactivate(p);
uvmexp.pddeact++;
- page_shortage--;
+ inactive_shortage--;
}
if (p->pqflags & PQ_ANON)
@@ -1006,8 +1106,4 @@ uvmpd_scan()
else
simple_unlock(&p->uobject->vmobjlock);
}
-
- /*
- * done scan
- */
}
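The restructured second loop of uvmpd_scan() is driven by two counters: inactive_shortage (how far below the inactive target we are) and swap_shortage (how many active pages should have their swap slots stripped because swap is full and the inactive scan freed nothing). A standalone restatement of the swap_shortage heuristic, with arguments standing in for the uvmexp fields of the same names:

static int
model_swap_shortage(int free, int freetarg, int swpginuse, int swpgonly,
    int swpages, int pages_freed)
{
	/* strip swap from active pages only when the free target is missed,
	 * swap is fully allocated, some swap slots still shadow resident
	 * pages, and the inactive scan freed nothing */
	if (free < freetarg &&
	    swpginuse == swpages &&
	    swpgonly < swpages &&
	    pages_freed == 0)
		return (freetarg - free);
	return (0);
}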
diff --git a/sys/uvm/uvm_pdaemon.h b/sys/uvm/uvm_pdaemon.h
index 0208b43dd16..4590f1cef10 100644
--- a/sys/uvm/uvm_pdaemon.h
+++ b/sys/uvm/uvm_pdaemon.h
@@ -1,10 +1,5 @@
-/* $OpenBSD: uvm_pdaemon.h,v 1.2 1999/02/26 05:32:08 art Exp $ */
/* $NetBSD: uvm_pdaemon.h,v 1.5 1998/02/10 14:12:28 mrg Exp $ */
-/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* Copyright (c) 1991, 1993, The Regents of the University of California.
diff --git a/sys/uvm/uvm_pglist.c b/sys/uvm/uvm_pglist.c
index 800155b7b8a..042ab2b8749 100644
--- a/sys/uvm/uvm_pglist.c
+++ b/sys/uvm/uvm_pglist.c
@@ -1,5 +1,4 @@
-/* $OpenBSD: uvm_pglist.c,v 1.2 1999/02/26 05:32:08 art Exp $ */
-/* $NetBSD: uvm_pglist.c,v 1.6 1998/08/13 02:11:03 eeh Exp $ */
+/* $NetBSD: uvm_pglist.c,v 1.5.2.1 1998/07/30 14:04:15 eeh Exp $ */
#define VM_PAGE_ALLOC_MEMORY_STATS
diff --git a/sys/uvm/uvm_stat.c b/sys/uvm/uvm_stat.c
index f5d8d2d511f..d376e772f6f 100644
--- a/sys/uvm/uvm_stat.c
+++ b/sys/uvm/uvm_stat.c
@@ -1,11 +1,6 @@
-/* $OpenBSD: uvm_stat.c,v 1.2 1999/02/26 05:32:08 art Exp $ */
-/* $NetBSD: uvm_stat.c,v 1.10 1998/06/20 13:19:00 mrg Exp $ */
+/* $NetBSD: uvm_stat.c,v 1.11 1999/03/25 18:48:56 mrg Exp $ */
/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
-/*
*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
@@ -247,6 +242,8 @@ uvm_dump()
uvmexp.pdpending, uvmexp.nswget);
printf(" nswapdev=%d, nanon=%d, nfreeanon=%d\n", uvmexp.nswapdev,
uvmexp.nanon, uvmexp.nfreeanon);
+ printf(" swpages=%d, swpginuse=%d, swpgonly=%d paging=%d\n",
+ uvmexp.swpages, uvmexp.swpginuse, uvmexp.swpgonly, uvmexp.paging);
printf(" kernel pointers:\n");
printf(" objs(kern/kmem/mb)=%p/%p/%p\n", uvm.kernel_object,
diff --git a/sys/uvm/uvm_stat.h b/sys/uvm/uvm_stat.h
index ab1c9b4e99b..8b772ea9f46 100644
--- a/sys/uvm/uvm_stat.h
+++ b/sys/uvm/uvm_stat.h
@@ -1,11 +1,6 @@
-/* $OpenBSD: uvm_stat.h,v 1.2 1999/02/26 05:32:08 art Exp $ */
/* $NetBSD: uvm_stat.h,v 1.13 1998/08/09 22:36:39 perry Exp $ */
/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
-/*
*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
diff --git a/sys/uvm/uvm_swap.c b/sys/uvm/uvm_swap.c
index 6c0b22c5ae3..21f768a78c4 100644
--- a/sys/uvm/uvm_swap.c
+++ b/sys/uvm/uvm_swap.c
@@ -1,5 +1,4 @@
-/* $OpenBSD: uvm_swap.c,v 1.8 1999/07/08 00:54:28 deraadt Exp $ */
-/* $NetBSD: uvm_swap.c,v 1.23 1998/12/26 06:25:59 marc Exp $ */
+/* $NetBSD: uvm_swap.c,v 1.26 1999/03/26 17:34:16 chs Exp $ */
/*
* Copyright (c) 1995, 1996, 1997 Matthew R. Green
@@ -85,7 +84,7 @@
* - swap_syscall_lock (sleep lock): this lock serializes the swapctl
* system call and prevents the swap priority list from changing
* while we are in the middle of a system call (e.g. SWAP_STATS).
- * - swap_data_lock (simple_lock): this lock protects all swap data
+ * - uvm.swap_data_lock (simple_lock): this lock protects all swap data
* structures including the priority list, the swapdev structures,
* and the swapmap extent.
* - swap_buf_lock (simple_lock): this lock protects the free swapbuf
@@ -236,7 +235,6 @@ static struct swap_priority swap_priority;
/* locks */
lock_data_t swap_syscall_lock;
-static simple_lock_data_t swap_data_lock;
/*
* prototypes
@@ -287,7 +285,7 @@ uvm_swap_init()
LIST_INIT(&swap_priority);
uvmexp.nswapdev = 0;
lockinit(&swap_syscall_lock, PVM, "swapsys", 0, 0);
- simple_lock_init(&swap_data_lock);
+ simple_lock_init(&uvm.swap_data_lock);
if (bdevvp(swapdev, &swapdev_vp))
panic("uvm_swap_init: can't get vnode for swap device");
@@ -347,7 +345,7 @@ uvm_swap_init()
/*
* swaplist_insert: insert swap device "sdp" into the global list
*
- * => caller must hold both swap_syscall_lock and swap_data_lock
+ * => caller must hold both swap_syscall_lock and uvm.swap_data_lock
* => caller must provide a newly malloc'd swappri structure (we will
 * FREE it if we don't need it... this is to prevent malloc blocking
* here while adding swap)
@@ -407,7 +405,7 @@ swaplist_insert(sdp, newspp, priority)
* swaplist_find: find and optionally remove a swap device from the
* global list.
*
- * => caller must hold both swap_syscall_lock and swap_data_lock
+ * => caller must hold both swap_syscall_lock and uvm.swap_data_lock
* => we return the swapdev we found (and removed)
*/
static struct swapdev *
@@ -443,7 +441,7 @@ swaplist_find(vp, remove)
* swaplist_trim: scan priority list for empty priority entries and kill
* them.
*
- * => caller must hold both swap_syscall_lock and swap_data_lock
+ * => caller must hold both swap_syscall_lock and uvm.swap_data_lock
*/
static void
swaplist_trim()
@@ -463,7 +461,7 @@ swaplist_trim()
* swapdrum_add: add a "swapdev"'s blocks into /dev/drum's area.
*
* => caller must hold swap_syscall_lock
- * => swap_data_lock should be unlocked (we may sleep)
+ * => uvm.swap_data_lock should be unlocked (we may sleep)
*/
static void
swapdrum_add(sdp, npages)
@@ -485,7 +483,7 @@ swapdrum_add(sdp, npages)
* to the "swapdev" that maps that section of the drum.
*
* => each swapdev takes one big contig chunk of the drum
- * => caller must hold swap_data_lock
+ * => caller must hold uvm.swap_data_lock
*/
static struct swapdev *
swapdrum_getsdp(pgno)
@@ -506,6 +504,7 @@ swapdrum_getsdp(pgno)
return NULL;
}
+
/*
* sys_swapctl: main entry point for swapctl(2) system call
* [with two helper functions: swap_on and swap_off]
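
swapdrum_getsdp() resolves a /dev/drum page number to the swapdev whose contiguous chunk of the drum contains it, and must be called with uvm.swap_data_lock held. Below is a small sketch of that lookup under the assumption of a flat array rather than the real priority-list CIRCLEQs; toy_swapdev and toy_getsdp are invented names.

/*
 * Sketch only: the real swapdrum_getsdp() walks the priority list under
 * uvm.swap_data_lock and skips SWF_FAKE entries.
 */
#include <stdio.h>
#include <stddef.h>

struct toy_swapdev {
	int drumoffset;		/* first /dev/drum page of this device */
	int drumsize;		/* number of drum pages it covers */
	int fake;		/* nonzero = still being configured */
};

static struct toy_swapdev *
toy_getsdp(struct toy_swapdev *devs, int ndevs, int pgno)
{
	int i;

	for (i = 0; i < ndevs; i++) {
		if (devs[i].fake)
			continue;
		if (pgno >= devs[i].drumoffset &&
		    pgno < devs[i].drumoffset + devs[i].drumsize)
			return &devs[i];
	}
	return NULL;		/* page number maps to no swap device */
}

int
main(void)
{
	struct toy_swapdev devs[2] = { { 1, 100, 0 }, { 101, 50, 0 } };
	struct toy_swapdev *sdp = toy_getsdp(devs, 2, 120);

	printf("drum page 120 -> device at drum offset %d\n",
	    sdp ? sdp->drumoffset : -1);
	return 0;
}
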
@@ -558,7 +557,7 @@ sys_swapctl(p, v, retval)
*
* note that the swap_priority list can't change as long
* as we are holding the swap_syscall_lock. we don't want
- * to grab the swap_data_lock because we may fault&sleep during
+ * to grab the uvm.swap_data_lock because we may fault&sleep during
* copyout() and we don't want to be holding that lock then!
*/
if (SCARG(uap, cmd) == SWAP_STATS
@@ -662,6 +661,15 @@ sys_swapctl(p, v, retval)
error = 0; /* assume no error */
switch(SCARG(uap, cmd)) {
+ case SWAP_DUMPDEV:
+ if (vp->v_type != VBLK) {
+ error = ENOTBLK;
+ goto out;
+ }
+ dumpdev = vp->v_rdev;
+
+ break;
+
case SWAP_CTL:
/*
* get new priority, remove old entry (if any) and then
@@ -671,14 +679,14 @@ sys_swapctl(p, v, retval)
priority = SCARG(uap, misc);
spp = (struct swappri *)
malloc(sizeof *spp, M_VMSWAP, M_WAITOK);
- simple_lock(&swap_data_lock);
+ simple_lock(&uvm.swap_data_lock);
if ((sdp = swaplist_find(vp, 1)) == NULL) {
error = ENOENT;
} else {
swaplist_insert(sdp, spp, priority);
swaplist_trim();
}
- simple_unlock(&swap_data_lock);
+ simple_unlock(&uvm.swap_data_lock);
if (error)
free(spp, M_VMSWAP);
break;
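
The SWAP_CTL case shows the pattern the swaplist_insert() comment describes: the swappri structure is malloc'd before uvm.swap_data_lock is taken, the list work happens under the lock, and the structure is freed afterwards only if it turned out not to be needed, so no allocation can sleep while the simple_lock is held. A hedged userland sketch of that shape, with pthread_mutex_t standing in for the simple_lock and an invented have_entry flag playing the role of swaplist_find()'s result:

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

static pthread_mutex_t toy_data_lock = PTHREAD_MUTEX_INITIALIZER;
static int have_entry = 0;		/* pretend the device is not listed */

int
main(void)
{
	int *spp, error = 0;

	/*
	 * allocate before taking the lock, so we never sleep in malloc
	 * while holding it (the reason the swaplist_insert() comment gives
	 * for the caller-supplied swappri structure)
	 */
	spp = malloc(sizeof(*spp));
	if (spp == NULL)
		return 1;

	pthread_mutex_lock(&toy_data_lock);
	if (!have_entry)
		error = 1;		/* plays the role of ENOENT */
	else
		*spp = 10;		/* "swaplist_insert" would consume spp */
	pthread_mutex_unlock(&toy_data_lock);

	if (error)
		free(spp);		/* not consumed: hand it back */
	printf("error=%d\n", error);
	return 0;
}
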
@@ -691,10 +699,10 @@ sys_swapctl(p, v, retval)
* it.
*/
priority = SCARG(uap, misc);
- simple_lock(&swap_data_lock);
+ simple_lock(&uvm.swap_data_lock);
if ((sdp = swaplist_find(vp, 0)) != NULL) {
error = EBUSY;
- simple_unlock(&swap_data_lock);
+ simple_unlock(&uvm.swap_data_lock);
break;
}
sdp = (struct swapdev *)
@@ -713,7 +721,7 @@ sys_swapctl(p, v, retval)
sdp->swd_cred = crdup(p->p_ucred);
#endif
swaplist_insert(sdp, spp, priority);
- simple_unlock(&swap_data_lock);
+ simple_unlock(&uvm.swap_data_lock);
sdp->swd_pathlen = len;
sdp->swd_path = malloc(sdp->swd_pathlen, M_VMSWAP, M_WAITOK);
@@ -726,10 +734,10 @@ sys_swapctl(p, v, retval)
* if swap_on is a success, it will clear the SWF_FAKE flag
*/
if ((error = swap_on(p, sdp)) != 0) {
- simple_lock(&swap_data_lock);
+ simple_lock(&uvm.swap_data_lock);
(void) swaplist_find(vp, 1); /* kill fake entry */
swaplist_trim();
- simple_unlock(&swap_data_lock);
+ simple_unlock(&uvm.swap_data_lock);
#ifdef SWAP_TO_FILES
if (vp->v_type == VREG)
crfree(sdp->swd_cred);
@@ -752,9 +760,9 @@ sys_swapctl(p, v, retval)
/*
* find the entry of interest and ensure it is enabled.
*/
- simple_lock(&swap_data_lock);
+ simple_lock(&uvm.swap_data_lock);
if ((sdp = swaplist_find(vp, 0)) == NULL) {
- simple_unlock(&swap_data_lock);
+ simple_unlock(&uvm.swap_data_lock);
error = ENXIO;
break;
}
@@ -763,7 +771,7 @@ sys_swapctl(p, v, retval)
* can't stop swapping from it (again).
*/
if ((sdp->swd_flags & (SWF_INUSE|SWF_ENABLE)) == 0) {
- simple_unlock(&swap_data_lock);
+ simple_unlock(&uvm.swap_data_lock);
error = EBUSY;
break;
}
@@ -779,7 +787,7 @@ sys_swapctl(p, v, retval)
error = ENXIO;
break;
}
- simple_unlock(&swap_data_lock);
+ simple_unlock(&uvm.swap_data_lock);
free((caddr_t)sdp, M_VMSWAP);
#else
error = EINVAL;
@@ -810,7 +818,7 @@ out:
*
* => we avoid the start of the disk (to protect disk labels)
* => we also avoid the miniroot, if we are swapping to root.
- * => caller should leave swap_data_lock unlocked, we may lock it
+ * => caller should leave uvm.swap_data_lock unlocked, we may lock it
* if needed.
*/
static int
@@ -953,9 +961,11 @@ swap_on(p, sdp)
if (extent_alloc_region(sdp->swd_ex, 0, addr, EX_WAITOK))
panic("disklabel region");
sdp->swd_npginuse += addr;
+ simple_lock(&uvm.swap_data_lock);
uvmexp.swpginuse += addr;
+ uvmexp.swpgonly += addr;
+ simple_unlock(&uvm.swap_data_lock);
}
-
/*
* if the vnode we are swapping to is the root vnode
@@ -979,8 +989,11 @@ swap_on(p, sdp)
rootpages, EX_WAITOK))
panic("swap_on: unable to preserve miniroot");
+ simple_lock(&uvm.swap_data_lock);
sdp->swd_npginuse += (rootpages - addr);
uvmexp.swpginuse += (rootpages - addr);
+ uvmexp.swpgonly += (rootpages - addr);
+ simple_unlock(&uvm.swap_data_lock);
printf("Preserved %d pages of miniroot ", rootpages);
printf("leaving %d pages of swap\n", size - rootpages);
@@ -989,12 +1002,12 @@ swap_on(p, sdp)
/*
* now add the new swapdev to the drum and enable.
*/
- simple_lock(&swap_data_lock);
+ simple_lock(&uvm.swap_data_lock);
swapdrum_add(sdp, npages);
sdp->swd_npages = npages;
sdp->swd_flags &= ~SWF_FAKE; /* going live */
sdp->swd_flags |= (SWF_INUSE|SWF_ENABLE);
- simple_unlock(&swap_data_lock);
+ simple_unlock(&uvm.swap_data_lock);
uvmexp.swpages += npages;
/*
@@ -1147,8 +1160,7 @@ swstrategy(bp)
{
struct swapdev *sdp;
struct vnode *vp;
- int pageno;
- int bn;
+ int s, pageno, bn;
UVMHIST_FUNC("swstrategy"); UVMHIST_CALLED(pdhist);
/*
@@ -1157,9 +1169,9 @@ swstrategy(bp)
* in it (i.e. the blocks we are doing I/O on).
*/
pageno = dbtob(bp->b_blkno) >> PAGE_SHIFT;
- simple_lock(&swap_data_lock);
+ simple_lock(&uvm.swap_data_lock);
sdp = swapdrum_getsdp(pageno);
- simple_unlock(&swap_data_lock);
+ simple_unlock(&uvm.swap_data_lock);
if (sdp == NULL) {
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR;
@@ -1195,6 +1207,7 @@ swstrategy(bp)
* must convert "bp" from an I/O on /dev/drum to an I/O
* on the swapdev (sdp).
*/
+ s = splbio();
bp->b_blkno = bn; /* swapdev block number */
vp = sdp->swd_vp; /* swapdev vnode pointer */
bp->b_dev = sdp->swd_dev; /* swapdev dev_t */
@@ -1205,10 +1218,8 @@ swstrategy(bp)
* drum's v_numoutput counter to the swapdevs.
*/
if ((bp->b_flags & B_READ) == 0) {
- int s = splbio();
vwakeup(bp); /* kills one 'v_numoutput' on drum */
vp->v_numoutput++; /* put it on swapdev */
- splx(s);
}
/*
@@ -1222,6 +1233,7 @@ swstrategy(bp)
* finally plug in swapdev vnode and start I/O
*/
bp->b_vp = vp;
+ splx(s);
VOP_STRATEGY(bp);
return;
#ifdef SWAP_TO_FILES
@@ -1550,7 +1562,7 @@ sw_reg_iodone(bp)
* allocate in a priority we "rotate" the circle queue.
* => space can be freed with uvm_swap_free
* => we return the page slot number in /dev/drum (0 == invalid slot)
- * => we lock swap_data_lock
+ * => we lock uvm.swap_data_lock
* => XXXMRG: "LESSOK" INTERFACE NEEDED TO EXTENT SYSTEM
*/
int
@@ -1572,7 +1584,7 @@ uvm_swap_alloc(nslots, lessok)
/*
* lock data lock, convert slots into blocks, and enter loop
*/
- simple_lock(&swap_data_lock);
+ simple_lock(&uvm.swap_data_lock);
ReTry: /* XXXMRG */
for (spp = swap_priority.lh_first; spp != NULL;
@@ -1598,23 +1610,11 @@ ReTry: /* XXXMRG */
CIRCLEQ_INSERT_TAIL(&spp->spi_swapdev, sdp, swd_next);
sdp->swd_npginuse += *nslots;
uvmexp.swpginuse += *nslots;
- simple_unlock(&swap_data_lock);
+ simple_unlock(&uvm.swap_data_lock);
/* done! return drum slot number */
UVMHIST_LOG(pdhist,
"success! returning %d slots starting at %d",
*nslots, result + sdp->swd_drumoffset, 0, 0);
-#if 0
-{
- struct swapdev *sdp2;
-
- sdp2 = swapdrum_getsdp(result + sdp->swd_drumoffset);
- if (sdp2 == NULL) {
-printf("uvm_swap_alloc: nslots=%d, dev=%x, drumoff=%d, result=%ld",
- *nslots, sdp->swd_dev, sdp->swd_drumoffset, result);
-panic("uvm_swap_alloc: allocating unmapped swap block!");
- }
-}
-#endif
return(result + sdp->swd_drumoffset);
}
}
@@ -1626,7 +1626,7 @@ panic("uvm_swap_alloc: allocating unmapped swap block!");
}
/* XXXMRG: END HACK */
- simple_unlock(&swap_data_lock);
+ simple_unlock(&uvm.swap_data_lock);
return 0; /* failed */
}
@@ -1634,7 +1634,7 @@ panic("uvm_swap_alloc: allocating unmapped swap block!");
* uvm_swap_free: free swap slots
*
* => this can be all or part of an allocation made by uvm_swap_alloc
- * => we lock swap_data_lock
+ * => we lock uvm.swap_data_lock
*/
void
uvm_swap_free(startslot, nslots)
@@ -1651,7 +1651,7 @@ uvm_swap_free(startslot, nslots)
* in the extent, and return. must hold pri lock to do
* lookup and access the extent.
*/
- simple_lock(&swap_data_lock);
+ simple_lock(&uvm.swap_data_lock);
sdp = swapdrum_getsdp(startslot);
#ifdef DIAGNOSTIC
@@ -1674,7 +1674,7 @@ uvm_swap_free(startslot, nslots)
if (sdp->swd_npginuse < 0)
panic("uvm_swap_free: inuse < 0");
#endif
- simple_unlock(&swap_data_lock);
+ simple_unlock(&uvm.swap_data_lock);
}
/*
@@ -1721,9 +1721,25 @@ uvm_swap_get(page, swslot, flags)
printf("uvm_swap_get: ASYNC get requested?\n");
#endif
+ /*
+ * this page is (about to be) no longer only in swap.
+ */
+ simple_lock(&uvm.swap_data_lock);
+ uvmexp.swpgonly--;
+ simple_unlock(&uvm.swap_data_lock);
+
result = uvm_swap_io(&page, swslot, 1, B_READ |
((flags & PGO_SYNCIO) ? 0 : B_ASYNC));
+ if (result != VM_PAGER_OK && result != VM_PAGER_PEND) {
+ /*
+ * oops, the read failed so it really is still only in swap.
+ */
+ simple_lock(&uvm.swap_data_lock);
+ uvmexp.swpgonly++;
+ simple_unlock(&uvm.swap_data_lock);
+ }
+
return (result);
}
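
uvm_swap_get() now decrements uvmexp.swpgonly before starting the read, since the page is about to exist in RAM as well as on swap, and re-increments it if the I/O fails. A minimal sketch of that optimistic-update-with-rollback shape, using an invented toy_swap_io() and a plain counter instead of the uvmexp field protected by uvm.swap_data_lock:

#include <stdio.h>

#define TOY_PAGER_OK	0
#define TOY_PAGER_ERROR	1

static int swpgonly = 10;

/* pretend I/O routine; "fail" selects the outcome for the demo */
static int
toy_swap_io(int fail)
{
	return fail ? TOY_PAGER_ERROR : TOY_PAGER_OK;
}

static int
toy_swap_get(int fail)
{
	int result;

	swpgonly--;			/* page is about to exist in RAM too */
	result = toy_swap_io(fail);
	if (result != TOY_PAGER_OK)
		swpgonly++;		/* read failed: still swap-only */
	return result;
}

int
main(void)
{
	toy_swap_get(0);
	printf("after successful read: swpgonly=%d\n", swpgonly);
	toy_swap_get(1);
	printf("after failed read:     swpgonly=%d\n", swpgonly);
	return 0;
}
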
@@ -1789,8 +1805,10 @@ uvm_swap_io(pps, startslot, npages, flags)
bp->b_vnbufs.le_next = NOLIST;
bp->b_data = (caddr_t)kva;
bp->b_blkno = startblk;
+ s = splbio();
VHOLD(swapdev_vp);
bp->b_vp = swapdev_vp;
+ splx(s);
/* XXXCDC: isn't swapdev_vp always a VCHR? */
/* XXXMRG: probably -- this is obviously something inherited... */
if (swapdev_vp->v_type == VBLK)
@@ -1848,7 +1866,6 @@ uvm_swap_io(pps, startslot, npages, flags)
* now dispose of the swap buffer
*/
s = splbio();
- bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_PAGET|B_UAREA|B_DIRTY|B_NOCACHE);
if (bp->b_vp)
brelvp(bp);
@@ -1883,9 +1900,8 @@ uvm_swap_bufdone(bp)
#endif
/*
- * drop buffers reference to the vnode and its flags.
+ * drop the buffer's reference to the vnode.
*/
- bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_PAGET|B_UAREA|B_DIRTY|B_NOCACHE);
if (bp->b_vp)
brelvp(bp);
diff --git a/sys/uvm/uvm_unix.c b/sys/uvm/uvm_unix.c
index 1afa138a6b3..1ff0e84990e 100644
--- a/sys/uvm/uvm_unix.c
+++ b/sys/uvm/uvm_unix.c
@@ -1,11 +1,6 @@
-/* $OpenBSD: uvm_unix.c,v 1.3 1999/07/20 11:10:54 mickey Exp $ */
/* $NetBSD: uvm_unix.c,v 1.7 1998/10/11 23:18:21 chuck Exp $ */
/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
-/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* Copyright (c) 1991, 1993 The Regents of the University of California.
* Copyright (c) 1988 University of Utah.
diff --git a/sys/uvm/uvm_user.c b/sys/uvm/uvm_user.c
index 20684440ec6..4509395c704 100644
--- a/sys/uvm/uvm_user.c
+++ b/sys/uvm/uvm_user.c
@@ -1,11 +1,6 @@
-/* $OpenBSD: uvm_user.c,v 1.2 1999/02/26 05:32:08 art Exp $ */
/* $NetBSD: uvm_user.c,v 1.6 1998/10/11 23:18:21 chuck Exp $ */
/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
-/*
*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
diff --git a/sys/uvm/uvm_vnode.c b/sys/uvm/uvm_vnode.c
index 80eba8cb92a..dbba98b4934 100644
--- a/sys/uvm/uvm_vnode.c
+++ b/sys/uvm/uvm_vnode.c
@@ -1,11 +1,6 @@
-/* $OpenBSD: uvm_vnode.c,v 1.3 1999/07/08 00:54:29 deraadt Exp $ */
-/* $NetBSD: uvm_vnode.c,v 1.18 1999/01/29 12:56:17 bouyer Exp $ */
+/* $NetBSD: uvm_vnode.c,v 1.22 1999/03/25 18:48:56 mrg Exp $ */
/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
-/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* Copyright (c) 1991, 1993
* The Regents of the University of California.
@@ -118,7 +113,6 @@ static boolean_t uvn_releasepg __P((struct vm_page *,
struct uvm_pagerops uvm_vnodeops = {
uvn_init,
- uvn_attach,
uvn_reference,
uvn_detach,
NULL, /* no specialized fault routine required */
@@ -292,12 +286,12 @@ uvn_attach(arg, accessprot)
*/
#ifdef DEBUG
if (vp->v_type == VBLK)
- printf("used_vnode_size = %qu\n", used_vnode_size);
+ printf("used_vnode_size = %qu\n", (long long)used_vnode_size);
#endif
if (used_vnode_size > (vaddr_t) -PAGE_SIZE) {
#ifdef DEBUG
printf("uvn_attach: vn %p size truncated %qx->%x\n", vp,
- used_vnode_size, -PAGE_SIZE);
+ (long long)used_vnode_size, -PAGE_SIZE);
#endif
used_vnode_size = (vaddr_t) -PAGE_SIZE;
}
@@ -1458,7 +1452,7 @@ uvn_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
if (ptmp == NULL) {
ptmp = uvm_pagealloc(uobj, current_offset,
- NULL); /* alloc */
+ NULL, 0);
/* out of RAM? */
if (ptmp == NULL) {
@@ -1699,17 +1693,31 @@ uvn_io(uvn, pps, npages, flags, rw)
UVMHIST_LOG(maphist, "calling VOP",0,0,0,0);
+ /*
+ * This process may already have this vnode locked, if we faulted in
+ * copyin() or copyout() on a region backed by this vnode
+ * while doing I/O to the vnode. If this is the case, don't
+ * panic.. instead, return the error to the user.
+ *
+ * XXX this is a stopgap to prevent a panic.
+ * Ideally, this kind of operation *should* work.
+ */
+ result = 0;
if ((uvn->u_flags & UVM_VNODE_VNISLOCKED) == 0)
- vn_lock(vn, LK_EXCLUSIVE | LK_RETRY, curproc /*XXX*/);
- /* NOTE: vnode now locked! */
+ result = vn_lock(vn, LK_EXCLUSIVE | LK_RETRY, curproc /*XXX*/);
- if (rw == UIO_READ)
- result = VOP_READ(vn, &uio, 0, curproc->p_ucred);
- else
- result = VOP_WRITE(vn, &uio, 0, curproc->p_ucred);
+ if (result == 0) {
+ /* NOTE: vnode now locked! */
- if ((uvn->u_flags & UVM_VNODE_VNISLOCKED) == 0)
- VOP_UNLOCK(vn, 0, curproc /*XXX*/);
+ if (rw == UIO_READ)
+ result = VOP_READ(vn, &uio, 0, curproc->p_ucred);
+ else
+ result = VOP_WRITE(vn, &uio, 0, curproc->p_ucred);
+
+ if ((uvn->u_flags & UVM_VNODE_VNISLOCKED) == 0)
+ VOP_UNLOCK(vn, 0, curproc /*XXX*/);
+ }
+
/* NOTE: vnode now unlocked (unless vnislocked) */
UVMHIST_LOG(maphist, "done calling VOP",0,0,0,0);
@@ -1921,7 +1929,8 @@ uvm_vnp_setsize(vp, newsize)
if (newsize > (vaddr_t) -PAGE_SIZE) {
#ifdef DEBUG
printf("uvm_vnp_setsize: vn %p size truncated "
- "%qx->%lx\n", vp, newsize, (vaddr_t)-PAGE_SIZE);
+ "%qx->%lx\n", vp, (long long)newsize,
+ (vaddr_t)-PAGE_SIZE);
#endif
newsize = (vaddr_t)-PAGE_SIZE;
}
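
The printf() changes in uvn_attach() and uvm_vnp_setsize() add casts so the argument width always matches the quad format, whatever the underlying type happens to be on a given port. A tiny sketch of the same idea, assuming off_t and %llx as userland stand-ins for the kernel's size type and its %qx format:

#include <stdio.h>
#include <sys/types.h>

int
main(void)
{
	off_t used_vnode_size = 0x12345678;

	/* widen explicitly so the value matches the 64-bit format */
	printf("used_vnode_size = %llx\n",
	    (unsigned long long)used_vnode_size);
	return 0;
}
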
diff --git a/sys/uvm/uvm_vnode.h b/sys/uvm/uvm_vnode.h
index bd91981f112..fc0b2de0e13 100644
--- a/sys/uvm/uvm_vnode.h
+++ b/sys/uvm/uvm_vnode.h
@@ -1,11 +1,6 @@
-/* $OpenBSD: uvm_vnode.h,v 1.2 1999/02/26 05:32:08 art Exp $ */
/* $NetBSD: uvm_vnode.h,v 1.6 1998/08/13 02:11:04 eeh Exp $ */
/*
- * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
- * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
- */
-/*
*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.