path: root/sys/uvm
author     Artur Grabowski <art@cvs.openbsd.org>   2001-08-11 10:57:23 +0000
committer  Artur Grabowski <art@cvs.openbsd.org>   2001-08-11 10:57:23 +0000
commit     7a180b0dffd8a6191dc39c284c4b43646bbe8ac6 (patch)
tree       5442aa1b4d2ff26b5330a6478a7d647080cc100a /sys/uvm
parent     511fb2556ded7a7b5ce46090532eccce46d77468 (diff)
Various random fixes from NetBSD.
Including support for zeroing pages in the idle loop (not enabled yet).
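
The idle-loop zeroing added below is not hooked up to any machine-dependent idle loop yet (hence "not enabled yet"). A minimal sketch of how a port might eventually enable it; cpu_idle() and its loop structure are hypothetical here, only uvm.page_idle_zero, uvm_pageidlezero() and the whichqs check come from this commit:

    /*
     * Hypothetical machine-dependent idle loop hook (NOT part of
     * this commit).  Zero free pages whenever nothing is runnable.
     */
    void
    cpu_idle(void)
    {
    	while (whichqs == 0) {		/* no process ready to run */
    		if (uvm.page_idle_zero)
    			uvm_pageidlezero();
    		/* ... otherwise sleep until the next interrupt ... */
    	}
    }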
Diffstat (limited to 'sys/uvm')
-rw-r--r--  sys/uvm/uvm.h         |  39
-rw-r--r--  sys/uvm/uvm_aobj.c    |  14
-rw-r--r--  sys/uvm/uvm_device.c  |  16
-rw-r--r--  sys/uvm/uvm_device.h  |   6
-rw-r--r--  sys/uvm/uvm_extern.h  |  14
-rw-r--r--  sys/uvm/uvm_fault.c   |  21
-rw-r--r--  sys/uvm/uvm_glue.c    |  39
-rw-r--r--  sys/uvm/uvm_init.c    |   5
-rw-r--r--  sys/uvm/uvm_io.c      |   4
-rw-r--r--  sys/uvm/uvm_km.c      |  56
-rw-r--r--  sys/uvm/uvm_loan.c    |  15
-rw-r--r--  sys/uvm/uvm_map.c     |  70
-rw-r--r--  sys/uvm/uvm_map.h     |   6
-rw-r--r--  sys/uvm/uvm_map_i.h   |  14
-rw-r--r--  sys/uvm/uvm_meter.c   |   4
-rw-r--r--  sys/uvm/uvm_mmap.c    |  28
-rw-r--r--  sys/uvm/uvm_page.c    | 185
-rw-r--r--  sys/uvm/uvm_page.h    |  13
-rw-r--r--  sys/uvm/uvm_page_i.h  |   8
-rw-r--r--  sys/uvm/uvm_pager.c   |  78
-rw-r--r--  sys/uvm/uvm_pager.h   |  12
-rw-r--r--  sys/uvm/uvm_pager_i.h |   6
-rw-r--r--  sys/uvm/uvm_pdaemon.c |   4
-rw-r--r--  sys/uvm/uvm_pglist.c  |  44
-rw-r--r--  sys/uvm/uvm_stat.h    |  34
-rw-r--r--  sys/uvm/uvm_swap.c    |  36
-rw-r--r--  sys/uvm/uvm_unix.c    |  14
-rw-r--r--  sys/uvm/uvm_vnode.c   |  21
28 files changed, 488 insertions, 318 deletions
diff --git a/sys/uvm/uvm.h b/sys/uvm/uvm.h
index 92d7382cfb5..bb696dfda42 100644
--- a/sys/uvm/uvm.h
+++ b/sys/uvm/uvm.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm.h,v 1.11 2001/07/18 15:19:12 art Exp $ */
-/* $NetBSD: uvm.h,v 1.18 1999/11/13 00:21:17 thorpej Exp $ */
+/* $OpenBSD: uvm.h,v 1.12 2001/08/11 10:57:22 art Exp $ */
+/* $NetBSD: uvm.h,v 1.22 2000/06/08 05:52:34 thorpej Exp $ */
/*
*
@@ -75,13 +75,15 @@
struct uvm {
/* vm_page related parameters */
/* vm_page queues */
- struct pglist page_free[VM_NFREELIST]; /* unallocated pages */
+ struct pgfreelist page_free[VM_NFREELIST]; /* unallocated pages */
struct pglist page_active; /* allocated pages, in use */
struct pglist page_inactive_swp;/* pages inactive (reclaim or free) */
struct pglist page_inactive_obj;/* pages inactive (reclaim or free) */
simple_lock_data_t pageqlock; /* lock for active/inactive page q */
simple_lock_data_t fpageqlock; /* lock for free page q */
boolean_t page_init_done; /* TRUE if uvm_page_init() finished */
+ boolean_t page_idle_zero; /* TRUE if we should try to zero
+ pages in the idle loop */
/* page daemon trigger */
int pagedaemon; /* daemon sleeps on this */
struct proc *pagedaemon_proc; /* daemon's pid */
@@ -114,17 +116,6 @@ struct uvm {
struct uvm_object *kernel_object;
};
-extern struct uvm uvm;
-
-/*
- * historys
- */
-
-#ifdef _KERNEL
-UVMHIST_DECL(maphist);
-UVMHIST_DECL(pdhist);
-#endif /* _KERNEL */
-
/*
* vm_map_entry etype bits:
*/
@@ -145,13 +136,25 @@ UVMHIST_DECL(pdhist);
#ifdef _KERNEL
+extern struct uvm uvm;
+
+/*
+ * historys
+ */
+
+UVMHIST_DECL(maphist);
+UVMHIST_DECL(pdhist);
+
/*
- * UVM_UNLOCK_AND_WAIT: atomic unlock+wait... front end for the
- * uvm_sleep() function.
+ * UVM_UNLOCK_AND_WAIT: atomic unlock+wait... wrapper around the
+ * interlocked tsleep() function.
*/
-#define UVM_UNLOCK_AND_WAIT(event, lock, intr, msg, timo) \
- uvm_sleep(event, lock, intr, msg, timo)
+#define UVM_UNLOCK_AND_WAIT(event, slock, intr, msg, timo) \
+do { \
+ (void) ltsleep(event, PVM | PNORELOCK | (intr ? PCATCH : 0), \
+ msg, timo, slock); \
+} while (0)
/*
* UVM_PAGE_OWN: track page ownership (only if UVM_PAGE_TRKOWN)
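
The old uvm_sleep() front end is gone (see the uvm_glue.c hunk below); the macro is now a thin wrapper over the interlocked ltsleep(). PNORELOCK tells ltsleep() to drop the interlock and not retake it on wakeup, so callers return without the simple lock held. A usage sketch modeled on the uvm_wait()/pagedaemon pairing in the uvm_pdaemon.c hunk later in this diff:

    simple_lock(&uvm.pagedaemon_lock);
    wakeup(&uvm.pagedaemon);		/* wake the daemon */
    /* atomically release pagedaemon_lock and sleep on uvmexp.free */
    UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm.pagedaemon_lock,
        FALSE, "uvmwait", 0);
    /* NOTE: pagedaemon_lock is no longer held here (PNORELOCK) */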
diff --git a/sys/uvm/uvm_aobj.c b/sys/uvm/uvm_aobj.c
index c0502bbe321..1c88ea322e2 100644
--- a/sys/uvm/uvm_aobj.c
+++ b/sys/uvm/uvm_aobj.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_aobj.c,v 1.13 2001/08/06 14:03:04 art Exp $ */
-/* $NetBSD: uvm_aobj.c,v 1.28 2000/03/26 20:54:46 kleink Exp $ */
+/* $OpenBSD: uvm_aobj.c,v 1.14 2001/08/11 10:57:22 art Exp $ */
+/* $NetBSD: uvm_aobj.c,v 1.31 2000/05/19 04:34:45 thorpej Exp $ */
/*
* Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
@@ -203,7 +203,6 @@ struct uvm_pagerops aobj_pager = {
NULL, /* put (done by pagedaemon) */
NULL, /* cluster */
NULL, /* mk_pcluster */
- uvm_shareprot, /* shareprot */
NULL, /* aiodone */
uao_releasepg /* releasepg */
};
@@ -298,7 +297,7 @@ uao_find_swslot(aobj, pageidx)
if (elt)
return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
else
- return(NULL);
+ return(0);
}
/*
@@ -1005,19 +1004,18 @@ uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
ptmp = uvm_pagelookup(uobj, current_offset);
/*
- * if page is new, attempt to allocate the page, then
- * zero-fill it.
+ * if page is new, attempt to allocate the page,
+ * zero-fill'd.
*/
if (ptmp == NULL && uao_find_swslot(aobj,
current_offset >> PAGE_SHIFT) == 0) {
ptmp = uvm_pagealloc(uobj, current_offset,
- NULL, 0);
+ NULL, UVM_PGA_ZERO);
if (ptmp) {
/* new page */
ptmp->flags &= ~(PG_BUSY|PG_FAKE);
ptmp->pqflags |= PQ_AOBJ;
UVM_PAGE_OWN(ptmp, NULL);
- uvm_pagezero(ptmp);
}
}
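
This hunk shows the allocation pattern this commit applies throughout: rather than allocating a page and unconditionally calling uvm_pagezero(), callers pass UVM_PGA_ZERO and let uvm_pagealloc() satisfy the request from the pre-zero'd free queue when it can. Schematically:

    /* before: allocate, then always zero */
    pg = uvm_pagealloc(uobj, offset, NULL, 0);
    if (pg != NULL)
    	uvm_pagezero(pg);		/* clears PG_CLEAN */

    /* after: zeroed only if it didn't come off the ZEROS queue */
    pg = uvm_pagealloc(uobj, offset, NULL, UVM_PGA_ZERO);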
diff --git a/sys/uvm/uvm_device.c b/sys/uvm/uvm_device.c
index b2c8d7b7e0c..196fcdcddd2 100644
--- a/sys/uvm/uvm_device.c
+++ b/sys/uvm/uvm_device.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_device.c,v 1.10 2001/08/06 14:03:04 art Exp $ */
-/* $NetBSD: uvm_device.c,v 1.20 2000/03/26 20:54:46 kleink Exp $ */
+/* $OpenBSD: uvm_device.c,v 1.11 2001/08/11 10:57:22 art Exp $ */
+/* $NetBSD: uvm_device.c,v 1.22 2000/05/28 10:21:55 drochner Exp $ */
/*
*
@@ -95,7 +95,6 @@ struct uvm_pagerops uvm_deviceops = {
udv_put,
NULL, /* no cluster function */
NULL, /* no put cluster function */
- NULL, /* no share protect. no share maps for us */
NULL, /* no AIO-DONE function since no async i/o */
NULL, /* no releasepg function since no normal pages */
};
@@ -131,7 +130,7 @@ struct uvm_object *
udv_attach(arg, accessprot, off, size)
void *arg;
vm_prot_t accessprot;
- vaddr_t off; /* used only for access check */
+ voff_t off; /* used only for access check */
vsize_t size; /* used only for access check */
{
dev_t device = *((dev_t *) arg);
@@ -152,6 +151,15 @@ udv_attach(arg, accessprot, off, size)
return(NULL);
/*
+ * As long as the device d_mmap interface gets an "int"
+ * offset, we have to watch out not to overflow its
+ * numeric range. (assuming it will be interpreted as
+ * "unsigned")
+ */
+ if (((off + size - 1) & (u_int)-1) != off + size - 1)
+ return (0);
+
+ /*
* Check that the specified range of the device allows the
* desired protection.
*
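
The new range check above guards the 64-bit voff_t against truncation by d_mmap entry points that still take an int offset. Masking with (u_int)-1 keeps only the low 32 bits, so the comparison fails exactly when off + size - 1 needs more than 32 bits. Worked through, assuming a 32-bit u_int:

    /* off + size - 1 == 0x100000000 (needs bit 32):
     *   (0x100000000 & 0xffffffff) == 0x0 != 0x100000000  -> reject */

    /* off + size - 1 == 0xffffffff (fits in 32 bits):
     *   (0xffffffff & 0xffffffff) == 0xffffffff           -> accept */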
diff --git a/sys/uvm/uvm_device.h b/sys/uvm/uvm_device.h
index af56a7d4eaa..b127c7f5ede 100644
--- a/sys/uvm/uvm_device.h
+++ b/sys/uvm/uvm_device.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_device.h,v 1.6 2001/03/09 05:34:38 smart Exp $ */
-/* $NetBSD: uvm_device.h,v 1.8 1999/06/21 17:25:11 thorpej Exp $ */
+/* $OpenBSD: uvm_device.h,v 1.7 2001/08/11 10:57:22 art Exp $ */
+/* $NetBSD: uvm_device.h,v 1.9 2000/05/28 10:21:55 drochner Exp $ */
/*
*
@@ -70,7 +70,7 @@ struct uvm_device {
* prototypes
*/
-struct uvm_object *udv_attach __P((void *, vm_prot_t, vaddr_t, vsize_t));
+struct uvm_object *udv_attach __P((void *, vm_prot_t, voff_t, vsize_t));
#endif /* _KERNEL */
diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h
index 531373e6889..209e182cdae 100644
--- a/sys/uvm/uvm_extern.h
+++ b/sys/uvm/uvm_extern.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_extern.h,v 1.18 2001/08/08 02:36:59 millert Exp $ */
-/* $NetBSD: uvm_extern.h,v 1.38 2000/03/26 20:54:46 kleink Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.19 2001/08/11 10:57:22 art Exp $ */
+/* $NetBSD: uvm_extern.h,v 1.42 2000/06/08 05:52:34 thorpej Exp $ */
/*
*
@@ -133,7 +133,8 @@
/*
* flags for uvm_pagealloc_strat()
*/
-#define UVM_PGA_USERESERVE 0x0001
+#define UVM_PGA_USERESERVE 0x0001 /* ok to use reserve pages */
+#define UVM_PGA_ZERO 0x0002 /* returned page must be zero'd */
/*
* lockflags that control the locking behavior of various functions.
@@ -176,6 +177,7 @@ struct uvmexp {
int inactive; /* number of pages that we free'd but may want back */
int paging; /* number of pages in the process of being paged out */
int wired; /* number of wired pages */
+ int zeropages; /* number of zero'd pages */
int reserve_pagedaemon; /* number of pages reserved for pagedaemon */
int reserve_kernel; /* number of pages reserved for kernel */
@@ -212,6 +214,10 @@ struct uvmexp {
int forks; /* forks */
int forks_ppwait; /* forks where parent waits */
int forks_sharevm; /* forks where vmspace is shared */
+ int pga_zerohit; /* pagealloc where zero wanted and zero
+ was available */
+ int pga_zeromiss; /* pagealloc where zero wanted and zero
+ not available */
/* fault subcounters */
int fltnoram; /* number of times fault was out of ram */
@@ -291,8 +297,6 @@ int uvm_fault __P((vm_map_t, vaddr_t,
#if defined(KGDB)
void uvm_chgkprot __P((caddr_t, size_t, int));
#endif
-void uvm_sleep __P((void *, struct simplelock *, boolean_t,
- const char *, int));
void uvm_fork __P((struct proc *, struct proc *, boolean_t,
void *, size_t));
void uvm_exit __P((struct proc *));
diff --git a/sys/uvm/uvm_fault.c b/sys/uvm/uvm_fault.c
index ab60ff2a081..b848a3a3353 100644
--- a/sys/uvm/uvm_fault.c
+++ b/sys/uvm/uvm_fault.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_fault.c,v 1.17 2001/07/26 19:37:13 art Exp $ */
-/* $NetBSD: uvm_fault.c,v 1.47 2000/01/11 06:57:50 chs Exp $ */
+/* $OpenBSD: uvm_fault.c,v 1.18 2001/08/11 10:57:22 art Exp $ */
+/* $NetBSD: uvm_fault.c,v 1.48 2000/04/10 01:17:41 thorpej Exp $ */
/*
*
@@ -1601,8 +1601,16 @@ Case2:
#endif
anon = uvm_analloc();
- if (anon)
- pg = uvm_pagealloc(NULL, 0, anon, 0);
+ if (anon) {
+ /*
+ * In `Fill in data...' below, if
+ * uobjpage == PGO_DONTCARE, we want
+ * a zero'd, dirty page, so have
+ * uvm_pagealloc() do that for us.
+ */
+ pg = uvm_pagealloc(NULL, 0, anon,
+ (uobjpage == PGO_DONTCARE) ? UVM_PGA_ZERO : 0);
+ }
#ifdef __GNUC__
else
pg = NULL; /* XXX: gcc */
@@ -1691,7 +1699,10 @@ Case2:
} else {
uvmexp.flt_przero++;
- uvm_pagezero(pg); /* zero page [pg now dirty] */
+ /*
+ * Page is zero'd and marked dirty by uvm_pagealloc()
+ * above.
+ */
UVMHIST_LOG(maphist," zero fill anon/page 0x%x/0%x",
anon, pg, 0, 0);
}
diff --git a/sys/uvm/uvm_glue.c b/sys/uvm/uvm_glue.c
index ae01993b2f6..0156592effc 100644
--- a/sys/uvm/uvm_glue.c
+++ b/sys/uvm/uvm_glue.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_glue.c,v 1.18 2001/08/06 14:03:04 art Exp $ */
-/* $NetBSD: uvm_glue.c,v 1.31 2000/03/26 20:54:47 kleink Exp $ */
+/* $OpenBSD: uvm_glue.c,v 1.19 2001/08/11 10:57:22 art Exp $ */
+/* $NetBSD: uvm_glue.c,v 1.36 2000/06/18 05:20:27 simonb Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -107,31 +107,6 @@ int readbuffers = 0; /* allow KGDB to read kern buffer pool */
/*
- * uvm_sleep: atomic unlock and sleep for UVM_UNLOCK_AND_WAIT().
- */
-
-void
-uvm_sleep(event, slock, canintr, msg, timo)
- void *event;
- struct simplelock *slock;
- boolean_t canintr;
- const char *msg;
- int timo;
-{
- int s, pri;
-
- pri = PVM;
- if (canintr)
- pri |= PCATCH;
-
- s = splhigh();
- if (slock != NULL)
- simple_unlock(slock);
- (void) tsleep(event, pri, (char *)msg, timo);
- splx(s);
-}
-
-/*
* uvm_kernacc: can the kernel access a region of memory
*
* - called from malloc [DIAGNOSTIC], and /dev/kmem driver (mem.c)
@@ -210,7 +185,7 @@ uvm_useracc(addr, len, rw)
*/
void
uvm_chgkprot(addr, len, rw)
- register caddr_t addr;
+ caddr_t addr;
size_t len;
int rw;
{
@@ -433,8 +408,8 @@ uvm_swapin(p)
void
uvm_scheduler()
{
- register struct proc *p;
- register int pri;
+ struct proc *p;
+ int pri;
struct proc *pp;
int ppri;
UVMHIST_FUNC("uvm_scheduler"); UVMHIST_CALLED(maphist);
@@ -525,7 +500,7 @@ loop:
void
uvm_swapout_threads()
{
- register struct proc *p;
+ struct proc *p;
struct proc *outp, *outp2;
int outpri, outpri2;
int didswap = 0;
@@ -595,7 +570,7 @@ uvm_swapout_threads()
static void
uvm_swapout(p)
- register struct proc *p;
+ struct proc *p;
{
vaddr_t addr;
int s;
diff --git a/sys/uvm/uvm_init.c b/sys/uvm/uvm_init.c
index 660b6c7b947..93e1744d732 100644
--- a/sys/uvm/uvm_init.c
+++ b/sys/uvm/uvm_init.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_init.c,v 1.6 2001/01/29 02:07:44 niklas Exp $ */
-/* $NetBSD: uvm_init.c,v 1.11 1999/03/25 18:48:51 mrg Exp $ */
+/* $OpenBSD: uvm_init.c,v 1.7 2001/08/11 10:57:22 art Exp $ */
+/* $NetBSD: uvm_init.c,v 1.12 2000/03/29 03:43:34 simonb Exp $ */
/*
*
@@ -48,7 +48,6 @@
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
-#include <sys/conf.h>
#include <vm/vm.h>
diff --git a/sys/uvm/uvm_io.c b/sys/uvm/uvm_io.c
index d739d7557e1..78d20788a57 100644
--- a/sys/uvm/uvm_io.c
+++ b/sys/uvm/uvm_io.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_io.c,v 1.5 2001/01/29 02:07:45 niklas Exp $ */
-/* $NetBSD: uvm_io.c,v 1.8 1999/03/25 18:48:51 mrg Exp $ */
+/* $OpenBSD: uvm_io.c,v 1.6 2001/08/11 10:57:22 art Exp $ */
+/* $NetBSD: uvm_io.c,v 1.10 2000/06/02 12:02:44 pk Exp $ */
/*
*
diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c
index 2acbc0507f3..ac76b56f1a2 100644
--- a/sys/uvm/uvm_km.c
+++ b/sys/uvm/uvm_km.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_km.c,v 1.13 2001/07/26 19:37:13 art Exp $ */
-/* $NetBSD: uvm_km.c,v 1.34 2000/01/11 06:57:50 chs Exp $ */
+/* $OpenBSD: uvm_km.c,v 1.14 2001/08/11 10:57:22 art Exp $ */
+/* $NetBSD: uvm_km.c,v 1.35 2000/05/08 23:10:20 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -335,7 +335,7 @@ uvm_km_pgremove(uobj, start, end)
simple_lock(&uobj->vmobjlock); /* lock object */
#ifdef DIAGNOSTIC
- if (uobj->pgops != &aobj_pager)
+ if (__predict_false(uobj->pgops != &aobj_pager))
panic("uvm_km_pgremove: object %p not an aobj", uobj);
#endif
@@ -435,7 +435,7 @@ uvm_km_pgremove_intrsafe(uobj, start, end)
simple_lock(&uobj->vmobjlock); /* lock object */
#ifdef DIAGNOSTIC
- if (UVM_OBJ_IS_INTRSAFE_OBJECT(uobj) == 0)
+ if (__predict_false(UVM_OBJ_IS_INTRSAFE_OBJECT(uobj) == 0))
panic("uvm_km_pgremove_intrsafe: object %p not intrsafe", uobj);
#endif
@@ -456,11 +456,11 @@ uvm_km_pgremove_intrsafe(uobj, start, end)
UVMHIST_LOG(maphist," page 0x%x, busy=%d", pp,
pp->flags & PG_BUSY, 0, 0);
#ifdef DIAGNOSTIC
- if (pp->flags & PG_BUSY)
+ if (__predict_false(pp->flags & PG_BUSY))
panic("uvm_km_pgremove_intrsafe: busy page");
- if (pp->pqflags & PQ_ACTIVE)
+ if (__predict_false(pp->pqflags & PQ_ACTIVE))
panic("uvm_km_pgremove_intrsafe: active page");
- if (pp->pqflags & PQ_INACTIVE)
+ if (__predict_false(pp->pqflags & PQ_INACTIVE))
panic("uvm_km_pgremove_intrsafe: inactive page");
#endif
@@ -482,11 +482,11 @@ loop_by_list:
pp->flags & PG_BUSY, 0, 0);
#ifdef DIAGNOSTIC
- if (pp->flags & PG_BUSY)
+ if (__predict_false(pp->flags & PG_BUSY))
panic("uvm_km_pgremove_intrsafe: busy page");
- if (pp->pqflags & PQ_ACTIVE)
+ if (__predict_false(pp->pqflags & PQ_ACTIVE))
panic("uvm_km_pgremove_intrsafe: active page");
- if (pp->pqflags & PQ_INACTIVE)
+ if (__predict_false(pp->pqflags & PQ_INACTIVE))
panic("uvm_km_pgremove_intrsafe: inactive page");
#endif
@@ -527,7 +527,7 @@ uvm_km_kmemalloc(map, obj, size, flags)
map, obj, size, flags);
#ifdef DIAGNOSTIC
/* sanity check */
- if (vm_map_pmap(map) != pmap_kernel())
+ if (__predict_false(vm_map_pmap(map) != pmap_kernel()))
panic("uvm_km_kmemalloc: invalid map");
#endif
@@ -542,10 +542,10 @@ uvm_km_kmemalloc(map, obj, size, flags)
* allocate some virtual space
*/
- if (uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
+ if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK)))
- != KERN_SUCCESS) {
+ != KERN_SUCCESS)) {
UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
return(0);
}
@@ -584,7 +584,7 @@ uvm_km_kmemalloc(map, obj, size, flags)
* out of memory?
*/
- if (pg == NULL) {
+ if (__predict_false(pg == NULL)) {
if (flags & UVM_KMF_NOWAIT) {
/* free everything! */
uvm_unmap(map, kva, kva + size);
@@ -687,9 +687,10 @@ uvm_km_alloc1(map, size, zeroit)
* allocate some virtual space
*/
- if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
- UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
- UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
+ if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
+ UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
+ UVM_INH_NONE, UVM_ADV_RANDOM,
+ 0)) != KERN_SUCCESS)) {
UVMHIST_LOG(maphist,"<- done (no VM)",0,0,0,0);
return(0);
}
@@ -730,7 +731,7 @@ uvm_km_alloc1(map, size, zeroit)
UVM_PAGE_OWN(pg, NULL);
}
simple_unlock(&uvm.kernel_object->vmobjlock);
- if (pg == NULL) {
+ if (__predict_false(pg == NULL)) {
uvm_wait("km_alloc1w"); /* wait for memory */
continue;
}
@@ -776,7 +777,7 @@ uvm_km_valloc(map, size)
UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);
#ifdef DIAGNOSTIC
- if (vm_map_pmap(map) != pmap_kernel())
+ if (__predict_false(vm_map_pmap(map) != pmap_kernel()))
panic("uvm_km_valloc");
#endif
@@ -787,9 +788,10 @@ uvm_km_valloc(map, size)
* allocate some virtual space. will be demand filled by kernel_object.
*/
- if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
- UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
- UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
+ if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
+ UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
+ UVM_INH_NONE, UVM_ADV_RANDOM,
+ 0)) != KERN_SUCCESS)) {
UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
return(0);
}
@@ -817,7 +819,7 @@ uvm_km_valloc_wait(map, size)
UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);
#ifdef DIAGNOSTIC
- if (vm_map_pmap(map) != pmap_kernel())
+ if (__predict_false(vm_map_pmap(map) != pmap_kernel()))
panic("uvm_km_valloc_wait");
#endif
@@ -833,10 +835,10 @@ uvm_km_valloc_wait(map, size)
* by kernel_object.
*/
- if (uvm_map(map, &kva, size, uvm.kernel_object,
+ if (__predict_true(uvm_map(map, &kva, size, uvm.kernel_object,
UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL,
UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, 0))
- == KERN_SUCCESS) {
+ == KERN_SUCCESS)) {
UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
return(kva);
}
@@ -876,7 +878,7 @@ uvm_km_alloc_poolpage1(map, obj, waitok)
again:
pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
- if (pg == NULL) {
+ if (__predict_false(pg == NULL)) {
if (waitok) {
uvm_wait("plpg");
goto again;
@@ -884,7 +886,7 @@ uvm_km_alloc_poolpage1(map, obj, waitok)
return (0);
}
va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
- if (va == 0)
+ if (__predict_false(va == 0))
uvm_pagefree(pg);
return (va);
#else
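
The __predict_false()/__predict_true() annotations added through this file mark the DIAGNOSTIC panics and allocation-failure paths as unlikely, so the compiler lays out the common case as straight-line code. They are the usual BSD wrappers around gcc's __builtin_expect(); roughly (a sketch of the sys/cdefs.h definitions, whose exact version guard varies):

    #if defined(__GNUC__) && __GNUC__ >= 2	/* really a __GNUC_PREREQ__ check */
    #define	__predict_true(exp)	__builtin_expect((exp) != 0, 1)
    #define	__predict_false(exp)	__builtin_expect((exp) != 0, 0)
    #else
    #define	__predict_true(exp)	(exp)
    #define	__predict_false(exp)	(exp)
    #endif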
diff --git a/sys/uvm/uvm_loan.c b/sys/uvm/uvm_loan.c
index 64fff56afad..380b3b0c0ce 100644
--- a/sys/uvm/uvm_loan.c
+++ b/sys/uvm/uvm_loan.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_loan.c,v 1.7 2001/07/18 10:47:05 art Exp $ */
-/* $NetBSD: uvm_loan.c,v 1.19 1999/09/12 01:17:36 chs Exp $ */
+/* $OpenBSD: uvm_loan.c,v 1.8 2001/08/11 10:57:22 art Exp $ */
+/* $NetBSD: uvm_loan.c,v 1.20 2000/04/10 00:32:46 thorpej Exp $ */
/*
*
@@ -621,7 +621,8 @@ uvm_loanzero(ufi, output, flags)
if ((flags & UVM_LOAN_TOANON) == 0) { /* loaning to kernel-page */
- while ((pg = uvm_pagealloc(NULL, 0, NULL, 0)) == NULL) {
+ while ((pg = uvm_pagealloc(NULL, 0, NULL,
+ UVM_PGA_ZERO)) == NULL) {
uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
ufi->entry->object.uvm_obj, NULL);
uvm_wait("loanzero1");
@@ -635,8 +636,7 @@ uvm_loanzero(ufi, output, flags)
/* ... and try again */
}
- /* got a page, zero it and return */
- uvm_pagezero(pg); /* clears PG_CLEAN */
+ /* got a zero'd page; return */
pg->flags &= ~(PG_BUSY|PG_FAKE);
UVM_PAGE_OWN(pg, NULL);
**output = pg;
@@ -651,7 +651,7 @@ uvm_loanzero(ufi, output, flags)
/* loaning to an anon */
while ((anon = uvm_analloc()) == NULL ||
- (pg = uvm_pagealloc(NULL, 0, anon, 0)) == NULL) {
+ (pg = uvm_pagealloc(NULL, 0, anon, UVM_PGA_ZERO)) == NULL) {
/* unlock everything */
uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
@@ -676,8 +676,7 @@ uvm_loanzero(ufi, output, flags)
/* ... and try again */
}
- /* got a page, zero it and return */
- uvm_pagezero(pg); /* clears PG_CLEAN */
+ /* got a zero'd page; return */
pg->flags &= ~(PG_BUSY|PG_FAKE);
UVM_PAGE_OWN(pg, NULL);
uvm_lock_pageq();
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index 598f20c3a37..430bad6983e 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_map.c,v 1.22 2001/08/06 14:03:04 art Exp $ */
-/* $NetBSD: uvm_map.c,v 1.70 2000/03/26 20:54:47 kleink Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.23 2001/08/11 10:57:22 art Exp $ */
+/* $NetBSD: uvm_map.c,v 1.77 2000/06/13 04:10:47 chs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -667,6 +667,7 @@ step3:
new_entry->aref.ar_pageoff = 0;
new_entry->aref.ar_amap = amap;
} else {
+ new_entry->aref.ar_pageoff = 0;
new_entry->aref.ar_amap = NULL;
}
@@ -1163,7 +1164,7 @@ uvm_map_reserve(map, size, offset, raddr)
vm_map_t map;
vsize_t size;
vaddr_t offset; /* hint for pmap_prefer */
- vaddr_t *raddr; /* OUT: reserved VA */
+ vaddr_t *raddr; /* IN:hint, OUT: reserved VA */
{
UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);
@@ -1563,13 +1564,15 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
while (entry->start < end && entry != &srcmap->header) {
if (copy_ok) {
- oldoffset = (entry->start + fudge) - start;
- elen = min(end, entry->end) - (entry->start + fudge);
- pmap_copy(dstmap->pmap, srcmap->pmap, dstaddr + oldoffset,
- elen, entry->start + fudge);
+ oldoffset = (entry->start + fudge) - start;
+ elen = min(end, entry->end) -
+ (entry->start + fudge);
+ pmap_copy(dstmap->pmap, srcmap->pmap,
+ dstaddr + oldoffset, elen,
+ entry->start + fudge);
}
- /* we advance "entry" in the following if statement */
+ /* we advance "entry" in the following if statement */
if (flags & UVM_EXTRACT_REMOVE) {
pmap_remove(srcmap->pmap, entry->start,
entry->end);
@@ -2757,7 +2760,6 @@ uvmspace_unshare(p)
struct proc *p;
{
struct vmspace *nvm, *ovm = p->p_vmspace;
- int s;
if (ovm->vm_refcnt == 1)
/* nothing to do: vmspace isn't shared in the first place */
@@ -2766,11 +2768,9 @@ uvmspace_unshare(p)
/* make a new vmspace, still holding old one */
nvm = uvmspace_fork(ovm);
- s = splhigh(); /* make this `atomic' */
pmap_deactivate(p); /* unbind old vmspace */
p->p_vmspace = nvm;
pmap_activate(p); /* switch to new vmspace */
- splx(s); /* end of critical section */
uvmspace_free(ovm); /* drop reference to old vmspace */
}
@@ -2787,9 +2787,8 @@ uvmspace_exec(p)
{
struct vmspace *nvm, *ovm = p->p_vmspace;
vm_map_t map = &ovm->vm_map;
- int s;
-#ifdef sparc
+#ifdef __sparc__
/* XXX cgd 960926: the sparc #ifdef should be a MD hook */
kill_user_windows(p); /* before stack addresses go away */
#endif
@@ -2840,11 +2839,9 @@ uvmspace_exec(p)
* install new vmspace and drop our ref to the old one.
*/
- s = splhigh();
pmap_deactivate(p);
p->p_vmspace = nvm;
pmap_activate(p);
- splx(s);
uvmspace_free(ovm);
}
@@ -3274,22 +3271,36 @@ uvm_object_printit(uobj, full, pr)
struct vm_page *pg;
int cnt = 0;
- (*pr)("OBJECT %p: pgops=%p, npages=%d, ", uobj, uobj->pgops,
- uobj->uo_npages);
+ (*pr)("OBJECT %p: locked=%d, pgops=%p, npages=%d, ",
+ uobj, uobj->vmobjlock.lock_data, uobj->pgops, uobj->uo_npages);
if (UVM_OBJ_IS_KERN_OBJECT(uobj))
(*pr)("refs=<SYSTEM>\n");
else
(*pr)("refs=%d\n", uobj->uo_refs);
- if (!full) return;
+ if (!full) {
+ return;
+ }
(*pr)(" PAGES <pg,offset>:\n ");
- for (pg = uobj->memq.tqh_first ; pg ; pg = pg->listq.tqe_next, cnt++) {
+ for (pg = TAILQ_FIRST(&uobj->memq);
+ pg != NULL;
+ pg = TAILQ_NEXT(pg, listq), cnt++) {
(*pr)("<%p,0x%lx> ", pg, pg->offset);
- if ((cnt % 3) == 2) (*pr)("\n ");
+ if ((cnt % 3) == 2) {
+ (*pr)("\n ");
+ }
+ }
+ if ((cnt % 3) != 2) {
+ (*pr)("\n");
}
- if ((cnt % 3) != 2) (*pr)("\n");
}
+const char page_flagbits[] =
+ "\20\4CLEAN\5BUSY\6WANTED\7TABLED\12FAKE\13FILLED\14DIRTY\15RELEASED"
+ "\16FAULTING\17CLEANCHK";
+const char page_pqflagbits[] =
+ "\20\1FREE\2INACTIVE\3ACTIVE\4LAUNDRY\5ANON\6AOBJ";
+
/*
* uvm_page_print: print out a page
*/
@@ -3316,12 +3327,16 @@ uvm_page_printit(pg, full, pr)
struct vm_page *lcv;
struct uvm_object *uobj;
struct pglist *pgl;
+ char pgbuf[128];
+ char pqbuf[128];
(*pr)("PAGE %p:\n", pg);
- (*pr)(" flags=0x%x, pqflags=0x%x, vers=%d, wire_count=%d, pa=0x%lx\n",
- pg->flags, pg->pqflags, pg->version, pg->wire_count, (long)pg->phys_addr);
+ snprintf(pgbuf, sizeof(pgbuf), "%b", pg->flags, page_flagbits);
+ snprintf(pqbuf, sizeof(pqbuf), "%b", pg->pqflags, page_pqflagbits);
+ (*pr)(" flags=%s, pqflags=%s, vers=%d, wire_count=%d, pa=0x%lx\n",
+ pgbuf, pqbuf, pg->version, pg->wire_count, (long)pg->phys_addr);
(*pr)(" uobject=%p, uanon=%p, offset=0x%lx loan_count=%d\n",
- pg->uobject, pg->uanon, pg->offset, pg->loan_count);
+ pg->uobject, pg->uanon, pg->offset, pg->loan_count);
#if defined(UVM_PAGE_TRKOWN)
if (pg->flags & PG_BUSY)
(*pr)(" owning process = %d, tag=%s\n",
@@ -3360,8 +3375,11 @@ uvm_page_printit(pg, full, pr)
}
/* cross-verify page queue */
- if (pg->pqflags & PQ_FREE)
- pgl = &uvm.page_free[uvm_page_lookup_freelist(pg)];
+ if (pg->pqflags & PQ_FREE) {
+ int fl = uvm_page_lookup_freelist(pg);
+ pgl = &uvm.page_free[fl].pgfl_queues[((pg)->flags & PG_ZERO) ?
+ PGFL_ZEROS : PGFL_UNKNOWN];
+ }
else if (pg->pqflags & PQ_INACTIVE)
pgl = (pg->pqflags & PQ_SWAPBACKED) ?
&uvm.page_inactive_swp : &uvm.page_inactive_obj;
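
The new page_flagbits/page_pqflagbits strings feed the kernel printf %b format used by uvm_page_printit() above: the leading character is the numeric base for the raw value (\20 = 16, i.e. hex), and each following entry is a 1-based bit-position byte followed by that bit's name. For example, with page_flagbits as defined in this hunk:

    char buf[128];

    /* 0x30 = bit 5 (\5BUSY) | bit 6 (\6WANTED) */
    snprintf(buf, sizeof(buf), "%b", 0x30, page_flagbits);
    /* buf now holds "30<BUSY,WANTED>" */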
diff --git a/sys/uvm/uvm_map.h b/sys/uvm/uvm_map.h
index 517f9bfcb96..3747c4da8ff 100644
--- a/sys/uvm/uvm_map.h
+++ b/sys/uvm/uvm_map.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_map.h,v 1.8 2001/08/06 14:03:05 art Exp $ */
-/* $NetBSD: uvm_map.h,v 1.16 2000/03/26 20:54:47 kleink Exp $ */
+/* $OpenBSD: uvm_map.h,v 1.9 2001/08/11 10:57:22 art Exp $ */
+/* $NetBSD: uvm_map.h,v 1.17 2000/03/29 04:05:47 simonb Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -160,8 +160,6 @@ void uvm_unmap_detach __P((vm_map_entry_t,int));
int uvm_unmap_remove __P((vm_map_t, vaddr_t, vaddr_t,
vm_map_entry_t *));
-struct vmspace *uvmspace_fork __P((struct vmspace *));
-
#endif /* _KERNEL */
#endif /* _UVM_UVM_MAP_H_ */
diff --git a/sys/uvm/uvm_map_i.h b/sys/uvm/uvm_map_i.h
index 39262affb39..cac5159678e 100644
--- a/sys/uvm/uvm_map_i.h
+++ b/sys/uvm/uvm_map_i.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_map_i.h,v 1.7 2001/05/10 07:59:06 art Exp $ */
-/* $NetBSD: uvm_map_i.h,v 1.16 1999/07/01 20:07:05 thorpej Exp $ */
+/* $OpenBSD: uvm_map_i.h,v 1.8 2001/08/11 10:57:22 art Exp $ */
+/* $NetBSD: uvm_map_i.h,v 1.17 2000/05/08 22:59:35 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -132,8 +132,12 @@ uvm_map_setup(map, min, max, flags)
/*
* If the map is interrupt safe, place it on the list
* of interrupt safe maps, for uvm_fault().
+ *
+ * We almost never set up an interrupt-safe map, but we set
+ * up quite a few regular ones (at every fork!), so put
+ * interrupt-safe map setup in the slow path.
*/
- if (flags & VM_MAP_INTRSAFE) {
+ if (__predict_false(flags & VM_MAP_INTRSAFE)) {
struct vm_map_intrsafe *vmi = (struct vm_map_intrsafe *)map;
int s;
@@ -192,7 +196,7 @@ MAP_INLINE void
uvm_map_reference(map)
vm_map_t map;
{
- if (map == NULL) {
+ if (__predict_false(map == NULL)) {
#ifdef DIAGNOSTIC
printf("uvm_map_reference: reference to NULL map\n");
#ifdef DDB
@@ -220,7 +224,7 @@ uvm_map_deallocate(map)
{
int c;
- if (map == NULL) {
+ if (__predict_false(map == NULL)) {
#ifdef DIAGNOSTIC
printf("uvm_map_deallocate: reference to NULL map\n");
#ifdef DDB
diff --git a/sys/uvm/uvm_meter.c b/sys/uvm/uvm_meter.c
index e7eae692c65..31c783c027c 100644
--- a/sys/uvm/uvm_meter.c
+++ b/sys/uvm/uvm_meter.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_meter.c,v 1.11 2001/08/02 11:06:38 art Exp $ */
-/* $NetBSD: uvm_meter.c,v 1.11 2000/02/11 19:22:54 thorpej Exp $ */
+/* $OpenBSD: uvm_meter.c,v 1.12 2001/08/11 10:57:22 art Exp $ */
+/* $NetBSD: uvm_meter.c,v 1.12 2000/05/26 00:36:53 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
diff --git a/sys/uvm/uvm_mmap.c b/sys/uvm/uvm_mmap.c
index 95948d5f113..ee8a10c06c5 100644
--- a/sys/uvm/uvm_mmap.c
+++ b/sys/uvm/uvm_mmap.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_mmap.c,v 1.18 2001/08/06 14:03:05 art Exp $ */
-/* $NetBSD: uvm_mmap.c,v 1.38 2000/03/26 20:54:47 kleink Exp $ */
+/* $OpenBSD: uvm_mmap.c,v 1.19 2001/08/11 10:57:22 art Exp $ */
+/* $NetBSD: uvm_mmap.c,v 1.41 2000/05/23 02:19:20 enami Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -302,7 +302,7 @@ sys_mmap(p, v, retval)
void *v;
register_t *retval;
{
- register struct sys_mmap_args /* {
+ struct sys_mmap_args /* {
syscallarg(caddr_t) addr;
syscallarg(size_t) len;
syscallarg(int) prot;
@@ -318,8 +318,8 @@ sys_mmap(p, v, retval)
vm_prot_t prot, maxprot;
int flags, fd;
vaddr_t vm_min_address = VM_MIN_ADDRESS;
- register struct filedesc *fdp = p->p_fd;
- register struct file *fp;
+ struct filedesc *fdp = p->p_fd;
+ struct file *fp;
struct vnode *vp;
caddr_t handle;
int error;
@@ -405,6 +405,9 @@ sys_mmap(p, v, retval)
vp->v_type != VBLK)
return (ENODEV); /* only REG/CHR/BLK support mmap */
+ if (vp->v_type == VREG && (pos + size) < pos)
+ return (EINVAL); /* no offset wrapping */
+
/* special case: catch SunOS style /dev/zero */
if (vp->v_type == VCHR && iszerodev(vp->v_rdev)) {
flags |= MAP_ANON;
@@ -647,11 +650,11 @@ sys_msync(p, v, retval)
int
sys_munmap(p, v, retval)
- register struct proc *p;
+ struct proc *p;
void *v;
register_t *retval;
{
- register struct sys_munmap_args /* {
+ struct sys_munmap_args /* {
syscallarg(caddr_t) addr;
syscallarg(size_t) len;
} */ *uap = v;
@@ -789,7 +792,7 @@ sys_minherit(p, v, retval)
} */ *uap = v;
vaddr_t addr;
vsize_t size, pageoff;
- register vm_inherit_t inherit;
+ vm_inherit_t inherit;
addr = (vaddr_t)SCARG(uap, addr);
size = (vsize_t)SCARG(uap, len);
@@ -1232,10 +1235,6 @@ uvm_mmap(map, addr, size, prot, maxprot, flags, handle, foff, locklimit)
vm_map_lock(map);
if (map->flags & VM_MAP_WIREFUTURE) {
- /*
- * uvm_map_pageable() always returns the map
- * unlocked.
- */
if ((atop(size) + uvmexp.wired) > uvmexp.wiredmax
#ifdef pmap_wired_count
|| (locklimit != 0 && (size +
@@ -1244,10 +1243,15 @@ uvm_mmap(map, addr, size, prot, maxprot, flags, handle, foff, locklimit)
#endif
) {
retval = KERN_RESOURCE_SHORTAGE;
+ vm_map_unlock(map);
/* unmap the region! */
(void) uvm_unmap(map, *addr, *addr + size);
goto bad;
}
+ /*
+ * uvm_map_pageable() always returns the map
+ * unlocked.
+ */
retval = uvm_map_pageable(map, *addr, *addr + size,
FALSE, UVM_LK_ENTER);
if (retval != KERN_SUCCESS) {
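
The added VREG test in sys_mmap() rejects file mappings whose offset plus length wraps past the top of the offset range; on wraparound the sum comes out smaller than the original offset, which is what (pos + size) < pos detects. For illustration:

    /* pos = 0xfffffffffffff000, size = 0x2000:
     * pos + size wraps to 0xfff, which is < pos  ->  return EINVAL */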
diff --git a/sys/uvm/uvm_page.c b/sys/uvm/uvm_page.c
index a7a6462981a..bb5d3b6c6c8 100644
--- a/sys/uvm/uvm_page.c
+++ b/sys/uvm/uvm_page.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_page.c,v 1.21 2001/08/06 14:03:05 art Exp $ */
-/* $NetBSD: uvm_page.c,v 1.31 2000/03/26 20:54:47 kleink Exp $ */
+/* $OpenBSD: uvm_page.c,v 1.22 2001/08/11 10:57:22 art Exp $ */
+/* $NetBSD: uvm_page.c,v 1.37 2000/06/09 04:43:19 soda Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -74,7 +74,7 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
-#include <sys/proc.h>
+#include <sys/sched.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
@@ -95,6 +95,14 @@ struct vm_physseg vm_physmem[VM_PHYSSEG_MAX]; /* XXXCDC: uvm.physmem */
int vm_nphysseg = 0; /* XXXCDC: uvm.nphysseg */
/*
+ * Some supported CPUs in a given architecture don't support all
+ * of the things necessary to do idle page zero'ing efficiently.
+ * We therefore provide a way to disable it from machdep code here.
+ */
+
+boolean_t vm_page_zero_enable = TRUE;
+
+/*
* local variables
*/
@@ -218,8 +226,10 @@ uvm_page_init(kvm_startp, kvm_endp)
/*
* step 1: init the page queues and page queue locks
*/
- for (lcv = 0; lcv < VM_NFREELIST; lcv++)
- TAILQ_INIT(&uvm.page_free[lcv]);
+ for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
+ for (i = 0; i < PGFL_NQUEUES; i++)
+ TAILQ_INIT(&uvm.page_free[lcv].pgfl_queues[i]);
+ }
TAILQ_INIT(&uvm.page_active);
TAILQ_INIT(&uvm.page_inactive_swp);
TAILQ_INIT(&uvm.page_inactive_obj);
@@ -334,6 +344,16 @@ uvm_page_init(kvm_startp, kvm_endp)
uvmexp.reserve_pagedaemon = 4;
uvmexp.reserve_kernel = 6;
+ /*
+ * step 8: determine if we should zero pages in the idle
+ * loop.
+ *
+ * XXXJRT - might consider zero'ing up to the target *now*,
+ * but that could take an awfully long time if you
+ * have a lot of memory.
+ */
+ uvm.page_idle_zero = vm_page_zero_enable;
+
/*
* done!
*/
@@ -604,8 +624,8 @@ uvm_page_physload(start, end, avail_start, avail_end, free_list)
if (vm_nphysseg == VM_PHYSSEG_MAX) {
printf("uvm_page_physload: unable to load physical memory "
"segment\n");
- printf("\t%d segments allocated, ignoring 0x%lx -> 0x%lx\n",
- VM_PHYSSEG_MAX, start, end);
+ printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
+ VM_PHYSSEG_MAX, (long long)start, (long long)end);
return;
}
@@ -827,9 +847,11 @@ uvm_page_physdump()
printf("rehash: physical memory config [segs=%d of %d]:\n",
vm_nphysseg, VM_PHYSSEG_MAX);
for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
- printf("0x%lx->0x%lx [0x%lx->0x%lx]\n", vm_physmem[lcv].start,
- vm_physmem[lcv].end, vm_physmem[lcv].avail_start,
- vm_physmem[lcv].avail_end);
+ printf("0x%llx->0x%llx [0x%llx->0x%llx]\n",
+ (long long)vm_physmem[lcv].start,
+ (long long)vm_physmem[lcv].end,
+ (long long)vm_physmem[lcv].avail_start,
+ (long long)vm_physmem[lcv].avail_end);
printf("STRATEGY = ");
switch (VM_PHYSSEG_STRAT) {
case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break;
@@ -851,6 +873,12 @@ uvm_page_physdump()
* => only one of obj or anon can be non-null
* => caller must activate/deactivate page if it is not wired.
* => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL.
+ * => policy decision: it is more important to pull a page off of the
+ * appropriate priority free list than it is to get a zero'd or
+ * unknown contents page. This is because we live with the
+ * consequences of a bad free list decision for the entire
+ * lifetime of the page, e.g. if the page comes from memory that
+ * is slower to access.
*/
struct vm_page *
@@ -861,9 +889,10 @@ uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
struct vm_anon *anon;
int strat, free_list;
{
- int lcv, s;
+ int lcv, try1, try2, s, zeroit = 0;
struct vm_page *pg;
struct pglist *freeq;
+ struct pgfreelist *pgfl;
boolean_t use_reserve;
#ifdef DIAGNOSTIC
@@ -900,13 +929,32 @@ uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
curproc == syncerproc))))
goto fail;
+#if PGFL_NQUEUES != 2
+#error uvm_pagealloc_strat needs to be updated
+#endif
+
+ /*
+ * If we want a zero'd page, try the ZEROS queue first, otherwise
+ * we try the UNKNOWN queue first.
+ */
+ if (flags & UVM_PGA_ZERO) {
+ try1 = PGFL_ZEROS;
+ try2 = PGFL_UNKNOWN;
+ } else {
+ try1 = PGFL_UNKNOWN;
+ try2 = PGFL_ZEROS;
+ }
+
again:
switch (strat) {
case UVM_PGA_STRAT_NORMAL:
/* Check all freelists in descending priority order. */
for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
- freeq = &uvm.page_free[lcv];
- if ((pg = freeq->tqh_first) != NULL)
+ pgfl = &uvm.page_free[lcv];
+ if ((pg = TAILQ_FIRST((freeq =
+ &pgfl->pgfl_queues[try1]))) != NULL ||
+ (pg = TAILQ_FIRST((freeq =
+ &pgfl->pgfl_queues[try2]))) != NULL)
goto gotit;
}
@@ -921,8 +969,11 @@ uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
panic("uvm_pagealloc_strat: bad free list %d",
free_list);
#endif
- freeq = &uvm.page_free[free_list];
- if ((pg = freeq->tqh_first) != NULL)
+ pgfl = &uvm.page_free[free_list];
+ if ((pg = TAILQ_FIRST((freeq =
+ &pgfl->pgfl_queues[try1]))) != NULL ||
+ (pg = TAILQ_FIRST((freeq =
+ &pgfl->pgfl_queues[try2]))) != NULL)
goto gotit;
/* Fall back, if possible. */
@@ -943,6 +994,24 @@ uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
TAILQ_REMOVE(freeq, pg, pageq);
uvmexp.free--;
+ /* update zero'd page count */
+ if (pg->flags & PG_ZERO)
+ uvmexp.zeropages--;
+
+ /*
+ * update allocation statistics and remember if we have to
+ * zero the page
+ */
+ if (flags & UVM_PGA_ZERO) {
+ if (pg->flags & PG_ZERO) {
+ uvmexp.pga_zerohit++;
+ zeroit = 0;
+ } else {
+ uvmexp.pga_zeromiss++;
+ zeroit = 1;
+ }
+ }
+
uvm_unlock_fpageq(s); /* unlock free page queue */
pg->offset = off;
@@ -965,6 +1034,16 @@ uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
#endif
UVM_PAGE_OWN(pg, "new alloc");
+ if (flags & UVM_PGA_ZERO) {
+ /*
+ * A zero'd page is not clean. If we got a page not already
+ * zero'd, then we have to zero it ourselves.
+ */
+ pg->flags &= ~PG_CLEAN;
+ if (zeroit)
+ pmap_zero_page(VM_PAGE_TO_PHYS(pg));
+ }
+
return(pg);
fail:
@@ -1143,8 +1222,7 @@ struct vm_page *pg;
/*
* if the page was wired, unwire it now.
*/
- if (pg->wire_count)
- {
+ if (pg->wire_count) {
pg->wire_count = 0;
uvmexp.wired--;
}
@@ -1153,9 +1231,11 @@ struct vm_page *pg;
* and put on free queue
*/
+ pg->flags &= ~PG_ZERO;
+
s = uvm_lock_fpageq();
- TAILQ_INSERT_TAIL(&uvm.page_free[uvm_page_lookup_freelist(pg)],
- pg, pageq);
+ TAILQ_INSERT_TAIL(&uvm.page_free[
+ uvm_page_lookup_freelist(pg)].pgfl_queues[PGFL_UNKNOWN], pg, pageq);
pg->pqflags = PQ_FREE;
#ifdef DEBUG
pg->uobject = (void *)0xdeadbeef;
@@ -1163,6 +1243,10 @@ struct vm_page *pg;
pg->uanon = (void *)0xdeadbeef;
#endif
uvmexp.free++;
+
+ if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
+ uvm.page_idle_zero = vm_page_zero_enable;
+
uvm_unlock_fpageq(s);
}
@@ -1204,3 +1288,66 @@ uvm_page_own(pg, tag)
return;
}
#endif
+
+/*
+ * uvm_pageidlezero: zero free pages while the system is idle.
+ *
+ * => we do at least one iteration per call, if we are below the target.
+ * => we loop until we either reach the target or whichqs indicates that
+ * there is a process ready to run.
+ */
+void
+uvm_pageidlezero()
+{
+ struct vm_page *pg;
+ struct pgfreelist *pgfl;
+ int free_list, s;
+
+ do {
+ s = uvm_lock_fpageq();
+
+ if (uvmexp.zeropages >= UVM_PAGEZERO_TARGET) {
+ uvm.page_idle_zero = FALSE;
+ uvm_unlock_fpageq(s);
+ return;
+ }
+
+ for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
+ pgfl = &uvm.page_free[free_list];
+ if ((pg = TAILQ_FIRST(&pgfl->pgfl_queues[
+ PGFL_UNKNOWN])) != NULL)
+ break;
+ }
+
+ if (pg == NULL) {
+ /*
+ * No non-zero'd pages; don't bother trying again
+ * until we know we have non-zero'd pages free.
+ */
+ uvm.page_idle_zero = FALSE;
+ uvm_unlock_fpageq(s);
+ return;
+ }
+
+ TAILQ_REMOVE(&pgfl->pgfl_queues[PGFL_UNKNOWN], pg, pageq);
+ uvmexp.free--;
+ uvm_unlock_fpageq(s);
+
+#ifdef PMAP_PAGEIDLEZERO
+ PMAP_PAGEIDLEZERO(VM_PAGE_TO_PHYS(pg));
+#else
+ /*
+ * XXX This will toast the cache unless the pmap_zero_page()
+ * XXX implementation does uncached access.
+ */
+ pmap_zero_page(VM_PAGE_TO_PHYS(pg));
+#endif
+ pg->flags |= PG_ZERO;
+
+ s = uvm_lock_fpageq();
+ TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_ZEROS], pg, pageq);
+ uvmexp.free++;
+ uvmexp.zeropages++;
+ uvm_unlock_fpageq(s);
+ } while (whichqs == 0);
+}
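
Taken together, the uvm_page.c changes split each free list into two queues, PGFL_ZEROS and PGFL_UNKNOWN. The try1/try2 ordering in uvm_pagealloc_strat() means UVM_PGA_ZERO requests prefer pre-zero'd pages (counting pga_zerohit) and fall back to zeroing an unknown page themselves (pga_zeromiss), while ordinary requests consume unknown pages first so the zero'd inventory is preserved for callers who want it. Condensed from the hunks above:

    /* condensed from uvm_pagealloc_strat() */
    if (flags & UVM_PGA_ZERO) {
    	try1 = PGFL_ZEROS;	/* prefer pages already zero'd */
    	try2 = PGFL_UNKNOWN;	/* else zero one ourselves */
    } else {
    	try1 = PGFL_UNKNOWN;	/* don't waste zero'd pages */
    	try2 = PGFL_ZEROS;
    }
    pg = TAILQ_FIRST(&pgfl->pgfl_queues[try1]);
    if (pg == NULL)
    	pg = TAILQ_FIRST(&pgfl->pgfl_queues[try2]);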
diff --git a/sys/uvm/uvm_page.h b/sys/uvm/uvm_page.h
index 484c3a6cb54..e2a9541d828 100644
--- a/sys/uvm/uvm_page.h
+++ b/sys/uvm/uvm_page.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_page.h,v 1.8 2001/08/06 14:03:05 art Exp $ */
-/* $NetBSD: uvm_page.h,v 1.14 2000/03/26 20:54:47 kleink Exp $ */
+/* $OpenBSD: uvm_page.h,v 1.9 2001/08/11 10:57:22 art Exp $ */
+/* $NetBSD: uvm_page.h,v 1.15 2000/04/24 17:12:01 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -77,6 +77,12 @@
#ifdef _KERNEL
/*
+ * globals
+ */
+
+extern boolean_t vm_page_zero_enable;
+
+/*
* macros
*/
@@ -86,6 +92,8 @@
#define uvm_pagehash(obj,off) \
(((unsigned long)obj+(unsigned long)atop(off)) & uvm.page_hashmask)
+#define UVM_PAGEZERO_TARGET (uvmexp.free)
+
/*
* handle inline options
*/
@@ -108,6 +116,7 @@ void uvm_page_own __P((struct vm_page *, char *));
boolean_t uvm_page_physget __P((paddr_t *));
#endif
void uvm_page_rehash __P((void));
+void uvm_pageidlezero __P((void));
PAGE_INLINE int uvm_lock_fpageq __P((void));
PAGE_INLINE void uvm_unlock_fpageq __P((int));
diff --git a/sys/uvm/uvm_page_i.h b/sys/uvm/uvm_page_i.h
index c38ea665282..6576d6b118b 100644
--- a/sys/uvm/uvm_page_i.h
+++ b/sys/uvm/uvm_page_i.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_page_i.h,v 1.7 2001/08/06 14:03:05 art Exp $ */
-/* $NetBSD: uvm_page_i.h,v 1.12 2000/03/26 20:54:47 kleink Exp $ */
+/* $OpenBSD: uvm_page_i.h,v 1.8 2001/08/11 10:57:22 art Exp $ */
+/* $NetBSD: uvm_page_i.h,v 1.13 2000/05/08 23:11:53 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -216,7 +216,7 @@ uvm_pagedeactivate(pg)
}
if ((pg->pqflags & PQ_INACTIVE) == 0) {
#ifdef DIAGNOSTIC
- if (pg->wire_count)
+ if (__predict_false(pg->wire_count))
panic("uvm_pagedeactivate: caller did not check "
"wire count");
#endif
@@ -312,7 +312,7 @@ uvm_page_lookup_freelist(pg)
lcv = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
#ifdef DIAGNOSTIC
- if (lcv == -1)
+ if (__predict_false(lcv == -1))
panic("uvm_page_lookup_freelist: unable to locate physseg");
#endif
return (vm_physmem[lcv].free_list);
diff --git a/sys/uvm/uvm_pager.c b/sys/uvm/uvm_pager.c
index caa21d61922..f69f45b16d4 100644
--- a/sys/uvm/uvm_pager.c
+++ b/sys/uvm/uvm_pager.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_pager.c,v 1.13 2001/08/06 14:03:05 art Exp $ */
-/* $NetBSD: uvm_pager.c,v 1.26 2000/03/26 20:54:47 kleink Exp $ */
+/* $OpenBSD: uvm_pager.c,v 1.14 2001/08/11 10:57:22 art Exp $ */
+/* $NetBSD: uvm_pager.c,v 1.30 2000/05/20 03:36:06 thorpej Exp $ */
/*
*
@@ -55,7 +55,6 @@
* list of uvm pagers in the system
*/
-extern struct uvm_pagerops aobj_pager;
extern struct uvm_pagerops uvm_deviceops;
extern struct uvm_pagerops uvm_vnodeops;
@@ -114,31 +113,39 @@ uvm_pager_init()
*
* we basically just map in a blank map entry to reserve the space in the
* map and then use pmap_enter() to put the mappings in by hand.
- *
- * XXX It would be nice to know the direction of the I/O, so that we can
- * XXX map only what is necessary.
*/
vaddr_t
-uvm_pagermapin(pps, npages, aiop, waitf)
+uvm_pagermapin(pps, npages, aiop, flags)
struct vm_page **pps;
int npages;
struct uvm_aiodesc **aiop; /* OUT */
- int waitf;
+ int flags;
{
vsize_t size;
vaddr_t kva;
struct uvm_aiodesc *aio;
vaddr_t cva;
struct vm_page *pp;
+ vm_prot_t prot;
UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);
- UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d, aiop=0x%x, waitf=%d)",
- pps, npages, aiop, waitf);
+ UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d, aiop=0x%x, flags=0x%x)",
+ pps, npages, aiop, flags);
+
+ /*
+ * compute protection. outgoing I/O only needs read
+ * access to the page, whereas incoming needs read/write.
+ */
+
+ prot = VM_PROT_READ;
+ if (flags & UVMPAGER_MAPIN_READ)
+ prot |= VM_PROT_WRITE;
ReStart:
if (aiop) {
- MALLOC(aio, struct uvm_aiodesc *, sizeof(*aio), M_TEMP, waitf);
+ MALLOC(aio, struct uvm_aiodesc *, sizeof(*aio), M_TEMP,
+ (flags & UVMPAGER_MAPIN_WAITOK));
if (aio == NULL)
return(0);
*aiop = aio;
@@ -147,15 +154,15 @@ ReStart:
}
size = npages << PAGE_SHIFT;
- kva = NULL; /* let system choose VA */
+ kva = 0; /* let system choose VA */
if (uvm_map(pager_map, &kva, size, NULL,
UVM_UNKNOWN_OFFSET, UVM_FLAG_NOMERGE) != KERN_SUCCESS) {
- if (waitf == M_NOWAIT) {
+ if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
if (aio)
FREE(aio, M_TEMP);
UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
- return(NULL);
+ return(0);
}
simple_lock(&pager_map_wanted_lock);
pager_map_wanted = TRUE;
@@ -172,14 +179,8 @@ ReStart:
if ((pp->flags & PG_BUSY) == 0)
panic("uvm_pagermapin: page not busy");
#endif
-
- /*
- * XXX VM_PROT_DEFAULT includes VM_PROT_EXEC; is that
- * XXX really necessary? It could lead to unnecessary
- * XXX instruction cache flushes.
- */
pmap_enter(vm_map_pmap(pager_map), cva, VM_PAGE_TO_PHYS(pp),
- VM_PROT_DEFAULT, PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
+ prot, PMAP_WIRED | prot);
}
UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
@@ -360,41 +361,6 @@ uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi)
return(ppsp);
}
-
-/*
- * uvm_shareprot: generic share protect routine
- *
- * => caller must lock map entry's map
- * => caller must lock object pointed to by map entry
- */
-
-void
-uvm_shareprot(entry, prot)
- vm_map_entry_t entry;
- vm_prot_t prot;
-{
- struct uvm_object *uobj = entry->object.uvm_obj;
- struct vm_page *pp;
- voff_t start, stop;
- UVMHIST_FUNC("uvm_shareprot"); UVMHIST_CALLED(maphist);
-
- if (UVM_ET_ISSUBMAP(entry))
- panic("uvm_shareprot: non-object attached");
-
- start = entry->offset;
- stop = start + (entry->end - entry->start);
-
- /*
- * traverse list of pages in object. if page in range, pmap_prot it
- */
-
- for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = pp->listq.tqe_next) {
- if (pp->offset >= start && pp->offset < stop)
- pmap_page_protect(pp, prot);
- }
- UVMHIST_LOG(maphist, "<- done",0,0,0,0);
-}
-
/*
* uvm_pager_put: high level pageout routine
*
diff --git a/sys/uvm/uvm_pager.h b/sys/uvm/uvm_pager.h
index d63c94df155..d2bf315228e 100644
--- a/sys/uvm/uvm_pager.h
+++ b/sys/uvm/uvm_pager.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_pager.h,v 1.9 2001/08/06 14:03:05 art Exp $ */
-/* $NetBSD: uvm_pager.h,v 1.12 2000/03/26 20:54:47 kleink Exp $ */
+/* $OpenBSD: uvm_pager.h,v 1.10 2001/08/11 10:57:22 art Exp $ */
+/* $NetBSD: uvm_pager.h,v 1.15 2000/05/19 03:45:04 thorpej Exp $ */
/*
*
@@ -88,8 +88,6 @@ struct uvm_pagerops {
__P((struct uvm_object *, struct vm_page **,
int *, struct vm_page *, int, voff_t,
voff_t));
- void (*pgo_shareprot) /* share protect */
- __P((vm_map_entry_t, vm_prot_t));
void (*pgo_aiodone) /* async iodone */
__P((struct uvm_aiodesc *));
boolean_t (*pgo_releasepg) /* release page */
@@ -148,7 +146,11 @@ void uvm_pagermapout __P((vaddr_t, int));
struct vm_page **uvm_mk_pcluster __P((struct uvm_object *, struct vm_page **,
int *, struct vm_page *, int,
voff_t, voff_t));
-void uvm_shareprot __P((vm_map_entry_t, vm_prot_t));
+
+/* Flags to uvm_pagermapin() */
+#define UVMPAGER_MAPIN_WAITOK 0x01 /* it's okay to wait */
+#define UVMPAGER_MAPIN_READ 0x02 /* host <- device */
+#define UVMPAGER_MAPIN_WRITE 0x00 /* device -> host (pseudo flag) */
#endif /* _KERNEL */
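
Note the flag names are from the device's point of view: UVMPAGER_MAPIN_READ is "host <- device", i.e. paging in, which is why it maps the pages with write permission in the uvm_pagermapin() hunk above. Callers translate their buf flags accordingly, as uvm_swap_io() does later in this diff:

    /* from the uvm_swap.c hunk below: derive mapin flags */
    mapinflags = (flags & B_READ) ?
        UVMPAGER_MAPIN_READ :	/* paging in: pages get written */
        UVMPAGER_MAPIN_WRITE;	/* paging out: pages only read */
    if ((flags & B_ASYNC) == 0)
        mapinflags |= UVMPAGER_MAPIN_WAITOK;
    kva = uvm_pagermapin(pps, npages, NULL, mapinflags);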
diff --git a/sys/uvm/uvm_pager_i.h b/sys/uvm/uvm_pager_i.h
index ae629e28bf0..c7776c42bdd 100644
--- a/sys/uvm/uvm_pager_i.h
+++ b/sys/uvm/uvm_pager_i.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_pager_i.h,v 1.6 2001/06/08 08:09:40 art Exp $ */
-/* $NetBSD: uvm_pager_i.h,v 1.8 1999/07/08 18:11:03 thorpej Exp $ */
+/* $OpenBSD: uvm_pager_i.h,v 1.7 2001/08/11 10:57:22 art Exp $ */
+/* $NetBSD: uvm_pager_i.h,v 1.9 2000/05/08 23:13:42 thorpej Exp $ */
/*
*
@@ -59,7 +59,7 @@ uvm_pageratop(kva)
{
paddr_t pa;
- if (pmap_extract(pmap_kernel(), kva, &pa) == FALSE)
+ if (__predict_false(pmap_extract(pmap_kernel(), kva, &pa) == FALSE))
panic("uvm_pageratop");
return (PHYS_TO_VM_PAGE(pa));
}
diff --git a/sys/uvm/uvm_pdaemon.c b/sys/uvm/uvm_pdaemon.c
index 7cdcb9d0bf0..4b7588d309e 100644
--- a/sys/uvm/uvm_pdaemon.c
+++ b/sys/uvm/uvm_pdaemon.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_pdaemon.c,v 1.11 2001/07/18 14:27:07 art Exp $ */
+/* $OpenBSD: uvm_pdaemon.c,v 1.12 2001/08/11 10:57:22 art Exp $ */
/* $NetBSD: uvm_pdaemon.c,v 1.19 1999/11/04 21:51:42 thorpej Exp $ */
/*
@@ -148,7 +148,7 @@ uvm_wait(wmsg)
simple_lock(&uvm.pagedaemon_lock);
wakeup(&uvm.pagedaemon); /* wake the daemon! */
- UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm.pagedaemon_lock, FALSE, wmsg,
+ UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm.pagedaemon_lock, FALSE, (char *)wmsg,
timo);
splx(s);
diff --git a/sys/uvm/uvm_pglist.c b/sys/uvm/uvm_pglist.c
index 4b18657f22a..1bafc44c2a1 100644
--- a/sys/uvm/uvm_pglist.c
+++ b/sys/uvm/uvm_pglist.c
@@ -1,8 +1,6 @@
-/* $OpenBSD: uvm_pglist.c,v 1.6 2001/03/08 15:21:37 smart Exp $ */
-/* $NetBSD: uvm_pglist.c,v 1.8 1999/07/22 22:58:39 thorpej Exp $ */
+/* $OpenBSD: uvm_pglist.c,v 1.7 2001/08/11 10:57:22 art Exp $ */
+/* $NetBSD: uvm_pglist.c,v 1.10 2000/05/20 19:54:01 thorpej Exp $ */
-#define VM_PAGE_ALLOC_MEMORY_STATS
-
/*-
* Copyright (c) 1997 The NetBSD Foundation, Inc.
* All rights reserved.
@@ -42,9 +40,6 @@
/*
* uvm_pglist.c: pglist functions
- *
- * XXX: was part of uvm_page but has an incompatable copyright so it
- * gets its own file now.
*/
#include <sys/param.h>
@@ -53,8 +48,6 @@
#include <sys/proc.h>
#include <vm/vm.h>
-#include <vm/vm_page.h>
-#include <vm/vm_kern.h>
#include <uvm/uvm.h>
@@ -101,7 +94,7 @@ uvm_pglistalloc(size, low, high, alignment, boundary, rlist, nsegs, waitok)
paddr_t try, idxpa, lastidxpa;
int psi;
struct vm_page *pgs;
- int s, tryidx, idx, end, error, free_list;
+ int s, tryidx, idx, pgflidx, end, error, free_list;
vm_page_t m;
u_long pagemask;
#ifdef DEBUG
@@ -110,10 +103,10 @@ uvm_pglistalloc(size, low, high, alignment, boundary, rlist, nsegs, waitok)
#ifdef DIAGNOSTIC
if ((alignment & (alignment - 1)) != 0)
- panic("vm_page_alloc_memory: alignment must be power of 2");
+ panic("uvm_pglistalloc: alignment must be power of 2");
if ((boundary & (boundary - 1)) != 0)
- panic("vm_page_alloc_memory: boundary must be power of 2");
+ panic("uvm_pglistalloc: boundary must be power of 2");
#endif
/*
@@ -140,10 +133,8 @@ uvm_pglistalloc(size, low, high, alignment, boundary, rlist, nsegs, waitok)
s = uvm_lock_fpageq(); /* lock free page queue */
/* Are there even any free pages? */
- for (idx = 0; idx < VM_NFREELIST; idx++)
- if (uvm.page_free[idx].tqh_first != NULL)
- break;
- if (idx == VM_NFREELIST)
+ if (uvmexp.free <= (uvmexp.reserve_pagedaemon +
+ uvmexp.reserve_kernel))
goto out;
for (;; try += alignment) {
@@ -207,6 +198,10 @@ uvm_pglistalloc(size, low, high, alignment, boundary, rlist, nsegs, waitok)
}
}
+#if PGFL_NQUEUES != 2
+#error uvm_pglistalloc needs to be updated
+#endif
+
/*
* we have a chunk of memory that conforms to the requested constraints.
*/
@@ -214,17 +209,23 @@ uvm_pglistalloc(size, low, high, alignment, boundary, rlist, nsegs, waitok)
while (idx < end) {
m = &pgs[idx];
free_list = uvm_page_lookup_freelist(m);
+ pgflidx = (m->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN;
#ifdef DEBUG
- for (tp = uvm.page_free[free_list].tqh_first;
- tp != NULL; tp = tp->pageq.tqe_next) {
+ for (tp = TAILQ_FIRST(&uvm.page_free[
+ free_list].pgfl_queues[pgflidx]);
+ tp != NULL;
+ tp = TAILQ_NEXT(tp, pageq)) {
if (tp == m)
break;
}
if (tp == NULL)
panic("uvm_pglistalloc: page not on freelist");
#endif
- TAILQ_REMOVE(&uvm.page_free[free_list], m, pageq);
+ TAILQ_REMOVE(&uvm.page_free[free_list].pgfl_queues[pgflidx],
+ m, pageq);
uvmexp.free--;
+ if (m->flags & PG_ZERO)
+ uvmexp.zeropages--;
m->flags = PG_CLEAN;
m->pqflags = 0;
m->uobject = NULL;
@@ -279,9 +280,12 @@ uvm_pglistfree(list)
#endif
TAILQ_REMOVE(list, m, pageq);
m->pqflags = PQ_FREE;
- TAILQ_INSERT_TAIL(&uvm.page_free[uvm_page_lookup_freelist(m)],
+ TAILQ_INSERT_TAIL(&uvm.page_free[
+ uvm_page_lookup_freelist(m)].pgfl_queues[PGFL_UNKNOWN],
m, pageq);
uvmexp.free++;
+ if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
+ uvm.page_idle_zero = vm_page_zero_enable;
STAT_DECR(uvm_pglistalloc_npages);
}
diff --git a/sys/uvm/uvm_stat.h b/sys/uvm/uvm_stat.h
index d9c462bf543..f752eff6501 100644
--- a/sys/uvm/uvm_stat.h
+++ b/sys/uvm/uvm_stat.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_stat.h,v 1.8 2001/06/24 21:29:04 mickey Exp $ */
-/* $NetBSD: uvm_stat.h,v 1.15 1999/06/21 17:25:12 thorpej Exp $ */
+/* $OpenBSD: uvm_stat.h,v 1.9 2001/08/11 10:57:22 art Exp $ */
+/* $NetBSD: uvm_stat.h,v 1.18 2000/04/11 08:12:14 pk Exp $ */
/*
*
@@ -192,24 +192,24 @@ do { \
#define UVMHIST_LOG(NAME,FMT,A,B,C,D) \
do { \
- register int i, s = splhigh(); \
+ int _i_, _s_ = splhigh(); \
simple_lock(&(NAME).l); \
- i = (NAME).f; \
- (NAME).f = (i + 1) % (NAME).n; \
+ _i_ = (NAME).f; \
+ (NAME).f = (_i_ + 1) % (NAME).n; \
simple_unlock(&(NAME).l); \
- splx(s); \
+ splx(_s_); \
if (!cold) \
- microtime(&(NAME).e[i].tv); \
- (NAME).e[i].fmt = (FMT); \
- (NAME).e[i].fmtlen = strlen((NAME).e[i].fmt); \
- (NAME).e[i].fn = _uvmhist_name; \
- (NAME).e[i].fnlen = strlen((NAME).e[i].fn); \
- (NAME).e[i].call = _uvmhist_call; \
- (NAME).e[i].v[0] = (u_long)(A); \
- (NAME).e[i].v[1] = (u_long)(B); \
- (NAME).e[i].v[2] = (u_long)(C); \
- (NAME).e[i].v[3] = (u_long)(D); \
- UVMHIST_PRINTNOW(&((NAME).e[i])); \
+ microtime(&(NAME).e[_i_].tv); \
+ (NAME).e[_i_].fmt = (FMT); \
+ (NAME).e[_i_].fmtlen = strlen((NAME).e[_i_].fmt); \
+ (NAME).e[_i_].fn = _uvmhist_name; \
+ (NAME).e[_i_].fnlen = strlen((NAME).e[_i_].fn); \
+ (NAME).e[_i_].call = _uvmhist_call; \
+ (NAME).e[_i_].v[0] = (u_long)(A); \
+ (NAME).e[_i_].v[1] = (u_long)(B); \
+ (NAME).e[_i_].v[2] = (u_long)(C); \
+ (NAME).e[_i_].v[3] = (u_long)(D); \
+ UVMHIST_PRINTNOW(&((NAME).e[_i_])); \
} while (0)
#define UVMHIST_CALLED(NAME) \
diff --git a/sys/uvm/uvm_swap.c b/sys/uvm/uvm_swap.c
index 1b85acc0f9a..86c8d0b0c36 100644
--- a/sys/uvm/uvm_swap.c
+++ b/sys/uvm/uvm_swap.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_swap.c,v 1.33 2001/08/06 22:34:44 mickey Exp $ */
-/* $NetBSD: uvm_swap.c,v 1.34 2000/02/07 20:16:59 thorpej Exp $ */
+/* $OpenBSD: uvm_swap.c,v 1.34 2001/08/11 10:57:22 art Exp $ */
+/* $NetBSD: uvm_swap.c,v 1.37 2000/05/19 03:45:04 thorpej Exp $ */
/*
* Copyright (c) 1995, 1996, 1997 Matthew R. Green
@@ -35,6 +35,7 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
+#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/disklabel.h>
@@ -48,10 +49,8 @@
#include <sys/pool.h>
#include <sys/syscallargs.h>
#include <sys/swap.h>
-#include <sys/conf.h>
#include <vm/vm.h>
-
#include <uvm/uvm.h>
#ifdef UVM_SWAP_ENCRYPT
#include <sys/syslog.h>
@@ -230,6 +229,9 @@ struct pool vndbuf_pool;
pool_put(&vndbuf_pool, (void *)(vbp)); \
}
+/* /dev/drum */
+bdev_decl(sw);
+cdev_decl(sw);
/*
* local variables
@@ -1575,6 +1577,13 @@ sw_reg_iodone(bp)
}
/*
+ * disassociate this buffer from the vnode (if any).
+ */
+ if (vbp->vb_buf.b_vp != NULLVP) {
+ brelvp(&vbp->vb_buf);
+ }
+
+ /*
* kill vbp structure
*/
putvndbuf(vbp);
@@ -1865,7 +1874,7 @@ uvm_swap_io(pps, startslot, npages, flags)
struct swapbuf *sbp;
struct buf *bp;
vaddr_t kva;
- int result, s, waitf, pflag;
+ int result, s, mapinflags, pflag;
#ifdef UVM_SWAP_ENCRYPT
vaddr_t dstkva;
struct vm_page *tpps[MAXBSIZE >> PAGE_SHIFT];
@@ -1888,9 +1897,12 @@ uvm_swap_io(pps, startslot, npages, flags)
* an aiodesc structure because we don't want to chance a malloc.
* we've got our own pool of aiodesc structures (in swapbuf).
*/
- waitf = (flags & B_ASYNC) ? M_NOWAIT : M_WAITOK;
- kva = uvm_pagermapin(pps, npages, NULL, waitf);
- if (kva == NULL)
+ mapinflags = (flags & B_READ) ? UVMPAGER_MAPIN_READ :
+ UVMPAGER_MAPIN_WRITE;
+ if ((flags & B_ASYNC) == 0)
+ mapinflags |= UVMPAGER_MAPIN_WAITOK;
+ kva = uvm_pagermapin(pps, npages, NULL, mapinflags);
+ if (kva == 0)
return (VM_PAGER_AGAIN);
#ifdef UVM_SWAP_ENCRYPT
@@ -1928,13 +1940,19 @@ uvm_swap_io(pps, startslot, npages, flags)
caddr_t src, dst;
struct swap_key *key;
u_int64_t block;
+ int swmapflags;
+
+ /* We always need write access. */
+ swmapflags = UVMPAGER_MAPIN_READ;
+ if ((flags & B_ASYNC) == 0)
+ swmapflags |= UVMPAGER_MAPIN_WAITOK;
if (!uvm_swap_allocpages(tpps, npages)) {
uvm_pagermapout(kva, npages);
return (VM_PAGER_AGAIN);
}
- dstkva = uvm_pagermapin(tpps, npages, NULL, waitf);
+ dstkva = uvm_pagermapin(tpps, npages, NULL, swmapflags);
if (dstkva == NULL) {
uvm_pagermapout(kva, npages);
uvm_swap_freepages(tpps, npages);
diff --git a/sys/uvm/uvm_unix.c b/sys/uvm/uvm_unix.c
index adff42fd360..b0645679815 100644
--- a/sys/uvm/uvm_unix.c
+++ b/sys/uvm/uvm_unix.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_unix.c,v 1.12 2001/08/06 14:03:05 art Exp $ */
-/* $NetBSD: uvm_unix.c,v 1.11 2000/03/26 20:54:47 kleink Exp $ */
+/* $OpenBSD: uvm_unix.c,v 1.13 2001/08/11 10:57:22 art Exp $ */
+/* $NetBSD: uvm_unix.c,v 1.12 2000/03/30 12:31:50 augustss Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -128,8 +128,8 @@ uvm_grow(p, sp)
struct proc *p;
vaddr_t sp;
{
- register struct vmspace *vm = p->p_vmspace;
- register int si;
+ struct vmspace *vm = p->p_vmspace;
+ int si;
/*
* For user defined stacks (from sendsig).
@@ -196,9 +196,9 @@ uvm_coredump(p, vp, cred, chdr)
struct ucred *cred;
struct core *chdr;
{
- register struct vmspace *vm = p->p_vmspace;
- register vm_map_t map = &vm->vm_map;
- register vm_map_entry_t entry;
+ struct vmspace *vm = p->p_vmspace;
+ vm_map_t map = &vm->vm_map;
+ vm_map_entry_t entry;
vaddr_t start, end;
struct coreseg cseg;
off_t offset;
diff --git a/sys/uvm/uvm_vnode.c b/sys/uvm/uvm_vnode.c
index a9ba03bd53a..cb362452c27 100644
--- a/sys/uvm/uvm_vnode.c
+++ b/sys/uvm/uvm_vnode.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_vnode.c,v 1.17 2001/08/06 14:03:05 art Exp $ */
-/* $NetBSD: uvm_vnode.c,v 1.30 2000/03/26 20:54:47 kleink Exp $ */
+/* $OpenBSD: uvm_vnode.c,v 1.18 2001/08/11 10:57:22 art Exp $ */
+/* $NetBSD: uvm_vnode.c,v 1.33 2000/05/19 03:45:05 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -90,7 +90,6 @@ lock_data_t uvn_sync_lock; /* locks sync operation */
static int uvn_asyncget __P((struct uvm_object *, voff_t,
int));
-struct uvm_object *uvn_attach __P((void *, vm_prot_t));
static void uvn_cluster __P((struct uvm_object *, voff_t,
voff_t *, voff_t *));
static void uvn_detach __P((struct uvm_object *));
@@ -123,7 +122,6 @@ struct uvm_pagerops uvm_vnodeops = {
uvn_put,
uvn_cluster,
uvm_mk_pcluster, /* use generic version of this: see uvm_pager.c */
- uvm_shareprot, /* !NULL: allow us in share maps */
NULL, /* AIO-DONE function (not until we have asyncio) */
uvn_releasepg,
};
@@ -1594,7 +1592,7 @@ uvn_io(uvn, pps, npages, flags, rw)
struct iovec iov;
vaddr_t kva;
off_t file_offset;
- int waitf, result;
+ int waitf, result, mapinflags;
size_t got, wanted;
UVMHIST_FUNC("uvn_io"); UVMHIST_CALLED(maphist);
@@ -1638,8 +1636,11 @@ uvn_io(uvn, pps, npages, flags, rw)
* first try and map the pages in (without waiting)
*/
- kva = uvm_pagermapin(pps, npages, NULL, M_NOWAIT);
- if (kva == NULL && waitf == M_NOWAIT) {
+ mapinflags = (rw == UIO_READ) ?
+ UVMPAGER_MAPIN_READ : UVMPAGER_MAPIN_WRITE;
+
+ kva = uvm_pagermapin(pps, npages, NULL, mapinflags);
+ if (kva == 0 && waitf == M_NOWAIT) {
simple_unlock(&uvn->u_obj.vmobjlock);
UVMHIST_LOG(maphist,"<- mapin failed (try again)",0,0,0,0);
return(VM_PAGER_AGAIN);
@@ -1654,9 +1655,9 @@ uvn_io(uvn, pps, npages, flags, rw)
uvn->u_nio++; /* we have an I/O in progress! */
simple_unlock(&uvn->u_obj.vmobjlock);
/* NOTE: object now unlocked */
- if (kva == NULL) {
- kva = uvm_pagermapin(pps, npages, NULL, M_WAITOK);
- }
+ if (kva == 0)
+ kva = uvm_pagermapin(pps, npages, NULL,
+ mapinflags | UVMPAGER_MAPIN_WAITOK);
/*
* ok, mapped in. our pages are PG_BUSY so they are not going to