author     Artur Grabowski <art@cvs.openbsd.org>    2001-11-28 19:28:16 +0000
committer  Artur Grabowski <art@cvs.openbsd.org>    2001-11-28 19:28:16 +0000
commit     cba71608cdd2f0b18c4f6ff86897f18e4c30118a (patch)
tree       b92e865b4ac88831d7b112a580bfcf3af96d5ed8 /sys
parent     e6a242c5ac01267b1b262ca9daa51b8ef92c97e2 (diff)
Sync in more uvm from NetBSD. Mostly just cosmetic stuff.
Also contains support for page coloring.
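
The page coloring support mentioned above shows up in the diff as new per-color bookkeeping: page_free_nextcolor in struct uvm, plus ncolors, colormask and the colorhit/colormiss counters in struct uvmexp. The following is only a rough sketch of the idea (the names pg_alloc_colored, free_bucket and NCOLORS are invented for illustration and do not appear in this commit): a coloring allocator keeps free pages in per-color buckets and prefers the bucket whose color matches the address being mapped, so virtual and physical addresses fall into the same cache bins.

/*
 * Illustrative sketch only; not code from this commit.
 */
#include <stddef.h>

#define NCOLORS         8               /* assumed power of two, cf. uvmexp.ncolors */
#define COLORMASK       (NCOLORS - 1)   /* cf. uvmexp.colormask */
#define PAGE_SHIFT      12              /* assumed 4K pages */

struct pg {
        struct pg *next;
};

static struct pg *free_bucket[NCOLORS]; /* per-color free lists */
static int nextcolor;                   /* cf. uvm.page_free_nextcolor */

/* color of an address: the low bits of its page number */
#define addr_color(addr)        (((addr) >> PAGE_SHIFT) & COLORMASK)

static struct pg *
pg_alloc_colored(unsigned long va)
{
        int want = addr_color(va);
        int c, i;
        struct pg *pg;

        /* try the preferred color first, then the rest in order */
        for (i = 0; i < NCOLORS; i++) {
                c = (want + i) & COLORMASK;
                pg = free_bucket[c];
                if (pg != NULL) {
                        free_bucket[c] = pg->next;
                        nextcolor = (c + 1) & COLORMASK;
                        return pg;      /* i == 0 is the "colorhit" case */
                }
        }
        return NULL;                    /* no free pages at all */
}

Taking a page from the preferred bucket is what the new colorhit counter measures; having to fall back to another bucket is a colormiss.
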
Diffstat (limited to 'sys')
-rw-r--r--  sys/kern/kern_malloc.c  |    6
-rw-r--r--  sys/sys/malloc.h        |    7
-rw-r--r--  sys/uvm/uvm.h           |   46
-rw-r--r--  sys/uvm/uvm_amap.c      |   46
-rw-r--r--  sys/uvm/uvm_amap.h      |   44
-rw-r--r--  sys/uvm/uvm_amap_i.h    |    9
-rw-r--r--  sys/uvm/uvm_anon.c      |   18
-rw-r--r--  sys/uvm/uvm_anon.h      |   10
-rw-r--r--  sys/uvm/uvm_aobj.c      |   99
-rw-r--r--  sys/uvm/uvm_bio.c       |   27
-rw-r--r--  sys/uvm/uvm_ddb.h       |    6
-rw-r--r--  sys/uvm/uvm_device.c    |   30
-rw-r--r--  sys/uvm/uvm_extern.h    |  109
-rw-r--r--  sys/uvm/uvm_fault.c     |  182
-rw-r--r--  sys/uvm/uvm_fault.h     |   16
-rw-r--r--  sys/uvm/uvm_fault_i.h   |   53
-rw-r--r--  sys/uvm/uvm_glue.c      |   36
-rw-r--r--  sys/uvm/uvm_io.c        |   10
-rw-r--r--  sys/uvm/uvm_km.c        |   96
-rw-r--r--  sys/uvm/uvm_loan.c      |   48
-rw-r--r--  sys/uvm/uvm_map.c       |  458
-rw-r--r--  sys/uvm/uvm_map.h       |  157
-rw-r--r--  sys/uvm/uvm_map_i.h     |   59
-rw-r--r--  sys/uvm/uvm_meter.c     |   23
-rw-r--r--  sys/uvm/uvm_mmap.c      |   42
-rw-r--r--  sys/uvm/uvm_object.h    |    6
-rw-r--r--  sys/uvm/uvm_page.c      |  398
-rw-r--r--  sys/uvm/uvm_page.h      |   63
-rw-r--r--  sys/uvm/uvm_page_i.h    |   42
-rw-r--r--  sys/uvm/uvm_pager.c     |   63
-rw-r--r--  sys/uvm/uvm_pager.h     |   25
-rw-r--r--  sys/uvm/uvm_pager_i.h   |   10
-rw-r--r--  sys/uvm/uvm_param.h     |   22
-rw-r--r--  sys/uvm/uvm_pdaemon.c   |   61
-rw-r--r--  sys/uvm/uvm_pdaemon.h   |   20
-rw-r--r--  sys/uvm/uvm_pglist.c    |   43
-rw-r--r--  sys/uvm/uvm_pglist.h    |   12
-rw-r--r--  sys/uvm/uvm_pmap.h      |   27
-rw-r--r--  sys/uvm/uvm_stat.c      |    8
-rw-r--r--  sys/uvm/uvm_stat.h      |   12
-rw-r--r--  sys/uvm/uvm_swap.c      |  105
-rw-r--r--  sys/uvm/uvm_unix.c      |   30
-rw-r--r--  sys/uvm/uvm_user.c      |    6
-rw-r--r--  sys/uvm/uvm_vnode.c     |   61
44 files changed, 1395 insertions, 1256 deletions
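
Much of the churn in the diff below is the cosmetic part of the sync: NetBSD dropped the opaque typedefs vm_map_t, vm_map_entry_t and vm_page_t in favor of spelled-out struct pointers, and simple_lock_data_t became struct simplelock. A minimal before/after sketch of that style change (example_old and example_new are hypothetical prototypes, not lines from the diff):

/* old style: an opaque typedef hides the pointer */
struct vm_map;                          /* forward declaration is enough */
typedef struct vm_map *vm_map_t;
int     example_old(vm_map_t map);

/* new style after this sync: name the struct pointer explicitly */
int     example_new(struct vm_map *map);

The same pattern applies to the lock type: every simple_lock_data_t declaration in the touched headers becomes struct simplelock.
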
diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
index 1b84c19fb82..c18f8067f51 100644
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_malloc.c,v 1.41 2001/11/28 16:13:29 art Exp $ */
+/* $OpenBSD: kern_malloc.c,v 1.42 2001/11/28 19:28:14 art Exp $ */
/* $NetBSD: kern_malloc.c,v 1.15.4.2 1996/06/13 17:10:56 cgd Exp $ */
/*
@@ -46,7 +46,7 @@
#include <uvm/uvm_extern.h>
-static struct vm_map_intrsafe kmem_map_store;
+static struct vm_map kmem_map_store;
struct vm_map *kmem_map = NULL;
int nkmempages;
@@ -429,7 +429,7 @@ kmeminit()
(vsize_t)(npg * sizeof(struct kmemusage)));
kmem_map = uvm_km_suballoc(kernel_map, (vaddr_t *)&kmembase,
(vaddr_t *)&kmemlimit, (vsize_t)(npg * PAGE_SIZE),
- VM_MAP_INTRSAFE, FALSE, &kmem_map_store.vmi_map);
+ VM_MAP_INTRSAFE, FALSE, &kmem_map_store);
#ifdef KMEMSTATS
for (indx = 0; indx < MINBUCKET + 16; indx++) {
if (1 << indx >= PAGE_SIZE)
diff --git a/sys/sys/malloc.h b/sys/sys/malloc.h
index fcc11001f45..39ece01e3ff 100644
--- a/sys/sys/malloc.h
+++ b/sys/sys/malloc.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: malloc.h,v 1.48 2001/08/18 18:15:52 art Exp $ */
+/* $OpenBSD: malloc.h,v 1.49 2001/11/28 19:28:14 art Exp $ */
/* $NetBSD: malloc.h,v 1.39 1998/07/12 19:52:01 augustss Exp $ */
/*
@@ -93,7 +93,8 @@
#define M_UFSMNT 28 /* UFS mount structure */
#define M_SHM 29 /* SVID compatible shared memory segments */
#define M_VMMAP 30 /* VM map structures */
-/* 31-33 - free */
+#define M_VMPAGE 31 /* VM page structures */
+/* 32-33 - free */
#define M_VMPMAP 34 /* VM pmap */
#define M_VMPVENT 35 /* VM phys-virt mapping entry */
/* 36-37 - free */
@@ -211,7 +212,7 @@
"UFS mount", /* 28 M_UFSMNT */ \
"shm", /* 29 M_SHM */ \
"VM map", /* 30 M_VMMAP */ \
- NULL, /* 31 */ \
+ "VM page", /* 31 M_VMPAGE */ \
NULL, /* 32 */ \
NULL, /* 33 */ \
"VM pmap", /* 34 M_VMPMAP */ \
diff --git a/sys/uvm/uvm.h b/sys/uvm/uvm.h
index f3ff4214dd5..92d420cd160 100644
--- a/sys/uvm/uvm.h
+++ b/sys/uvm/uvm.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm.h,v 1.14 2001/11/10 18:42:31 art Exp $ */
-/* $NetBSD: uvm.h,v 1.24 2000/11/27 08:40:02 chs Exp $ */
+/* $OpenBSD: uvm.h,v 1.15 2001/11/28 19:28:14 art Exp $ */
+/* $NetBSD: uvm.h,v 1.30 2001/06/27 21:18:34 thorpej Exp $ */
/*
*
@@ -38,6 +38,12 @@
#ifndef _UVM_UVM_H_
#define _UVM_UVM_H_
+#if defined(_KERNEL_OPT)
+#include "opt_lockdebug.h"
+#include "opt_multiprocessor.h"
+#include "opt_uvmhist.h"
+#endif
+
#include <uvm/uvm_extern.h>
#include <uvm/uvm_stat.h>
@@ -77,11 +83,11 @@ struct uvm {
/* vm_page queues */
struct pgfreelist page_free[VM_NFREELIST]; /* unallocated pages */
+ int page_free_nextcolor; /* next color to allocate from */
struct pglist page_active; /* allocated pages, in use */
- struct pglist page_inactive_swp;/* pages inactive (reclaim or free) */
- struct pglist page_inactive_obj;/* pages inactive (reclaim or free) */
- simple_lock_data_t pageqlock; /* lock for active/inactive page q */
- simple_lock_data_t fpageqlock; /* lock for free page q */
+ struct pglist page_inactive; /* pages between the clock hands */
+ struct simplelock pageqlock; /* lock for active/inactive page q */
+ struct simplelock fpageqlock; /* lock for free page q */
boolean_t page_init_done; /* TRUE if uvm_page_init() finished */
boolean_t page_idle_zero; /* TRUE if we should try to zero
pages in the idle loop */
@@ -89,26 +95,26 @@ struct uvm {
/* page daemon trigger */
int pagedaemon; /* daemon sleeps on this */
struct proc *pagedaemon_proc; /* daemon's pid */
- simple_lock_data_t pagedaemon_lock;
+ struct simplelock pagedaemon_lock;
/* aiodone daemon trigger */
int aiodoned; /* daemon sleeps on this */
struct proc *aiodoned_proc; /* daemon's pid */
- simple_lock_data_t aiodoned_lock;
+ struct simplelock aiodoned_lock;
/* page hash */
struct pglist *page_hash; /* page hash table (vp/off->page) */
int page_nhash; /* number of buckets */
int page_hashmask; /* hash mask */
- simple_lock_data_t hashlock; /* lock on page_hash array */
+ struct simplelock hashlock; /* lock on page_hash array */
/* anon stuff */
struct vm_anon *afree; /* anon free list */
- simple_lock_data_t afreelock; /* lock on anon free list */
+ struct simplelock afreelock; /* lock on anon free list */
/* static kernel map entry pool */
- vm_map_entry_t kentry_free; /* free page pool */
- simple_lock_data_t kentry_lock;
+ struct vm_map_entry *kentry_free; /* free page pool */
+ struct simplelock kentry_lock;
/* aio_done is locked by uvm.pagedaemon_lock and splbio! */
TAILQ_HEAD(, buf) aio_done; /* done async i/o reqs */
@@ -118,7 +124,7 @@ struct uvm {
vaddr_t pager_eva; /* end of pager VA area */
/* swap-related items */
- simple_lock_data_t swap_data_lock;
+ struct simplelock swap_data_lock;
/* kernel object: to support anonymous pageable kernel memory */
struct uvm_object *kernel_object;
@@ -165,6 +171,20 @@ do { \
} while (0)
/*
+ * UVM_KICK_PDAEMON: perform checks to determine if we need to
+ * give the pagedaemon a nudge, and do so if necessary.
+ */
+
+#define UVM_KICK_PDAEMON() \
+do { \
+ if (uvmexp.free + uvmexp.paging < uvmexp.freemin || \
+ (uvmexp.free + uvmexp.paging < uvmexp.freetarg && \
+ uvmexp.inactive < uvmexp.inactarg)) { \
+ wakeup(&uvm.pagedaemon); \
+ } \
+} while (/*CONSTCOND*/0)
+
+/*
* UVM_PAGE_OWN: track page ownership (only if UVM_PAGE_TRKOWN)
*/
diff --git a/sys/uvm/uvm_amap.c b/sys/uvm/uvm_amap.c
index 42350cf9e0e..9717ac38a10 100644
--- a/sys/uvm/uvm_amap.c
+++ b/sys/uvm/uvm_amap.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_amap.c,v 1.15 2001/11/11 01:16:56 art Exp $ */
-/* $NetBSD: uvm_amap.c,v 1.30 2001/02/18 21:19:09 chs Exp $ */
+/* $OpenBSD: uvm_amap.c,v 1.16 2001/11/28 19:28:14 art Exp $ */
+/* $NetBSD: uvm_amap.c,v 1.32 2001/06/02 18:09:25 chs Exp $ */
/*
*
@@ -101,7 +101,7 @@ static struct vm_amap *amap_alloc1 __P((int, int, int));
* chunk. note that the "plus one" part is needed because a reference
* count of zero is neither positive or negative (need a way to tell
* if we've got one zero or a bunch of them).
- *
+ *
* here are some in-line functions to help us.
*/
@@ -157,7 +157,7 @@ amap_init()
* Initialize the vm_amap pool.
*/
pool_init(&uvm_amap_pool, sizeof(struct vm_amap), 0, 0, 0,
- "amappl", 0, pool_page_alloc_nointr, pool_page_free_nointr,
+ "amappl", 0, pool_page_alloc_nointr, pool_page_free_nointr,
M_UVMAMAP);
}
@@ -283,7 +283,7 @@ amap_free(amap)
*/
void
amap_extend(entry, addsize)
- vm_map_entry_t entry;
+ struct vm_map_entry *entry;
vsize_t addsize;
{
struct vm_amap *amap = entry->aref.ar_amap;
@@ -324,7 +324,7 @@ amap_extend(entry, addsize)
}
#endif
amap_unlock(amap);
- UVMHIST_LOG(maphist,"<- done (case 1), amap = 0x%x, sltneed=%d",
+ UVMHIST_LOG(maphist,"<- done (case 1), amap = 0x%x, sltneed=%d",
amap, slotneed, 0, 0);
return; /* done! */
}
@@ -337,10 +337,10 @@ amap_extend(entry, addsize)
#ifdef UVM_AMAP_PPREF
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
if ((slotoff + slotmapped) < amap->am_nslot)
- amap_pp_adjref(amap, slotoff + slotmapped,
+ amap_pp_adjref(amap, slotoff + slotmapped,
(amap->am_nslot - (slotoff + slotmapped)),
1);
- pp_setreflen(amap->am_ppref, amap->am_nslot, 1,
+ pp_setreflen(amap->am_ppref, amap->am_nslot, 1,
slotneed - amap->am_nslot);
}
#endif
@@ -350,7 +350,7 @@ amap_extend(entry, addsize)
* no need to zero am_anon since that was done at
* alloc time and we never shrink an allocation.
*/
- UVMHIST_LOG(maphist,"<- done (case 2), amap = 0x%x, slotneed=%d",
+ UVMHIST_LOG(maphist,"<- done (case 2), amap = 0x%x, slotneed=%d",
amap, slotneed, 0, 0);
return;
}
@@ -359,7 +359,7 @@ amap_extend(entry, addsize)
* case 3: we need to malloc a new amap and copy all the amap
* data over from old amap to the new one.
*
- * XXXCDC: could we take advantage of a kernel realloc()?
+ * XXXCDC: could we take advantage of a kernel realloc()?
*/
amap_unlock(amap); /* unlock in case we sleep in malloc */
@@ -412,7 +412,7 @@ amap_extend(entry, addsize)
memset(newppref + amap->am_nslot, 0, sizeof(int) * slotadded);
amap->am_ppref = newppref;
if ((slotoff + slotmapped) < amap->am_nslot)
- amap_pp_adjref(amap, slotoff + slotmapped,
+ amap_pp_adjref(amap, slotoff + slotmapped,
(amap->am_nslot - (slotoff + slotmapped)), 1);
pp_setreflen(newppref, amap->am_nslot, 1, slotadded);
}
@@ -433,7 +433,7 @@ amap_extend(entry, addsize)
if (oldppref && oldppref != PPREF_NONE)
free(oldppref, M_UVMAMAP);
#endif
- UVMHIST_LOG(maphist,"<- done (case 3), amap = 0x%x, slotneed=%d",
+ UVMHIST_LOG(maphist,"<- done (case 3), amap = 0x%x, slotneed=%d",
amap, slotneed, 0, 0);
}
@@ -452,7 +452,7 @@ amap_extend(entry, addsize)
*/
void
amap_share_protect(entry, prot)
- vm_map_entry_t entry;
+ struct vm_map_entry *entry;
vm_prot_t prot;
{
struct vm_amap *amap = entry->aref.ar_amap;
@@ -489,7 +489,7 @@ amap_share_protect(entry, prot)
/*
* amap_wipeout: wipeout all anon's in an amap; then free the amap!
*
- * => called from amap_unref when the final reference to an amap is
+ * => called from amap_unref when the final reference to an amap is
* discarded (i.e. when reference count == 1)
* => the amap should be locked (by the caller)
*/
@@ -511,12 +511,12 @@ amap_wipeout(amap)
slot = amap->am_slots[lcv];
anon = amap->am_anon[slot];
- if (anon == NULL || anon->an_ref == 0)
+ if (anon == NULL || anon->an_ref == 0)
panic("amap_wipeout: corrupt amap");
simple_lock(&anon->an_lock); /* lock anon */
- UVMHIST_LOG(maphist," processing anon 0x%x, ref=%d", anon,
+ UVMHIST_LOG(maphist," processing anon 0x%x, ref=%d", anon,
anon->an_ref, 0, 0);
refs = --anon->an_ref;
@@ -542,7 +542,7 @@ amap_wipeout(amap)
/*
* amap_copy: ensure that a map entry's "needs_copy" flag is false
* by copying the amap if necessary.
- *
+ *
* => an entry with a null amap pointer will get a new (blank) one.
* => the map that the map entry belongs to must be locked by caller.
* => the amap currently attached to "entry" (if any) must be unlocked.
@@ -555,8 +555,8 @@ amap_wipeout(amap)
void
amap_copy(map, entry, waitf, canchunk, startva, endva)
- vm_map_t map;
- vm_map_entry_t entry;
+ struct vm_map *map;
+ struct vm_map_entry *entry;
int waitf;
boolean_t canchunk;
vaddr_t startva, endva;
@@ -595,7 +595,7 @@ amap_copy(map, entry, waitf, canchunk, startva, endva)
UVM_MAP_CLIP_END(map, entry, endva);
}
- UVMHIST_LOG(maphist, "<- done [creating new amap 0x%x->0x%x]",
+ UVMHIST_LOG(maphist, "<- done [creating new amap 0x%x->0x%x]",
entry->start, entry->end, 0, 0);
entry->aref.ar_pageoff = 0;
entry->aref.ar_amap = amap_alloc(entry->end - entry->start, 0,
@@ -626,7 +626,7 @@ amap_copy(map, entry, waitf, canchunk, startva, endva)
* looks like we need to copy the map.
*/
- UVMHIST_LOG(maphist," amap=%p, ref=%d, must copy it",
+ UVMHIST_LOG(maphist," amap=%p, ref=%d, must copy it",
entry->aref.ar_amap, entry->aref.ar_amap->am_ref, 0, 0);
AMAP_B2SLOT(slots, entry->end - entry->start);
amap = amap_alloc1(slots, 0, waitf);
@@ -683,7 +683,7 @@ amap_copy(map, entry, waitf, canchunk, startva, endva)
srcamap->am_flags &= ~AMAP_SHARED; /* clear shared flag */
#ifdef UVM_AMAP_PPREF
if (srcamap->am_ppref && srcamap->am_ppref != PPREF_NONE) {
- amap_pp_adjref(srcamap, entry->aref.ar_pageoff,
+ amap_pp_adjref(srcamap, entry->aref.ar_pageoff,
(entry->end - entry->start) >> PAGE_SHIFT, -1);
}
#endif
@@ -813,7 +813,7 @@ ReStart:
uvm_wait("cownowpage");
goto ReStart;
}
-
+
/*
* got it... now we can copy the data and replace anon
* with our new one...
diff --git a/sys/uvm/uvm_amap.h b/sys/uvm/uvm_amap.h
index 4bdf119d927..e6b071d5b63 100644
--- a/sys/uvm/uvm_amap.h
+++ b/sys/uvm/uvm_amap.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_amap.h,v 1.8 2001/11/11 01:16:56 art Exp $ */
-/* $NetBSD: uvm_amap.h,v 1.14 2001/02/18 21:19:08 chs Exp $ */
+/* $OpenBSD: uvm_amap.h,v 1.9 2001/11/28 19:28:14 art Exp $ */
+/* $NetBSD: uvm_amap.h,v 1.17 2001/06/02 18:09:25 chs Exp $ */
/*
*
@@ -60,7 +60,7 @@
/*
* forward definition of vm_amap structure. only amap
* implementation-specific code should directly access the fields of
- * this structure.
+ * this structure.
*/
struct vm_amap;
@@ -72,13 +72,13 @@ struct vm_amap;
#ifdef UVM_AMAP_INLINE /* defined/undef'd in uvm_amap.c */
#define AMAP_INLINE static __inline /* inline enabled */
-#else
+#else
#define AMAP_INLINE /* inline disabled */
#endif /* UVM_AMAP_INLINE */
/*
- * prototypes for the amap interface
+ * prototypes for the amap interface
*/
AMAP_INLINE
@@ -88,16 +88,16 @@ void amap_add /* add an anon to an amap */
struct vm_amap *amap_alloc /* allocate a new amap */
__P((vaddr_t, vaddr_t, int));
void amap_copy /* clear amap needs-copy flag */
- __P((vm_map_t, vm_map_entry_t, int,
+ __P((struct vm_map *, struct vm_map_entry *, int,
boolean_t, vaddr_t, vaddr_t));
void amap_cow_now /* resolve all COW faults now */
- __P((vm_map_t, vm_map_entry_t));
+ __P((struct vm_map *, struct vm_map_entry *));
void amap_extend /* make amap larger */
- __P((vm_map_entry_t, vsize_t));
+ __P((struct vm_map_entry *, vsize_t));
int amap_flags /* get amap's flags */
__P((struct vm_amap *));
void amap_free /* free amap */
- __P((struct vm_amap *));
+ __P((struct vm_amap *));
void amap_init /* init amap module (at boot time) */
__P((void));
void amap_lock /* lock amap */
@@ -107,7 +107,7 @@ struct vm_anon *amap_lookup /* lookup an anon @ offset in amap */
__P((struct vm_aref *, vaddr_t));
AMAP_INLINE
void amap_lookups /* lookup multiple anons */
- __P((struct vm_aref *, vaddr_t,
+ __P((struct vm_aref *, vaddr_t,
struct vm_anon **, int));
AMAP_INLINE
void amap_ref /* add a reference to an amap */
@@ -115,9 +115,9 @@ void amap_ref /* add a reference to an amap */
int amap_refs /* get number of references of amap */
__P((struct vm_amap *));
void amap_share_protect /* protect pages in a shared amap */
- __P((vm_map_entry_t, vm_prot_t));
+ __P((struct vm_map_entry *, vm_prot_t));
void amap_splitref /* split reference to amap into two */
- __P((struct vm_aref *, struct vm_aref *,
+ __P((struct vm_aref *, struct vm_aref *,
vaddr_t));
AMAP_INLINE
void amap_unadd /* remove an anon from an amap */
@@ -159,7 +159,7 @@ void amap_wipeout /* remove all anons from amap */
*/
struct vm_amap {
- simple_lock_data_t am_l; /* simple lock [locks all vm_amap fields] */
+ struct simplelock am_l; /* simple lock [locks all vm_amap fields] */
int am_ref; /* reference count */
int am_flags; /* flags */
int am_maxslot; /* max # of slots allocated */
@@ -177,7 +177,7 @@ struct vm_amap {
* note that am_slots, am_bckptr, and am_anon are arrays. this allows
* fast lookup of pages based on their virual address at the expense of
* some extra memory. in the future we should be smarter about memory
- * usage and fall back to a non-array based implementation on systems
+ * usage and fall back to a non-array based implementation on systems
* that are short of memory (XXXCDC).
*
* the entries in the array are called slots... for example an amap that
@@ -185,13 +185,13 @@ struct vm_amap {
* is an example of the array usage for a four slot amap. note that only
* slots one and three have anons assigned to them. "D/C" means that we
* "don't care" about the value.
- *
+ *
* 0 1 2 3
* am_anon: NULL, anon0, NULL, anon1 (actual pointers to anons)
* am_bckptr: D/C, 1, D/C, 0 (points to am_slots entry)
*
* am_slots: 3, 1, D/C, D/C (says slots 3 and 1 are in use)
- *
+ *
* note that am_bckptr is D/C if the slot in am_anon is set to NULL.
* to find the entry in am_slots for an anon, look at am_bckptr[slot],
* thus the entry for slot 3 in am_slots[] is at am_slots[am_bckptr[3]].
@@ -203,7 +203,7 @@ struct vm_amap {
/*
* defines for handling of large sparce amaps:
- *
+ *
* one of the problems of array-based amaps is that if you allocate a
* large sparcely-used area of virtual memory you end up allocating
* large arrays that, for the most part, don't get used. this is a
@@ -216,15 +216,15 @@ struct vm_amap {
* it makes sense for it to live in an amap, but if we allocated an
* amap for the entire stack range we could end up wasting a large
* amount of malloc'd KVM.
- *
- * for example, on the i386 at boot time we allocate two amaps for the stack
- * of /sbin/init:
+ *
+ * for example, on the i386 at boot time we allocate two amaps for the stack
+ * of /sbin/init:
* 1. a 7680 slot amap at protection 0 (reserve space for stack)
* 2. a 512 slot amap at protection 7 (top of stack)
*
- * most of the array allocated for the amaps for this is never used.
+ * most of the array allocated for the amaps for this is never used.
* the amap interface provides a way for us to avoid this problem by
- * allowing amap_copy() to break larger amaps up into smaller sized
+ * allowing amap_copy() to break larger amaps up into smaller sized
* chunks (controlled by the "canchunk" option). we use this feature
* to reduce our memory usage with the BSD stack management. if we
* are asked to create an amap with more than UVM_AMAP_LARGE slots in it,
diff --git a/sys/uvm/uvm_amap_i.h b/sys/uvm/uvm_amap_i.h
index adcdc76072b..c88f7916bae 100644
--- a/sys/uvm/uvm_amap_i.h
+++ b/sys/uvm/uvm_amap_i.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_amap_i.h,v 1.10 2001/11/07 02:55:50 art Exp $ */
-/* $NetBSD: uvm_amap_i.h,v 1.15 2000/11/25 06:27:59 chs Exp $ */
+/* $OpenBSD: uvm_amap_i.h,v 1.11 2001/11/28 19:28:14 art Exp $ */
+/* $NetBSD: uvm_amap_i.h,v 1.17 2001/05/25 04:06:11 chs Exp $ */
/*
*
@@ -109,10 +109,9 @@ amap_lookups(aref, offset, anons, npages)
/*
* amap_add: add (or replace) a page to an amap
*
- * => caller must lock amap.
+ * => caller must lock amap.
* => if (replace) caller must lock anon because we might have to call
* pmap_page_protect on the anon's page.
- * => returns an "offset" which is meaningful to amap_unadd().
*/
AMAP_INLINE void
amap_add(aref, offset, anon, replace)
@@ -135,7 +134,7 @@ amap_add(aref, offset, anon, replace)
if (amap->am_anon[slot] == NULL)
panic("amap_add: replacing null anon");
- if (amap->am_anon[slot]->u.an_page != NULL &&
+ if (amap->am_anon[slot]->u.an_page != NULL &&
(amap->am_flags & AMAP_SHARED) != 0) {
pmap_page_protect(amap->am_anon[slot]->u.an_page,
VM_PROT_NONE);
diff --git a/sys/uvm/uvm_anon.c b/sys/uvm/uvm_anon.c
index 4089ca48991..b05abc32642 100644
--- a/sys/uvm/uvm_anon.c
+++ b/sys/uvm/uvm_anon.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_anon.c,v 1.17 2001/11/28 13:47:39 art Exp $ */
-/* $NetBSD: uvm_anon.c,v 1.16 2001/03/10 22:46:47 chs Exp $ */
+/* $OpenBSD: uvm_anon.c,v 1.18 2001/11/28 19:28:14 art Exp $ */
+/* $NetBSD: uvm_anon.c,v 1.17 2001/05/25 04:06:12 chs Exp $ */
/*
*
@@ -116,7 +116,7 @@ uvm_anon_add(count)
anonblock->anons = anon;
LIST_INSERT_HEAD(&anonblock_list, anonblock, list);
memset(anon, 0, sizeof(*anon) * needed);
-
+
simple_lock(&uvm.afreelock);
uvmexp.nanon += needed;
uvmexp.nfreeanon += needed;
@@ -214,7 +214,7 @@ uvm_anfree(anon)
if (pg) {
/*
- * if the page is owned by a uobject (now locked), then we must
+ * if the page is owned by a uobject (now locked), then we must
* kill the loan on the page rather than free it.
*/
@@ -240,10 +240,10 @@ uvm_anfree(anon)
/* tell them to dump it when done */
pg->flags |= PG_RELEASED;
UVMHIST_LOG(maphist,
- " anon 0x%x, page 0x%x: BUSY (released!)",
+ " anon 0x%x, page 0x%x: BUSY (released!)",
anon, pg, 0, 0);
return;
- }
+ }
pmap_page_protect(pg, VM_PROT_NONE);
uvm_lock_pageq(); /* lock out pagedaemon */
uvm_pagefree(pg); /* bye bye */
@@ -272,7 +272,7 @@ uvm_anfree(anon)
/*
* uvm_anon_dropswap: release any swap resources from this anon.
- *
+ *
* => anon must be locked or have a reference count of 0.
*/
void
@@ -294,7 +294,7 @@ uvm_anon_dropswap(anon)
simple_lock(&uvm.swap_data_lock);
uvmexp.swpgonly--;
simple_unlock(&uvm.swap_data_lock);
- }
+ }
}
/*
@@ -398,7 +398,7 @@ uvm_anon_lockloanpg(anon)
/*
* page in every anon that is paged out to a range of swslots.
- *
+ *
* swap_syscall_lock should be held (protects anonblock_list).
*/
diff --git a/sys/uvm/uvm_anon.h b/sys/uvm/uvm_anon.h
index 77173d8014a..1dc9ff7b566 100644
--- a/sys/uvm/uvm_anon.h
+++ b/sys/uvm/uvm_anon.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_anon.h,v 1.8 2001/11/11 01:16:56 art Exp $ */
-/* $NetBSD: uvm_anon.h,v 1.13 2000/12/27 09:17:04 chs Exp $ */
+/* $OpenBSD: uvm_anon.h,v 1.9 2001/11/28 19:28:14 art Exp $ */
+/* $NetBSD: uvm_anon.h,v 1.15 2001/05/26 16:32:46 chs Exp $ */
/*
*
@@ -50,12 +50,12 @@
struct vm_anon {
int an_ref; /* reference count [an_lock] */
- simple_lock_data_t an_lock; /* lock for an_ref */
+ struct simplelock an_lock; /* lock for an_ref */
union {
struct vm_anon *an_nxt; /* if on free list [afreelock] */
struct vm_page *an_page;/* if in RAM [an_lock] */
} u;
- int an_swslot; /* drum swap slot # (if != 0)
+ int an_swslot; /* drum swap slot # (if != 0)
[an_lock. also, it is ok to read
an_swslot if we hold an_page PG_BUSY] */
};
@@ -79,7 +79,7 @@ struct vm_anon {
*/
/*
- * processes reference anonymous virtual memory maps with an anonymous
+ * processes reference anonymous virtual memory maps with an anonymous
* reference structure:
*/
diff --git a/sys/uvm/uvm_aobj.c b/sys/uvm/uvm_aobj.c
index 0d7d7c3aa3f..924769d66bf 100644
--- a/sys/uvm/uvm_aobj.c
+++ b/sys/uvm/uvm_aobj.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_aobj.c,v 1.22 2001/11/28 13:47:39 art Exp $ */
-/* $NetBSD: uvm_aobj.c,v 1.40 2001/03/10 22:46:47 chs Exp $ */
+/* $OpenBSD: uvm_aobj.c,v 1.23 2001/11/28 19:28:14 art Exp $ */
+/* $NetBSD: uvm_aobj.c,v 1.45 2001/06/23 20:52:03 chs Exp $ */
/*
* Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
@@ -174,7 +174,7 @@ static boolean_t uao_flush __P((struct uvm_object *,
voff_t, voff_t, int));
static void uao_free __P((struct uvm_aobj *));
static int uao_get __P((struct uvm_object *, voff_t,
- vm_page_t *, int *, int,
+ struct vm_page **, int *, int,
vm_prot_t, int, int));
static boolean_t uao_releasepg __P((struct vm_page *,
struct vm_page **));
@@ -183,7 +183,7 @@ static boolean_t uao_pagein_page __P((struct uvm_aobj *, int));
/*
* aobj_pager
- *
+ *
* note that some functions (e.g. put) are handled elsewhere
*/
@@ -205,7 +205,7 @@ struct uvm_pagerops aobj_pager = {
*/
static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
-static simple_lock_data_t uao_list_lock;
+static struct simplelock uao_list_lock;
/*
@@ -233,38 +233,41 @@ uao_find_swhash_elt(aobj, pageidx, create)
struct uao_swhash_elt *elt;
voff_t page_tag;
- swhash = UAO_SWHASH_HASH(aobj, pageidx); /* first hash to get bucket */
- page_tag = UAO_SWHASH_ELT_TAG(pageidx); /* tag to search for */
+ swhash = UAO_SWHASH_HASH(aobj, pageidx);
+ page_tag = UAO_SWHASH_ELT_TAG(pageidx);
/*
* now search the bucket for the requested tag
*/
+
LIST_FOREACH(elt, swhash, list) {
- if (elt->tag == page_tag)
- return(elt);
+ if (elt->tag == page_tag) {
+ return elt;
+ }
}
-
- /* fail now if we are not allowed to create a new entry in the bucket */
- if (!create)
+ if (!create) {
return NULL;
-
+ }
/*
* allocate a new entry for the bucket and init/insert it in
*/
- elt = pool_get(&uao_swhash_elt_pool, PR_WAITOK);
+
+ elt = pool_get(&uao_swhash_elt_pool, PR_NOWAIT);
+ if (elt == NULL) {
+ return NULL;
+ }
LIST_INSERT_HEAD(swhash, elt, list);
elt->tag = page_tag;
elt->count = 0;
memset(elt->slots, 0, sizeof(elt->slots));
-
- return(elt);
+ return elt;
}
/*
* uao_find_swslot: find the swap slot number for an aobj/pageidx
*
- * => object must be locked by caller
+ * => object must be locked by caller
*/
__inline static int
uao_find_swslot(aobj, pageidx)
@@ -293,7 +296,7 @@ uao_find_swslot(aobj, pageidx)
return(0);
}
- /*
+ /*
* otherwise, look in the array
*/
return(aobj->u_swslots[pageidx]);
@@ -304,6 +307,8 @@ uao_find_swslot(aobj, pageidx)
*
* => setting a slot to zero frees the slot
* => object must be locked by caller
+ * => we return the old slot number, or -1 if we failed to allocate
+ * memory to record the new slot number
*/
int
uao_set_swslot(uobj, pageidx, slot)
@@ -311,6 +316,7 @@ uao_set_swslot(uobj, pageidx, slot)
int pageidx, slot;
{
struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
+ struct uao_swhash_elt *elt;
int oldslot;
UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
@@ -342,11 +348,9 @@ uao_set_swslot(uobj, pageidx, slot)
* we are freeing.
*/
- struct uao_swhash_elt *elt =
- uao_find_swhash_elt(aobj, pageidx, slot ? TRUE : FALSE);
+ elt = uao_find_swhash_elt(aobj, pageidx, slot ? TRUE : FALSE);
if (elt == NULL) {
- KASSERT(slot == 0);
- return (0);
+ return slot ? -1 : 0;
}
oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
@@ -361,8 +365,8 @@ uao_set_swslot(uobj, pageidx, slot)
if (slot) {
if (oldslot == 0)
elt->count++;
- } else { /* freeing slot ... */
- if (oldslot) /* to be safe */
+ } else {
+ if (oldslot)
elt->count--;
if (elt->count == 0) {
@@ -370,7 +374,7 @@ uao_set_swslot(uobj, pageidx, slot)
pool_put(&uao_swhash_elt_pool, elt);
}
}
- } else {
+ } else {
/* we are using an array */
oldslot = aobj->u_swslots[pageidx];
aobj->u_swslots[pageidx] = slot;
@@ -626,7 +630,7 @@ uao_reference_locked(uobj)
return;
uobj->uo_refs++; /* bump! */
- UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
+ UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
uobj, uobj->uo_refs,0,0);
}
@@ -659,7 +663,7 @@ uao_detach_locked(uobj)
struct uvm_object *uobj;
{
struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
- struct vm_page *pg;
+ struct vm_page *pg, *nextpg;
boolean_t busybody;
UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);
@@ -691,9 +695,8 @@ uao_detach_locked(uobj)
* mark for release any that are.
*/
busybody = FALSE;
- for (pg = TAILQ_FIRST(&uobj->memq);
- pg != NULL;
- pg = TAILQ_NEXT(pg, listq)) {
+ for (pg = TAILQ_FIRST(&uobj->memq); pg != NULL; pg = nextpg) {
+ nextpg = TAILQ_NEXT(pg, listq);
if (pg->flags & PG_BUSY) {
pg->flags |= PG_RELEASED;
busybody = TRUE;
@@ -861,7 +864,7 @@ uao_flush(uobj, start, stop, flags)
if (pp == NULL)
continue;
}
-
+
switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
/*
* XXX In these first 3 cases, we always just
@@ -955,7 +958,7 @@ uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
{
struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
voff_t current_offset;
- vm_page_t ptmp;
+ struct vm_page *ptmp;
int lcv, gotpages, maxpages, swslot, rv, pageidx;
boolean_t done;
UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);
@@ -1014,7 +1017,7 @@ uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
if (lcv == centeridx ||
(flags & PGO_ALLPAGES) != 0)
/* need to do a wait or I/O! */
- done = FALSE;
+ done = FALSE;
continue;
}
@@ -1023,7 +1026,7 @@ uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
* result array
*/
/* caller must un-busy this page */
- ptmp->flags |= PG_BUSY;
+ ptmp->flags |= PG_BUSY;
UVM_PAGE_OWN(ptmp, "uao_get1");
pps[lcv] = ptmp;
gotpages++;
@@ -1040,7 +1043,7 @@ uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
*npagesp = gotpages;
if (done)
/* bingo! */
- return(0);
+ return(0);
else
/* EEK! Need to unlock and I/O */
return(EBUSY);
@@ -1100,7 +1103,7 @@ uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
uvm_wait("uao_getpage");
simple_lock(&uobj->vmobjlock);
/* goto top of pps while loop */
- continue;
+ continue;
}
/*
@@ -1109,7 +1112,7 @@ uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
*/
ptmp->pqflags |= PQ_AOBJ;
- /*
+ /*
* got new page ready for I/O. break pps while
* loop. pps[lcv] is still NULL.
*/
@@ -1127,8 +1130,8 @@ uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
simple_lock(&uobj->vmobjlock);
continue; /* goto top of pps while loop */
}
-
- /*
+
+ /*
* if we get here then the page has become resident and
* unbusy between steps 1 and 2. we busy it now (so we
* own it) and set pps[lcv] (so that we exit the while
@@ -1148,7 +1151,7 @@ uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
continue; /* next lcv */
/*
- * we have a "fake/busy/clean" page that we just allocated.
+ * we have a "fake/busy/clean" page that we just allocated.
* do the needed "i/o", either reading from swap or zeroing.
*/
swslot = uao_find_swslot(aobj, pageidx);
@@ -1192,7 +1195,9 @@ uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
*/
swslot = uao_set_swslot(&aobj->u_obj, pageidx,
SWSLOT_BAD);
- uvm_swap_markbad(swslot, 1);
+ if (swslot != -1) {
+ uvm_swap_markbad(swslot, 1);
+ }
ptmp->flags &= ~(PG_WANTED|PG_BUSY);
UVM_PAGE_OWN(ptmp, NULL);
@@ -1205,10 +1210,10 @@ uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
}
}
- /*
+ /*
* we got the page! clear the fake flag (indicates valid
* data now in page) and plug into our result array. note
- * that page is still busy.
+ * that page is still busy.
*
* it is the callers job to:
* => check if the page is released
@@ -1233,7 +1238,7 @@ uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
/*
* uao_releasepg: handle released page in an aobj
- *
+ *
* => "pg" is a PG_BUSY [caller owns it], PG_RELEASED page that we need
* to dispose of.
* => caller must handle PG_WANTED case
@@ -1294,7 +1299,7 @@ uao_releasepg(pg, nextpgp)
/*
* uao_dropswap: release any swap resources from this aobj page.
- *
+ *
* => aobj must be locked or have a reference count of 0.
*/
@@ -1314,7 +1319,7 @@ uao_dropswap(uobj, pageidx)
/*
* page in every page in every aobj that is paged-out to a range of swslots.
- *
+ *
* => nothing should be locked.
* => returns TRUE if pagein was aborted due to lack of memory.
*/
@@ -1415,7 +1420,7 @@ restart:
/*
* if the slot isn't in range, skip it.
*/
- if (slot < startslot ||
+ if (slot < startslot ||
slot >= endslot) {
continue;
}
diff --git a/sys/uvm/uvm_bio.c b/sys/uvm/uvm_bio.c
index 9ba758f2680..571d0932e7c 100644
--- a/sys/uvm/uvm_bio.c
+++ b/sys/uvm/uvm_bio.c
@@ -1,6 +1,6 @@
-/* $NetBSD: uvm_bio.c,v 1.11 2001/03/19 00:29:04 chs Exp $ */
+/* $NetBSD: uvm_bio.c,v 1.16 2001/07/18 16:44:39 thorpej Exp $ */
-/*
+/*
* Copyright (c) 1998 Chuck Silvers.
* All rights reserved.
*
@@ -51,9 +51,8 @@
* local functions
*/
-static int ubc_fault __P((struct uvm_faultinfo *, vaddr_t,
- vm_page_t *, int, int, vm_fault_t, vm_prot_t,
- int));
+static int ubc_fault __P((struct uvm_faultinfo *, vaddr_t,
+ struct vm_page **, int, int, vm_fault_t, vm_prot_t, int));
static struct ubc_map *ubc_find_mapping __P((struct uvm_object *, voff_t));
/*
@@ -129,6 +128,12 @@ ubc_init(void)
int i;
/*
+ * Make sure ubc_winshift is sane.
+ */
+ if (ubc_winshift < PAGE_SHIFT)
+ ubc_winshift = PAGE_SHIFT;
+
+ /*
* init ubc_object.
* alloc and init ubc_map's.
* init inactive queues.
@@ -146,7 +151,7 @@ ubc_init(void)
M_TEMP, M_NOWAIT);
if (ubc_object.umap == NULL)
panic("ubc_init: failed to allocate ubc_map");
- bzero(ubc_object.umap, ubc_nwins * sizeof(struct ubc_map));
+ memset(ubc_object.umap, 0, ubc_nwins * sizeof(struct ubc_map));
va = (vaddr_t)1L;
#ifdef PMAP_PREFER
@@ -197,7 +202,7 @@ int
ubc_fault(ufi, ign1, ign2, ign3, ign4, fault_type, access_type, flags)
struct uvm_faultinfo *ufi;
vaddr_t ign1;
- vm_page_t *ign2;
+ struct vm_page **ign2;
int ign3, ign4;
vm_fault_t fault_type;
vm_prot_t access_type;
@@ -329,6 +334,7 @@ again:
UVM_PAGE_OWN(pg, NULL);
}
simple_unlock(&uobj->vmobjlock);
+ pmap_update();
return 0;
}
@@ -414,6 +420,7 @@ again:
va = (vaddr_t)(ubc_object.kva +
((umap - ubc_object.umap) << ubc_winshift));
pmap_remove(pmap_kernel(), va, va + ubc_winsize);
+ pmap_update();
}
if (umap->refcount == 0) {
@@ -485,6 +492,7 @@ ubc_release(va, wlen)
va = (vaddr_t)(ubc_object.kva +
((umap - ubc_object.umap) << ubc_winshift));
pmap_remove(pmap_kernel(), va, va + ubc_winsize);
+ pmap_update();
LIST_REMOVE(umap, hash);
umap->uobj = NULL;
TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
@@ -517,13 +525,13 @@ ubc_flush(uobj, start, end)
UVMHIST_LOG(ubchist, "uobj %p start 0x%lx end 0x%lx",
uobj, start, end,0);
- s = splbio();
+ s = splbio();
simple_lock(&ubc_object.uobj.vmobjlock);
for (umap = ubc_object.umap;
umap < &ubc_object.umap[ubc_nwins];
umap++) {
- if (umap->uobj != uobj ||
+ if (umap->uobj != uobj ||
umap->offset < start ||
(umap->offset >= end && end != 0) ||
umap->refcount > 0) {
@@ -538,6 +546,7 @@ ubc_flush(uobj, start, end)
va = (vaddr_t)(ubc_object.kva +
((umap - ubc_object.umap) << ubc_winshift));
pmap_remove(pmap_kernel(), va, va + ubc_winsize);
+ pmap_update();
LIST_REMOVE(umap, hash);
umap->uobj = NULL;
diff --git a/sys/uvm/uvm_ddb.h b/sys/uvm/uvm_ddb.h
index e80d8cf3db6..f2de2a1c9e8 100644
--- a/sys/uvm/uvm_ddb.h
+++ b/sys/uvm/uvm_ddb.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_ddb.h,v 1.7 2001/11/07 02:55:50 art Exp $ */
-/* $NetBSD: uvm_ddb.h,v 1.5 2000/11/25 06:27:59 chs Exp $ */
+/* $OpenBSD: uvm_ddb.h,v 1.8 2001/11/28 19:28:14 art Exp $ */
+/* $NetBSD: uvm_ddb.h,v 1.7 2001/06/02 18:09:26 chs Exp $ */
/*
*
@@ -41,7 +41,7 @@
#ifdef _KERNEL
#ifdef DDB
-void uvm_map_printit __P((vm_map_t, boolean_t,
+void uvm_map_printit __P((struct vm_map *, boolean_t,
int (*) __P((const char *, ...))));
void uvm_object_printit __P((struct uvm_object *, boolean_t,
int (*) __P((const char *, ...))));
diff --git a/sys/uvm/uvm_device.c b/sys/uvm/uvm_device.c
index 2704d728c7d..b03086bc5c7 100644
--- a/sys/uvm/uvm_device.c
+++ b/sys/uvm/uvm_device.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_device.c,v 1.18 2001/11/28 13:47:39 art Exp $ */
-/* $NetBSD: uvm_device.c,v 1.32 2001/03/15 06:10:56 chs Exp $ */
+/* $OpenBSD: uvm_device.c,v 1.19 2001/11/28 19:28:14 art Exp $ */
+/* $NetBSD: uvm_device.c,v 1.36 2001/05/26 21:27:21 chs Exp $ */
/*
*
@@ -57,7 +57,7 @@
LIST_HEAD(udv_list_struct, uvm_device);
static struct udv_list_struct udv_list;
-static simple_lock_data_t udv_lock;
+static struct simplelock udv_lock;
/*
* functions
@@ -67,7 +67,7 @@ static void udv_init __P((void));
static void udv_reference __P((struct uvm_object *));
static void udv_detach __P((struct uvm_object *));
static int udv_fault __P((struct uvm_faultinfo *, vaddr_t,
- vm_page_t *, int, int, vm_fault_t,
+ struct vm_page **, int, int, vm_fault_t,
vm_prot_t, int));
static boolean_t udv_flush __P((struct uvm_object *, voff_t, voff_t,
int));
@@ -145,7 +145,7 @@ udv_attach(arg, accessprot, off, size)
/*
* Check that the specified range of the device allows the
* desired protection.
- *
+ *
* XXX assumes VM_PROT_* == PROT_*
* XXX clobbers off and size, but nothing else here needs them.
*/
@@ -163,7 +163,7 @@ udv_attach(arg, accessprot, off, size)
for (;;) {
/*
- * first, attempt to find it on the main list
+ * first, attempt to find it on the main list
*/
simple_lock(&udv_lock);
@@ -259,7 +259,7 @@ udv_attach(arg, accessprot, off, size)
}
/*NOTREACHED*/
}
-
+
/*
* udv_reference
*
@@ -278,7 +278,7 @@ udv_reference(uobj)
simple_lock(&uobj->vmobjlock);
uobj->uo_refs++;
- UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
+ UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
uobj, uobj->uo_refs,0,0);
simple_unlock(&uobj->vmobjlock);
}
@@ -306,7 +306,7 @@ again:
if (uobj->uo_refs > 1) {
uobj->uo_refs--;
simple_unlock(&uobj->vmobjlock);
- UVMHIST_LOG(maphist," <- done, uobj=0x%x, ref=%d",
+ UVMHIST_LOG(maphist," <- done, uobj=0x%x, ref=%d",
uobj,uobj->uo_refs,0,0);
return;
}
@@ -374,7 +374,7 @@ static int
udv_fault(ufi, vaddr, pps, npages, centeridx, fault_type, access_type, flags)
struct uvm_faultinfo *ufi;
vaddr_t vaddr;
- vm_page_t *pps;
+ struct vm_page **pps;
int npages, centeridx, flags;
vm_fault_t fault_type;
vm_prot_t access_type;
@@ -396,16 +396,16 @@ udv_fault(ufi, vaddr, pps, npages, centeridx, fault_type, access_type, flags)
* we do not allow device mappings to be mapped copy-on-write
* so we kill any attempt to do so here.
*/
-
+
if (UVM_ET_ISCOPYONWRITE(entry)) {
- UVMHIST_LOG(maphist, "<- failed -- COW entry (etype=0x%x)",
+ UVMHIST_LOG(maphist, "<- failed -- COW entry (etype=0x%x)",
entry->etype, 0,0,0);
uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
return(EIO);
}
/*
- * get device map function.
+ * get device map function.
*/
device = udv->u_device;
@@ -422,7 +422,7 @@ udv_fault(ufi, vaddr, pps, npages, centeridx, fault_type, access_type, flags)
curr_offset = entry->offset + (vaddr - entry->start);
/* pmap va = vaddr (virtual address of pps[0]) */
curr_va = vaddr;
-
+
/*
* loop over the page range entering in as needed
*/
@@ -460,11 +460,13 @@ udv_fault(ufi, vaddr, pps, npages, centeridx, fault_type, access_type, flags)
*/
uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
uobj, NULL);
+ pmap_update(); /* sync what we have so far */
uvm_wait("udv_fault");
return (ERESTART);
}
}
uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
+ pmap_update();
return (retval);
}
diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h
index 3e3d255a5a9..e2101e71378 100644
--- a/sys/uvm/uvm_extern.h
+++ b/sys/uvm/uvm_extern.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_extern.h,v 1.35 2001/11/28 13:47:39 art Exp $ */
-/* $NetBSD: uvm_extern.h,v 1.58 2001/03/15 06:10:56 chs Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.36 2001/11/28 19:28:14 art Exp $ */
+/* $NetBSD: uvm_extern.h,v 1.65 2001/06/02 18:09:26 chs Exp $ */
/*
*
@@ -88,24 +88,12 @@
* typedefs, necessary for standard UVM headers.
*/
-typedef unsigned int uvm_flag_t;
+typedef unsigned int uvm_flag_t;
typedef int vm_fault_t;
typedef int vm_inherit_t; /* XXX: inheritance codes */
typedef off_t voff_t; /* XXX: offset within a uvm_object */
-union vm_map_object;
-typedef union vm_map_object vm_map_object_t;
-
-struct vm_map_entry;
-typedef struct vm_map_entry *vm_map_entry_t;
-
-struct vm_map;
-typedef struct vm_map *vm_map_t;
-
-struct vm_page;
-typedef struct vm_page *vm_page_t;
-
/*
* defines
*/
@@ -263,6 +251,9 @@ struct pmap;
struct vnode;
struct pool;
struct simplelock;
+struct vm_map_entry;
+struct vm_map;
+struct vm_page;
extern struct pool *uvm_aiobuf_pool;
@@ -285,6 +276,9 @@ struct uvmexp {
int paging; /* number of pages in the process of being paged out */
int wired; /* number of wired pages */
+ int ncolors; /* number of page color buckets: must be p-o-2 */
+ int colormask; /* color bucket mask */
+
int zeropages; /* number of zero'd pages */
int reserve_pagedaemon; /* number of pages reserved for pagedaemon */
int reserve_kernel; /* number of pages reserved for kernel */
@@ -336,6 +330,8 @@ struct uvmexp {
not available */
int zeroaborts; /* number of times page zeroing was
aborted */
+ int colorhit; /* pagealloc where we got optimal color */
+ int colormiss; /* pagealloc where we didn't */
/* fault subcounters */
int fltnoram; /* number of times fault was out of ram */
@@ -407,7 +403,7 @@ struct vmspace {
caddr_t vm_shm; /* SYS5 shared memory private data XXX */
/* we copy from vm_startcopy to the end of the structure on fork */
#define vm_startcopy vm_rssize
- segsz_t vm_rssize; /* current resident set size in pages */
+ segsz_t vm_rssize; /* current resident set size in pages */
segsz_t vm_swrss; /* resident set size before last swap */
segsz_t vm_tsize; /* text size (pages) XXX */
segsz_t vm_dsize; /* data size (pages) XXX */
@@ -429,7 +425,6 @@ extern struct vm_map *kmem_map;
extern struct vm_map *mb_map;
extern struct vm_map *phys_map;
-
/*
* macros
*/
@@ -440,11 +435,7 @@ extern struct vm_map *phys_map;
#endif /* _KERNEL */
-#ifdef pmap_resident_count
#define vm_resident_count(vm) (pmap_resident_count((vm)->vm_map.pmap))
-#else
-#define vm_resident_count(vm) ((vm)->vm_rssize)
-#endif
/* XXX clean up later */
struct buf;
@@ -487,7 +478,7 @@ void ubc_release __P((void *, vsize_t));
void ubc_flush __P((struct uvm_object *, voff_t, voff_t));
/* uvm_fault.c */
-int uvm_fault __P((vm_map_t, vaddr_t, vm_fault_t,
+int uvm_fault __P((struct vm_map *, vaddr_t, vm_fault_t,
vm_prot_t));
/* handle a page fault */
@@ -509,45 +500,49 @@ void uvm_vsunlock __P((struct proc *, caddr_t, size_t));
/* uvm_init.c */
-void uvm_init __P((void));
+void uvm_init __P((void));
/* init the uvm system */
/* uvm_io.c */
-int uvm_io __P((vm_map_t, struct uio *));
+int uvm_io __P((struct vm_map *, struct uio *));
/* uvm_km.c */
-vaddr_t uvm_km_alloc1 __P((vm_map_t, vsize_t, boolean_t));
-void uvm_km_free __P((vm_map_t, vaddr_t, vsize_t));
-void uvm_km_free_wakeup __P((vm_map_t, vaddr_t,
- vsize_t));
-vaddr_t uvm_km_kmemalloc __P((vm_map_t, struct uvm_object *,
- vsize_t, int));
-struct vm_map *uvm_km_suballoc __P((vm_map_t, vaddr_t *,
- vaddr_t *, vsize_t, int,
- boolean_t, vm_map_t));
-vaddr_t uvm_km_valloc __P((vm_map_t, vsize_t));
-vaddr_t uvm_km_valloc_align __P((vm_map_t, vsize_t, vsize_t));
-vaddr_t uvm_km_valloc_wait __P((vm_map_t, vsize_t));
-vaddr_t uvm_km_valloc_prefer_wait __P((vm_map_t, vsize_t,
- voff_t));
-vaddr_t uvm_km_alloc_poolpage1 __P((vm_map_t,
- struct uvm_object *, boolean_t));
-void uvm_km_free_poolpage1 __P((vm_map_t, vaddr_t));
-
-#define uvm_km_alloc_poolpage(waitok) uvm_km_alloc_poolpage1(kmem_map, \
- uvmexp.kmem_object, (waitok))
-#define uvm_km_free_poolpage(addr) uvm_km_free_poolpage1(kmem_map, (addr))
+vaddr_t uvm_km_alloc1 __P((struct vm_map *, vsize_t,
+ boolean_t));
+void uvm_km_free __P((struct vm_map *, vaddr_t, vsize_t));
+void uvm_km_free_wakeup __P((struct vm_map *, vaddr_t,
+ vsize_t));
+vaddr_t uvm_km_kmemalloc __P((struct vm_map *, struct
+ uvm_object *, vsize_t, int));
+struct vm_map *uvm_km_suballoc __P((struct vm_map *, vaddr_t *,
+ vaddr_t *, vsize_t, int, boolean_t,
+ struct vm_map *));
+vaddr_t uvm_km_valloc __P((struct vm_map *, vsize_t));
+vaddr_t uvm_km_valloc_align __P((struct vm_map *, vsize_t,
+ vsize_t));
+vaddr_t uvm_km_valloc_wait __P((struct vm_map *, vsize_t));
+vaddr_t uvm_km_valloc_prefer_wait __P((struct vm_map *, vsize_t,
+ voff_t));
+vaddr_t uvm_km_alloc_poolpage1 __P((struct vm_map *,
+ struct uvm_object *, boolean_t));
+void uvm_km_free_poolpage1 __P((struct vm_map *, vaddr_t));
+
+#define uvm_km_alloc_poolpage(waitok) \
+ uvm_km_alloc_poolpage1(kmem_map, uvmexp.kmem_object, (waitok))
+#define uvm_km_free_poolpage(addr) \
+ uvm_km_free_poolpage1(kmem_map, (addr))
/* uvm_map.c */
-int uvm_map __P((vm_map_t, vaddr_t *, vsize_t,
+int uvm_map __P((struct vm_map *, vaddr_t *, vsize_t,
struct uvm_object *, voff_t, vsize_t,
uvm_flag_t));
-int uvm_map_pageable __P((vm_map_t, vaddr_t,
+int uvm_map_pageable __P((struct vm_map *, vaddr_t,
vaddr_t, boolean_t, int));
-int uvm_map_pageable_all __P((vm_map_t, int, vsize_t));
-boolean_t uvm_map_checkprot __P((vm_map_t, vaddr_t,
+int uvm_map_pageable_all __P((struct vm_map *, int,
+ vsize_t));
+boolean_t uvm_map_checkprot __P((struct vm_map *, vaddr_t,
vaddr_t, vm_prot_t));
-int uvm_map_protect __P((vm_map_t, vaddr_t,
+int uvm_map_protect __P((struct vm_map *, vaddr_t,
vaddr_t, vm_prot_t, boolean_t));
struct vmspace *uvmspace_alloc __P((vaddr_t, vaddr_t,
boolean_t));
@@ -562,13 +557,13 @@ void uvmspace_unshare __P((struct proc *));
/* uvm_meter.c */
void uvm_meter __P((void));
-int uvm_sysctl __P((int *, u_int, void *, size_t *,
+int uvm_sysctl __P((int *, u_int, void *, size_t *,
void *, size_t, struct proc *));
void uvm_total __P((struct vmtotal *));
/* uvm_mmap.c */
-int uvm_mmap __P((vm_map_t, vaddr_t *, vsize_t,
- vm_prot_t, vm_prot_t, int,
+int uvm_mmap __P((struct vm_map *, vaddr_t *, vsize_t,
+ vm_prot_t, vm_prot_t, int,
void *, voff_t, vsize_t));
/* uvm_page.c */
@@ -579,7 +574,7 @@ struct vm_page *uvm_pagealloc_strat __P((struct uvm_object *,
UVM_PGA_STRAT_NORMAL, 0)
vaddr_t uvm_pagealloc_contig __P((vaddr_t, vaddr_t,
vaddr_t, vaddr_t));
-void uvm_pagerealloc __P((struct vm_page *,
+void uvm_pagerealloc __P((struct vm_page *,
struct uvm_object *, voff_t));
/* Actually, uvm_page_physload takes PF#s which need their own type */
void uvm_page_physload __P((paddr_t, paddr_t,
@@ -598,19 +593,19 @@ void uvm_aiodone_daemon __P((void *));
/* uvm_pglist.c */
int uvm_pglistalloc __P((psize_t, paddr_t,
paddr_t, paddr_t, paddr_t,
- struct pglist *, int, int));
+ struct pglist *, int, int));
void uvm_pglistfree __P((struct pglist *));
/* uvm_swap.c */
void uvm_swap_init __P((void));
/* uvm_unix.c */
-int uvm_coredump __P((struct proc *, struct vnode *,
+int uvm_coredump __P((struct proc *, struct vnode *,
struct ucred *, struct core *));
int uvm_grow __P((struct proc *, vaddr_t));
/* uvm_user.c */
-void uvm_deallocate __P((vm_map_t, vaddr_t, vsize_t));
+void uvm_deallocate __P((struct vm_map *, vaddr_t, vsize_t));
/* uvm_vnode.c */
void uvm_vnp_setsize __P((struct vnode *, voff_t));
diff --git a/sys/uvm/uvm_fault.c b/sys/uvm/uvm_fault.c
index 3be2966ea58..8b47ada9019 100644
--- a/sys/uvm/uvm_fault.c
+++ b/sys/uvm/uvm_fault.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_fault.c,v 1.26 2001/11/28 13:47:39 art Exp $ */
-/* $NetBSD: uvm_fault.c,v 1.60 2001/04/01 16:45:53 chs Exp $ */
+/* $OpenBSD: uvm_fault.c,v 1.27 2001/11/28 19:28:14 art Exp $ */
+/* $NetBSD: uvm_fault.c,v 1.67 2001/06/26 17:55:14 thorpej Exp $ */
/*
*
@@ -59,7 +59,7 @@
*
* CASE 1A CASE 1B CASE 2A CASE 2B
* read/write1 write>1 read/write +-cow_write/zero
- * | | | |
+ * | | | |
* +--|--+ +--|--+ +-----+ + | + | +-----+
* amap | V | | ----------->new| | | | ^ |
* +-----+ +-----+ +-----+ + | + | +--|--+
@@ -69,7 +69,7 @@
* +-----+ +-----+ +-----+ +-----+
*
* d/c = don't care
- *
+ *
* case [0]: layerless fault
* no amap or uobj is present. this is an error.
*
@@ -83,17 +83,17 @@
* 2A: [read on non-NULL uobj] or [write to non-copy_on_write area]
* I/O takes place directly in object.
* 2B: [write to copy_on_write] or [read on NULL uobj]
- * data is "promoted" from uobj to a new anon.
+ * data is "promoted" from uobj to a new anon.
* if uobj is null, then we zero fill.
*
* we follow the standard UVM locking protocol ordering:
*
- * MAPS => AMAP => UOBJ => ANON => PAGE QUEUES (PQ)
+ * MAPS => AMAP => UOBJ => ANON => PAGE QUEUES (PQ)
* we hold a PG_BUSY page if we unlock for I/O
*
*
* the code is structured as follows:
- *
+ *
* - init the "IN" params in the ufi structure
* ReFault:
* - do lookups [locks maps], check protection, handle needs_copy
@@ -125,7 +125,7 @@
*
* alternative 1: unbusy the page in question and restart the page fault
* from the top (ReFault). this is easy but does not take advantage
- * of the information that we already have from our previous lookup,
+ * of the information that we already have from our previous lookup,
* although it is possible that the "hints" in the vm_map will help here.
*
* alternative 2: the system already keeps track of a "version" number of
@@ -159,7 +159,7 @@ struct uvm_advice {
/*
* page range array:
- * note: index in array must match "advice" value
+ * note: index in array must match "advice" value
* XXX: borrowed numbers from freebsd. do they work well for us?
*/
@@ -195,7 +195,7 @@ uvmfault_anonflush(anons, n)
{
int lcv;
struct vm_page *pg;
-
+
for (lcv = 0 ; lcv < n ; lcv++) {
if (anons[lcv] == NULL)
continue;
@@ -248,7 +248,7 @@ uvmfault_amapcopy(ufi)
*/
if (UVM_ET_ISNEEDSCOPY(ufi->entry))
- amap_copy(ufi->map, ufi->entry, M_NOWAIT, TRUE,
+ amap_copy(ufi->map, ufi->entry, M_NOWAIT, TRUE,
ufi->orig_rvaddr, ufi->orig_rvaddr + 1);
/*
@@ -264,7 +264,7 @@ uvmfault_amapcopy(ufi)
/*
* got it! unlock and return.
*/
-
+
uvmfault_unlockmaps(ufi, TRUE);
return;
}
@@ -309,7 +309,7 @@ uvmfault_anonget(ufi, amap, anon)
else
curproc->p_addr->u_stats.p_ru.ru_majflt++;
- /*
+ /*
* loop until we get it, or fail.
*/
@@ -369,7 +369,7 @@ uvmfault_anonget(ufi, amap, anon)
/* ready to relock and try again */
} else {
-
+
/*
* no page, we must try and bring it in.
*/
@@ -385,9 +385,9 @@ uvmfault_anonget(ufi, amap, anon)
/* ready to relock and try again */
} else {
-
+
/* we set the PG_BUSY bit */
- we_own = TRUE;
+ we_own = TRUE;
uvmfault_unlockall(ufi, amap, NULL, anon);
/*
@@ -425,23 +425,23 @@ uvmfault_anonget(ufi, amap, anon)
* to clean up after the I/O. there are three cases to
* consider:
* [1] page released during I/O: free anon and ReFault.
- * [2] I/O not OK. free the page and cause the fault
+ * [2] I/O not OK. free the page and cause the fault
* to fail.
* [3] I/O OK! activate the page and sync with the
* non-we_own case (i.e. drop anon lock if not locked).
*/
-
+
if (we_own) {
if (pg->flags & PG_WANTED) {
/* still holding object lock */
- wakeup(pg);
+ wakeup(pg);
}
/* un-busy! */
pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
UVM_PAGE_OWN(pg, NULL);
- /*
+ /*
* if we were RELEASED during I/O, then our anon is
* no longer part of an amap. we need to free the
* anon and try again.
@@ -488,7 +488,7 @@ uvmfault_anonget(ufi, amap, anon)
UVMHIST_LOG(maphist, "<- ERROR", 0,0,0,0);
return error;
}
-
+
/*
* must be OK, clear modify (already PG_CLEAN)
* and activate
@@ -515,16 +515,16 @@ uvmfault_anonget(ufi, amap, anon)
*/
if (ufi != NULL &&
- amap_lookup(&ufi->entry->aref,
+ amap_lookup(&ufi->entry->aref,
ufi->orig_rvaddr - ufi->entry->start) != anon) {
-
+
uvmfault_unlockall(ufi, amap, NULL, anon);
UVMHIST_LOG(maphist, "<- REFAULT", 0,0,0,0);
return (ERESTART);
}
-
+
/*
- * try it again!
+ * try it again!
*/
uvmexp.fltanretry++;
@@ -543,11 +543,12 @@ uvmfault_anonget(ufi, amap, anon)
* uvm_fault: page fault handler
*
* => called from MD code to resolve a page fault
- * => VM data structures usually should be unlocked. however, it is
+ * => VM data structures usually should be unlocked. however, it is
* possible to call here with the main map locked if the caller
* gets a write lock, sets it recusive, and then calls us (c.f.
* uvm_map_pageable). this should be avoided because it keeps
* the map locked off during I/O.
+ * => MUST NEVER BE CALLED IN INTERRUPT CONTEXT
*/
#define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \
@@ -555,7 +556,7 @@ uvmfault_anonget(ufi, amap, anon)
int
uvm_fault(orig_map, vaddr, fault_type, access_type)
- vm_map_t orig_map;
+ struct vm_map *orig_map;
vaddr_t vaddr;
vm_fault_t fault_type;
vm_prot_t access_type;
@@ -565,7 +566,7 @@ uvm_fault(orig_map, vaddr, fault_type, access_type)
boolean_t wired, narrow, promote, locked, shadowed;
int npages, nback, nforw, centeridx, error, lcv, gotpages;
vaddr_t startva, objaddr, currva, offset, uoff;
- paddr_t pa;
+ paddr_t pa;
struct vm_amap *amap;
struct uvm_object *uobj;
struct vm_anon *anons_store[UVM_MAXRANGE], **anons, *anon, *oanon;
@@ -594,20 +595,6 @@ uvm_fault(orig_map, vaddr, fault_type, access_type)
narrow = FALSE; /* normal fault */
/*
- * before we do anything else, if this is a fault on a kernel
- * address, check to see if the address is managed by an
- * interrupt-safe map. If it is, we fail immediately. Intrsafe
- * maps are never pageable, and this approach avoids an evil
- * locking mess.
- */
-
- if (orig_map == kernel_map && uvmfault_check_intrsafe(&ufi)) {
- UVMHIST_LOG(maphist, "<- VA 0x%lx in intrsafe map %p",
- ufi.orig_rvaddr, ufi.map, 0, 0);
- return EFAULT;
- }
-
- /*
* "goto ReFault" means restart the page fault from ground zero.
*/
ReFault:
@@ -622,7 +609,15 @@ ReFault:
}
/* locked: maps(read) */
- KASSERT(ufi.map->flags & VM_MAP_PAGEABLE);
+#ifdef DIAGNOSTIC
+ if ((ufi.map->flags & VM_MAP_PAGEABLE) == 0) {
+ printf("Page fault on non-pageable map:\n");
+ printf("ufi.map = %p\n", ufi.map);
+ printf("ufi.orig_map = %p\n", ufi.orig_map);
+ printf("ufi.orig_rvaddr = 0x%lx\n", (u_long) ufi.orig_rvaddr);
+ panic("uvm_fault: (ufi.map->flags & VM_MAP_PAGEABLE) == 0");
+ }
+#endif
/*
* check protection
@@ -672,7 +667,7 @@ ReFault:
* ensure that we pmap_enter page R/O since
* needs_copy is still true
*/
- enter_prot &= ~VM_PROT_WRITE;
+ enter_prot &= ~VM_PROT_WRITE;
}
}
@@ -698,7 +693,7 @@ ReFault:
/*
* establish range of interest based on advice from mapper
* and then clip to fit map entry. note that we only want
- * to do this the first time through the fault. if we
+ * to do this the first time through the fault. if we
* ReFault we will disable this by setting "narrow" to true.
*/
@@ -723,7 +718,7 @@ ReFault:
narrow = TRUE; /* ensure only once per-fault */
} else {
-
+
/* narrow fault! */
nback = nforw = 0;
startva = ufi.orig_rvaddr;
@@ -763,7 +758,7 @@ ReFault:
UVMHIST_LOG(maphist, " MADV_SEQUENTIAL: flushing backpages",
0,0,0,0);
/* flush back-page anons? */
- if (amap)
+ if (amap)
uvmfault_anonflush(anons, nback);
/* flush object? */
@@ -771,7 +766,7 @@ ReFault:
objaddr =
(startva - ufi.entry->start) + ufi.entry->offset;
simple_lock(&uobj->vmobjlock);
- (void) uobj->pgops->pgo_flush(uobj, objaddr, objaddr +
+ (void) uobj->pgops->pgo_flush(uobj, objaddr, objaddr +
(nback << PAGE_SHIFT), PGO_DEACTIVATE);
simple_unlock(&uobj->vmobjlock);
}
@@ -850,11 +845,12 @@ ReFault:
(VM_MAPENT_ISWIRED(ufi.entry) ? PMAP_WIRED : 0));
}
simple_unlock(&anon->an_lock);
+ pmap_update();
}
/* locked: maps(read), amap(if there) */
/* (shadowed == TRUE) if there is an anon at the faulting address */
- UVMHIST_LOG(maphist, " shadowed=%d, will_get=%d", shadowed,
+ UVMHIST_LOG(maphist, " shadowed=%d, will_get=%d", shadowed,
(uobj && shadowed == FALSE),0,0);
/*
@@ -864,7 +860,7 @@ ReFault:
* XXX Actually, that is bad; pmap_enter() should just fail in that
* XXX case. --thorpej
*/
-
+
/*
* if the desired page is not shadowed by the amap and we have a
* backing object, then we check to see if the backing object would
@@ -884,6 +880,9 @@ ReFault:
if (error == ERESTART)
goto ReFault; /* try again! */
+ /*
+ * object fault routine responsible for pmap_update().
+ */
return error;
}
@@ -937,16 +936,16 @@ ReFault:
* us a handle to it. remember this
* page as "uobjpage." (for later use).
*/
-
+
if (lcv == centeridx) {
uobjpage = pages[lcv];
UVMHIST_LOG(maphist, " got uobjpage "
- "(0x%x) with locked get",
+ "(0x%x) with locked get",
uobjpage, 0,0,0);
continue;
}
-
- /*
+
+ /*
* note: calling pgo_get with locked data
* structures returns us pages which are
* neither busy nor released, so we don't
@@ -977,7 +976,7 @@ ReFault:
PMAP_CANFAIL |
(wired ? PMAP_WIRED : 0));
- /*
+ /*
* NOTE: page can't be PG_WANTED or PG_RELEASED
* because we've held the lock the whole time
* we've had the handle.
@@ -986,6 +985,7 @@ ReFault:
pages[lcv]->flags &= ~(PG_BUSY); /* un-busy! */
UVM_PAGE_OWN(pages[lcv], NULL);
} /* for "lcv" loop */
+ pmap_update();
} /* "gotpages" != 0 */
/* note: object still _locked_ */
} else {
@@ -993,7 +993,7 @@ ReFault:
}
/* locked (shadowed): maps(read), amap */
- /* locked (!shadowed): maps(read), amap(if there),
+ /* locked (!shadowed): maps(read), amap(if there),
uobj(if !null), uobjpage(if !null) */
/*
@@ -1015,7 +1015,7 @@ ReFault:
* redirect case 2: if we are not shadowed, go to case 2.
*/
- if (shadowed == FALSE)
+ if (shadowed == FALSE)
goto Case2;
/* locked: maps(read), amap */
@@ -1047,7 +1047,7 @@ ReFault:
error = uvmfault_anonget(&ufi, amap, anon);
switch (error) {
case 0:
- break;
+ break;
case ERESTART:
goto ReFault;
@@ -1069,13 +1069,13 @@ ReFault:
/* locked: maps(read), amap, anon, uobj(if one) */
/*
- * special handling for loaned pages
+ * special handling for loaned pages
*/
if (anon->u.an_page->loan_count) {
if ((access_type & VM_PROT_WRITE) == 0) {
-
+
/*
* for read faults on loaned pages we just cap the
* protection at read-only.
@@ -1151,8 +1151,8 @@ ReFault:
* also note that the ref count can't drop to zero here because
* it is > 1 and we are only dropping one ref.
*
- * in the (hopefully very rare) case that we are out of RAM we
- * will unlock, wait for more RAM, and refault.
+ * in the (hopefully very rare) case that we are out of RAM we
+ * will unlock, wait for more RAM, and refault.
*
* if we are out of anon VM we kill the process (XXX: could wait?).
*/
@@ -1284,6 +1284,7 @@ ReFault:
if (anon != oanon)
simple_unlock(&anon->an_lock);
uvmfault_unlockall(&ufi, amap, uobj, oanon);
+ pmap_update();
return 0;
@@ -1305,7 +1306,7 @@ Case2:
*/
if (uobj == NULL) {
- uobjpage = PGO_DONTCARE;
+ uobjpage = PGO_DONTCARE;
promote = TRUE; /* always need anon here */
} else {
KASSERT(uobjpage != PGO_DONTCARE);
@@ -1319,7 +1320,7 @@ Case2:
* if uobjpage is not null then we do not need to do I/O to get the
* uobjpage.
*
- * if uobjpage is null, then we need to unlock and ask the pager to
+ * if uobjpage is null, then we need to unlock and ask the pager to
* get the data for us. once we have the data, we need to reverify
* the state the world. we are currently not holding any resources.
*/
@@ -1330,7 +1331,7 @@ Case2:
} else {
/* update rusage counters */
curproc->p_addr->u_stats.p_ru.ru_majflt++;
-
+
/* locked: maps(read), amap(if there), uobj */
uvmfault_unlockall(&ufi, amap, NULL, NULL);
/* locked: uobj */
@@ -1372,7 +1373,7 @@ Case2:
if (locked && amap)
amap_lock(amap);
simple_lock(&uobj->vmobjlock);
-
+
/* locked(locked): maps(read), amap(if !null), uobj, uobjpage */
/* locked(!locked): uobj, uobjpage */
@@ -1383,10 +1384,10 @@ Case2:
*/
if ((uobjpage->flags & PG_RELEASED) != 0 ||
- (locked && amap &&
+ (locked && amap &&
amap_lookup(&ufi.entry->aref,
ufi.orig_rvaddr - ufi.entry->start))) {
- if (locked)
+ if (locked)
uvmfault_unlockall(&ufi, amap, NULL, NULL);
locked = FALSE;
}
@@ -1398,7 +1399,7 @@ Case2:
if (locked == FALSE) {
UVMHIST_LOG(maphist,
- " wasn't able to relock after fault: retry",
+ " wasn't able to relock after fault: retry",
0,0,0,0);
if (uobjpage->flags & PG_WANTED)
/* still holding object lock */
@@ -1448,7 +1449,7 @@ Case2:
* for it above)
* - at this point uobjpage could be PG_WANTED (handle later)
*/
-
+
if (promote == FALSE) {
/*
@@ -1552,7 +1553,7 @@ Case2:
} /* if loan_count */
} else {
-
+
/*
* if we are going to promote the data to an anon we
* allocate a blank anon here and plug it into our amap.
@@ -1634,8 +1635,11 @@ Case2:
*/
if ((amap_flags(amap) & AMAP_SHARED) != 0) {
pmap_page_protect(uobjpage, VM_PROT_NONE);
+ /*
+ * XXX: PAGE MIGHT BE WIRED!
+ */
}
-
+
/*
* dispose of uobjpage. it can't be PG_RELEASED
* since we still hold the object lock.
@@ -1703,11 +1707,11 @@ Case2:
if (pg->flags & PG_WANTED)
wakeup(pg); /* lock still held */
- /*
+ /*
* note that pg can't be PG_RELEASED since we did not drop
* the object lock since the last time we checked.
*/
-
+
pg->flags &= ~(PG_BUSY|PG_FAKE|PG_WANTED);
UVM_PAGE_OWN(pg, NULL);
uvmfault_unlockall(&ufi, amap, uobj, anon);
@@ -1748,15 +1752,17 @@ Case2:
if (pg->flags & PG_WANTED)
wakeup(pg); /* lock still held */
- /*
- * note that pg can't be PG_RELEASED since we did not drop the object
+ /*
+ * note that pg can't be PG_RELEASED since we did not drop the object
* lock since the last time we checked.
*/
-
+
pg->flags &= ~(PG_BUSY|PG_FAKE|PG_WANTED);
UVM_PAGE_OWN(pg, NULL);
uvmfault_unlockall(&ufi, amap, uobj, anon);
+ pmap_update();
+
UVMHIST_LOG(maphist, "<- done (SUCCESS!)",0,0,0,0);
return 0;
}
@@ -1773,22 +1779,27 @@ Case2:
int
uvm_fault_wire(map, start, end, access_type)
- vm_map_t map;
+ struct vm_map *map;
vaddr_t start, end;
vm_prot_t access_type;
{
vaddr_t va;
- pmap_t pmap;
int error;
- pmap = vm_map_pmap(map);
-
/*
* now fault it in a page at a time. if the fault fails then we have
- * to undo what we have done. note that in uvm_fault VM_PROT_NONE
+ * to undo what we have done. note that in uvm_fault VM_PROT_NONE
* is replaced with the max protection if fault_type is VM_FAULT_WIRE.
*/
+ /*
+ * XXX work around overflowing a vaddr_t. this prevents us from
+ * wiring the last page in the address space, though.
+ */
+ if (start > end) {
+ return EFAULT;
+ }
+
for (va = start ; va < end ; va += PAGE_SIZE) {
error = uvm_fault(map, va, VM_FAULT_WIRE, access_type);
if (error) {
@@ -1798,7 +1809,6 @@ uvm_fault_wire(map, start, end, access_type)
return error;
}
}
-
return 0;
}
@@ -1808,7 +1818,7 @@ uvm_fault_wire(map, start, end, access_type)
void
uvm_fault_unwire(map, start, end)
- vm_map_t map;
+ struct vm_map *map;
vaddr_t start, end;
{
@@ -1825,10 +1835,10 @@ uvm_fault_unwire(map, start, end)
void
uvm_fault_unwire_locked(map, start, end)
- vm_map_t map;
+ struct vm_map *map;
vaddr_t start, end;
{
- vm_map_entry_t entry;
+ struct vm_map_entry *entry;
pmap_t pmap = vm_map_pmap(map);
vaddr_t va;
paddr_t pa;
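
A minimal sketch of the pattern the uvm_fault.c hunks above introduce:
pmap_enter() calls are batched and a single argument-less pmap_update()
flushes any deferred pmap state afterwards, instead of paying that cost
on every entry.  The function name is hypothetical and the fragment
assumes the usual kernel/uvm headers; it is an illustration, not code
from this commit.

/*
 * illustrative only: enter a run of wired mappings, then flush the
 * deferred pmap state once for the whole batch, as the fault paths
 * above now do after their pmap_enter() loops.
 */
void
example_enter_range(pmap, va, pa, len)
	struct pmap *pmap;
	vaddr_t va;
	paddr_t pa;
	vsize_t len;
{
	vaddr_t eva;

	for (eva = va + len; va < eva; va += PAGE_SIZE, pa += PAGE_SIZE)
		pmap_enter(pmap, va, pa, VM_PROT_READ | VM_PROT_WRITE,
		    PMAP_WIRED);
	pmap_update();		/* one flush covers every pmap_enter() above */
}
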
diff --git a/sys/uvm/uvm_fault.h b/sys/uvm/uvm_fault.h
index 0abb41d7e46..8bb25b00b12 100644
--- a/sys/uvm/uvm_fault.h
+++ b/sys/uvm/uvm_fault.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_fault.h,v 1.9 2001/11/05 22:14:54 art Exp $ */
-/* $NetBSD: uvm_fault.h,v 1.14 2000/06/26 14:21:17 mrg Exp $ */
+/* $OpenBSD: uvm_fault.h,v 1.10 2001/11/28 19:28:14 art Exp $ */
+/* $NetBSD: uvm_fault.h,v 1.15 2001/06/02 18:09:26 chs Exp $ */
/*
*
@@ -57,12 +57,12 @@
struct uvm_faultinfo {
- vm_map_t orig_map; /* IN: original map */
+ struct vm_map *orig_map; /* IN: original map */
vaddr_t orig_rvaddr; /* IN: original rounded VA */
vsize_t orig_size; /* IN: original size of interest */
- vm_map_t map; /* map (could be a submap) */
+ struct vm_map *map; /* map (could be a submap) */
unsigned int mapv; /* map's version number */
- vm_map_entry_t entry; /* map entry (from 'map') */
+ struct vm_map_entry *entry; /* map entry (from 'map') */
vsize_t size; /* size of interest */
};
@@ -76,9 +76,9 @@ struct uvm_faultinfo {
int uvmfault_anonget __P((struct uvm_faultinfo *, struct vm_amap *,
struct vm_anon *));
-int uvm_fault_wire __P((vm_map_t, vaddr_t, vaddr_t, vm_prot_t));
-void uvm_fault_unwire __P((vm_map_t, vaddr_t, vaddr_t));
-void uvm_fault_unwire_locked __P((vm_map_t, vaddr_t, vaddr_t));
+int uvm_fault_wire __P((struct vm_map *, vaddr_t, vaddr_t, vm_prot_t));
+void uvm_fault_unwire __P((struct vm_map *, vaddr_t, vaddr_t));
+void uvm_fault_unwire_locked __P((struct vm_map *, vaddr_t, vaddr_t));
#endif /* _KERNEL */
diff --git a/sys/uvm/uvm_fault_i.h b/sys/uvm/uvm_fault_i.h
index 66f98503340..f262e48f09f 100644
--- a/sys/uvm/uvm_fault_i.h
+++ b/sys/uvm/uvm_fault_i.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_fault_i.h,v 1.7 2001/11/05 22:14:54 art Exp $ */
-/* $NetBSD: uvm_fault_i.h,v 1.11 2000/06/26 14:21:17 mrg Exp $ */
+/* $OpenBSD: uvm_fault_i.h,v 1.8 2001/11/28 19:28:14 art Exp $ */
+/* $NetBSD: uvm_fault_i.h,v 1.14 2001/06/26 17:55:15 thorpej Exp $ */
/*
*
@@ -41,7 +41,6 @@
/*
* uvm_fault_i.h: fault inline functions
*/
-static boolean_t uvmfault_check_intrsafe __P((struct uvm_faultinfo *));
static boolean_t uvmfault_lookup __P((struct uvm_faultinfo *, boolean_t));
static boolean_t uvmfault_relock __P((struct uvm_faultinfo *));
static void uvmfault_unlockall __P((struct uvm_faultinfo *, struct vm_amap *,
@@ -97,39 +96,6 @@ uvmfault_unlockall(ufi, amap, uobj, anon)
}
/*
- * uvmfault_check_intrsafe: check for a virtual address managed by
- * an interrupt-safe map.
- *
- * => caller must provide a uvm_faultinfo structure with the IN
- * params properly filled in
- * => if we find an intersafe VA, we fill in ufi->map, and return TRUE
- */
-
-static __inline boolean_t
-uvmfault_check_intrsafe(ufi)
- struct uvm_faultinfo *ufi;
-{
- struct vm_map_intrsafe *vmi;
- int s;
-
- s = vmi_list_lock();
- for (vmi = LIST_FIRST(&vmi_list); vmi != NULL;
- vmi = LIST_NEXT(vmi, vmi_list)) {
- if (ufi->orig_rvaddr >= vm_map_min(&vmi->vmi_map) &&
- ufi->orig_rvaddr < vm_map_max(&vmi->vmi_map))
- break;
- }
- vmi_list_unlock(s);
-
- if (vmi != NULL) {
- ufi->map = &vmi->vmi_map;
- return (TRUE);
- }
-
- return (FALSE);
-}
-
-/*
* uvmfault_lookup: lookup a virtual address in a map
*
* => caller must provide a uvm_faultinfo structure with the IN
@@ -138,7 +104,7 @@ uvmfault_check_intrsafe(ufi)
* => if the lookup is a success we will return with the maps locked
* => if "write_lock" is TRUE, we write_lock the map, otherwise we only
* get a read lock.
- * => note that submaps can only appear in the kernel and they are
+ * => note that submaps can only appear in the kernel and they are
* required to use the same virtual addresses as the map they
* are referenced by (thus address translation between the main
* map and the submap is unnecessary).
@@ -149,7 +115,7 @@ uvmfault_lookup(ufi, write_lock)
struct uvm_faultinfo *ufi;
boolean_t write_lock;
{
- vm_map_t tmpmap;
+ struct vm_map *tmpmap;
/*
* init ufi values for lookup.
@@ -164,6 +130,13 @@ uvmfault_lookup(ufi, write_lock)
*/
while (1) {
+ /*
+ * Make sure this is not an "interrupt safe" map.
+ * Such maps are never supposed to be involved in
+ * a fault.
+ */
+ if (ufi->map->flags & VM_MAP_INTRSAFE)
+ return (FALSE);
/*
* lock map
@@ -177,7 +150,7 @@ uvmfault_lookup(ufi, write_lock)
/*
* lookup
*/
- if (!uvm_map_lookup_entry(ufi->map, ufi->orig_rvaddr,
+ if (!uvm_map_lookup_entry(ufi->map, ufi->orig_rvaddr,
&ufi->entry)) {
uvmfault_unlockmaps(ufi, write_lock);
return(FALSE);
@@ -239,7 +212,7 @@ uvmfault_relock(ufi)
uvmexp.fltrelck++;
/*
- * relock map. fail if version mismatch (in which case nothing
+ * relock map. fail if version mismatch (in which case nothing
* gets locked).
*/
diff --git a/sys/uvm/uvm_glue.c b/sys/uvm/uvm_glue.c
index 4252b9c9c42..8349434d0f8 100644
--- a/sys/uvm/uvm_glue.c
+++ b/sys/uvm/uvm_glue.c
@@ -1,9 +1,9 @@
-/* $OpenBSD: uvm_glue.c,v 1.28 2001/11/28 14:29:13 art Exp $ */
-/* $NetBSD: uvm_glue.c,v 1.46 2001/04/21 17:38:24 thorpej Exp $ */
+/* $OpenBSD: uvm_glue.c,v 1.29 2001/11/28 19:28:14 art Exp $ */
+/* $NetBSD: uvm_glue.c,v 1.50 2001/06/02 18:09:26 chs Exp $ */
-/*
+/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
- * Copyright (c) 1991, 1993, The Regents of the University of California.
+ * Copyright (c) 1991, 1993, The Regents of the University of California.
*
* All rights reserved.
*
@@ -21,7 +21,7 @@
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Charles D. Cranor,
- * Washington University, the University of California, Berkeley and
+ * Washington University, the University of California, Berkeley and
* its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
@@ -45,17 +45,17 @@
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
* All rights reserved.
- *
+ *
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
@@ -148,7 +148,7 @@ uvm_useracc(addr, len, rw)
size_t len;
int rw;
{
- vm_map_t map;
+ struct vm_map *map;
boolean_t rv;
vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;
@@ -191,14 +191,12 @@ uvm_chgkprot(addr, len, rw)
for (sva = trunc_page((vaddr_t)addr); sva < eva; sva += PAGE_SIZE) {
/*
* Extract physical address for the page.
- * We use a cheezy hack to differentiate physical
- * page 0 from an invalid mapping, not that it
- * really matters...
*/
if (pmap_extract(pmap_kernel(), sva, &pa) == FALSE)
panic("chgkprot: invalid page");
pmap_enter(pmap_kernel(), sva, pa, prot, PMAP_WIRED);
}
+ pmap_update();
}
#endif
@@ -216,7 +214,7 @@ uvm_vslock(p, addr, len, access_type)
size_t len;
vm_prot_t access_type;
{
- vm_map_t map;
+ struct vm_map *map;
vaddr_t start, end;
int error;
@@ -302,7 +300,7 @@ uvm_fork(p1, p2, shared, stack, stacksize, func, arg)
memcpy(&up->u_stats.pstat_startcopy, &p1->p_stats->pstat_startcopy,
((caddr_t)&up->u_stats.pstat_endcopy -
(caddr_t)&up->u_stats.pstat_startcopy));
-
+
/*
* cpu_fork() copy and update the pcb, and make the child ready
* to run. If this is a normal user fork, the child will exit
@@ -502,7 +500,7 @@ uvm_swapout_threads()
struct proc *outp, *outp2;
int outpri, outpri2;
int didswap = 0;
- extern int maxslp;
+ extern int maxslp;
/* XXXCDC: should move off to uvmexp. or uvm., also in uvm_meter */
#ifdef DEBUG
@@ -526,7 +524,7 @@ uvm_swapout_threads()
outpri2 = p->p_swtime;
}
continue;
-
+
case SSLEEP:
case SSTOP:
if (p->p_slptime >= maxslp) {
@@ -561,7 +559,7 @@ uvm_swapout_threads()
/*
* uvm_swapout: swap out process "p"
*
- * - currently "swapout" means "unwire U-area" and "pmap_collect()"
+ * - currently "swapout" means "unwire U-area" and "pmap_collect()"
* the pmap.
* - XXXCDC: should deactivate all process' private anonymous memory
*/
diff --git a/sys/uvm/uvm_io.c b/sys/uvm/uvm_io.c
index cd64da0ac95..100e82cfe3b 100644
--- a/sys/uvm/uvm_io.c
+++ b/sys/uvm/uvm_io.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_io.c,v 1.11 2001/11/28 13:47:39 art Exp $ */
-/* $NetBSD: uvm_io.c,v 1.13 2001/03/15 06:10:57 chs Exp $ */
+/* $OpenBSD: uvm_io.c,v 1.12 2001/11/28 19:28:14 art Exp $ */
+/* $NetBSD: uvm_io.c,v 1.15 2001/06/02 18:09:26 chs Exp $ */
/*
*
@@ -61,12 +61,12 @@
int
uvm_io(map, uio)
- vm_map_t map;
+ struct vm_map *map;
struct uio *uio;
{
vaddr_t baseva, endva, pageoffset, kva;
vsize_t chunksz, togo, sz;
- vm_map_entry_t dead_entries;
+ struct vm_map_entry *dead_entries;
int error;
/*
@@ -106,7 +106,7 @@ uvm_io(map, uio)
*/
error = uvm_map_extract(map, baseva, chunksz, kernel_map, &kva,
- UVM_EXTRACT_QREF | UVM_EXTRACT_CONTIG |
+ UVM_EXTRACT_QREF | UVM_EXTRACT_CONTIG |
UVM_EXTRACT_FIXPROT);
if (error) {
diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c
index 416f3b64338..584a3eeadf7 100644
--- a/sys/uvm/uvm_km.c
+++ b/sys/uvm/uvm_km.c
@@ -1,9 +1,9 @@
-/* $OpenBSD: uvm_km.c,v 1.24 2001/11/28 14:29:13 art Exp $ */
-/* $NetBSD: uvm_km.c,v 1.45 2001/04/12 21:11:47 thorpej Exp $ */
+/* $OpenBSD: uvm_km.c,v 1.25 2001/11/28 19:28:14 art Exp $ */
+/* $NetBSD: uvm_km.c,v 1.50 2001/06/26 17:55:15 thorpej Exp $ */
-/*
+/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
- * Copyright (c) 1991, 1993, The Regents of the University of California.
+ * Copyright (c) 1991, 1993, The Regents of the University of California.
*
* All rights reserved.
*
@@ -21,7 +21,7 @@
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Charles D. Cranor,
- * Washington University, the University of California, Berkeley and
+ * Washington University, the University of California, Berkeley and
* its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
@@ -45,17 +45,17 @@
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
* All rights reserved.
- *
+ *
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
@@ -78,11 +78,11 @@
* starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
* note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
*
- * the kernel_map has several "submaps." submaps can only appear in
+ * the kernel_map has several "submaps." submaps can only appear in
* the kernel_map (user processes can't use them). submaps "take over"
* the management of a sub-range of the kernel's address space. submaps
* are typically allocated at boot time and are never released. kernel
- * virtual address space that is mapped by a submap is locked by the
+ * virtual address space that is mapped by a submap is locked by the
* submap's lock -- not the kernel_map's lock.
*
* thus, the useful feature of submaps is that they allow us to break
@@ -102,19 +102,19 @@
* the kernel allocates its private memory out of special uvm_objects whose
* reference count is set to UVM_OBJ_KERN (thus indicating that the objects
* are "special" and never die). all kernel objects should be thought of
- * as large, fixed-sized, sparsely populated uvm_objects. each kernel
+ * as large, fixed-sized, sparsely populated uvm_objects. each kernel
* object is equal to the size of kernel virtual address space (i.e. the
* value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
*
* most kernel private memory lives in kernel_object. the only exception
* to this is for memory that belongs to submaps that must be protected
- * by splvm(). each of these submaps has their own private kernel
+ * by splvm(). each of these submaps has their own private kernel
* object (e.g. kmem_object, mb_object).
*
* note that just because a kernel object spans the entire kernel virutal
* address space doesn't mean that it has to be mapped into the entire space.
- * large chunks of a kernel object's space go unused either because
- * that area of kernel VM is unmapped, or there is some other type of
+ * large chunks of a kernel object's space go unused either because
+ * that area of kernel VM is unmapped, or there is some other type of
* object mapped into that range (e.g. a vnode). for submap's kernel
* objects, the only part of the object that can ever be populated is the
* offsets that are managed by the submap.
@@ -126,7 +126,7 @@
* uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
* kernel map]. if uvm_km_alloc returns virtual address 0xf8235000,
* then that means that the page at offset 0x235000 in kernel_object is
- * mapped at 0xf8235000.
+ * mapped at 0xf8235000.
*
* note that the offsets in kmem_object and mb_object also follow this
* rule. this means that the offsets for kmem_object must fall in the
@@ -151,10 +151,7 @@
* global data structures
*/
-vm_map_t kernel_map = NULL;
-
-struct vmi_list vmi_list;
-simple_lock_data_t vmi_list_slock;
+struct vm_map *kernel_map = NULL;
/*
* local data structues
@@ -187,12 +184,6 @@ uvm_km_init(start, end)
vaddr_t base = VM_MIN_KERNEL_ADDRESS;
/*
- * first, initialize the interrupt-safe map list.
- */
- LIST_INIT(&vmi_list);
- simple_lock_init(&vmi_list_slock);
-
- /*
* next, init kernel memory objects.
*/
@@ -211,7 +202,7 @@ uvm_km_init(start, end)
TAILQ_INIT(&kmem_object_store.memq);
kmem_object_store.uo_npages = 0;
/* we are special. we never die */
- kmem_object_store.uo_refs = UVM_OBJ_KERN_INTRSAFE;
+ kmem_object_store.uo_refs = UVM_OBJ_KERN_INTRSAFE;
uvmexp.kmem_object = &kmem_object_store;
/*
@@ -224,11 +215,11 @@ uvm_km_init(start, end)
TAILQ_INIT(&mb_object_store.memq);
mb_object_store.uo_npages = 0;
/* we are special. we never die */
- mb_object_store.uo_refs = UVM_OBJ_KERN_INTRSAFE;
+ mb_object_store.uo_refs = UVM_OBJ_KERN_INTRSAFE;
uvmexp.mb_object = &mb_object_store;
/*
- * init the map and reserve allready allocated kernel space
+ * init the map and reserve allready allocated kernel space
* before installing.
*/
@@ -238,7 +229,7 @@ uvm_km_init(start, end)
UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
UVM_INH_NONE, UVM_ADV_RANDOM,UVM_FLAG_FIXED)) != 0)
panic("uvm_km_init: could not reserve space for kernel");
-
+
/*
* install!
*/
@@ -334,7 +325,7 @@ uvm_km_pgremove(uobj, start, end)
/* choose cheapest traversal */
by_list = (uobj->uo_npages <=
((end - start) >> PAGE_SHIFT) * UKM_HASH_PENALTY);
-
+
if (by_list)
goto loop_by_list;
@@ -426,7 +417,7 @@ uvm_km_pgremove_intrsafe(uobj, start, end)
/* choose cheapest traversal */
by_list = (uobj->uo_npages <=
((end - start) >> PAGE_SHIFT) * UKM_HASH_PENALTY);
-
+
if (by_list)
goto loop_by_list;
@@ -481,7 +472,7 @@ loop_by_list:
vaddr_t
uvm_km_kmemalloc(map, obj, size, flags)
- vm_map_t map;
+ struct vm_map *map;
struct uvm_object *obj;
vsize_t size;
int flags;
@@ -509,7 +500,7 @@ uvm_km_kmemalloc(map, obj, size, flags)
if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
- UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK)))
+ UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK)))
!= 0)) {
UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
return(0);
@@ -546,7 +537,7 @@ uvm_km_kmemalloc(map, obj, size, flags)
UVM_PAGE_OWN(pg, NULL);
}
simple_unlock(&obj->vmobjlock);
-
+
/*
* out of memory?
*/
@@ -561,7 +552,7 @@ uvm_km_kmemalloc(map, obj, size, flags)
continue;
}
}
-
+
/*
* map it in: note that we call pmap_enter with the map and
* object unlocked in case we are kmem_map/kmem_object
@@ -581,6 +572,7 @@ uvm_km_kmemalloc(map, obj, size, flags)
offset += PAGE_SIZE;
loopsize -= PAGE_SIZE;
}
+ pmap_update();
UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
return(kva);
}
@@ -591,7 +583,7 @@ uvm_km_kmemalloc(map, obj, size, flags)
void
uvm_km_free(map, addr, size)
- vm_map_t map;
+ struct vm_map *map;
vaddr_t addr;
vsize_t size;
{
@@ -607,14 +599,14 @@ uvm_km_free(map, addr, size)
void
uvm_km_free_wakeup(map, addr, size)
- vm_map_t map;
+ struct vm_map *map;
vaddr_t addr;
vsize_t size;
{
- vm_map_entry_t dead_entries;
+ struct vm_map_entry *dead_entries;
vm_map_lock(map);
- uvm_unmap_remove(map, trunc_page(addr), round_page(addr + size),
+ uvm_unmap_remove(map, trunc_page(addr), round_page(addr + size),
&dead_entries);
wakeup(map);
vm_map_unlock(map);
@@ -630,7 +622,7 @@ uvm_km_free_wakeup(map, addr, size)
vaddr_t
uvm_km_alloc1(map, size, zeroit)
- vm_map_t map;
+ struct vm_map *map;
vsize_t size;
boolean_t zeroit;
{
@@ -684,7 +676,7 @@ uvm_km_alloc1(map, size, zeroit)
FALSE, "km_alloc", 0);
continue; /* retry */
}
-
+
/* allocate ram */
pg = uvm_pagealloc(uvm.kernel_object, offset, NULL, 0);
if (pg) {
@@ -696,7 +688,7 @@ uvm_km_alloc1(map, size, zeroit)
uvm_wait("km_alloc1w"); /* wait for memory */
continue;
}
-
+
/*
* map it in; note we're never called with an intrsafe
* object, so we always use regular old pmap_enter().
@@ -708,7 +700,9 @@ uvm_km_alloc1(map, size, zeroit)
offset += PAGE_SIZE;
size -= PAGE_SIZE;
}
-
+
+ pmap_update();
+
/*
* zero on request (note that "size" is now zero due to the above loop
* so we need to subtract kva from loopva to reconstruct the size).
@@ -729,7 +723,7 @@ uvm_km_alloc1(map, size, zeroit)
vaddr_t
uvm_km_valloc(map, size)
- vm_map_t map;
+ struct vm_map *map;
vsize_t size;
{
return(uvm_km_valloc_align(map, size, 0));
@@ -737,7 +731,7 @@ uvm_km_valloc(map, size)
vaddr_t
uvm_km_valloc_align(map, size, align)
- vm_map_t map;
+ struct vm_map *map;
vsize_t size;
vsize_t align;
{
@@ -776,7 +770,7 @@ uvm_km_valloc_align(map, size, align)
vaddr_t
uvm_km_valloc_prefer_wait(map, size, prefer)
- vm_map_t map;
+ struct vm_map *map;
vsize_t size;
voff_t prefer;
{
@@ -818,7 +812,7 @@ uvm_km_valloc_prefer_wait(map, size, prefer)
vaddr_t
uvm_km_valloc_wait(map, size)
- vm_map_t map;
+ struct vm_map *map;
vsize_t size;
{
return uvm_km_valloc_prefer_wait(map, size, UVM_UNKNOWN_OFFSET);
@@ -839,7 +833,7 @@ uvm_km_valloc_wait(map, size)
/* ARGSUSED */
vaddr_t
uvm_km_alloc_poolpage1(map, obj, waitok)
- vm_map_t map;
+ struct vm_map *map;
struct uvm_object *obj;
boolean_t waitok;
{
@@ -890,7 +884,7 @@ uvm_km_alloc_poolpage1(map, obj, waitok)
/* ARGSUSED */
void
uvm_km_free_poolpage1(map, addr)
- vm_map_t map;
+ struct vm_map *map;
vaddr_t addr;
{
#if defined(PMAP_UNMAP_POOLPAGE)
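
A small sketch of the offset rule described in the uvm_km.c comments
above: kernel object offsets are always relative to vm_map_min(kernel_map),
so the page mapped at KVA 0xf8235000 lives at offset 0x235000 in
kernel_object when the kernel map starts at 0xf8000000.  The helper name
is hypothetical and the fragment assumes the usual kernel/uvm headers;
it is an illustration, not part of the patch.

/*
 * illustrative only: convert a kernel virtual address into the
 * corresponding offset in a kernel object.
 */
voff_t
example_kva_to_kobj_offset(kva)
	vaddr_t kva;
{
	return ((voff_t)(kva - vm_map_min(kernel_map)));
}
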
diff --git a/sys/uvm/uvm_loan.c b/sys/uvm/uvm_loan.c
index 7d0926360d9..630460b1316 100644
--- a/sys/uvm/uvm_loan.c
+++ b/sys/uvm/uvm_loan.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_loan.c,v 1.14 2001/11/28 14:29:13 art Exp $ */
-/* $NetBSD: uvm_loan.c,v 1.28 2001/04/10 00:53:21 chuck Exp $ */
+/* $OpenBSD: uvm_loan.c,v 1.15 2001/11/28 19:28:14 art Exp $ */
+/* $NetBSD: uvm_loan.c,v 1.29 2001/05/25 04:06:14 chs Exp $ */
/*
*
@@ -49,7 +49,7 @@
#include <uvm/uvm.h>
/*
- * "loaned" pages are pages which are (read-only, copy-on-write) loaned
+ * "loaned" pages are pages which are (read-only, copy-on-write) loaned
* from the VM system to other parts of the kernel. this allows page
* copying to be avoided (e.g. you can loan pages from objs/anons to
* the mbuf system).
@@ -75,7 +75,7 @@
* object/anon which the page is owned by. this is a good side-effect,
* since a kernel write to a loaned page is an error.
*
- * owners that want to free their pages and discover that they are
+ * owners that want to free their pages and discover that they are
* loaned out simply "disown" them (the page becomes an orphan). these
* pages should be freed when the last loan is dropped. in some cases
* an anon may "adopt" an orphaned page.
@@ -92,7 +92,7 @@
* use "try" locking.
*
* loans are typically broken by the following events:
- * 1. user-level xwrite fault to a loaned page
+ * 1. user-level xwrite fault to a loaned page
* 2. pageout of clean+inactive O->A loaned page
* 3. owner frees page (e.g. pager flush)
*
@@ -105,10 +105,10 @@
* local prototypes
*/
-static int uvm_loananon __P((struct uvm_faultinfo *, void ***,
+static int uvm_loananon __P((struct uvm_faultinfo *, void ***,
int, struct vm_anon *));
static int uvm_loanentry __P((struct uvm_faultinfo *, void ***, int));
-static int uvm_loanuobj __P((struct uvm_faultinfo *, void ***,
+static int uvm_loanuobj __P((struct uvm_faultinfo *, void ***,
int, vaddr_t));
static int uvm_loanzero __P((struct uvm_faultinfo *, void ***, int));
@@ -209,7 +209,7 @@ uvm_loanentry(ufi, output, flags)
/*
* uvm_loan: loan pages in a map out to anons or to the kernel
- *
+ *
* => map should be unlocked
* => start and len should be multiples of PAGE_SIZE
* => result is either an array of anon's or vm_pages (depending on flags)
@@ -259,7 +259,7 @@ uvm_loan(map, start, len, result, flags)
ufi.orig_map = map;
ufi.orig_rvaddr = start;
ufi.orig_size = len;
-
+
/*
* do the lookup, the only time this will fail is if we hit on
* an unmapped region (an error)
@@ -282,10 +282,10 @@ uvm_loan(map, start, len, result, flags)
/*
* done! the map is locked only if rv > 0. if that
- * is the case, advance and unlock.
+ * is the case, advance and unlock.
*
* XXXCDC: could avoid the unlock with smarter code
- * (but it only happens on map entry boundaries,
+ * (but it only happens on map entry boundaries,
* so it isn't that bad).
*/
if (rv) {
@@ -295,7 +295,7 @@ uvm_loan(map, start, len, result, flags)
uvmfault_unlockmaps(&ufi, FALSE);
}
}
-
+
/*
* got it! return success.
*/
@@ -320,7 +320,7 @@ fail:
/*
* uvm_loananon: loan a page from an anon out
- *
+ *
* => called with map, amap, uobj locked
* => return value:
* -1 = fatal error, everything is unlocked, abort.
@@ -464,13 +464,13 @@ uvm_loanuobj(ufi, output, flags, va)
if (result == EBUSY) {
uvmfault_unlockall(ufi, amap, NULL, NULL);
-
+
npages = 1;
/* locked: uobj */
result = uobj->pgops->pgo_get(uobj, va - ufi->entry->start,
&pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, 0);
/* locked: <nothing> */
-
+
/*
* check for errors
*/
@@ -479,7 +479,7 @@ uvm_loanuobj(ufi, output, flags, va)
if (result == EAGAIN) {
tsleep(&lbolt, PVM, "fltagain2", 0);
return(0); /* redo the lookup and try again */
- }
+ }
return(-1); /* total failure */
}
@@ -497,15 +497,15 @@ uvm_loanuobj(ufi, output, flags, va)
* that amap slot is still free. if there is a problem we
* drop our lock (thus force a lookup refresh/retry).
*/
-
+
if ((pg->flags & PG_RELEASED) != 0 ||
(locked && amap && amap_lookup(&ufi->entry->aref,
ufi->orig_rvaddr - ufi->entry->start))) {
-
+
if (locked)
uvmfault_unlockall(ufi, amap, NULL, NULL);
locked = FALSE;
- }
+ }
/*
* didn't get the lock? release the page and retry.
@@ -563,7 +563,7 @@ uvm_loanuobj(ufi, output, flags, va)
/*
* must be a loan to an anon. check to see if there is already
* an anon associated with this page. if so, then just return
- * a reference to this object. the page should already be
+ * a reference to this object. the page should already be
* mapped read-only because it is already on loan.
*/
@@ -583,7 +583,7 @@ uvm_loanuobj(ufi, output, flags, va)
UVM_PAGE_OWN(pg, NULL);
return(1);
}
-
+
/*
* need to allocate a new anon
*/
@@ -640,7 +640,7 @@ uvm_loanzero(ufi, output, flags)
while ((pg = uvm_pagealloc(NULL, 0, NULL,
UVM_PGA_ZERO)) == NULL) {
- uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
+ uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
ufi->entry->object.uvm_obj, NULL);
uvm_wait("loanzero1");
if (!uvmfault_relock(ufi))
@@ -652,7 +652,7 @@ uvm_loanzero(ufi, output, flags)
&ufi->entry->object.uvm_obj->vmobjlock);
/* ... and try again */
}
-
+
/* got a zero'd page; return */
pg->flags &= ~(PG_BUSY|PG_FAKE);
UVM_PAGE_OWN(pg, NULL);
@@ -667,7 +667,7 @@ uvm_loanzero(ufi, output, flags)
}
/* loaning to an anon */
- while ((anon = uvm_analloc()) == NULL ||
+ while ((anon = uvm_analloc()) == NULL ||
(pg = uvm_pagealloc(NULL, 0, anon, UVM_PGA_ZERO)) == NULL) {
/* unlock everything */
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index da4bdd44f9f..67b856277aa 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,9 +1,9 @@
-/* $OpenBSD: uvm_map.c,v 1.32 2001/11/28 13:47:39 art Exp $ */
-/* $NetBSD: uvm_map.c,v 1.94 2001/03/15 06:10:57 chs Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.33 2001/11/28 19:28:14 art Exp $ */
+/* $NetBSD: uvm_map.c,v 1.99 2001/06/02 18:09:26 chs Exp $ */
-/*
+/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
- * Copyright (c) 1991, 1993, The Regents of the University of California.
+ * Copyright (c) 1991, 1993, The Regents of the University of California.
*
* All rights reserved.
*
@@ -21,7 +21,7 @@
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Charles D. Cranor,
- * Washington University, the University of California, Berkeley and
+ * Washington University, the University of California, Berkeley and
* its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
@@ -45,17 +45,17 @@
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
* All rights reserved.
- *
+ *
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
@@ -176,12 +176,12 @@ vaddr_t uvm_maxkaddr;
* local prototypes
*/
-static vm_map_entry_t uvm_mapent_alloc __P((vm_map_t));
-static void uvm_mapent_copy __P((vm_map_entry_t,vm_map_entry_t));
-static void uvm_mapent_free __P((vm_map_entry_t));
-static void uvm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
-static void uvm_map_reference_amap __P((vm_map_entry_t, int));
-static void uvm_map_unreference_amap __P((vm_map_entry_t, int));
+static struct vm_map_entry *uvm_mapent_alloc __P((struct vm_map *));
+static void uvm_mapent_copy __P((struct vm_map_entry *, struct vm_map_entry *));
+static void uvm_mapent_free __P((struct vm_map_entry *));
+static void uvm_map_entry_unwire __P((struct vm_map *, struct vm_map_entry *));
+static void uvm_map_reference_amap __P((struct vm_map_entry *, int));
+static void uvm_map_unreference_amap __P((struct vm_map_entry *, int));
/*
* local inlines
@@ -193,11 +193,11 @@ static void uvm_map_unreference_amap __P((vm_map_entry_t, int));
* => XXX: static pool for kernel map?
*/
-static __inline vm_map_entry_t
+static __inline struct vm_map_entry *
uvm_mapent_alloc(map)
- vm_map_t map;
+ struct vm_map *map;
{
- vm_map_entry_t me;
+ struct vm_map_entry *me;
int s;
UVMHIST_FUNC("uvm_mapent_alloc");
UVMHIST_CALLED(maphist);
@@ -219,7 +219,7 @@ uvm_mapent_alloc(map)
me->flags = UVM_MAP_STATIC;
}
- UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]",
+ UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]",
me, ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map)
? TRUE : FALSE, 0, 0);
return(me);
@@ -233,12 +233,12 @@ uvm_mapent_alloc(map)
static __inline void
uvm_mapent_free(me)
- vm_map_entry_t me;
+ struct vm_map_entry *me;
{
int s;
UVMHIST_FUNC("uvm_mapent_free");
UVMHIST_CALLED(maphist);
- UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]",
+ UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]",
me, me->flags, 0, 0);
if ((me->flags & UVM_MAP_STATIC) == 0) {
pool_put(&uvm_map_entry_pool, me);
@@ -258,11 +258,11 @@ uvm_mapent_free(me)
static __inline void
uvm_mapent_copy(src, dst)
- vm_map_entry_t src;
- vm_map_entry_t dst;
+ struct vm_map_entry *src;
+ struct vm_map_entry *dst;
{
-
- memcpy(dst, src, ((char *)&src->uvm_map_entry_stop_copy) - ((char*)src));
+ memcpy(dst, src,
+ ((char *)&src->uvm_map_entry_stop_copy) - ((char *)src));
}
/*
@@ -273,10 +273,9 @@ uvm_mapent_copy(src, dst)
static __inline void
uvm_map_entry_unwire(map, entry)
- vm_map_t map;
- vm_map_entry_t entry;
+ struct vm_map *map;
+ struct vm_map_entry *entry;
{
-
entry->wired_count = 0;
uvm_fault_unwire_locked(map, entry->start, entry->end);
}
@@ -287,34 +286,34 @@ uvm_map_entry_unwire(map, entry)
*/
static __inline void
uvm_map_reference_amap(entry, flags)
- vm_map_entry_t entry;
+ struct vm_map_entry *entry;
int flags;
{
- amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
+ amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
(entry->end - entry->start) >> PAGE_SHIFT, flags);
}
/*
- * wrapper for calling amap_unref()
+ * wrapper for calling amap_unref()
*/
static __inline void
uvm_map_unreference_amap(entry, flags)
- vm_map_entry_t entry;
+ struct vm_map_entry *entry;
int flags;
{
- amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff,
+ amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff,
(entry->end - entry->start) >> PAGE_SHIFT, flags);
}
/*
* uvm_map_init: init mapping system at boot time. note that we allocate
- * and init the static pool of vm_map_entry_t's for the kernel here.
+ * and init the static pool of struct vm_map_entry *'s for the kernel here.
*/
void
-uvm_map_init()
+uvm_map_init()
{
static struct vm_map_entry kernel_map_entry[MAX_KMAPENT];
#if defined(UVMHIST)
@@ -369,18 +368,19 @@ uvm_map_init()
/*
* uvm_map_clip_start: ensure that the entry begins at or after
* the starting address, if it doesn't we split the entry.
- *
+ *
* => caller should use UVM_MAP_CLIP_START macro rather than calling
* this directly
* => map must be locked by caller
*/
-void uvm_map_clip_start(map, entry, start)
- vm_map_t map;
- vm_map_entry_t entry;
- vaddr_t start;
+void
+uvm_map_clip_start(map, entry, start)
+ struct vm_map *map;
+ struct vm_map_entry *entry;
+ vaddr_t start;
{
- vm_map_entry_t new_entry;
+ struct vm_map_entry *new_entry;
vaddr_t new_adj;
/* uvm_map_simplify_entry(map, entry); */ /* XXX */
@@ -394,7 +394,7 @@ void uvm_map_clip_start(map, entry, start)
new_entry = uvm_mapent_alloc(map);
uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
- new_entry->end = start;
+ new_entry->end = start;
new_adj = start - new_entry->start;
if (entry->object.uvm_obj)
entry->offset += new_adj; /* shift start over */
@@ -410,7 +410,7 @@ void uvm_map_clip_start(map, entry, start)
/* ... unlikely to happen, but play it safe */
uvm_map_reference(new_entry->object.sub_map);
} else {
- if (UVM_ET_ISOBJ(entry) &&
+ if (UVM_ET_ISOBJ(entry) &&
entry->object.uvm_obj->pgops &&
entry->object.uvm_obj->pgops->pgo_reference)
entry->object.uvm_obj->pgops->pgo_reference(
@@ -421,7 +421,7 @@ void uvm_map_clip_start(map, entry, start)
/*
* uvm_map_clip_end: ensure that the entry ends at or before
* the ending address, if it does't we split the reference
- *
+ *
* => caller should use UVM_MAP_CLIP_END macro rather than calling
* this directly
* => map must be locked by caller
@@ -429,11 +429,11 @@ void uvm_map_clip_start(map, entry, start)
void
uvm_map_clip_end(map, entry, end)
- vm_map_t map;
- vm_map_entry_t entry;
+ struct vm_map *map;
+ struct vm_map_entry *entry;
vaddr_t end;
{
- vm_map_entry_t new_entry;
+ struct vm_map_entry * new_entry;
vaddr_t new_adj; /* #bytes we move start forward */
/*
@@ -483,7 +483,7 @@ uvm_map_clip_end(map, entry, end)
* [2] <NULL,UVM_UNKNOWN_OFFSET> == don't PMAP_PREFER
* [3] <uobj,uoffset> == normal mapping
* [4] <uobj,UVM_UNKNOWN_OFFSET> == uvm_map finds offset based on VA
- *
+ *
* case [4] is for kernel mappings where we don't know the offset until
* we've found a virtual address. note that kernel object offsets are
* always relative to vm_map_min(kernel_map).
@@ -498,7 +498,7 @@ uvm_map_clip_end(map, entry, end)
int
uvm_map(map, startp, size, uobj, uoffset, align, flags)
- vm_map_t map;
+ struct vm_map *map;
vaddr_t *startp; /* IN/OUT */
vsize_t size;
struct uvm_object *uobj;
@@ -506,7 +506,7 @@ uvm_map(map, startp, size, uobj, uoffset, align, flags)
vsize_t align;
uvm_flag_t flags;
{
- vm_map_entry_t prev_entry, new_entry;
+ struct vm_map_entry *prev_entry, *new_entry;
vm_prot_t prot = UVM_PROTECTION(flags), maxprot =
UVM_MAXPROTECTION(flags);
vm_inherit_t inherit = UVM_INHERIT(flags);
@@ -523,7 +523,7 @@ uvm_map(map, startp, size, uobj, uoffset, align, flags)
*/
if ((prot & maxprot) != prot) {
- UVMHIST_LOG(maphist, "<- prot. failure: prot=0x%x, max=0x%x",
+ UVMHIST_LOG(maphist, "<- prot. failure: prot=0x%x, max=0x%x",
prot, maxprot,0,0);
return EACCES;
}
@@ -537,7 +537,7 @@ uvm_map(map, startp, size, uobj, uoffset, align, flags)
return EAGAIN;
vm_map_lock(map); /* could sleep here */
}
- if ((prev_entry = uvm_map_findspace(map, *startp, size, startp,
+ if ((prev_entry = uvm_map_findspace(map, *startp, size, startp,
uobj, uoffset, align, flags)) == NULL) {
UVMHIST_LOG(maphist,"<- uvm_map_findspace failed!",0,0,0,0);
vm_map_unlock(map);
@@ -559,11 +559,11 @@ uvm_map(map, startp, size, uobj, uoffset, align, flags)
/*
* if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER
- * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET. in
- * either case we want to zero it before storing it in the map entry
+ * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET. in
+ * either case we want to zero it before storing it in the map entry
* (because it looks strange and confusing when debugging...)
- *
- * if uobj is not null
+ *
+ * if uobj is not null
* if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping
* and we do not need to change uoffset.
* if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset
@@ -589,7 +589,7 @@ uvm_map(map, startp, size, uobj, uoffset, align, flags)
* for a stack, but we are currently allocating our stack in advance.
*/
- if ((flags & UVM_FLAG_NOMERGE) == 0 &&
+ if ((flags & UVM_FLAG_NOMERGE) == 0 &&
prev_entry->end == *startp && prev_entry != &map->header &&
prev_entry->object.uvm_obj == uobj) {
@@ -600,7 +600,7 @@ uvm_map(map, startp, size, uobj, uoffset, align, flags)
if (UVM_ET_ISSUBMAP(prev_entry))
goto step3;
- if (prev_entry->protection != prot ||
+ if (prev_entry->protection != prot ||
prev_entry->max_protection != maxprot)
goto step3;
@@ -610,10 +610,10 @@ uvm_map(map, startp, size, uobj, uoffset, align, flags)
/* wiring status must match (new area is unwired) */
if (VM_MAPENT_ISWIRED(prev_entry))
- goto step3;
+ goto step3;
/*
- * can't extend a shared amap. note: no need to lock amap to
+ * can't extend a shared amap. note: no need to lock amap to
* look at refs since we don't care about its exact value.
* if it is one (i.e. we have only reference) it will stay there
*/
@@ -652,11 +652,11 @@ step3:
/*
* check for possible forward merge (which we don't do) and count
- * the number of times we missed a *possible* chance to merge more
+ * the number of times we missed a *possible* chance to merge more
*/
if ((flags & UVM_FLAG_NOMERGE) == 0 &&
- prev_entry->next != &map->header &&
+ prev_entry->next != &map->header &&
prev_entry->next->start == (*startp + size))
UVMCNT_INCR(map_forwmerge);
@@ -670,7 +670,7 @@ step3:
new_entry->object.uvm_obj = uobj;
new_entry->offset = uoffset;
- if (uobj)
+ if (uobj)
new_entry->etype = UVM_ET_OBJ;
else
new_entry->etype = 0;
@@ -691,7 +691,7 @@ step3:
* to_add: for BSS we overallocate a little since we
* are likely to extend
*/
- vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
+ vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
UVM_AMAP_CHUNK << PAGE_SHIFT : 0;
struct vm_amap *amap = amap_alloc(size, to_add, M_WAITOK);
new_entry->aref.ar_pageoff = 0;
@@ -700,9 +700,7 @@ step3:
new_entry->aref.ar_pageoff = 0;
new_entry->aref.ar_amap = NULL;
}
-
uvm_map_entry_link(map, prev_entry, new_entry);
-
map->size += size;
/*
@@ -728,12 +726,12 @@ step3:
boolean_t
uvm_map_lookup_entry(map, address, entry)
- vm_map_t map;
+ struct vm_map *map;
vaddr_t address;
- vm_map_entry_t *entry; /* OUT */
+ struct vm_map_entry **entry; /* OUT */
{
- vm_map_entry_t cur;
- vm_map_entry_t last;
+ struct vm_map_entry *cur;
+ struct vm_map_entry *last;
UVMHIST_FUNC("uvm_map_lookup_entry");
UVMHIST_CALLED(maphist);
@@ -754,6 +752,7 @@ uvm_map_lookup_entry(map, address, entry)
UVMCNT_INCR(uvm_mlk_call);
if (address >= cur->start) {
+
/*
* go from hint to end of list.
*
@@ -765,6 +764,7 @@ uvm_map_lookup_entry(map, address, entry)
* at the header, in which case the hint didn't
* buy us anything anyway).
*/
+
last = &map->header;
if ((cur != last) && (cur->end > address)) {
UVMCNT_INCR(uvm_mlk_hint);
@@ -774,9 +774,11 @@ uvm_map_lookup_entry(map, address, entry)
return (TRUE);
}
} else {
+
/*
* go from start to hint, *inclusively*
*/
+
last = cur->next;
cur = map->header.next;
}
@@ -822,9 +824,9 @@ uvm_map_lookup_entry(map, address, entry)
* => note this is a cross between the old vm_map_findspace and vm_map_find
*/
-vm_map_entry_t
+struct vm_map_entry *
uvm_map_findspace(map, hint, length, result, uobj, uoffset, align, flags)
- vm_map_t map;
+ struct vm_map *map;
vaddr_t hint;
vsize_t length;
vaddr_t *result; /* OUT */
@@ -833,12 +835,12 @@ uvm_map_findspace(map, hint, length, result, uobj, uoffset, align, flags)
vsize_t align;
int flags;
{
- vm_map_entry_t entry, next, tmp;
+ struct vm_map_entry *entry, *next, *tmp;
vaddr_t end, orig_hint;
UVMHIST_FUNC("uvm_map_findspace");
UVMHIST_CALLED(maphist);
- UVMHIST_LOG(maphist, "(map=0x%x, hint=0x%x, len=%d, flags=0x%x)",
+ UVMHIST_LOG(maphist, "(map=0x%x, hint=0x%x, len=%d, flags=0x%x)",
map, hint, length, flags);
KASSERT((align & (align - 1)) == 0);
KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);
@@ -869,7 +871,7 @@ uvm_map_findspace(map, hint, length, result, uobj, uoffset, align, flags)
*/
if ((flags & UVM_FLAG_FIXED) == 0 && hint == map->min_offset) {
- if ((entry = map->first_free) != &map->header)
+ if ((entry = map->first_free) != &map->header)
hint = entry->end;
} else {
if (uvm_map_lookup_entry(map, hint, &tmp)) {
@@ -890,7 +892,9 @@ uvm_map_findspace(map, hint, length, result, uobj, uoffset, align, flags)
* note: entry->end = base VA of current gap,
* next->start = VA of end of current gap
*/
+
for (;; hint = (entry = next)->end) {
+
/*
* Find the end of the proposed new region. Be sure we didn't
* go beyond the end of the map, or wrap around the address;
@@ -904,6 +908,7 @@ uvm_map_findspace(map, hint, length, result, uobj, uoffset, align, flags)
* push hint forward as needed to avoid VAC alias problems.
* we only do this if a valid offset is specified.
*/
+
if ((flags & UVM_FLAG_FIXED) == 0 &&
uoffset != UVM_UNKNOWN_OFFSET)
PMAP_PREFER(uoffset, &hint);
@@ -948,7 +953,7 @@ uvm_map_findspace(map, hint, length, result, uobj, uoffset, align, flags)
/*
* uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "stop")
*
- * => caller must check alignment and size
+ * => caller must check alignment and size
* => map must be locked by caller
* => we return a list of map entries that we've remove from the map
* in "entry_list"
@@ -956,14 +961,13 @@ uvm_map_findspace(map, hint, length, result, uobj, uoffset, align, flags)
void
uvm_unmap_remove(map, start, end, entry_list)
- vm_map_t map;
- vaddr_t start,end;
- vm_map_entry_t *entry_list; /* OUT */
+ struct vm_map *map;
+ vaddr_t start, end;
+ struct vm_map_entry **entry_list; /* OUT */
{
- vm_map_entry_t entry, first_entry, next;
+ struct vm_map_entry *entry, *first_entry, *next;
vaddr_t len;
- UVMHIST_FUNC("uvm_unmap_remove");
- UVMHIST_CALLED(maphist);
+ UVMHIST_FUNC("uvm_unmap_remove"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist,"(map=0x%x, start=0x%x, end=0x%x)",
map, start, end, 0);
@@ -973,13 +977,13 @@ uvm_unmap_remove(map, start, end, entry_list)
/*
* find first entry
*/
+
if (uvm_map_lookup_entry(map, start, &first_entry) == TRUE) {
/* clip and go... */
entry = first_entry;
UVM_MAP_CLIP_START(map, entry, start);
/* critical! prevents stale hint */
SAVE_HINT(map, entry, entry->prev);
-
} else {
entry = first_entry->next;
}
@@ -1003,13 +1007,14 @@ uvm_unmap_remove(map, start, end, entry_list)
* [3] dropping references may trigger pager I/O, and if we hit
* a pager that does synchronous I/O we may have to wait for it.
* [4] we would like all waiting for I/O to occur with maps unlocked
- * so that we don't block other threads.
+ * so that we don't block other threads.
*/
+
first_entry = NULL;
*entry_list = NULL; /* to be safe */
/*
- * break up the area into map entry sized regions and unmap. note
+ * break up the area into map entry sized regions and unmap. note
* that all mappings have to be removed before we can even consider
* dropping references to amaps or VM objects (otherwise we could end
* up with a mapping to a page on the free list which would be very bad)
@@ -1017,7 +1022,7 @@ uvm_unmap_remove(map, start, end, entry_list)
while ((entry != &map->header) && (entry->start < end)) {
- UVM_MAP_CLIP_END(map, entry, end);
+ UVM_MAP_CLIP_END(map, entry, end);
next = entry->next;
len = entry->end - entry->start;
@@ -1033,6 +1038,7 @@ uvm_unmap_remove(map, start, end, entry_list)
* special case: handle mappings to anonymous kernel objects.
* we want to free these pages right away...
*/
+
if (UVM_ET_ISOBJ(entry) &&
UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
KASSERT(vm_map_pmap(map) == pmap_kernel());
@@ -1058,8 +1064,8 @@ uvm_unmap_remove(map, start, end, entry_list)
* doesn't hurt to call uvm_km_pgremove just to be
* safe?]
*
- * uvm_km_pgremove currently does the following:
- * for pages in the kernel object in range:
+ * uvm_km_pgremove currently does the following:
+ * for pages in the kernel object in range:
* - drops the swap slot
* - uvm_pagefree the page
*
@@ -1072,6 +1078,7 @@ uvm_unmap_remove(map, start, end, entry_list)
* from the object. offsets are always relative
* to vm_map_min(kernel_map).
*/
+
if (UVM_OBJ_IS_INTRSAFE_OBJECT(entry->object.uvm_obj)) {
pmap_kremove(entry->start, len);
uvm_km_pgremove_intrsafe(entry->object.uvm_obj,
@@ -1089,20 +1096,24 @@ uvm_unmap_remove(map, start, end, entry_list)
* null out kernel_object reference, we've just
* dropped it
*/
+
entry->etype &= ~UVM_ET_OBJ;
entry->object.uvm_obj = NULL; /* to be safe */
} else {
+
/*
* remove mappings the standard way.
*/
+
pmap_remove(map->pmap, entry->start, entry->end);
}
/*
- * remove entry from map and put it on our list of entries
+ * remove entry from map and put it on our list of entries
* that we've nuked. then go do next entry.
*/
+
UVMHIST_LOG(maphist, " removed map entry 0x%x", entry, 0, 0,0);
/* critical! prevents stale hint */
@@ -1114,10 +1125,11 @@ uvm_unmap_remove(map, start, end, entry_list)
first_entry = entry;
entry = next; /* next entry, please */
}
+ pmap_update();
/*
* now we've cleaned up the map and are ready for the caller to drop
- * references to the mapped objects.
+ * references to the mapped objects.
*/
*entry_list = first_entry;
@@ -1132,17 +1144,17 @@ uvm_unmap_remove(map, start, end, entry_list)
void
uvm_unmap_detach(first_entry, flags)
- vm_map_entry_t first_entry;
+ struct vm_map_entry *first_entry;
int flags;
{
- vm_map_entry_t next_entry;
+ struct vm_map_entry *next_entry;
UVMHIST_FUNC("uvm_unmap_detach"); UVMHIST_CALLED(maphist);
while (first_entry) {
KASSERT(!VM_MAPENT_ISWIRED(first_entry));
UVMHIST_LOG(maphist,
- " detach 0x%x: amap=0x%x, obj=0x%x, submap?=%d",
- first_entry, first_entry->aref.ar_amap,
+ " detach 0x%x: amap=0x%x, obj=0x%x, submap?=%d",
+ first_entry, first_entry->aref.ar_amap,
first_entry->object.uvm_obj,
UVM_ET_ISSUBMAP(first_entry));
@@ -1166,7 +1178,6 @@ uvm_unmap_detach(first_entry, flags)
first_entry->object.uvm_obj->pgops->
pgo_detach(first_entry->object.uvm_obj);
}
-
next_entry = first_entry->next;
uvm_mapent_free(first_entry);
first_entry = next_entry;
@@ -1178,10 +1189,10 @@ uvm_unmap_detach(first_entry, flags)
* E X T R A C T I O N F U N C T I O N S
*/
-/*
+/*
* uvm_map_reserve: reserve space in a vm_map for future use.
*
- * => we reserve space in a map by putting a dummy map entry in the
+ * => we reserve space in a map by putting a dummy map entry in the
* map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
* => map should be unlocked (we will write lock it)
* => we return true if we were able to reserve space
@@ -1190,13 +1201,13 @@ uvm_unmap_detach(first_entry, flags)
int
uvm_map_reserve(map, size, offset, align, raddr)
- vm_map_t map;
+ struct vm_map *map;
vsize_t size;
vaddr_t offset; /* hint for pmap_prefer */
vsize_t align; /* alignment hint */
vaddr_t *raddr; /* IN:hint, OUT: reserved VA */
{
- UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);
+ UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, offset=0x%x,addr=0x%x)",
map,size,offset,raddr);
@@ -1214,17 +1225,17 @@ uvm_map_reserve(map, size, offset, align, raddr)
UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
return (FALSE);
- }
+ }
UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr,0,0,0);
return (TRUE);
}
/*
- * uvm_map_replace: replace a reserved (blank) area of memory with
+ * uvm_map_replace: replace a reserved (blank) area of memory with
* real mappings.
*
- * => caller must WRITE-LOCK the map
+ * => caller must WRITE-LOCK the map
* => we return TRUE if replacement was a success
* => we expect the newents chain to have nnewents entrys on it and
* we expect newents->prev to point to the last entry on the list
@@ -1235,10 +1246,10 @@ int
uvm_map_replace(map, start, end, newents, nnewents)
struct vm_map *map;
vaddr_t start, end;
- vm_map_entry_t newents;
+ struct vm_map_entry *newents;
int nnewents;
{
- vm_map_entry_t oldent, last;
+ struct vm_map_entry *oldent, *last;
/*
* first find the blank map entry at the specified address
@@ -1252,17 +1263,19 @@ uvm_map_replace(map, start, end, newents, nnewents)
* check to make sure we have a proper blank entry
*/
- if (oldent->start != start || oldent->end != end ||
+ if (oldent->start != start || oldent->end != end ||
oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
return (FALSE);
}
#ifdef DIAGNOSTIC
+
/*
* sanity check the newents chain
*/
+
{
- vm_map_entry_t tmpent = newents;
+ struct vm_map_entry *tmpent = newents;
int nent = 0;
vaddr_t cur = start;
@@ -1296,8 +1309,7 @@ uvm_map_replace(map, start, end, newents, nnewents)
*/
if (newents) {
-
- last = newents->prev; /* we expect this */
+ last = newents->prev;
/* critical: flush stale hints out of map */
SAVE_HINT(map, map->hint, newents);
@@ -1350,15 +1362,15 @@ uvm_map_replace(map, start, end, newents, nnewents)
int
uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
- vm_map_t srcmap, dstmap;
+ struct vm_map *srcmap, *dstmap;
vaddr_t start, *dstaddrp;
vsize_t len;
int flags;
{
vaddr_t dstaddr, end, newend, oldoffset, fudge, orig_fudge,
oldstart;
- vm_map_entry_t chain, endchain, entry, orig_entry, newentry, deadentry;
- vm_map_entry_t oldentry;
+ struct vm_map_entry *chain, *endchain, *entry, *orig_entry, *newentry,
+ *deadentry, *oldentry;
vsize_t elen;
int nchain, error, copy_ok;
UVMHIST_FUNC("uvm_map_extract"); UVMHIST_CALLED(maphist);
@@ -1388,7 +1400,7 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
UVMHIST_LOG(maphist, " dstaddr=0x%x", dstaddr,0,0,0);
/*
- * step 2: setup for the extraction process loop by init'ing the
+ * step 2: setup for the extraction process loop by init'ing the
* map entry chain, locking src map, and looking up the first useful
* entry in the map.
*/
@@ -1498,8 +1510,8 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
newentry->offset = 0;
}
newentry->etype = entry->etype;
- newentry->protection = (flags & UVM_EXTRACT_FIXPROT) ?
- entry->max_protection : entry->protection;
+ newentry->protection = (flags & UVM_EXTRACT_FIXPROT) ?
+ entry->max_protection : entry->protection;
newentry->max_protection = entry->max_protection;
newentry->inheritance = entry->inheritance;
newentry->wired_count = 0;
@@ -1524,7 +1536,7 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
}
/* end of 'while' loop! */
- if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end &&
+ if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end &&
(entry->next == &srcmap->header ||
entry->next->start != entry->end)) {
error = EINVAL;
@@ -1543,7 +1555,7 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
/*
* step 5: attempt to lock the dest map so we can pmap_copy.
- * note usage of copy_ok:
+ * note usage of copy_ok:
* 1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5)
* 0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7
*/
@@ -1594,7 +1606,7 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
/* we advance "entry" in the following if statement */
if (flags & UVM_EXTRACT_REMOVE) {
- pmap_remove(srcmap->pmap, entry->start,
+ pmap_remove(srcmap->pmap, entry->start,
entry->end);
oldentry = entry; /* save entry */
entry = entry->next; /* advance */
@@ -1609,6 +1621,7 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
/* end of 'while' loop */
fudge = 0;
}
+ pmap_update();
/*
* unlock dstmap. we will dispose of deadentry in
@@ -1618,9 +1631,9 @@ uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
if (copy_ok && srcmap != dstmap)
vm_map_unlock(dstmap);
+ } else {
+ deadentry = NULL;
}
- else
- deadentry = NULL; /* XXX: gcc */
/*
* step 7: we are done with the source map, unlock. if copy_ok
@@ -1671,7 +1684,7 @@ bad2: /* src already unlocked */
* call [with uobj==NULL] to create a blank map entry in the main map.
* [And it had better still be blank!]
* => maps which contain submaps should never be copied or forked.
- * => to remove a submap, use uvm_unmap() on the main map
+ * => to remove a submap, use uvm_unmap() on the main map
* and then uvm_map_deallocate() the submap.
* => main map must be unlocked.
* => submap must have been init'd and have a zero reference count.
@@ -1680,10 +1693,10 @@ bad2: /* src already unlocked */
int
uvm_map_submap(map, start, end, submap)
- vm_map_t map, submap;
+ struct vm_map *map, *submap;
vaddr_t start, end;
{
- vm_map_entry_t entry;
+ struct vm_map_entry *entry;
int error;
vm_map_lock(map);
@@ -1697,7 +1710,7 @@ uvm_map_submap(map, start, end, submap)
entry = NULL;
}
- if (entry != NULL &&
+ if (entry != NULL &&
entry->start == start && entry->end == end &&
entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
!UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {
@@ -1726,12 +1739,12 @@ uvm_map_submap(map, start, end, submap)
int
uvm_map_protect(map, start, end, new_prot, set_max)
- vm_map_t map;
+ struct vm_map *map;
vaddr_t start, end;
vm_prot_t new_prot;
boolean_t set_max;
{
- vm_map_entry_t current, entry;
+ struct vm_map_entry *current, *entry;
int error = 0;
UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_prot=0x%x)",
@@ -1777,7 +1790,7 @@ uvm_map_protect(map, start, end, new_prot, set_max)
current->protection = new_prot;
/*
- * update physical map if necessary. worry about copy-on-write
+ * update physical map if necessary. worry about copy-on-write
* here -- CHECK THIS XXX
*/
@@ -1800,6 +1813,7 @@ uvm_map_protect(map, start, end, new_prot, set_max)
if (uvm_map_pageable(map, entry->start,
entry->end, FALSE,
UVM_LK_ENTER|UVM_LK_EXIT) != 0) {
+
/*
* If locking the entry fails, remember the
* error if it's the first one. Note we
@@ -1813,12 +1827,13 @@ uvm_map_protect(map, start, end, new_prot, set_max)
* XXX what uvm_map_protect() itself would
* XXX normally return.
*/
+
error = ENOMEM;
}
}
-
current = current->next;
}
+ pmap_update();
out:
vm_map_unlock(map);
@@ -1828,7 +1843,7 @@ uvm_map_protect(map, start, end, new_prot, set_max)
#undef MASK
-/*
+/*
* uvm_map_inherit: set inheritance code for range of addrs in map.
*
* => map must be unlocked
@@ -1838,12 +1853,12 @@ uvm_map_protect(map, start, end, new_prot, set_max)
int
uvm_map_inherit(map, start, end, new_inheritance)
- vm_map_t map;
+ struct vm_map *map;
vaddr_t start;
vaddr_t end;
vm_inherit_t new_inheritance;
{
- vm_map_entry_t entry, temp_entry;
+ struct vm_map_entry *entry, *temp_entry;
UVMHIST_FUNC("uvm_map_inherit"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_inh=0x%x)",
map, start, end, new_inheritance);
@@ -1858,10 +1873,8 @@ uvm_map_inherit(map, start, end, new_inheritance)
return EINVAL;
}
- vm_map_lock(map);
-
+ vm_map_lock(map);
VM_MAP_RANGE_CHECK(map, start, end);
-
if (uvm_map_lookup_entry(map, start, &temp_entry)) {
entry = temp_entry;
UVM_MAP_CLIP_START(map, entry, start);
@@ -1874,13 +1887,12 @@ uvm_map_inherit(map, start, end, new_inheritance)
entry->inheritance = new_inheritance;
entry = entry->next;
}
-
vm_map_unlock(map);
UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
return 0;
}
-/*
+/*
* uvm_map_advice: set advice code for range of addrs in map.
*
* => map must be unlocked
@@ -1888,12 +1900,12 @@ uvm_map_inherit(map, start, end, new_inheritance)
int
uvm_map_advice(map, start, end, new_advice)
- vm_map_t map;
+ struct vm_map *map;
vaddr_t start;
vaddr_t end;
int new_advice;
{
- vm_map_entry_t entry, temp_entry;
+ struct vm_map_entry *entry, *temp_entry;
UVMHIST_FUNC("uvm_map_advice"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_adv=0x%x)",
map, start, end, new_advice);
@@ -1951,12 +1963,12 @@ uvm_map_advice(map, start, end, new_advice)
int
uvm_map_pageable(map, start, end, new_pageable, lockflags)
- vm_map_t map;
+ struct vm_map *map;
vaddr_t start, end;
boolean_t new_pageable;
int lockflags;
{
- vm_map_entry_t entry, start_entry, failed_entry;
+ struct vm_map_entry *entry, *start_entry, *failed_entry;
int rv;
#ifdef DIAGNOSTIC
u_int timestamp_save;
@@ -1970,12 +1982,12 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
vm_map_lock(map);
VM_MAP_RANGE_CHECK(map, start, end);
- /*
+ /*
* only one pageability change may take place at one time, since
* uvm_fault_wire assumes it will be called only once for each
* wiring/unwiring. therefore, we have to make sure we're actually
* changing the pageability for the entire region. we do so before
- * making any changes.
+ * making any changes.
*/
if (uvm_map_lookup_entry(map, start, &start_entry) == FALSE) {
@@ -1987,7 +1999,7 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
}
entry = start_entry;
- /*
+ /*
* handle wiring and unwiring separately.
*/
@@ -1996,7 +2008,7 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
/*
* unwiring. first ensure that the range to be unwired is
- * really wired down and that there are no holes.
+ * really wired down and that there are no holes.
*/
while ((entry != &map->header) && (entry->start < end)) {
@@ -2012,7 +2024,7 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
entry = entry->next;
}
- /*
+ /*
* POSIX 1003.1b - a single munlock call unlocks a region,
* regardless of the number of mlock calls made on that
* region.
@@ -2036,7 +2048,7 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
*
* 1: holding the write lock, we create any anonymous maps that need
* to be created. then we clip each map entry to the region to
- * be wired and increment its wiring count.
+ * be wired and increment its wiring count.
*
* 2: we downgrade to a read lock, and call uvm_fault_wire to fault
* in the pages for any newly wired area (wired_count == 1).
@@ -2064,11 +2076,11 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
*/
if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
- if (UVM_ET_ISNEEDSCOPY(entry) &&
+ if (UVM_ET_ISNEEDSCOPY(entry) &&
((entry->protection & VM_PROT_WRITE) ||
(entry->object.uvm_obj == NULL))) {
amap_copy(map, entry, M_WAITOK, TRUE,
- start, end);
+ start, end);
/* XXXCDC: wait OK? */
}
}
@@ -2078,7 +2090,7 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
entry->wired_count++;
/*
- * Check for holes
+ * Check for holes
*/
if (entry->protection == VM_PROT_NONE ||
@@ -2088,7 +2100,7 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
/*
* found one. amap creation actions do not need to
- * be undone, but the wired counts need to be restored.
+ * be undone, but the wired counts need to be restored.
*/
while (entry != &map->header && entry->end > start) {
@@ -2205,11 +2217,11 @@ uvm_map_pageable(map, start, end, new_pageable, lockflags)
int
uvm_map_pageable_all(map, flags, limit)
- vm_map_t map;
+ struct vm_map *map;
int flags;
vsize_t limit;
{
- vm_map_entry_t entry, failed_entry;
+ struct vm_map_entry *entry, *failed_entry;
vsize_t size;
int rv;
#ifdef DIAGNOSTIC
@@ -2227,10 +2239,12 @@ uvm_map_pageable_all(map, flags, limit)
*/
if (flags == 0) { /* unwire */
+
/*
* POSIX 1003.1b -- munlockall unlocks all regions,
* regardless of how many times mlockall has been called.
*/
+
for (entry = map->header.next; entry != &map->header;
entry = entry->next) {
if (VM_MAPENT_ISWIRED(entry))
@@ -2240,23 +2254,23 @@ uvm_map_pageable_all(map, flags, limit)
vm_map_unlock(map);
UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
return 0;
-
- /*
- * end of unwire case!
- */
}
if (flags & MCL_FUTURE) {
+
/*
* must wire all future mappings; remember this.
*/
+
vm_map_modflags(map, VM_MAP_WIREFUTURE, 0);
}
if ((flags & MCL_CURRENT) == 0) {
+
/*
* no more work to do!
*/
+
UVMHIST_LOG(maphist,"<- done (OK no wire)",0,0,0,0);
vm_map_unlock(map);
return 0;
@@ -2316,6 +2330,7 @@ uvm_map_pageable_all(map, flags, limit)
if (entry->protection == VM_PROT_NONE)
continue;
if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
+
/*
* perform actions of vm_map_lookup that need the
* write lock on the map: create an anonymous map
@@ -2323,8 +2338,9 @@ uvm_map_pageable_all(map, flags, limit)
* for a zero-fill region. (XXXCDC: submap case
* ok?)
*/
+
if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
- if (UVM_ET_ISNEEDSCOPY(entry) &&
+ if (UVM_ET_ISNEEDSCOPY(entry) &&
((entry->protection & VM_PROT_WRITE) ||
(entry->object.uvm_obj == NULL))) {
amap_copy(map, entry, M_WAITOK, TRUE,
@@ -2353,20 +2369,24 @@ uvm_map_pageable_all(map, flags, limit)
rv = uvm_fault_wire(map, entry->start, entry->end,
entry->protection);
if (rv) {
+
/*
* wiring failed. break out of the loop.
* we'll clean up the map below, once we
* have a write lock again.
*/
+
break;
}
}
}
- if (rv) { /* failed? */
+ if (rv) {
+
/*
* Get back an exclusive (write) lock.
*/
+
vm_map_upgrade(map);
vm_map_unbusy(map);
@@ -2381,6 +2401,7 @@ uvm_map_pageable_all(map, flags, limit)
*
* Skip VM_PROT_NONE entries like we did above.
*/
+
failed_entry = entry;
for (/* nothing */; entry != &map->header;
entry = entry->next) {
@@ -2395,6 +2416,7 @@ uvm_map_pageable_all(map, flags, limit)
*
* Skip VM_PROT_NONE entries like we did above.
*/
+
for (entry = map->header.next; entry != failed_entry;
entry = entry->next) {
if (entry->protection == VM_PROT_NONE)
@@ -2425,7 +2447,7 @@ uvm_map_pageable_all(map, flags, limit)
* if (flags & PGO_DEACTIVATE): any cached pages are deactivated after clean
* if (flags & PGO_FREE): any cached pages are freed after clean
* => returns an error if any part of the specified range isn't mapped
- * => never a need to flush amap layer since the anonymous memory has
+ * => never a need to flush amap layer since the anonymous memory has
* no permanent home, but may deactivate pages there
* => called from sys_msync() and sys_madvise()
* => caller must not write-lock map (read OK).
@@ -2434,11 +2456,11 @@ uvm_map_pageable_all(map, flags, limit)
int
uvm_map_clean(map, start, end, flags)
- vm_map_t map;
+ struct vm_map *map;
vaddr_t start, end;
int flags;
{
- vm_map_entry_t current, entry;
+ struct vm_map_entry *current, *entry;
struct uvm_object *uobj;
struct vm_amap *amap;
struct vm_anon *anon;
@@ -2602,7 +2624,7 @@ uvm_map_clean(map, start, end, flags)
start += size;
}
vm_map_unlock_read(map);
- return (error);
+ return (error);
}
@@ -2615,12 +2637,12 @@ uvm_map_clean(map, start, end, flags)
boolean_t
uvm_map_checkprot(map, start, end, protection)
- vm_map_t map;
- vaddr_t start, end;
- vm_prot_t protection;
+ struct vm_map * map;
+ vaddr_t start, end;
+ vm_prot_t protection;
{
- vm_map_entry_t entry;
- vm_map_entry_t tmp_entry;
+ struct vm_map_entry *entry;
+ struct vm_map_entry *tmp_entry;
if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
return(FALSE);
@@ -2646,9 +2668,6 @@ uvm_map_checkprot(map, start, end, protection)
if ((entry->protection & protection) != protection) {
return(FALSE);
}
-
- /* go to next entry */
-
start = entry->end;
entry = entry->next;
}
@@ -2725,7 +2744,7 @@ uvmspace_share(p1, p2)
void
uvmspace_unshare(p)
- struct proc *p;
+ struct proc *p;
{
struct vmspace *nvm, *ovm = p->p_vmspace;
@@ -2737,7 +2756,7 @@ uvmspace_unshare(p)
nvm = uvmspace_fork(ovm);
pmap_deactivate(p); /* unbind old vmspace */
- p->p_vmspace = nvm;
+ p->p_vmspace = nvm;
pmap_activate(p); /* switch to new vmspace */
uvmspace_free(ovm); /* drop reference to old vmspace */
@@ -2755,7 +2774,7 @@ uvmspace_exec(p, start, end)
vaddr_t start, end;
{
struct vmspace *nvm, *ovm = p->p_vmspace;
- vm_map_t map = &ovm->vm_map;
+ struct vm_map *map = &ovm->vm_map;
#ifdef __sparc__
/* XXX cgd 960926: the sparc #ifdef should be a MD hook */
@@ -2777,6 +2796,7 @@ uvmspace_exec(p, start, end)
/*
* SYSV SHM semantics require us to kill all segments on an exec
*/
+
if (ovm->vm_shm)
shmexit(ovm);
#endif
@@ -2785,6 +2805,7 @@ uvmspace_exec(p, start, end)
* POSIX 1003.1b -- "lock future mappings" is revoked
* when a process execs another program image.
*/
+
vm_map_lock(map);
vm_map_modflags(map, 0, VM_MAP_WIREFUTURE);
vm_map_unlock(map);
@@ -2792,11 +2813,13 @@ uvmspace_exec(p, start, end)
/*
* now unmap the old program
*/
+
uvm_unmap(map, map->min_offset, map->max_offset);
/*
* resize the map
*/
+
vm_map_lock(map);
map->min_offset = start;
map->max_offset = end;
@@ -2808,6 +2831,7 @@ uvmspace_exec(p, start, end)
* it is still being used for others. allocate a new vmspace
* for p
*/
+
nvm = uvmspace_alloc(start, end,
(map->flags & VM_MAP_PAGEABLE) ? TRUE : FALSE);
@@ -2833,16 +2857,18 @@ void
uvmspace_free(vm)
struct vmspace *vm;
{
- vm_map_entry_t dead_entries;
+ struct vm_map_entry *dead_entries;
UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist,"(vm=0x%x) ref=%d", vm, vm->vm_refcnt,0,0);
if (--vm->vm_refcnt == 0) {
+
/*
* lock the map, to wait out all other references to it. delete
* all of the mappings and pages they hold, then call the pmap
* module to reclaim anything left.
*/
+
#ifdef SYSVSHM
/* Get rid of any SYSV shared memory segments. */
if (vm->vm_shm != NULL)
@@ -2878,12 +2904,12 @@ uvmspace_fork(vm1)
struct vmspace *vm1;
{
struct vmspace *vm2;
- vm_map_t old_map = &vm1->vm_map;
- vm_map_t new_map;
- vm_map_entry_t old_entry;
- vm_map_entry_t new_entry;
- pmap_t new_pmap;
- boolean_t protect_child;
+ struct vm_map *old_map = &vm1->vm_map;
+ struct vm_map *new_map;
+ struct vm_map_entry *old_entry;
+ struct vm_map_entry *new_entry;
+ pmap_t new_pmap;
+ boolean_t protect_child;
UVMHIST_FUNC("uvmspace_fork"); UVMHIST_CALLED(maphist);
vm_map_lock(old_map);
@@ -2906,23 +2932,26 @@ uvmspace_fork(vm1)
/*
* first, some sanity checks on the old entry
*/
+
KASSERT(!UVM_ET_ISSUBMAP(old_entry));
KASSERT(UVM_ET_ISCOPYONWRITE(old_entry) ||
!UVM_ET_ISNEEDSCOPY(old_entry));
switch (old_entry->inheritance) {
case MAP_INHERIT_NONE:
+
/*
* drop the mapping
*/
+
break;
case MAP_INHERIT_SHARE:
+
/*
* share the mapping: this means we want the old and
* new entries to share amaps and backing objects.
*/
-
/*
* if the old_entry needs a new amap (due to prev fork)
* then we need to allocate it now so that we have
@@ -2933,7 +2962,7 @@ uvmspace_fork(vm1)
if (UVM_ET_ISNEEDSCOPY(old_entry)) {
/* get our own amap, clears needs_copy */
amap_copy(old_map, old_entry, M_WAITOK, FALSE,
- 0, 0);
+ 0, 0);
/* XXXCDC: WAITOK??? */
}
@@ -2948,8 +2977,8 @@ uvmspace_fork(vm1)
* gain reference to object backing the map (can't
* be a submap, already checked this case).
*/
+
if (new_entry->aref.ar_amap)
- /* share reference */
uvm_map_reference_amap(new_entry, AMAP_SHARED);
if (new_entry->object.uvm_obj &&
@@ -2962,7 +2991,7 @@ uvmspace_fork(vm1)
uvm_map_entry_link(new_map, new_map->header.prev,
new_entry);
- /*
+ /*
* pmap_copy the mappings: this routine is optional
* but if it is there it will reduce the number of
* page faults in the new proc.
@@ -2980,7 +3009,7 @@ uvmspace_fork(vm1)
* copy-on-write the mapping (using mmap's
* MAP_PRIVATE semantics)
*
- * allocate new_entry, adjust reference counts.
+ * allocate new_entry, adjust reference counts.
* (note that new references are read-only).
*/
@@ -3016,20 +3045,20 @@ uvmspace_fork(vm1)
* conditions hold:
* 1. the old entry has an amap and that amap is
* being shared. this means that the old (parent)
- * process is sharing the amap with another
+ * process is sharing the amap with another
* process. if we do not clear needs_copy here
* we will end up in a situation where both the
 * parent and child process are referring to the
- * same amap with "needs_copy" set. if the
+ * same amap with "needs_copy" set. if the
* parent write-faults, the fault routine will
* clear "needs_copy" in the parent by allocating
- * a new amap. this is wrong because the
+ * a new amap. this is wrong because the
* parent is supposed to be sharing the old amap
* and the new amap will break that.
*
* 2. if the old entry has an amap and a non-zero
* wire count then we are going to have to call
- * amap_cow_now to avoid page faults in the
+ * amap_cow_now to avoid page faults in the
* parent process. since amap_cow_now requires
* "needs_copy" to be clear we might as well
* clear it here as well.
@@ -3037,15 +3066,14 @@ uvmspace_fork(vm1)
*/
if (old_entry->aref.ar_amap != NULL) {
+ if ((amap_flags(old_entry->aref.ar_amap) &
+ AMAP_SHARED) != 0 ||
+ VM_MAPENT_ISWIRED(old_entry)) {
- if ((amap_flags(old_entry->aref.ar_amap) &
- AMAP_SHARED) != 0 ||
- VM_MAPENT_ISWIRED(old_entry)) {
-
- amap_copy(new_map, new_entry, M_WAITOK, FALSE,
- 0, 0);
- /* XXXCDC: M_WAITOK ... ok? */
- }
+ amap_copy(new_map, new_entry, M_WAITOK,
+ FALSE, 0, 0);
+ /* XXXCDC: M_WAITOK ... ok? */
+ }
}
/*
@@ -3061,9 +3089,9 @@ uvmspace_fork(vm1)
if (VM_MAPENT_ISWIRED(old_entry)) {
- /*
+ /*
* resolve all copy-on-write faults now
- * (note that there is nothing to do if
+ * (note that there is nothing to do if
* the old mapping does not have an amap).
* XXX: is it worthwhile to bother with pmap_copy
* in this case?
@@ -3071,7 +3099,7 @@ uvmspace_fork(vm1)
if (old_entry->aref.ar_amap)
amap_cow_now(new_map, new_entry);
- } else {
+ } else {
/*
* setup mappings to trigger copy-on-write faults
@@ -3099,6 +3127,7 @@ uvmspace_fork(vm1)
old_entry->end,
old_entry->protection &
~VM_PROT_WRITE);
+ pmap_update();
}
old_entry->etype |= UVM_ET_NEEDSCOPY;
}
@@ -3110,7 +3139,7 @@ uvmspace_fork(vm1)
} else {
/*
- * we only need to protect the child if the
+ * we only need to protect the child if the
* parent has write access.
*/
if (old_entry->max_protection & VM_PROT_WRITE)
@@ -3135,9 +3164,10 @@ uvmspace_fork(vm1)
*/
if (protect_child) {
pmap_protect(new_pmap, new_entry->start,
- new_entry->end,
- new_entry->protection &
+ new_entry->end,
+ new_entry->protection &
~VM_PROT_WRITE);
+ pmap_update();
}
}
@@ -3147,7 +3177,7 @@ uvmspace_fork(vm1)
}
new_map->size = old_map->size;
- vm_map_unlock(old_map);
+ vm_map_unlock(old_map);
#ifdef SYSVSHM
if (vm1->vm_shm)
@@ -3159,7 +3189,7 @@ uvmspace_fork(vm1)
#endif
UVMHIST_LOG(maphist,"<- done",0,0,0,0);
- return(vm2);
+ return(vm2);
}
@@ -3175,11 +3205,11 @@ uvmspace_fork(vm1)
void
uvm_map_printit(map, full, pr)
- vm_map_t map;
+ struct vm_map *map;
boolean_t full;
int (*pr) __P((const char *, ...));
{
- vm_map_entry_t entry;
+ struct vm_map_entry *entry;
(*pr)("MAP %p: [0x%lx->0x%lx]\n", map, map->min_offset,map->max_offset);
(*pr)("\t#ent=%d, sz=%d, ref=%d, version=%d, flags=0x%x\n",
@@ -3204,12 +3234,12 @@ uvm_map_printit(map, full, pr)
"\tsubmap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, "
"wc=%d, adv=%d\n",
(entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
- (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
+ (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
(entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
entry->protection, entry->max_protection,
entry->inheritance, entry->wired_count, entry->advice);
}
-}
+}
/*
* uvm_object_printit: actually prints the object
@@ -3246,7 +3276,7 @@ uvm_object_printit(uobj, full, pr)
if ((cnt % 3) != 2) {
(*pr)("\n");
}
-}
+}
/*
* uvm_page_printit: actually print the page
@@ -3318,11 +3348,11 @@ uvm_page_printit(pg, full, pr)
/* cross-verify page queue */
if (pg->pqflags & PQ_FREE) {
int fl = uvm_page_lookup_freelist(pg);
- pgl = &uvm.page_free[fl].pgfl_queues[((pg)->flags & PG_ZERO) ?
- PGFL_ZEROS : PGFL_UNKNOWN];
+ int color = VM_PGCOLOR_BUCKET(pg);
+ pgl = &uvm.page_free[fl].pgfl_buckets[color].pgfl_queues[
+ ((pg)->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN];
} else if (pg->pqflags & PQ_INACTIVE) {
- pgl = (pg->pqflags & PQ_SWAPBACKED) ?
- &uvm.page_inactive_swp : &uvm.page_inactive_obj;
+ pgl = &uvm.page_inactive;
} else if (pg->pqflags & PQ_ACTIVE) {
pgl = &uvm.page_active;
} else {
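
The cross-check above now has to pick not only the free list but also the page's color bucket. As a minimal standalone sketch of the color computation (assumptions: 4 KB pages and 8 colors; the real VM_PGCOLOR_BUCKET macro is not shown in this diff and is only approximated here by masking the page frame number with uvmexp.colormask):

        /* standalone illustration, not kernel code */
        #include <stdio.h>
        #include <stdint.h>

        #define PAGE_SHIFT      12              /* assumed 4 KB pages */

        static unsigned int ncolors = 8;        /* assumed; must be a power of two */
        static unsigned int colormask;          /* ncolors - 1, as in uvm_page_init() */

        static unsigned int
        page_color(uint64_t phys_addr)
        {
                /* approximates VM_PGCOLOR_BUCKET(pg): page frame number & colormask */
                return (unsigned int)((phys_addr >> PAGE_SHIFT) & colormask);
        }

        int
        main(void)
        {
                uint64_t pa;

                colormask = ncolors - 1;
                for (pa = 0; pa < (10ULL << PAGE_SHIFT); pa += (1ULL << PAGE_SHIFT))
                        printf("pa 0x%llx -> color %u\n",
                            (unsigned long long)pa, page_color(pa));
                return (0);
        }

Pages that share the same low frame-number bits compete for the same lines of a physically indexed cache; keeping one free queue per color lets the allocator spread allocations across colors, which in this commit is done with a global round-robin (see uvm_pagealloc_strat() further down).
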
diff --git a/sys/uvm/uvm_map.h b/sys/uvm/uvm_map.h
index fa135af21d5..d16b48b4509 100644
--- a/sys/uvm/uvm_map.h
+++ b/sys/uvm/uvm_map.h
@@ -1,9 +1,9 @@
-/* $OpenBSD: uvm_map.h,v 1.17 2001/11/28 13:47:40 art Exp $ */
-/* $NetBSD: uvm_map.h,v 1.25 2001/03/15 06:10:57 chs Exp $ */
+/* $OpenBSD: uvm_map.h,v 1.18 2001/11/28 19:28:15 art Exp $ */
+/* $NetBSD: uvm_map.h,v 1.29 2001/06/26 17:55:15 thorpej Exp $ */
-/*
+/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
- * Copyright (c) 1991, 1993, The Regents of the University of California.
+ * Copyright (c) 1991, 1993, The Regents of the University of California.
*
* All rights reserved.
*
@@ -21,7 +21,7 @@
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Charles D. Cranor,
- * Washington University, the University of California, Berkeley and
+ * Washington University, the University of California, Berkeley and
* its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
@@ -45,17 +45,17 @@
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
* All rights reserved.
- *
+ *
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
@@ -83,7 +83,7 @@
/*
* UVM_MAP_CLIP_START: ensure that the entry begins at or after
* the starting address, if it doesn't we split the entry.
- *
+ *
* => map must be locked by caller
*/
@@ -113,26 +113,6 @@
#include <uvm/uvm_anon.h>
/*
- * types defined:
- *
- * vm_map_t the high-level address map data structure.
- * vm_map_entry_t an entry in an address map.
- * vm_map_version_t a timestamp of a map, for use with vm_map_lookup
- */
-
-/*
- * Objects which live in maps may be either VM objects, or another map
- * (called a "sharing map") which denotes read-write sharing with other maps.
- *
- * XXXCDC: private pager data goes here now
- */
-
-union vm_map_object {
- struct uvm_object *uvm_obj; /* UVM OBJECT */
- struct vm_map *sub_map; /* belongs to another map */
-};
-
-/*
* Address map entries consist of start and end addresses,
* a VM object (or sharing map) and offset into that object,
* and user-exported inheritance and protection information.
@@ -143,7 +123,10 @@ struct vm_map_entry {
struct vm_map_entry *next; /* next entry */
vaddr_t start; /* start address */
vaddr_t end; /* end address */
- union vm_map_object object; /* object I point to */
+ union {
+ struct uvm_object *uvm_obj; /* uvm object */
+ struct vm_map *sub_map; /* belongs to another map */
+ } object; /* object I point to */
voff_t offset; /* offset into object */
int etype; /* entry type */
vm_prot_t protection; /* protection code */
@@ -215,17 +198,17 @@ struct vm_map_entry {
*/
struct vm_map {
struct pmap * pmap; /* Physical map */
- lock_data_t lock; /* Lock for map data */
+ struct lock lock; /* Lock for map data */
struct vm_map_entry header; /* List of entries */
int nentries; /* Number of entries */
vsize_t size; /* virtual size */
int ref_count; /* Reference count */
- simple_lock_data_t ref_lock; /* Lock for ref_count field */
- vm_map_entry_t hint; /* hint for quick lookups */
- simple_lock_data_t hint_lock; /* lock for hint storage */
- vm_map_entry_t first_free; /* First free space hint */
+ struct simplelock ref_lock; /* Lock for ref_count field */
+ struct vm_map_entry * hint; /* hint for quick lookups */
+ struct simplelock hint_lock; /* lock for hint storage */
+ struct vm_map_entry * first_free; /* First free space hint */
int flags; /* flags */
- simple_lock_data_t flags_lock; /* Lock for flags field */
+ struct simplelock flags_lock; /* Lock for flags field */
unsigned int timestamp; /* Version number */
#define min_offset header.start
#define max_offset header.end
@@ -258,49 +241,12 @@ do { \
#endif /* _KERNEL */
/*
- * Interrupt-safe maps must also be kept on a special list,
- * to assist uvm_fault() in avoiding locking problems.
- */
-struct vm_map_intrsafe {
- struct vm_map vmi_map;
- LIST_ENTRY(vm_map_intrsafe) vmi_list;
-};
-
-LIST_HEAD(vmi_list, vm_map_intrsafe);
-#ifdef _KERNEL
-extern simple_lock_data_t vmi_list_slock;
-extern struct vmi_list vmi_list;
-
-static __inline int vmi_list_lock __P((void));
-static __inline void vmi_list_unlock __P((int));
-
-static __inline int
-vmi_list_lock()
-{
- int s;
-
- s = splhigh();
- simple_lock(&vmi_list_slock);
- return (s);
-}
-
-static __inline void
-vmi_list_unlock(s)
- int s;
-{
-
- simple_unlock(&vmi_list_slock);
- splx(s);
-}
-#endif /* _KERNEL */
-
-/*
* handle inline options
*/
#ifdef UVM_MAP_INLINE
#define MAP_INLINE static __inline
-#else
+#else
#define MAP_INLINE /* nothing */
#endif /* UVM_MAP_INLINE */
@@ -319,34 +265,39 @@ extern vaddr_t uvm_maxkaddr;
*/
MAP_INLINE
-void uvm_map_deallocate __P((vm_map_t));
+void uvm_map_deallocate __P((struct vm_map *));
-int uvm_map_clean __P((vm_map_t, vaddr_t, vaddr_t, int));
-void uvm_map_clip_start __P((vm_map_t, vm_map_entry_t, vaddr_t));
-void uvm_map_clip_end __P((vm_map_t, vm_map_entry_t, vaddr_t));
+int uvm_map_clean __P((struct vm_map *, vaddr_t, vaddr_t, int));
+void uvm_map_clip_start __P((struct vm_map *, struct vm_map_entry *,
+ vaddr_t));
+void uvm_map_clip_end __P((struct vm_map *, struct vm_map_entry *,
+ vaddr_t));
MAP_INLINE
-vm_map_t uvm_map_create __P((pmap_t, vaddr_t, vaddr_t, int));
-int uvm_map_extract __P((vm_map_t, vaddr_t, vsize_t,
- vm_map_t, vaddr_t *, int));
-vm_map_entry_t uvm_map_findspace __P((vm_map_t, vaddr_t, vsize_t, vaddr_t *,
- struct uvm_object *, voff_t, vsize_t, int));
-int uvm_map_inherit __P((vm_map_t, vaddr_t, vaddr_t, vm_inherit_t));
-int uvm_map_advice __P((vm_map_t, vaddr_t, vaddr_t, int));
+struct vm_map *uvm_map_create __P((pmap_t, vaddr_t, vaddr_t, int));
+int uvm_map_extract __P((struct vm_map *, vaddr_t, vsize_t,
+ struct vm_map *, vaddr_t *, int));
+struct vm_map_entry *uvm_map_findspace __P((struct vm_map *, vaddr_t, vsize_t,
+ vaddr_t *, struct uvm_object *, voff_t, vsize_t, int));
+int uvm_map_inherit __P((struct vm_map *, vaddr_t, vaddr_t,
+ vm_inherit_t));
+int uvm_map_advice __P((struct vm_map *, vaddr_t, vaddr_t, int));
void uvm_map_init __P((void));
-boolean_t uvm_map_lookup_entry __P((vm_map_t, vaddr_t, vm_map_entry_t *));
+boolean_t uvm_map_lookup_entry __P((struct vm_map *, vaddr_t,
+ struct vm_map_entry **));
MAP_INLINE
-void uvm_map_reference __P((vm_map_t));
-int uvm_map_replace __P((vm_map_t, vaddr_t, vaddr_t,
- vm_map_entry_t, int));
-int uvm_map_reserve __P((vm_map_t, vsize_t, vaddr_t, vsize_t,
- vaddr_t *));
-void uvm_map_setup __P((vm_map_t, vaddr_t, vaddr_t, int));
-int uvm_map_submap __P((vm_map_t, vaddr_t, vaddr_t, vm_map_t));
+void uvm_map_reference __P((struct vm_map *));
+int uvm_map_replace __P((struct vm_map *, vaddr_t, vaddr_t,
+ struct vm_map_entry *, int));
+int uvm_map_reserve __P((struct vm_map *, vsize_t, vaddr_t, vsize_t,
+ vaddr_t *));
+void uvm_map_setup __P((struct vm_map *, vaddr_t, vaddr_t, int));
+int uvm_map_submap __P((struct vm_map *, vaddr_t, vaddr_t,
+ struct vm_map *));
MAP_INLINE
-void uvm_unmap __P((vm_map_t, vaddr_t, vaddr_t));
-void uvm_unmap_detach __P((vm_map_entry_t,int));
-void uvm_unmap_remove __P((vm_map_t, vaddr_t, vaddr_t,
- vm_map_entry_t *));
+void uvm_unmap __P((struct vm_map *, vaddr_t, vaddr_t));
+void uvm_unmap_detach __P((struct vm_map_entry *,int));
+void uvm_unmap_remove __P((struct vm_map *, vaddr_t, vaddr_t,
+ struct vm_map_entry **));
#endif /* _KERNEL */
@@ -384,13 +335,13 @@ void uvm_unmap_remove __P((vm_map_t, vaddr_t, vaddr_t,
#include <sys/proc.h> /* for tsleep(), wakeup() */
#include <sys/systm.h> /* for panic() */
-static __inline boolean_t vm_map_lock_try __P((vm_map_t));
-static __inline void vm_map_lock __P((vm_map_t));
+static __inline boolean_t vm_map_lock_try __P((struct vm_map *));
+static __inline void vm_map_lock __P((struct vm_map *));
extern const char vmmapbsy[];
static __inline boolean_t
vm_map_lock_try(map)
- vm_map_t map;
+ struct vm_map *map;
{
boolean_t rv;
@@ -414,7 +365,7 @@ vm_map_lock_try(map)
static __inline void
vm_map_lock(map)
- vm_map_t map;
+ struct vm_map *map;
{
int error;
diff --git a/sys/uvm/uvm_map_i.h b/sys/uvm/uvm_map_i.h
index 2f4578f2a22..069cbd5f125 100644
--- a/sys/uvm/uvm_map_i.h
+++ b/sys/uvm/uvm_map_i.h
@@ -1,9 +1,9 @@
-/* $OpenBSD: uvm_map_i.h,v 1.10 2001/11/28 13:47:40 art Exp $ */
-/* $NetBSD: uvm_map_i.h,v 1.19 2001/03/15 06:10:57 chs Exp $ */
+/* $OpenBSD: uvm_map_i.h,v 1.11 2001/11/28 19:28:15 art Exp $ */
+/* $NetBSD: uvm_map_i.h,v 1.22 2001/06/26 17:55:15 thorpej Exp $ */
-/*
+/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
- * Copyright (c) 1991, 1993, The Regents of the University of California.
+ * Copyright (c) 1991, 1993, The Regents of the University of California.
*
* All rights reserved.
*
@@ -21,7 +21,7 @@
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Charles D. Cranor,
- * Washington University, the University of California, Berkeley and
+ * Washington University, the University of California, Berkeley and
* its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
@@ -45,17 +45,17 @@
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
* All rights reserved.
- *
+ *
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
@@ -84,17 +84,15 @@
* uvm_map_create: create map
*/
-MAP_INLINE vm_map_t
+MAP_INLINE struct vm_map *
uvm_map_create(pmap, min, max, flags)
pmap_t pmap;
vaddr_t min, max;
int flags;
{
- vm_map_t result;
+ struct vm_map *result;
- MALLOC(result, vm_map_t,
- (flags & VM_MAP_INTRSAFE) ? sizeof(struct vm_map_intrsafe) :
- sizeof(struct vm_map),
+ MALLOC(result, struct vm_map *, sizeof(struct vm_map),
M_VMMAP, M_WAITOK);
uvm_map_setup(result, min, max, flags);
result->pmap = pmap;
@@ -109,7 +107,7 @@ uvm_map_create(pmap, min, max, flags)
MAP_INLINE void
uvm_map_setup(map, min, max, flags)
- vm_map_t map;
+ struct vm_map *map;
vaddr_t min, max;
int flags;
{
@@ -128,23 +126,6 @@ uvm_map_setup(map, min, max, flags)
simple_lock_init(&map->ref_lock);
simple_lock_init(&map->hint_lock);
simple_lock_init(&map->flags_lock);
-
- /*
- * If the map is interrupt safe, place it on the list
- * of interrupt safe maps, for uvm_fault().
- *
- * We almost never set up an interrupt-safe map, but we set
- * up quite a few regular ones (at every fork!), so put
- * interrupt-safe map setup in the slow path.
- */
- if (__predict_false(flags & VM_MAP_INTRSAFE)) {
- struct vm_map_intrsafe *vmi = (struct vm_map_intrsafe *)map;
- int s;
-
- s = vmi_list_lock();
- LIST_INSERT_HEAD(&vmi_list, vmi, vmi_list);
- vmi_list_unlock(s);
- }
}
@@ -155,16 +136,16 @@ uvm_map_setup(map, min, max, flags)
/*
* uvm_unmap: remove mappings from a vm_map (from "start" up to "stop")
*
- * => caller must check alignment and size
+ * => caller must check alignment and size
* => map must be unlocked (we will lock it)
*/
MAP_INLINE void
uvm_unmap(map, start, end)
- vm_map_t map;
+ struct vm_map *map;
vaddr_t start,end;
{
- vm_map_entry_t dead_entries;
+ struct vm_map_entry *dead_entries;
UVMHIST_FUNC("uvm_unmap"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist, " (map=0x%x, start=0x%x, end=0x%x)",
@@ -192,10 +173,10 @@ uvm_unmap(map, start, end)
MAP_INLINE void
uvm_map_reference(map)
- vm_map_t map;
+ struct vm_map *map;
{
simple_lock(&map->ref_lock);
- map->ref_count++;
+ map->ref_count++;
simple_unlock(&map->ref_lock);
}
@@ -208,7 +189,7 @@ uvm_map_reference(map)
MAP_INLINE void
uvm_map_deallocate(map)
- vm_map_t map;
+ struct vm_map *map;
{
int c;
diff --git a/sys/uvm/uvm_meter.c b/sys/uvm/uvm_meter.c
index 04bb3b3a2e0..f2d1a3c2c05 100644
--- a/sys/uvm/uvm_meter.c
+++ b/sys/uvm/uvm_meter.c
@@ -1,10 +1,10 @@
-/* $OpenBSD: uvm_meter.c,v 1.15 2001/11/12 01:26:09 art Exp $ */
-/* $NetBSD: uvm_meter.c,v 1.17 2001/03/09 01:02:12 chs Exp $ */
+/* $OpenBSD: uvm_meter.c,v 1.16 2001/11/28 19:28:15 art Exp $ */
+/* $NetBSD: uvm_meter.c,v 1.21 2001/07/14 06:36:03 matt Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* Copyright (c) 1982, 1986, 1989, 1993
- * The Regents of the University of California.
+ * The Regents of the University of California.
*
* All rights reserved.
*
@@ -19,7 +19,7 @@
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Charles D. Cranor,
- * Washington University, and the University of California, Berkeley
+ * Washington University, and the University of California, Berkeley
* and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
@@ -62,7 +62,7 @@ int maxslp = MAXSLP; /* patchable ... */
struct loadavg averunnable;
/*
- * constants for averages over 1, 5, and 15 minutes when sampling at
+ * constants for averages over 1, 5, and 15 minutes when sampling at
* 5 second intervals.
*/
@@ -91,7 +91,7 @@ uvm_meter()
}
/*
- * uvm_loadav: compute a tenex style load average of a quantity on
+ * uvm_loadav: compute a tenex style load average of a quantity on
 * 1, 5, and 15 minute intervals.
*/
static void
@@ -208,6 +208,13 @@ uvm_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
uvmexp.vnodeminpct = t;
uvmexp.vnodemin = t * 256 / 100;
return rv;
+
+ case VM_MAXSLP:
+ return (sysctl_rdint(oldp, oldlenp, newp, maxslp));
+
+ case VM_USPACE:
+ return (sysctl_rdint(oldp, oldlenp, newp, USPACE));
+
default:
return (EOPNOTSUPP);
}
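
The two new sysctl leaves above export maxslp and USPACE as read-only integers (the kernel side uses sysctl_rdint(), so writes are rejected). As a hedged userland sketch of how they would be read, assuming the VM_MAXSLP/VM_USPACE MIB names end up visible via <uvm/uvm_param.h>, which this hunk does not show:

        #include <sys/param.h>
        #include <sys/types.h>
        #include <sys/sysctl.h>
        #include <stdio.h>
        #include <uvm/uvm_param.h>      /* assumed home of VM_MAXSLP / VM_USPACE */

        int
        main(void)
        {
                int mib[2] = { CTL_VM, VM_USPACE };
                int uspace;
                size_t len = sizeof(uspace);

                if (sysctl(mib, 2, &uspace, &len, NULL, 0) == -1) {
                        perror("sysctl");
                        return (1);
                }
                printf("USPACE = %d bytes\n", uspace);
                return (0);
        }
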
@@ -223,8 +230,8 @@ uvm_total(totalp)
{
struct proc *p;
#if 0
- vm_map_entry_t entry;
- vm_map_t map;
+ struct vm_map_entry * entry;
+ struct vm_map *map;
int paging;
#endif
diff --git a/sys/uvm/uvm_mmap.c b/sys/uvm/uvm_mmap.c
index 32203733178..1543303d5aa 100644
--- a/sys/uvm/uvm_mmap.c
+++ b/sys/uvm/uvm_mmap.c
@@ -1,11 +1,11 @@
-/* $OpenBSD: uvm_mmap.c,v 1.29 2001/11/28 13:47:40 art Exp $ */
-/* $NetBSD: uvm_mmap.c,v 1.50 2001/03/15 06:10:57 chs Exp $ */
+/* $OpenBSD: uvm_mmap.c,v 1.30 2001/11/28 19:28:15 art Exp $ */
+/* $NetBSD: uvm_mmap.c,v 1.54 2001/06/14 20:32:49 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
- * Copyright (c) 1991, 1993 The Regents of the University of California.
+ * Copyright (c) 1991, 1993 The Regents of the University of California.
* Copyright (c) 1988 University of Utah.
- *
+ *
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
@@ -23,7 +23,7 @@
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the Charles D. Cranor,
- * Washington University, University of California, Berkeley and
+ * Washington University, University of California, Berkeley and
* its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
@@ -132,14 +132,14 @@ sys_mincore(p, v, retval)
syscallarg(size_t) len;
syscallarg(char *) vec;
} */ *uap = v;
- vm_page_t m;
+ struct vm_page *m;
char *vec, pgi;
struct uvm_object *uobj;
struct vm_amap *amap;
struct vm_anon *anon;
- vm_map_entry_t entry;
+ struct vm_map_entry *entry;
vaddr_t start, end, lim;
- vm_map_t map;
+ struct vm_map *map;
vsize_t len;
int error = 0, npgs;
@@ -328,7 +328,7 @@ sys_mmap(p, v, retval)
return (EINVAL); /* don't allow wrap */
/*
- * now check (MAP_FIXED) or get (!MAP_FIXED) the "addr"
+ * now check (MAP_FIXED) or get (!MAP_FIXED) the "addr"
*/
if (flags & MAP_FIXED) {
@@ -402,7 +402,7 @@ sys_mmap(p, v, retval)
flags |= MAP_PRIVATE; /* for a file */
}
- /*
+ /*
* MAP_PRIVATE device mappings don't make sense (and aren't
* supported anyway). However, some programs rely on this,
* so just change it to MAP_SHARED.
@@ -508,7 +508,7 @@ sys_msync(p, v, retval)
} */ *uap = v;
vaddr_t addr;
vsize_t size, pageoff;
- vm_map_t map;
+ struct vm_map *map;
int error, rv, flags, uvmflags;
/*
@@ -558,8 +558,8 @@ sys_msync(p, v, retval)
*/
if (size == 0) {
- vm_map_entry_t entry;
-
+ struct vm_map_entry *entry;
+
vm_map_lock_read(map);
rv = uvm_map_lookup_entry(map, addr, &entry);
if (rv == TRUE) {
@@ -603,7 +603,7 @@ sys_munmap(p, v, retval)
} */ *uap = v;
vaddr_t addr;
vsize_t size, pageoff;
- vm_map_t map;
+ struct vm_map *map;
vaddr_t vm_min_address = VM_MIN_ADDRESS;
struct vm_map_entry *dead_entries;
@@ -613,7 +613,7 @@ sys_munmap(p, v, retval)
addr = (vaddr_t)SCARG(uap, addr);
size = (vsize_t)SCARG(uap, len);
-
+
/*
* align the address to a page boundary and adjust the size accordingly.
*/
@@ -641,7 +641,7 @@ sys_munmap(p, v, retval)
map = &p->p_vmspace->vm_map;
/*
- * interesting system call semantic: make sure entire range is
+ * interesting system call semantic: make sure entire range is
* allocated before allowing an unmap.
*/
@@ -720,7 +720,7 @@ sys_minherit(p, v, retval)
vsize_t size, pageoff;
vm_inherit_t inherit;
int error;
-
+
addr = (vaddr_t)SCARG(uap, addr);
size = (vsize_t)SCARG(uap, len);
inherit = SCARG(uap, inherit);
@@ -760,7 +760,7 @@ sys_madvise(p, v, retval)
vaddr_t addr;
vsize_t size, pageoff;
int advice, error;
-
+
addr = (vaddr_t)SCARG(uap, addr);
size = (vsize_t)SCARG(uap, len);
advice = SCARG(uap, behav);
@@ -878,7 +878,7 @@ sys_mlock(p, v, retval)
addr -= pageoff;
size += pageoff;
size = (vsize_t)round_page(size);
-
+
/* disallow wrap-around. */
if (addr + size < addr)
return (EINVAL);
@@ -1005,7 +1005,7 @@ sys_munlockall(p, v, retval)
int
uvm_mmap(map, addr, size, prot, maxprot, flags, handle, foff, locklimit)
- vm_map_t map;
+ struct vm_map *map;
vaddr_t *addr;
vsize_t size;
vm_prot_t prot, maxprot;
@@ -1091,7 +1091,7 @@ uvm_mmap(map, addr, size, prot, maxprot, flags, handle, foff, locklimit)
uvmflag |= UVM_FLAG_COPYONW;
}
- uvmflag = UVM_MAPFLAG(prot, maxprot,
+ uvmflag = UVM_MAPFLAG(prot, maxprot,
(flags & MAP_SHARED) ? UVM_INH_SHARE : UVM_INH_COPY,
advice, uvmflag);
error = uvm_map(map, addr, size, uobj, foff, 0, uvmflag);
diff --git a/sys/uvm/uvm_object.h b/sys/uvm/uvm_object.h
index ac55e44d7be..239152fb5fe 100644
--- a/sys/uvm/uvm_object.h
+++ b/sys/uvm/uvm_object.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_object.h,v 1.6 2001/11/12 01:26:09 art Exp $ */
-/* $NetBSD: uvm_object.h,v 1.11 2001/03/09 01:02:12 chs Exp $ */
+/* $OpenBSD: uvm_object.h,v 1.7 2001/11/28 19:28:15 art Exp $ */
+/* $NetBSD: uvm_object.h,v 1.12 2001/05/26 16:32:47 chs Exp $ */
/*
*
@@ -47,7 +47,7 @@
*/
struct uvm_object {
- simple_lock_data_t vmobjlock; /* lock on memq */
+ struct simplelock vmobjlock; /* lock on memq */
struct uvm_pagerops *pgops; /* pager ops */
struct pglist memq; /* pages in this object */
int uo_npages; /* # of pages in memq */
diff --git a/sys/uvm/uvm_page.c b/sys/uvm/uvm_page.c
index b7648231eb9..7d5c9b1a537 100644
--- a/sys/uvm/uvm_page.c
+++ b/sys/uvm/uvm_page.c
@@ -1,9 +1,9 @@
-/* $OpenBSD: uvm_page.c,v 1.34 2001/11/28 14:29:13 art Exp $ */
-/* $NetBSD: uvm_page.c,v 1.52 2001/04/22 17:22:58 thorpej Exp $ */
+/* $OpenBSD: uvm_page.c,v 1.35 2001/11/28 19:28:15 art Exp $ */
+/* $NetBSD: uvm_page.c,v 1.65 2001/06/27 23:57:16 thorpej Exp $ */
-/*
+/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
- * Copyright (c) 1991, 1993, The Regents of the University of California.
+ * Copyright (c) 1991, 1993, The Regents of the University of California.
*
* All rights reserved.
*
@@ -21,7 +21,7 @@
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Charles D. Cranor,
- * Washington University, the University of California, Berkeley and
+ * Washington University, the University of California, Berkeley and
* its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
@@ -45,17 +45,17 @@
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
* All rights reserved.
- *
+ *
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
@@ -126,6 +126,16 @@ static vaddr_t virtual_space_end;
static struct pglist uvm_bootbucket;
/*
+ * we allocate an initial number of page colors in uvm_page_init(),
+ * and remember them. We may re-color pages as cache sizes are
+ * discovered during the autoconfiguration phase. But we can never
+ * free the initial set of buckets, since they are allocated using
+ * uvm_pageboot_alloc().
+ */
+
+static boolean_t have_recolored_pages /* = FALSE */;
+
+/*
* local prototypes
*/
@@ -202,9 +212,22 @@ uvm_pageremove(pg)
pg->version++;
}
+static void
+uvm_page_init_buckets(struct pgfreelist *pgfl)
+{
+ int color, i;
+
+ for (color = 0; color < uvmexp.ncolors; color++) {
+ for (i = 0; i < PGFL_NQUEUES; i++) {
+ TAILQ_INIT(&pgfl->pgfl_buckets[
+ color].pgfl_queues[i]);
+ }
+ }
+}
+
/*
* uvm_page_init: init the page system. called from uvm_init().
- *
+ *
* => we return the range of kernel virtual memory in kvm_startp/kvm_endp
*/
@@ -212,22 +235,20 @@ void
uvm_page_init(kvm_startp, kvm_endp)
vaddr_t *kvm_startp, *kvm_endp;
{
- vsize_t freepages, pagecount, n;
- vm_page_t pagearray;
- int lcv, i;
+ vsize_t freepages, pagecount, bucketcount, n;
+ struct pgflbucket *bucketarray;
+ struct vm_page *pagearray;
+ int lcv, i;
paddr_t paddr;
/*
- * init the page queues and page queue locks
+ * init the page queues and page queue locks, except the free
+ * list; we allocate that later (with the initial vm_page
+ * structures).
*/
- for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
- for (i = 0; i < PGFL_NQUEUES; i++)
- TAILQ_INIT(&uvm.page_free[lcv].pgfl_queues[i]);
- }
TAILQ_INIT(&uvm.page_active);
- TAILQ_INIT(&uvm.page_inactive_swp);
- TAILQ_INIT(&uvm.page_inactive_obj);
+ TAILQ_INIT(&uvm.page_inactive);
simple_lock_init(&uvm.pageqlock);
simple_lock_init(&uvm.fpageqlock);
@@ -243,7 +264,7 @@ uvm_page_init(kvm_startp, kvm_endp)
TAILQ_INIT(uvm.page_hash); /* init hash table */
simple_lock_init(&uvm.hashlock); /* init hash table lock */
- /*
+ /*
* allocate vm_page structures.
*/
@@ -256,20 +277,28 @@ uvm_page_init(kvm_startp, kvm_endp)
if (vm_nphysseg == 0)
panic("uvm_page_bootstrap: no memory pre-allocated");
-
+
/*
- * first calculate the number of free pages...
+ * first calculate the number of free pages...
*
* note that we use start/end rather than avail_start/avail_end.
* this allows us to allocate extra vm_page structures in case we
* want to return some memory to the pool after booting.
*/
-
+
freepages = 0;
for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
freepages += (vm_physmem[lcv].end - vm_physmem[lcv].start);
/*
+ * Let MD code initialize the number of colors, or default
+ * to 1 color if MD code doesn't care.
+ */
+ if (uvmexp.ncolors == 0)
+ uvmexp.ncolors = 1;
+ uvmexp.colormask = uvmexp.ncolors - 1;
+
+ /*
* we now know we have (PAGE_SIZE * freepages) bytes of memory we can
* use. for each page of memory we use we need a vm_page structure.
* thus, the total number of pages we can use is the total size of
@@ -278,13 +307,24 @@ uvm_page_init(kvm_startp, kvm_endp)
* truncation errors (since we can only allocate in terms of whole
* pages).
*/
-
+
+ bucketcount = uvmexp.ncolors * VM_NFREELIST;
pagecount = ((freepages + 1) << PAGE_SHIFT) /
(PAGE_SIZE + sizeof(struct vm_page));
- pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
- sizeof(struct vm_page));
+
+ bucketarray = (void *) uvm_pageboot_alloc((bucketcount *
+ sizeof(struct pgflbucket)) + (pagecount *
+ sizeof(struct vm_page)));
+ pagearray = (struct vm_page *)(bucketarray + bucketcount);
+
+ for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
+ uvm.page_free[lcv].pgfl_buckets =
+ (bucketarray + (lcv * uvmexp.ncolors));
+ uvm_page_init_buckets(&uvm.page_free[lcv]);
+ }
+
memset(pagearray, 0, pagecount * sizeof(struct vm_page));
-
+
/*
* init the vm_page structures and put them in the correct place.
*/
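
For concreteness, a worked example of the layout set up above (the value of VM_NFREELIST is machine-dependent; 2 is only an illustration): with VM_NFREELIST == 2 and uvmexp.ncolors == 8, bucketcount is 16, so the single uvm_pageboot_alloc() call reserves 16 struct pgflbucket entries followed immediately by the vm_page array. Free list 0 gets buckets bucketarray[0..7] and free list 1 gets bucketarray[8..15], because each pgfl_buckets pointer is set to bucketarray + (lcv * uvmexp.ncolors).
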
@@ -308,6 +348,9 @@ uvm_page_init(kvm_startp, kvm_endp)
paddr = ptoa(vm_physmem[lcv].start);
for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
vm_physmem[lcv].pgs[i].phys_addr = paddr;
+#ifdef __HAVE_VM_PAGE_MD
+ VM_MDPAGE_INIT(&vm_physmem[lcv].pgs[i]);
+#endif
if (atop(paddr) >= vm_physmem[lcv].avail_start &&
atop(paddr) <= vm_physmem[lcv].avail_end) {
uvmexp.npages++;
@@ -361,9 +404,9 @@ uvm_page_init(kvm_startp, kvm_endp)
/*
* uvm_setpagesize: set the page size
- *
+ *
* => sets page_shift and page_mask from uvmexp.pagesize.
- */
+ */
void
uvm_setpagesize()
@@ -411,8 +454,8 @@ uvm_pageboot_alloc(size)
#if defined(PMAP_STEAL_MEMORY)
- /*
- * defer bootstrap allocation to MD code (it may want to allocate
+ /*
+ * defer bootstrap allocation to MD code (it may want to allocate
* from a direct-mapped segment). pmap_steal_memory should adjust
* virtual_space_start/virtual_space_end if necessary.
*/
@@ -463,6 +506,7 @@ uvm_pageboot_alloc(size)
*/
pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE);
}
+ pmap_update();
return(addr);
#endif /* PMAP_STEAL_MEMORY */
}
@@ -833,6 +877,76 @@ uvm_page_rehash()
return;
}
+/*
+ * uvm_page_recolor: Recolor the pages if the new bucket count is
+ * larger than the old one.
+ */
+
+void
+uvm_page_recolor(int newncolors)
+{
+ struct pgflbucket *bucketarray, *oldbucketarray;
+ struct pgfreelist pgfl;
+ struct vm_page *pg;
+ vsize_t bucketcount;
+ int s, lcv, color, i, ocolors;
+
+ if (newncolors <= uvmexp.ncolors)
+ return;
+
+ bucketcount = newncolors * VM_NFREELIST;
+ bucketarray = malloc(bucketcount * sizeof(struct pgflbucket),
+ M_VMPAGE, M_NOWAIT);
+ if (bucketarray == NULL) {
+ printf("WARNING: unable to allocate %ld page color buckets\n",
+ (long) bucketcount);
+ return;
+ }
+
+ s = uvm_lock_fpageq();
+
+ /* Make sure we should still do this. */
+ if (newncolors <= uvmexp.ncolors) {
+ uvm_unlock_fpageq(s);
+ free(bucketarray, M_VMPAGE);
+ return;
+ }
+
+ oldbucketarray = uvm.page_free[0].pgfl_buckets;
+ ocolors = uvmexp.ncolors;
+
+ uvmexp.ncolors = newncolors;
+ uvmexp.colormask = uvmexp.ncolors - 1;
+
+ for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
+ pgfl.pgfl_buckets = (bucketarray + (lcv * newncolors));
+ uvm_page_init_buckets(&pgfl);
+ for (color = 0; color < ocolors; color++) {
+ for (i = 0; i < PGFL_NQUEUES; i++) {
+ while ((pg = TAILQ_FIRST(&uvm.page_free[
+ lcv].pgfl_buckets[color].pgfl_queues[i]))
+ != NULL) {
+ TAILQ_REMOVE(&uvm.page_free[
+ lcv].pgfl_buckets[
+ color].pgfl_queues[i], pg, pageq);
+ TAILQ_INSERT_TAIL(&pgfl.pgfl_buckets[
+ VM_PGCOLOR_BUCKET(pg)].pgfl_queues[
+ i], pg, pageq);
+ }
+ }
+ }
+ uvm.page_free[lcv].pgfl_buckets = pgfl.pgfl_buckets;
+ }
+
+ if (have_recolored_pages) {
+ uvm_unlock_fpageq(s);
+ free(oldbucketarray, M_VMPAGE);
+ return;
+ }
+
+ have_recolored_pages = TRUE;
+ uvm_unlock_fpageq(s);
+}
#if 1 /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */
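
To put numbers on the recoloring path added above (the cache geometry is hypothetical, and the formula is the usual one for physically indexed caches rather than anything this patch specifies): a 2 MB, 4-way set-associative L2 cache with 4 KB pages has a 512 KB way, i.e. 128 page colors, so machine-dependent code discovering such a cache would call uvm_page_recolor(128), or in general uvm_page_recolor(cache_size / (associativity * PAGE_SIZE)). The function only ever grows the bucket count, and the have_recolored_pages flag keeps it from freeing the initial bucket array, which came from uvm_pageboot_alloc() and cannot be free()d.
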
@@ -864,6 +978,49 @@ uvm_page_physdump()
#endif
/*
+ * uvm_pagealloc_pgfl: helper routine for uvm_pagealloc_strat
+ */
+
+static __inline struct vm_page *
+uvm_pagealloc_pgfl(struct pgfreelist *pgfl, int try1, int try2,
+ unsigned int *trycolorp)
+{
+ struct pglist *freeq;
+ struct vm_page *pg;
+ int color, trycolor = *trycolorp;
+
+ color = trycolor;
+ do {
+ if ((pg = TAILQ_FIRST((freeq =
+ &pgfl->pgfl_buckets[color].pgfl_queues[try1]))) != NULL)
+ goto gotit;
+ if ((pg = TAILQ_FIRST((freeq =
+ &pgfl->pgfl_buckets[color].pgfl_queues[try2]))) != NULL)
+ goto gotit;
+ color = (color + 1) & uvmexp.colormask;
+ } while (color != trycolor);
+
+ return (NULL);
+
+ gotit:
+ TAILQ_REMOVE(freeq, pg, pageq);
+ uvmexp.free--;
+
+ /* update zero'd page count */
+ if (pg->flags & PG_ZERO)
+ uvmexp.zeropages--;
+
+ if (color == trycolor)
+ uvmexp.colorhit++;
+ else {
+ uvmexp.colormiss++;
+ *trycolorp = color;
+ }
+
+ return (pg);
+}
+
+/*
* uvm_pagealloc_strat: allocate vm_page from a particular free list.
*
* => return null if no pages free
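
A worked pass through the uvm_pagealloc_pgfl() helper added above, assuming uvmexp.ncolors == 4 (so colormask == 3) and *trycolorp == 2: the do/while probes buckets 2, 3, 0 and 1 in that order, checking the try1 queue before the try2 queue within each bucket. If a page turns up in bucket 2 the allocation counts as a colorhit; otherwise colormiss is bumped and *trycolorp is rewritten to the color actually used, which the caller then feeds into uvm.page_free_nextcolor so the next allocation starts one color further along.
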
@@ -889,10 +1046,8 @@ uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
struct vm_anon *anon;
int strat, free_list;
{
- int lcv, try1, try2, s, zeroit = 0;
+ int lcv, try1, try2, s, zeroit = 0, color;
struct vm_page *pg;
- struct pglist *freeq;
- struct pgfreelist *pgfl;
boolean_t use_reserve;
KASSERT(obj == NULL || anon == NULL);
@@ -904,15 +1059,20 @@ uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
s = uvm_lock_fpageq();
/*
+ * This implements a global round-robin page coloring
+ * algorithm.
+ *
+ * XXXJRT: Should we make the `nextcolor' per-cpu?
+ * XXXJRT: What about virtually-indexed caches?
+ */
+ color = uvm.page_free_nextcolor;
+
+ /*
* check to see if we need to generate some free pages waking
* the pagedaemon.
*/
- if (uvmexp.free + uvmexp.paging < uvmexp.freemin ||
- (uvmexp.free + uvmexp.paging < uvmexp.freetarg &&
- uvmexp.inactive < uvmexp.inactarg)) {
- wakeup(&uvm.pagedaemon);
- }
+ UVM_KICK_PDAEMON();
/*
* fail if any of these conditions is true:
@@ -952,11 +1112,9 @@ uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
case UVM_PGA_STRAT_NORMAL:
/* Check all freelists in descending priority order. */
for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
- pgfl = &uvm.page_free[lcv];
- if ((pg = TAILQ_FIRST((freeq =
- &pgfl->pgfl_queues[try1]))) != NULL ||
- (pg = TAILQ_FIRST((freeq =
- &pgfl->pgfl_queues[try2]))) != NULL)
+ pg = uvm_pagealloc_pgfl(&uvm.page_free[lcv],
+ try1, try2, &color);
+ if (pg != NULL)
goto gotit;
}
@@ -967,11 +1125,9 @@ uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
case UVM_PGA_STRAT_FALLBACK:
/* Attempt to allocate from the specified free list. */
KASSERT(free_list >= 0 && free_list < VM_NFREELIST);
- pgfl = &uvm.page_free[free_list];
- if ((pg = TAILQ_FIRST((freeq =
- &pgfl->pgfl_queues[try1]))) != NULL ||
- (pg = TAILQ_FIRST((freeq =
- &pgfl->pgfl_queues[try2]))) != NULL)
+ pg = uvm_pagealloc_pgfl(&uvm.page_free[free_list],
+ try1, try2, &color);
+ if (pg != NULL)
goto gotit;
/* Fall back, if possible. */
@@ -989,12 +1145,11 @@ uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
}
gotit:
- TAILQ_REMOVE(freeq, pg, pageq);
- uvmexp.free--;
-
- /* update zero'd page count */
- if (pg->flags & PG_ZERO)
- uvmexp.zeropages--;
+ /*
+ * We now know which color we actually allocated from; set
+ * the next color accordingly.
+ */
+ uvm.page_free_nextcolor = (color + 1) & uvmexp.colormask;
/*
* update allocation statistics and remember if we have to
@@ -1159,24 +1314,24 @@ uvm_pagefree(pg)
* if the object page is on loan we are going to drop ownership.
* it is possible that an anon will take over as owner for this
* page later on. the anon will want a !PG_CLEAN page so that
- * it knows it needs to allocate swap if it wants to page the
- * page out.
+ * it knows it needs to allocate swap if it wants to page the
+ * page out.
*/
if (saved_loan_count)
pg->flags &= ~PG_CLEAN; /* in case an anon takes over */
uvm_pageremove(pg);
-
+
/*
* if our page was on loan, then we just lost control over it
* (in fact, if it was loaned to an anon, the anon may have
* already taken over ownership of the page by now and thus
- * changed the loan_count [e.g. in uvmfault_anonget()]) we just
- * return (when the last loan is dropped, then the page can be
+ * changed the loan_count [e.g. in uvmfault_anonget()]) we just
+ * return (when the last loan is dropped, then the page can be
* freed by whatever was holding the last loan).
*/
- if (saved_loan_count)
+ if (saved_loan_count)
return;
} else if (saved_loan_count && (pg->pqflags & PQ_ANON)) {
@@ -1202,12 +1357,8 @@ uvm_pagefree(pg)
TAILQ_REMOVE(&uvm.page_active, pg, pageq);
pg->pqflags &= ~PQ_ACTIVE;
uvmexp.active--;
- }
- if (pg->pqflags & PQ_INACTIVE) {
- if (pg->pqflags & PQ_SWAPBACKED)
- TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
- else
- TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
+ } else if (pg->pqflags & PQ_INACTIVE) {
+ TAILQ_REMOVE(&uvm.page_inactive, pg, pageq);
pg->pqflags &= ~PQ_INACTIVE;
uvmexp.inactive--;
}
@@ -1233,7 +1384,8 @@ uvm_pagefree(pg)
s = uvm_lock_fpageq();
TAILQ_INSERT_TAIL(&uvm.page_free[
- uvm_page_lookup_freelist(pg)].pgfl_queues[PGFL_UNKNOWN], pg, pageq);
+ uvm_page_lookup_freelist(pg)].pgfl_buckets[
+ VM_PGCOLOR_BUCKET(pg)].pgfl_queues[PGFL_UNKNOWN], pg, pageq);
pg->pqflags = PQ_FREE;
#ifdef DEBUG
pg->uobject = (void *)0xdeadbeef;
@@ -1337,7 +1489,8 @@ uvm_page_own(pg, tag)
/*
* uvm_pageidlezero: zero free pages while the system is idle.
*
- * => we do at least one iteration per call, if we are below the target.
+ * => try to complete one color bucket at a time, to reduce our impact
+ * on the CPU cache.
* => we loop until we either reach the target or whichqs indicates that
* there is a process ready to run.
*/
@@ -1346,67 +1499,72 @@ uvm_pageidlezero()
{
struct vm_page *pg;
struct pgfreelist *pgfl;
- int free_list, s;
+ int free_list, s, firstbucket;
+ static int nextbucket;
- do {
- s = uvm_lock_fpageq();
+ s = uvm_lock_fpageq();
- if (uvmexp.zeropages >= UVM_PAGEZERO_TARGET) {
- uvm.page_idle_zero = FALSE;
+ firstbucket = nextbucket;
+ do {
+ if (whichqs != 0) {
uvm_unlock_fpageq(s);
return;
}
- for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
- pgfl = &uvm.page_free[free_list];
- if ((pg = TAILQ_FIRST(&pgfl->pgfl_queues[
- PGFL_UNKNOWN])) != NULL)
- break;
- }
-
- if (pg == NULL) {
- /*
- * No non-zero'd pages; don't bother trying again
- * until we know we have non-zero'd pages free.
- */
+ if (uvmexp.zeropages >= UVM_PAGEZERO_TARGET) {
uvm.page_idle_zero = FALSE;
uvm_unlock_fpageq(s);
return;
}
- TAILQ_REMOVE(&pgfl->pgfl_queues[PGFL_UNKNOWN], pg, pageq);
- uvmexp.free--;
- uvm_unlock_fpageq(s);
-
+ for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
+ pgfl = &uvm.page_free[free_list];
+ while ((pg = TAILQ_FIRST(&pgfl->pgfl_buckets[
+ nextbucket].pgfl_queues[PGFL_UNKNOWN])) != NULL) {
+ if (whichqs != 0) {
+ uvm_unlock_fpageq(s);
+ return;
+ }
+
+ TAILQ_REMOVE(&pgfl->pgfl_buckets[
+ nextbucket].pgfl_queues[PGFL_UNKNOWN],
+ pg, pageq);
+ uvmexp.free--;
+ uvm_unlock_fpageq(s);
#ifdef PMAP_PAGEIDLEZERO
- if (PMAP_PAGEIDLEZERO(VM_PAGE_TO_PHYS(pg)) == FALSE) {
- /*
- * The machine-dependent code detected some
- * reason for us to abort zeroing pages,
- * probably because there is a process now
- * ready to run.
- */
- s = uvm_lock_fpageq();
- TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_UNKNOWN],
- pg, pageq);
- uvmexp.free++;
- uvmexp.zeroaborts++;
- uvm_unlock_fpageq(s);
- return;
- }
+ if (PMAP_PAGEIDLEZERO(VM_PAGE_TO_PHYS(pg)) ==
+ FALSE) {
+ /*
+ * The machine-dependent code detected
+ * some reason for us to abort zeroing
+ * pages, probably because there is a
+ * process now ready to run.
+ */
+ s = uvm_lock_fpageq();
+ TAILQ_INSERT_HEAD(&pgfl->pgfl_buckets[
+ nextbucket].pgfl_queues[
+ PGFL_UNKNOWN], pg, pageq);
+ uvmexp.free++;
+ uvmexp.zeroaborts++;
+ uvm_unlock_fpageq(s);
+ return;
+ }
#else
- /*
- * XXX This will toast the cache unless the pmap_zero_page()
- * XXX implementation does uncached access.
- */
- pmap_zero_page(VM_PAGE_TO_PHYS(pg));
-#endif
- pg->flags |= PG_ZERO;
+ pmap_zero_page(VM_PAGE_TO_PHYS(pg));
+#endif /* PMAP_PAGEIDLEZERO */
+ pg->flags |= PG_ZERO;
+
+ s = uvm_lock_fpageq();
+ TAILQ_INSERT_HEAD(&pgfl->pgfl_buckets[
+ nextbucket].pgfl_queues[PGFL_ZEROS],
+ pg, pageq);
+ uvmexp.free++;
+ uvmexp.zeropages++;
+ }
+ }
- s = uvm_lock_fpageq();
- TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_ZEROS], pg, pageq);
- uvmexp.free++;
- uvmexp.zeropages++;
- uvm_unlock_fpageq(s);
- } while (whichqs == 0);
+ nextbucket = (nextbucket + 1) & uvmexp.colormask;
+ } while (nextbucket != firstbucket);
+
+ uvm_unlock_fpageq(s);
}
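
One detail worth spelling out in the rewritten idle-zero loop above: nextbucket is static, so it persists across calls. A full pass starts at firstbucket, drains the PGFL_UNKNOWN queue of one color bucket per outer iteration across every free list, and stops when nextbucket wraps back to firstbucket. If the loop bails out early because whichqs went non-zero or the zero-page target was reached, nextbucket is left pointing at the bucket being worked on, so the next idle period resumes there instead of hammering the same color every time.
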
diff --git a/sys/uvm/uvm_page.h b/sys/uvm/uvm_page.h
index 9d3f67513ca..e5b01d58a95 100644
--- a/sys/uvm/uvm_page.h
+++ b/sys/uvm/uvm_page.h
@@ -1,9 +1,9 @@
-/* $OpenBSD: uvm_page.h,v 1.13 2001/11/12 01:26:09 art Exp $ */
-/* $NetBSD: uvm_page.h,v 1.19 2000/12/28 08:24:55 chs Exp $ */
+/* $OpenBSD: uvm_page.h,v 1.14 2001/11/28 19:28:15 art Exp $ */
+/* $NetBSD: uvm_page.h,v 1.27 2001/06/28 00:26:38 thorpej Exp $ */
-/*
+/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
- * Copyright (c) 1991, 1993, The Regents of the University of California.
+ * Copyright (c) 1991, 1993, The Regents of the University of California.
*
* All rights reserved.
*
@@ -21,7 +21,7 @@
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Charles D. Cranor,
- * Washington University, the University of California, Berkeley and
+ * Washington University, the University of California, Berkeley and
* its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
@@ -45,17 +45,17 @@
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
* All rights reserved.
- *
+ *
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
@@ -128,14 +128,22 @@ struct vm_page {
struct uvm_object *uobject; /* object (O,P) */
voff_t offset; /* offset into object (O,P) */
- u_short flags; /* object flags [O] */
- u_short version; /* version count [O] */
- u_short wire_count; /* wired down map refs [P] */
- u_short pqflags; /* page queue flags [P] */
+ u_int flags: 16, /* object flags [O] */
+ version: 16; /* version count [O] */
+
+ u_int wire_count: 16, /* wired down map refs [P] */
+ pqflags: 8, /* page queue flags [P] */
+ : 8;
+
u_int loan_count; /* number of active loans
* to read: [O or P]
* to modify: [O _and_ P] */
paddr_t phys_addr; /* physical address of page */
+
+#ifdef __HAVE_VM_PAGE_MD
+ struct vm_page_md mdpage; /* pmap-specific data */
+#endif
+
#if defined(UVM_PAGE_TRKOWN)
/* debugging fields to track page ownership */
pid_t owner; /* proc that set PG_BUSY */
@@ -145,14 +153,12 @@ struct vm_page {
/*
* These are the flags defined for vm_page.
- *
- * Note: PG_FILLED and PG_DIRTY are added for the filesystems.
*/
/*
* locking rules:
* PG_ ==> locked by object lock
- * PQ_ ==> lock by page queue lock
+ * PQ_ ==> lock by page queue lock
* PQ_FREE is locked by free queue lock and is mutex with all other PQs
*
* PG_ZERO is used to indicate that a page has been pre-zero'd. This flag
@@ -172,12 +178,12 @@ struct vm_page {
#define PG_PAGER1 0x1000 /* pager-specific flag */
-#define PQ_FREE 0x0001 /* page is on free list */
-#define PQ_INACTIVE 0x0002 /* page is in inactive list */
-#define PQ_ACTIVE 0x0004 /* page is in active list */
-#define PQ_ANON 0x0010 /* page is part of an anon, rather
+#define PQ_FREE 0x01 /* page is on free list */
+#define PQ_INACTIVE 0x02 /* page is in inactive list */
+#define PQ_ACTIVE 0x04 /* page is in active list */
+#define PQ_ANON 0x10 /* page is part of an anon, rather
than an uvm_object */
-#define PQ_AOBJ 0x0020 /* page is part of an anonymous
+#define PQ_AOBJ 0x20 /* page is part of an anonymous
uvm_object */
#define PQ_SWAPBACKED (PQ_ANON|PQ_AOBJ)
#define PQ_ENCRYPT 0x0040 /* page needs {en,de}cryption */
@@ -210,7 +216,9 @@ struct vm_physseg {
int free_list; /* which free list they belong on */
struct vm_page *pgs; /* vm_page structures (from start) */
struct vm_page *lastpg; /* vm_page structure for end */
+/* #ifdef __HAVE_PMAP_PHYSSEG XXX */
struct pmap_physseg pmseg; /* pmap specific (MD) data */
+/* #endif */
};
#ifdef _KERNEL
@@ -224,7 +232,7 @@ extern boolean_t vm_page_zero_enable;
/*
* Each pageable resident page falls into one of three lists:
*
- * free
+ * free
* Available for allocation now.
* inactive
* Not referenced in any map, but still has an
@@ -254,7 +262,7 @@ extern int vm_nphysseg;
#ifdef UVM_PAGE_INLINE
#define PAGE_INLINE static __inline
-#else
+#else
#define PAGE_INLINE /* nothing */
#endif /* UVM_PAGE_INLINE */
@@ -270,6 +278,7 @@ void uvm_page_own __P((struct vm_page *, char *));
boolean_t uvm_page_physget __P((paddr_t *));
#endif
void uvm_page_rehash __P((void));
+void uvm_page_recolor __P((int));
void uvm_pageidlezero __P((void));
PAGE_INLINE int uvm_lock_fpageq __P((void));
@@ -308,6 +317,12 @@ static int vm_physseg_find __P((paddr_t, int *));
#define VM_PAGE_TO_PHYS(entry) ((entry)->phys_addr)
/*
+ * Compute the page color bucket for a given page.
+ */
+#define VM_PGCOLOR_BUCKET(pg) \
+ (atop(VM_PAGE_TO_PHYS((pg))) & uvmexp.colormask)
+
+/*
* when VM_PHYSSEG_MAX is 1, we can simplify these functions
*/
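
The new VM_PGCOLOR_BUCKET() macro above derives a page's color from its physical address: convert the address to a page frame number (atop) and mask with uvmexp.colormask. A small user-space sketch of the same arithmetic, with an assumed 4 KB page size and 16 colors (both values are illustrative, not taken from the patch):

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT	12			/* assumed 4 KB pages */
	#define atop(pa)	((pa) >> PAGE_SHIFT)	/* physical address -> page number */

	int
	main(void)
	{
		uint64_t colormask = 16 - 1;		/* assumed 16 colors, power of two */
		uint64_t pa = 0x12345000;		/* example physical address */

		/* Same computation as VM_PGCOLOR_BUCKET(pg). */
		printf("color bucket = %llu\n",
		    (unsigned long long)(atop(pa) & colormask));
		return 0;
	}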
diff --git a/sys/uvm/uvm_page_i.h b/sys/uvm/uvm_page_i.h
index 3ea680714c6..cf8636bb42d 100644
--- a/sys/uvm/uvm_page_i.h
+++ b/sys/uvm/uvm_page_i.h
@@ -1,9 +1,9 @@
-/* $OpenBSD: uvm_page_i.h,v 1.11 2001/11/27 05:27:12 art Exp $ */
-/* $NetBSD: uvm_page_i.h,v 1.16 2001/01/28 23:30:45 thorpej Exp $ */
+/* $OpenBSD: uvm_page_i.h,v 1.12 2001/11/28 19:28:15 art Exp $ */
+/* $NetBSD: uvm_page_i.h,v 1.19 2001/06/27 23:57:17 thorpej Exp $ */
-/*
+/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
- * Copyright (c) 1991, 1993, The Regents of the University of California.
+ * Copyright (c) 1991, 1993, The Regents of the University of California.
*
* All rights reserved.
*
@@ -21,7 +21,7 @@
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Charles D. Cranor,
- * Washington University, the University of California, Berkeley and
+ * Washington University, the University of California, Berkeley and
* its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
@@ -45,17 +45,17 @@
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
* All rights reserved.
- *
+ *
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
@@ -159,12 +159,8 @@ uvm_pagewire(pg)
TAILQ_REMOVE(&uvm.page_active, pg, pageq);
pg->pqflags &= ~PQ_ACTIVE;
uvmexp.active--;
- }
- if (pg->pqflags & PQ_INACTIVE) {
- if (pg->pqflags & PQ_SWAPBACKED)
- TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
- else
- TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
+ } else if (pg->pqflags & PQ_INACTIVE) {
+ TAILQ_REMOVE(&uvm.page_inactive, pg, pageq);
pg->pqflags &= ~PQ_INACTIVE;
uvmexp.inactive--;
}
@@ -174,12 +170,12 @@ uvm_pagewire(pg)
}
/*
- * uvm_pageunwire: unwire the page.
+ * uvm_pageunwire: unwire the page.
*
* => activate if wire count goes to zero.
* => caller must lock page queues
*/
-
+
PAGE_INLINE void
uvm_pageunwire(pg)
struct vm_page *pg;
@@ -213,10 +209,7 @@ uvm_pagedeactivate(pg)
}
if ((pg->pqflags & PQ_INACTIVE) == 0) {
KASSERT(pg->wire_count == 0);
- if (pg->pqflags & PQ_SWAPBACKED)
- TAILQ_INSERT_TAIL(&uvm.page_inactive_swp, pg, pageq);
- else
- TAILQ_INSERT_TAIL(&uvm.page_inactive_obj, pg, pageq);
+ TAILQ_INSERT_TAIL(&uvm.page_inactive, pg, pageq);
pg->pqflags |= PQ_INACTIVE;
uvmexp.inactive++;
/*
@@ -242,10 +235,7 @@ uvm_pageactivate(pg)
struct vm_page *pg;
{
if (pg->pqflags & PQ_INACTIVE) {
- if (pg->pqflags & PQ_SWAPBACKED)
- TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
- else
- TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
+ TAILQ_REMOVE(&uvm.page_inactive, pg, pageq);
pg->pqflags &= ~PQ_INACTIVE;
uvmexp.inactive--;
}
diff --git a/sys/uvm/uvm_pager.c b/sys/uvm/uvm_pager.c
index 2c7619d6c04..662fb9fa346 100644
--- a/sys/uvm/uvm_pager.c
+++ b/sys/uvm/uvm_pager.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_pager.c,v 1.24 2001/11/28 13:47:40 art Exp $ */
-/* $NetBSD: uvm_pager.c,v 1.43 2001/03/15 06:10:58 chs Exp $ */
+/* $OpenBSD: uvm_pager.c,v 1.25 2001/11/28 19:28:15 art Exp $ */
+/* $NetBSD: uvm_pager.c,v 1.48 2001/06/23 20:47:44 chs Exp $ */
/*
*
@@ -71,8 +71,8 @@ struct uvm_pagerops *uvmpagerops[] = {
* the pager map: provides KVA for I/O
*/
-vm_map_t pager_map; /* XXX */
-simple_lock_data_t pager_map_wanted_lock;
+struct vm_map *pager_map; /* XXX */
+struct simplelock pager_map_wanted_lock;
boolean_t pager_map_wanted; /* locked by pager map */
static vaddr_t emergva;
static boolean_t emerginuse;
@@ -100,7 +100,7 @@ uvm_pager_init()
/*
* init ASYNC I/O queue
*/
-
+
TAILQ_INIT(&uvm.aio_done);
/*
@@ -148,7 +148,7 @@ ReStart:
size = npages << PAGE_SHIFT;
kva = 0; /* let system choose VA */
- if (uvm_map(pager_map, &kva, size, NULL,
+ if (uvm_map(pager_map, &kva, size, NULL,
UVM_UNKNOWN_OFFSET, 0, UVM_FLAG_NOMERGE) != 0) {
if (curproc == uvm.pagedaemon_proc) {
simple_lock(&pager_map_wanted_lock);
@@ -169,9 +169,9 @@ ReStart:
return(0);
}
simple_lock(&pager_map_wanted_lock);
- pager_map_wanted = TRUE;
+ pager_map_wanted = TRUE;
UVMHIST_LOG(maphist, " SLEEPING on pager_map",0,0,0,0);
- UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
+ UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
"pager_map", 0);
goto ReStart;
}
@@ -186,6 +186,7 @@ enter:
prot, PMAP_WIRED | ((pp->flags & PG_FAKE) ? prot :
VM_PROT_READ));
}
+ pmap_update();
UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
return(kva);
@@ -204,7 +205,7 @@ uvm_pagermapout(kva, npages)
int npages;
{
vsize_t size = npages << PAGE_SHIFT;
- vm_map_entry_t entries;
+ struct vm_map_entry *entries;
UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);
@@ -236,6 +237,7 @@ remove:
pmap_remove(pmap_kernel(), kva, kva + (npages << PAGE_SHIFT));
if (entries)
uvm_unmap_detach(entries, 0);
+ pmap_update();
UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}
@@ -273,7 +275,7 @@ uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi)
int center_idx, forward, incr;
UVMHIST_FUNC("uvm_mk_pcluster"); UVMHIST_CALLED(maphist);
- /*
+ /*
* center page should already be busy and write protected. XXX:
* suppose page is wired? if we lock, then a process could
* fault/block on it. if we don't lock, a process could write the
@@ -309,8 +311,8 @@ uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi)
*npages = 1;
/*
- * attempt to cluster around the left [backward], and then
- * the right side [forward].
+ * attempt to cluster around the left [backward], and then
+ * the right side [forward].
*/
for (forward = 0 ; forward <= 1 ; forward++) {
@@ -369,7 +371,7 @@ uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi)
(*npages)++;
}
}
-
+
/*
* done! return the cluster array to the caller!!!
*/
@@ -404,7 +406,7 @@ uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi)
* 1. we return the error code of the pageout
* 2. we return with the page queues unlocked
* 3. if (uobj != NULL) [!swap_backed] we return with
- * uobj locked _only_ if PGO_PDFREECLUST is set
+ * uobj locked _only_ if PGO_PDFREECLUST is set
* AND result == 0 AND async. in all other cases
* we return with uobj unlocked. [this is a hack
* that allows the pagedaemon to save one lock/unlock
@@ -494,7 +496,7 @@ ReTry:
* we have attempted the I/O.
*
* if the I/O was a success then:
- * if !PGO_PDFREECLUST, we return the cluster to the
+ * if !PGO_PDFREECLUST, we return the cluster to the
* caller (who must un-busy all pages)
* else we un-busy cluster pages for the pagedaemon
*
@@ -535,23 +537,20 @@ ReTry:
uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP);
/*
- * for failed swap-backed pageouts with a "pg",
- * we need to reset pg's swslot to either:
- * "swblk" (for transient errors, so we can retry),
- * or 0 (for hard errors).
+ * for hard failures on swap-backed pageouts with a "pg"
+ * we need to clear pg's swslot since uvm_pager_dropcluster()
+ * didn't do it and we aren't going to retry.
*/
- if (uobj == NULL && pg != NULL) {
- int nswblk = (result == EAGAIN) ? swblk : 0;
+ if (uobj == NULL && pg != NULL && result != EAGAIN) {
if (pg->pqflags & PQ_ANON) {
simple_lock(&pg->uanon->an_lock);
- pg->uanon->an_swslot = nswblk;
+ pg->uanon->an_swslot = 0;
simple_unlock(&pg->uanon->an_lock);
} else {
simple_lock(&pg->uobject->vmobjlock);
uao_set_swslot(pg->uobject,
- pg->offset >> PAGE_SHIFT,
- nswblk);
+ pg->offset >> PAGE_SHIFT, 0);
simple_unlock(&pg->uobject->vmobjlock);
}
}
@@ -591,18 +590,18 @@ ReTry:
* was one). give up! the caller only has one page ("pg")
* to worry about.
*/
-
+
if (uobj && (flags & PGO_PDFREECLUST) != 0)
simple_lock(&uobj->vmobjlock);
return(result);
}
/*
- * uvm_pager_dropcluster: drop a cluster we have built (because we
+ * uvm_pager_dropcluster: drop a cluster we have built (because we
* got an error, or, if PGO_PDFREECLUST we are un-busying the
* cluster pages on behalf of the pagedaemon).
*
- * => uobj, if non-null, is a non-swap-backed object that is
+ * => uobj, if non-null, is a non-swap-backed object that is
* locked by the caller. we return with this object still
* locked.
* => page queues are not locked
@@ -610,7 +609,7 @@ ReTry:
* => ppsp/npages is our current cluster
* => flags: PGO_PDFREECLUST: pageout was a success: un-busy cluster
* pages on behalf of the pagedaemon.
- * PGO_REALLOCSWAP: drop previously allocated swap slots for
+ * PGO_REALLOCSWAP: drop previously allocated swap slots for
* clustered swap-backed pages (except for "pg" if !NULL)
* "swblk" is the start of swap alloc (e.g. for ppsp[0])
* [only meaningful if swap-backed (uobj == NULL)]
@@ -624,7 +623,7 @@ uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags)
int flags;
{
int lcv;
- boolean_t obj_is_alive;
+ boolean_t obj_is_alive;
struct uvm_object *saved_uobj;
/*
@@ -636,7 +635,7 @@ uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags)
/* skip "pg" or empty slot */
if (ppsp[lcv] == pg || ppsp[lcv] == NULL)
continue;
-
+
/*
* if swap-backed, gain lock on object that owns page. note
* that PQ_ANON bit can't change as long as we are holding
@@ -689,7 +688,7 @@ uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags)
saved_uobj = ppsp[lcv]->uobject;
obj_is_alive =
saved_uobj->pgops->pgo_releasepg(ppsp[lcv], NULL);
-
+
/* for normal objects, "pg" is still PG_BUSY by us,
* so obj can't die */
KASSERT(!uobj || obj_is_alive);
@@ -712,7 +711,7 @@ uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags)
}
/*
- * if we are operating on behalf of the pagedaemon and we
+ * if we are operating on behalf of the pagedaemon and we
* had a successful pageout update the page!
*/
if (flags & PGO_PDFREECLUST) {
diff --git a/sys/uvm/uvm_pager.h b/sys/uvm/uvm_pager.h
index a826ada04de..6b7ddc02d24 100644
--- a/sys/uvm/uvm_pager.h
+++ b/sys/uvm/uvm_pager.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_pager.h,v 1.15 2001/11/28 13:47:40 art Exp $ */
-/* $NetBSD: uvm_pager.h,v 1.21 2001/03/10 22:46:50 chs Exp $ */
+/* $OpenBSD: uvm_pager.h,v 1.16 2001/11/28 19:28:15 art Exp $ */
+/* $NetBSD: uvm_pager.h,v 1.23 2001/05/26 21:27:21 chs Exp $ */
/*
*
@@ -89,20 +89,21 @@
struct uvm_pagerops {
void (*pgo_init) __P((void));/* init pager */
void (*pgo_reference) /* add reference to obj */
- __P((struct uvm_object *));
+ __P((struct uvm_object *));
void (*pgo_detach) /* drop reference to obj */
__P((struct uvm_object *));
int (*pgo_fault) /* special nonstd fault fn */
__P((struct uvm_faultinfo *, vaddr_t,
- vm_page_t *, int, int, vm_fault_t,
+ struct vm_page **, int, int, vm_fault_t,
vm_prot_t, int));
boolean_t (*pgo_flush) /* flush pages out of obj */
__P((struct uvm_object *, voff_t, voff_t, int));
int (*pgo_get) /* get/read page */
__P((struct uvm_object *, voff_t,
- vm_page_t *, int *, int, vm_prot_t, int, int));
+ struct vm_page **, int *, int, vm_prot_t, int,
+ int));
int (*pgo_put) /* put/write page */
- __P((struct uvm_object *, vm_page_t *,
+ __P((struct uvm_object *, struct vm_page **,
int, boolean_t));
void (*pgo_cluster) /* return range of cluster */
__P((struct uvm_object *, voff_t, voff_t *,
@@ -143,7 +144,7 @@ struct uvm_pagerops {
#ifdef UVM_PAGER_INLINE
#define PAGER_INLINE static __inline
-#else
+#else
#define PAGER_INLINE /* nothing */
#endif /* UVM_PAGER_INLINE */
@@ -151,12 +152,12 @@ struct uvm_pagerops {
* prototypes
*/
-void uvm_pager_dropcluster __P((struct uvm_object *,
- struct vm_page *, struct vm_page **,
+void uvm_pager_dropcluster __P((struct uvm_object *,
+ struct vm_page *, struct vm_page **,
int *, int));
void uvm_pager_init __P((void));
-int uvm_pager_put __P((struct uvm_object *, struct vm_page *,
- struct vm_page ***, int *, int,
+int uvm_pager_put __P((struct uvm_object *, struct vm_page *,
+ struct vm_page ***, int *, int,
voff_t, voff_t));
PAGER_INLINE struct vm_page *uvm_pageratop __P((vaddr_t));
@@ -164,7 +165,7 @@ PAGER_INLINE struct vm_page *uvm_pageratop __P((vaddr_t));
vaddr_t uvm_pagermapin __P((struct vm_page **, int, int));
void uvm_pagermapout __P((vaddr_t, int));
struct vm_page **uvm_mk_pcluster __P((struct uvm_object *, struct vm_page **,
- int *, struct vm_page *, int,
+ int *, struct vm_page *, int,
voff_t, voff_t));
/* Flags to uvm_pagermapin() */
diff --git a/sys/uvm/uvm_pager_i.h b/sys/uvm/uvm_pager_i.h
index 397c0e544cd..f1b9f5e42f2 100644
--- a/sys/uvm/uvm_pager_i.h
+++ b/sys/uvm/uvm_pager_i.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_pager_i.h,v 1.8 2001/11/07 02:55:50 art Exp $ */
-/* $NetBSD: uvm_pager_i.h,v 1.10 2000/11/25 06:28:00 chs Exp $ */
+/* $OpenBSD: uvm_pager_i.h,v 1.9 2001/11/28 19:28:15 art Exp $ */
+/* $NetBSD: uvm_pager_i.h,v 1.11 2001/05/25 04:06:16 chs Exp $ */
/*
*
@@ -32,7 +32,7 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * from: Id: uvm_pager_i.h,v 1.1.2.2 1997/10/09 23:05:46 chuck Exp
+ * from: Id: uvm_pager_i.h,v 1.1.2.2 1997/10/09 23:05:46 chuck Exp
*/
#ifndef _UVM_UVM_PAGER_I_H_
@@ -60,13 +60,13 @@ uvm_pageratop(kva)
struct vm_page *pg;
paddr_t pa;
boolean_t rv;
-
+
rv = pmap_extract(pmap_kernel(), kva, &pa);
KASSERT(rv);
pg = PHYS_TO_VM_PAGE(pa);
KASSERT(pg != NULL);
return (pg);
-}
+}
#endif /* defined(UVM_PAGER_INLINE) || defined(UVM_PAGER) */
diff --git a/sys/uvm/uvm_param.h b/sys/uvm/uvm_param.h
index 46b0b1a79e0..c183c97f500 100644
--- a/sys/uvm/uvm_param.h
+++ b/sys/uvm/uvm_param.h
@@ -1,7 +1,7 @@
-/* $OpenBSD: uvm_param.h,v 1.4 2001/11/28 13:47:40 art Exp $ */
-/* $NetBSD: uvm_param.h,v 1.7 2001/03/21 03:16:06 chs Exp $ */
+/* $OpenBSD: uvm_param.h,v 1.5 2001/11/28 19:28:15 art Exp $ */
+/* $NetBSD: uvm_param.h,v 1.11 2001/07/14 06:36:03 matt Exp $ */
-/*
+/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
@@ -43,17 +43,17 @@
* All rights reserved.
*
* Authors: Avadis Tevanian, Jr., Michael Wayne Young
- *
+ *
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
@@ -114,7 +114,9 @@ typedef int boolean_t;
#define VM_ANONMIN 7
#define VM_VTEXTMIN 8
#define VM_VNODEMIN 9
-#define VM_MAXID 10 /* number of valid vm ids */
+#define VM_MAXSLP 10
+#define VM_USPACE 11
+#define VM_MAXID 12 /* number of valid vm ids */
#define CTL_VM_NAMES { \
{ 0, 0 }, \
@@ -127,6 +129,8 @@ typedef int boolean_t;
{ "anonmin", CTLTYPE_INT }, \
{ "vtextmin", CTLTYPE_INT }, \
{ "vnodemin", CTLTYPE_INT }, \
+ { "maxslp", CTLTYPE_INT }, \
+ { "uspace", CTLTYPE_INT }, \
}
struct _ps_strings {
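
With the VM_MAXSLP and VM_USPACE ids added above, the values become readable from userland as vm.maxslp and vm.uspace. A hedged sketch using sysctl(3), assuming the patched header that defines VM_MAXSLP is visible through <sys/sysctl.h> (or <uvm/uvm_param.h>) on the installed system:

	#include <sys/types.h>
	#include <sys/sysctl.h>
	#include <stdio.h>

	int
	main(void)
	{
		int mib[2] = { CTL_VM, VM_MAXSLP };	/* VM_MAXSLP: new id from this patch */
		int maxslp;
		size_t len = sizeof(maxslp);

		if (sysctl(mib, 2, &maxslp, &len, NULL, 0) == -1) {
			perror("sysctl vm.maxslp");
			return 1;
		}
		printf("vm.maxslp = %d\n", maxslp);
		return 0;
	}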
diff --git a/sys/uvm/uvm_pdaemon.c b/sys/uvm/uvm_pdaemon.c
index d25cd2d6119..2e46a28ec7d 100644
--- a/sys/uvm/uvm_pdaemon.c
+++ b/sys/uvm/uvm_pdaemon.c
@@ -1,9 +1,9 @@
-/* $OpenBSD: uvm_pdaemon.c,v 1.19 2001/11/28 13:47:40 art Exp $ */
-/* $NetBSD: uvm_pdaemon.c,v 1.31 2001/03/10 22:46:50 chs Exp $ */
+/* $OpenBSD: uvm_pdaemon.c,v 1.20 2001/11/28 19:28:15 art Exp $ */
+/* $NetBSD: uvm_pdaemon.c,v 1.36 2001/06/27 18:52:10 thorpej Exp $ */
-/*
+/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
- * Copyright (c) 1991, 1993, The Regents of the University of California.
+ * Copyright (c) 1991, 1993, The Regents of the University of California.
*
* All rights reserved.
*
@@ -21,7 +21,7 @@
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Charles D. Cranor,
- * Washington University, the University of California, Berkeley and
+ * Washington University, the University of California, Berkeley and
* its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
@@ -45,17 +45,17 @@
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
* All rights reserved.
- *
+ *
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
@@ -373,14 +373,6 @@ uvmpd_scan_inactive(pglst)
UVMHIST_FUNC("uvmpd_scan_inactive"); UVMHIST_CALLED(pdhist);
/*
- * note: we currently keep swap-backed pages on a seperate inactive
- * list from object-backed pages. however, merging the two lists
- * back together again hasn't been ruled out. thus, we keep our
- * swap cluster in "swpps" rather than in pps (allows us to mix
- * clustering types in the event of a mixed inactive queue).
- */
-
- /*
* swslot is non-zero if we are building a swap cluster. we want
* to stay in the loop while we have a page to scan or we have
* a swap-cluster to build.
@@ -695,13 +687,20 @@ uvmpd_scan_inactive(pglst)
* add block to cluster
*/
- swpps[swcpages] = p;
- if (anon)
+ if (anon) {
anon->an_swslot = swslot + swcpages;
- else
- uao_set_swslot(uobj,
+ } else {
+ result = uao_set_swslot(uobj,
p->offset >> PAGE_SHIFT,
swslot + swcpages);
+ if (result == -1) {
+ p->flags &= ~PG_BUSY;
+ UVM_PAGE_OWN(p, NULL);
+ simple_unlock(&uobj->vmobjlock);
+ continue;
+ }
+ }
+ swpps[swcpages] = p;
swcpages++;
}
} else {
@@ -872,12 +871,7 @@ uvmpd_scan()
got_it = FALSE;
pages_freed = uvmexp.pdfreed;
- if ((uvmexp.pdrevs & 1) != 0 && uvmexp.nswapdev != 0)
- got_it = uvmpd_scan_inactive(&uvm.page_inactive_swp);
- if (!got_it)
- got_it = uvmpd_scan_inactive(&uvm.page_inactive_obj);
- if (!got_it && (uvmexp.pdrevs & 1) == 0 && uvmexp.nswapdev != 0)
- (void) uvmpd_scan_inactive(&uvm.page_inactive_swp);
+ (void) uvmpd_scan_inactive(&uvm.page_inactive);
pages_freed = uvmexp.pdfreed - pages_freed;
/*
@@ -965,13 +959,14 @@ uvmpd_scan()
}
/*
- * If the page has not been referenced since the
- * last scan, deactivate the page if there is a
- * shortage of inactive pages.
+ * If we're short on inactive pages, move this over
+ * to the inactive list. The second hand will sweep
+ * it later, and if it has been referenced again, it
+ * will be moved back to active.
*/
- if (inactive_shortage > 0 &&
- pmap_clear_reference(p) == FALSE) {
+ if (inactive_shortage > 0) {
+ pmap_clear_reference(p);
/* no need to check wire_count as pg is "active" */
uvm_pagedeactivate(p);
uvmexp.pddeact++;
diff --git a/sys/uvm/uvm_pdaemon.h b/sys/uvm/uvm_pdaemon.h
index 34bb311c85a..bc6b96f5a07 100644
--- a/sys/uvm/uvm_pdaemon.h
+++ b/sys/uvm/uvm_pdaemon.h
@@ -1,9 +1,9 @@
-/* $OpenBSD: uvm_pdaemon.h,v 1.7 2001/07/18 14:27:07 art Exp $ */
-/* $NetBSD: uvm_pdaemon.h,v 1.8 1999/11/04 21:51:42 thorpej Exp $ */
+/* $OpenBSD: uvm_pdaemon.h,v 1.8 2001/11/28 19:28:15 art Exp $ */
+/* $NetBSD: uvm_pdaemon.h,v 1.9 2001/05/25 04:06:17 chs Exp $ */
-/*
+/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
- * Copyright (c) 1991, 1993, The Regents of the University of California.
+ * Copyright (c) 1991, 1993, The Regents of the University of California.
*
* All rights reserved.
*
@@ -21,7 +21,7 @@
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Charles D. Cranor,
- * Washington University, the University of California, Berkeley and
+ * Washington University, the University of California, Berkeley and
* its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
@@ -45,17 +45,17 @@
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
* All rights reserved.
- *
+ *
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
diff --git a/sys/uvm/uvm_pglist.c b/sys/uvm/uvm_pglist.c
index d69407da6fe..e747f827e6b 100644
--- a/sys/uvm/uvm_pglist.c
+++ b/sys/uvm/uvm_pglist.c
@@ -1,20 +1,20 @@
-/* $OpenBSD: uvm_pglist.c,v 1.10 2001/11/12 01:26:10 art Exp $ */
-/* $NetBSD: uvm_pglist.c,v 1.13 2001/02/18 21:19:08 chs Exp $ */
+/* $OpenBSD: uvm_pglist.c,v 1.11 2001/11/28 19:28:15 art Exp $ */
+/* $NetBSD: uvm_pglist.c,v 1.17 2001/06/27 21:18:34 thorpej Exp $ */
/*-
* Copyright (c) 1997 The NetBSD Foundation, Inc.
* All rights reserved.
- *
+ *
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
- * NASA Ames Research Center.
+ * NASA Ames Research Center.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
+ * 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
@@ -24,7 +24,7 @@
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
- *
+ *
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
@@ -78,7 +78,7 @@ u_long uvm_pglistalloc_npages;
* low the low address of the allowed allocation range.
* high the high address of the allowed allocation range.
* alignment memory must be aligned to this power-of-two boundary.
- * boundary no segment in the allocation may cross this
+ * boundary no segment in the allocation may cross this
* power-of-two boundary (relative to zero).
*/
@@ -92,16 +92,16 @@ uvm_pglistalloc(size, low, high, alignment, boundary, rlist, nsegs, waitok)
paddr_t try, idxpa, lastidxpa;
int psi;
struct vm_page *pgs;
- int s, tryidx, idx, pgflidx, end, error, free_list;
- vm_page_t m;
+ int s, tryidx, idx, pgflidx, end, error, free_list, color;
+ struct vm_page *m;
u_long pagemask;
#ifdef DEBUG
- vm_page_t tp;
+ struct vm_page *tp;
#endif
KASSERT((alignment & (alignment - 1)) == 0);
KASSERT((boundary & (boundary - 1)) == 0);
-
+
/*
* Our allocations are always page granularity, so our alignment
* must be, too.
@@ -198,10 +198,11 @@ uvm_pglistalloc(size, low, high, alignment, boundary, rlist, nsegs, waitok)
while (idx < end) {
m = &pgs[idx];
free_list = uvm_page_lookup_freelist(m);
+ color = VM_PGCOLOR_BUCKET(m);
pgflidx = (m->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN;
#ifdef DEBUG
for (tp = TAILQ_FIRST(&uvm.page_free[
- free_list].pgfl_queues[pgflidx]);
+ free_list].pgfl_buckets[color].pgfl_queues[pgflidx]);
tp != NULL;
tp = TAILQ_NEXT(tp, pageq)) {
if (tp == m)
@@ -210,8 +211,8 @@ uvm_pglistalloc(size, low, high, alignment, boundary, rlist, nsegs, waitok)
if (tp == NULL)
panic("uvm_pglistalloc: page not on freelist");
#endif
- TAILQ_REMOVE(&uvm.page_free[free_list].pgfl_queues[pgflidx],
- m, pageq);
+ TAILQ_REMOVE(&uvm.page_free[free_list].pgfl_buckets[
+ color].pgfl_queues[pgflidx], m, pageq);
uvmexp.free--;
if (m->flags & PG_ZERO)
uvmexp.zeropages--;
@@ -231,12 +232,8 @@ out:
* check to see if we need to generate some free pages waking
* the pagedaemon.
*/
-
- if (uvmexp.free + uvmexp.paging < uvmexp.freemin ||
- (uvmexp.free + uvmexp.paging < uvmexp.freetarg &&
- uvmexp.inactive < uvmexp.inactarg)) {
- wakeup(&uvm.pagedaemon);
- }
+
+ UVM_KICK_PDAEMON();
uvm_unlock_fpageq(s);
@@ -253,7 +250,7 @@ void
uvm_pglistfree(list)
struct pglist *list;
{
- vm_page_t m;
+ struct vm_page *m;
int s;
/*
@@ -266,8 +263,8 @@ uvm_pglistfree(list)
TAILQ_REMOVE(list, m, pageq);
m->pqflags = PQ_FREE;
TAILQ_INSERT_TAIL(&uvm.page_free[
- uvm_page_lookup_freelist(m)].pgfl_queues[PGFL_UNKNOWN],
- m, pageq);
+ uvm_page_lookup_freelist(m)].pgfl_buckets[
+ VM_PGCOLOR_BUCKET(m)].pgfl_queues[PGFL_UNKNOWN], m, pageq);
uvmexp.free++;
if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
uvm.page_idle_zero = vm_page_zero_enable;
diff --git a/sys/uvm/uvm_pglist.h b/sys/uvm/uvm_pglist.h
index 665a20f994e..62d48fe7437 100644
--- a/sys/uvm/uvm_pglist.h
+++ b/sys/uvm/uvm_pglist.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_pglist.h,v 1.2 2001/11/07 02:55:50 art Exp $ */
-/* $NetBSD: uvm_pglist.h,v 1.3 2001/05/02 01:22:20 thorpej Exp $ */
+/* $OpenBSD: uvm_pglist.h,v 1.3 2001/11/28 19:28:15 art Exp $ */
+/* $NetBSD: uvm_pglist.h,v 1.4 2001/05/25 04:06:17 chs Exp $ */
/*-
* Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
@@ -54,8 +54,12 @@ TAILQ_HEAD(pglist, vm_page);
#define PGFL_ZEROS 1
#define PGFL_NQUEUES 2
-struct pgfreelist {
+struct pgflbucket {
struct pglist pgfl_queues[PGFL_NQUEUES];
};
-#endif
+struct pgfreelist {
+ struct pgflbucket *pgfl_buckets;
+};
+
+#endif /* _PGLIST_H */
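
After this change a free list is no longer a flat pair of queues: each pgfreelist points at an array of per-color pgflbucket structures, and each bucket still holds the PGFL_UNKNOWN/PGFL_ZEROS queues. A minimal user-space mock of the resulting two-level indexing (the pglist type is reduced to a counter here; the real structures are the ones declared above):

	#include <stdio.h>

	#define PGFL_UNKNOWN	0
	#define PGFL_ZEROS	1
	#define PGFL_NQUEUES	2

	struct pglist { int npages; };			/* stand-in for TAILQ_HEAD(, vm_page) */

	struct pgflbucket {
		struct pglist pgfl_queues[PGFL_NQUEUES];
	};

	struct pgfreelist {
		struct pgflbucket *pgfl_buckets;	/* one bucket per page color */
	};

	int
	main(void)
	{
		static struct pgflbucket buckets[4];	/* assume 4 colors */
		struct pgfreelist fl = { buckets };
		int color = 2;

		/* Same shape as uvm.page_free[fl].pgfl_buckets[color].pgfl_queues[q]. */
		fl.pgfl_buckets[color].pgfl_queues[PGFL_UNKNOWN].npages++;
		printf("bucket %d, unknown queue: %d page(s)\n", color,
		    fl.pgfl_buckets[color].pgfl_queues[PGFL_UNKNOWN].npages);
		return 0;
	}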
diff --git a/sys/uvm/uvm_pmap.h b/sys/uvm/uvm_pmap.h
index f0039df2fed..4033c140e4d 100644
--- a/sys/uvm/uvm_pmap.h
+++ b/sys/uvm/uvm_pmap.h
@@ -1,6 +1,6 @@
-/* $NetBSD: uvm_pmap.h,v 1.5 2001/04/22 23:42:11 thorpej Exp $ */
+/* $NetBSD: uvm_pmap.h,v 1.7 2001/05/25 04:06:17 chs Exp $ */
-/*
+/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
@@ -42,17 +42,17 @@
* All rights reserved.
*
* Author: Avadis Tevanian, Jr.
- *
+ *
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
@@ -99,7 +99,10 @@ typedef struct pmap_statistics *pmap_statistics_t;
#ifndef PMAP_EXCLUDE_DECLS /* Used in Sparc port to virtualize pmap mod */
#ifdef _KERNEL
__BEGIN_DECLS
-void *pmap_bootstrap_alloc __P((int));
+#if !defined(pmap_kernel)
+struct pmap *pmap_kernel __P((void));
+#endif
+
void pmap_activate __P((struct proc *));
void pmap_deactivate __P((struct proc *));
void pmap_unwire __P((pmap_t, vaddr_t));
@@ -138,13 +141,19 @@ boolean_t pmap_is_referenced __P((struct vm_page *));
void pmap_page_protect __P((struct vm_page *, vm_prot_t));
#if !defined(pmap_phys_address)
-paddr_t pmap_phys_address __P((int));
+paddr_t pmap_phys_address __P((int));
#endif
void pmap_protect __P((pmap_t,
vaddr_t, vaddr_t, vm_prot_t));
void pmap_reference __P((pmap_t));
void pmap_remove __P((pmap_t, vaddr_t, vaddr_t));
void pmap_update __P((void));
+#if !defined(pmap_resident_count)
+long pmap_resident_count __P((pmap_t));
+#endif
+#if !defined(pmap_wired_count)
+long pmap_wired_count __P((pmap_t));
+#endif
void pmap_zero_page __P((paddr_t));
void pmap_virtual_space __P((vaddr_t *, vaddr_t *));
diff --git a/sys/uvm/uvm_stat.c b/sys/uvm/uvm_stat.c
index 32fcc128377..4746b59f6df 100644
--- a/sys/uvm/uvm_stat.c
+++ b/sys/uvm/uvm_stat.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_stat.c,v 1.9 2001/11/12 01:26:10 art Exp $ */
-/* $NetBSD: uvm_stat.c,v 1.18 2001/03/09 01:02:13 chs Exp $ */
+/* $OpenBSD: uvm_stat.c,v 1.10 2001/11/28 19:28:15 art Exp $ */
+/* $NetBSD: uvm_stat.c,v 1.19 2001/05/25 04:06:17 chs Exp $ */
/*
*
@@ -135,7 +135,7 @@ restart:
cur[lcv] = -1;
goto restart;
}
-
+
/*
* if the time hasn't been set yet, or this entry is
* earlier than the current tv, set the time and history
@@ -158,7 +158,7 @@ restart:
if (cur[hi] == hists[hi]->f)
cur[hi] = -1;
}
-
+
/* done! */
splx(s);
}
diff --git a/sys/uvm/uvm_stat.h b/sys/uvm/uvm_stat.h
index af0a256119f..2644314f99c 100644
--- a/sys/uvm/uvm_stat.h
+++ b/sys/uvm/uvm_stat.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_stat.h,v 1.10 2001/11/12 01:26:10 art Exp $ */
-/* $NetBSD: uvm_stat.h,v 1.19 2001/02/04 10:55:58 mrg Exp $ */
+/* $OpenBSD: uvm_stat.h,v 1.11 2001/11/28 19:28:15 art Exp $ */
+/* $NetBSD: uvm_stat.h,v 1.22 2001/05/30 11:57:17 mrg Exp $ */
/*
*
@@ -38,6 +38,10 @@
#ifndef _UVM_UVM_STAT_H_
#define _UVM_UVM_STAT_H_
+#if defined(_KERNEL_OPT)
+#include "opt_uvmhist.h"
+#endif
+
#include <sys/queue.h>
/*
@@ -113,7 +117,7 @@ struct uvm_history {
LIST_ENTRY(uvm_history) list; /* link on list of all histories */
int n; /* number of entries */
int f; /* next free one */
- simple_lock_data_t l; /* lock on this history */
+ struct simplelock l; /* lock on this history */
struct uvm_history_ent *e; /* the malloc'd entries */
};
@@ -228,7 +232,7 @@ do { \
#define UVMHIST_FUNC(FNAME) \
static int _uvmhist_cnt = 0; \
static char *_uvmhist_name = FNAME; \
- int _uvmhist_call;
+ int _uvmhist_call;
static __inline void uvmhist_print __P((struct uvm_history_ent *));
diff --git a/sys/uvm/uvm_swap.c b/sys/uvm/uvm_swap.c
index fc1d6861de1..95ecb9e7828 100644
--- a/sys/uvm/uvm_swap.c
+++ b/sys/uvm/uvm_swap.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_swap.c,v 1.43 2001/11/28 13:47:40 art Exp $ */
-/* $NetBSD: uvm_swap.c,v 1.47 2001/03/10 22:46:51 chs Exp $ */
+/* $OpenBSD: uvm_swap.c,v 1.44 2001/11/28 19:28:15 art Exp $ */
+/* $NetBSD: uvm_swap.c,v 1.52 2001/05/26 16:32:47 chs Exp $ */
/*
* Copyright (c) 1995, 1996, 1997 Matthew R. Green
@@ -63,7 +63,7 @@
/*
* swap space is managed in the following way:
- *
+ *
* each swap partition or file is described by a "swapdev" structure.
* each "swapdev" structure contains a "swapent" structure which contains
* information that is passed up to the user (via system calls).
@@ -74,7 +74,7 @@
* the system maintains a global data structure describing all swap
* partitions/files. there is a sorted LIST of "swappri" structures
* which describe "swapdev"'s at that priority. this LIST is headed
- * by the "swap_priority" global var. each "swappri" contains a
+ * by the "swap_priority" global var. each "swappri" contains a
* CIRCLEQ of "swapdev" structures at that priority.
*
* locking:
@@ -99,7 +99,7 @@
* userland controls and configures swap with the swapctl(2) system call.
* the sys_swapctl performs the following operations:
* [1] SWAP_NSWAP: returns the number of swap devices currently configured
- * [2] SWAP_STATS: given a pointer to an array of swapent structures
+ * [2] SWAP_STATS: given a pointer to an array of swapent structures
* (passed in via "arg") of a size passed in via "misc" ... we load
* the current swap config into the array.
* [3] SWAP_ON: given a pathname in arg (could be device or file) and a
@@ -227,16 +227,15 @@ LIST_HEAD(swap_priority, swappri);
static struct swap_priority swap_priority;
/* locks */
-lock_data_t swap_syscall_lock;
+struct lock swap_syscall_lock;
/*
* prototypes
*/
-static void swapdrum_add __P((struct swapdev *, int));
static struct swapdev *swapdrum_getsdp __P((int));
static struct swapdev *swaplist_find __P((struct vnode *, int));
-static void swaplist_insert __P((struct swapdev *,
+static void swaplist_insert __P((struct swapdev *,
struct swappri *, int));
static void swaplist_trim __P((void));
@@ -262,7 +261,7 @@ void uvm_swap_initcrypt __P((struct swapdev *, int));
/*
* uvm_swap_init: init the swap system data structures and locks
*
- * => called at boot time from init_main.c after the filesystems
+ * => called at boot time from init_main.c after the filesystems
* are brought up (which happens after uvm_init())
*/
void
@@ -288,7 +287,7 @@ uvm_swap_init()
/*
* create swap block resource map to map /dev/drum. the range
* from 1 to INT_MAX allows 2 gigablocks of swap space. note
- * that block 0 is reserved (used to indicate an allocation
+ * that block 0 is reserved (used to indicate an allocation
* failure, or no allocation).
*/
swapmap = extent_create("swapmap", 1, INT_MAX,
@@ -563,27 +562,6 @@ swaplist_trim()
}
/*
- * swapdrum_add: add a "swapdev"'s blocks into /dev/drum's area.
- *
- * => caller must hold swap_syscall_lock
- * => uvm.swap_data_lock should be unlocked (we may sleep)
- */
-static void
-swapdrum_add(sdp, npages)
- struct swapdev *sdp;
- int npages;
-{
- u_long result;
-
- if (extent_alloc(swapmap, npages, EX_NOALIGN, 0, EX_NOBOUNDARY,
- EX_WAITOK, &result))
- panic("swapdrum_add");
-
- sdp->swd_drumoffset = result;
- sdp->swd_drumsize = npages;
-}
-
-/*
* swapdrum_getsdp: given a page offset in /dev/drum, convert it back
* to the "swapdev" that maps that section of the drum.
*
@@ -596,16 +574,19 @@ swapdrum_getsdp(pgno)
{
struct swapdev *sdp;
struct swappri *spp;
-
+
for (spp = LIST_FIRST(&swap_priority); spp != NULL;
spp = LIST_NEXT(spp, spi_swappri))
for (sdp = CIRCLEQ_FIRST(&spp->spi_swapdev);
sdp != (void *)&spp->spi_swapdev;
- sdp = CIRCLEQ_NEXT(sdp, swd_next))
+ sdp = CIRCLEQ_NEXT(sdp, swd_next)) {
+ if (sdp->swd_flags & SWF_FAKE)
+ continue;
if (pgno >= sdp->swd_drumoffset &&
pgno < (sdp->swd_drumoffset + sdp->swd_drumsize)) {
return sdp;
}
+ }
return NULL;
}
@@ -646,7 +627,7 @@ sys_swapctl(p, v, retval)
/*
* we handle the non-priv NSWAP and STATS request first.
*
- * SWAP_NSWAP: return number of config'd swap devices
+ * SWAP_NSWAP: return number of config'd swap devices
* [can also be obtained with uvmexp sysctl]
*/
if (SCARG(uap, cmd) == SWAP_NSWAP) {
@@ -660,9 +641,9 @@ sys_swapctl(p, v, retval)
/*
* SWAP_STATS: get stats on current # of configured swap devs
*
- * note that the swap_priority list can't change as long
+ * note that the swap_priority list can't change as long
* as we are holding the swap_syscall_lock. we don't want
- * to grab the uvm.swap_data_lock because we may fault&sleep during
+ * to grab the uvm.swap_data_lock because we may fault&sleep during
* copyout() and we don't want to be holding that lock then!
*/
if (SCARG(uap, cmd) == SWAP_STATS
@@ -678,7 +659,7 @@ sys_swapctl(p, v, retval)
for (sdp = CIRCLEQ_FIRST(&spp->spi_swapdev);
sdp != (void *)&spp->spi_swapdev && misc-- > 0;
sdp = CIRCLEQ_NEXT(sdp, swd_next)) {
- sdp->swd_inuse =
+ sdp->swd_inuse =
btodb((u_int64_t)sdp->swd_npginuse <<
PAGE_SHIFT);
error = copyout(&sdp->swd_se, sep,
@@ -698,7 +679,8 @@ sys_swapctl(p, v, retval)
count++;
#if defined(COMPAT_13)
if (SCARG(uap, cmd) == SWAP_OSTATS)
- ((struct oswapent *)sep)++;
+ sep = (struct swapent *)
+ ((struct oswapent *)sep + 1);
else
#endif
sep++;
@@ -710,7 +692,7 @@ sys_swapctl(p, v, retval)
*retval = count;
error = 0;
goto out;
- }
+ }
/*
* all other requests require superuser privs. verify.
@@ -797,14 +779,16 @@ sys_swapctl(p, v, retval)
*/
priority = SCARG(uap, misc);
+ sdp = malloc(sizeof *sdp, M_VMSWAP, M_WAITOK);
+ spp = malloc(sizeof *spp, M_VMSWAP, M_WAITOK);
simple_lock(&uvm.swap_data_lock);
- if ((sdp = swaplist_find(vp, 0)) != NULL) {
+ if (swaplist_find(vp, 0) != NULL) {
error = EBUSY;
simple_unlock(&uvm.swap_data_lock);
+ free(sdp, M_VMSWAP);
+ free(spp, M_VMSWAP);
break;
}
- sdp = malloc(sizeof *sdp, M_VMSWAP, M_WAITOK);
- spp = malloc(sizeof *spp, M_VMSWAP, M_WAITOK);
memset(sdp, 0, sizeof(*sdp));
sdp->swd_flags = SWF_FAKE; /* placeholder only */
sdp->swd_vp = vp;
@@ -905,6 +889,7 @@ swap_on(p, sdp)
struct vnode *vp;
int error, npages, nblocks, size;
long addr;
+ u_long result;
struct vattr va;
#if defined(NFSCLIENT)
extern int (**nfsv2_vnodeop_p) __P((void *));
@@ -1033,9 +1018,9 @@ swap_on(p, sdp)
}
/*
- * if the vnode we are swapping to is the root vnode
+ * if the vnode we are swapping to is the root vnode
* (i.e. we are swapping to the miniroot) then we want
- * to make sure we don't overwrite it. do a statfs to
+ * to make sure we don't overwrite it. do a statfs to
* find its size and skip over it.
*/
if (vp == rootvp) {
@@ -1050,7 +1035,7 @@ swap_on(p, sdp)
if (rootpages > size)
panic("swap_on: miniroot larger than swap?");
- if (extent_alloc_region(sdp->swd_ex, addr,
+ if (extent_alloc_region(sdp->swd_ex, addr,
rootpages, EX_WAITOK))
panic("swap_on: unable to preserve miniroot");
@@ -1080,9 +1065,14 @@ swap_on(p, sdp)
/*
* now add the new swapdev to the drum and enable.
*/
- simple_lock(&uvm.swap_data_lock);
- swapdrum_add(sdp, npages);
+ if (extent_alloc(swapmap, npages, EX_NOALIGN, 0, EX_NOBOUNDARY,
+ EX_WAITOK, &result))
+ panic("swapdrum_add");
+
+ sdp->swd_drumoffset = (int)result;
+ sdp->swd_drumsize = npages;
sdp->swd_npages = size;
+ simple_lock(&uvm.swap_data_lock);
sdp->swd_flags &= ~SWF_FAKE; /* going live */
sdp->swd_flags |= (SWF_INUSE|SWF_ENABLE);
uvmexp.swpages += size;
@@ -1131,7 +1121,7 @@ swap_off(p, sdp)
sdp->swd_drumoffset + sdp->swd_drumsize) ||
anon_swap_off(sdp->swd_drumoffset,
sdp->swd_drumoffset + sdp->swd_drumsize)) {
-
+
simple_lock(&uvm.swap_data_lock);
sdp->swd_flags |= SWF_ENABLE;
simple_unlock(&uvm.swap_data_lock);
@@ -1161,6 +1151,7 @@ swap_off(p, sdp)
if (swaplist_find(sdp->swd_vp, 1) == NULL)
panic("swap_off: swapdev not in list\n");
swaplist_trim();
+ simple_unlock(&uvm.swap_data_lock);
/*
* free all resources!
@@ -1169,7 +1160,6 @@ swap_off(p, sdp)
EX_WAITOK);
extent_destroy(sdp->swd_ex);
free(sdp, M_VMSWAP);
- simple_unlock(&uvm.swap_data_lock);
return (0);
}
@@ -1336,7 +1326,7 @@ sw_reg_strategy(sdp, bp, bn)
&vp, &nbn, &nra);
if (error == 0 && nbn == (daddr_t)-1) {
- /*
+ /*
* this used to just set error, but that doesn't
* do the right thing. Instead, it causes random
* memory errors. The panic() should remain until
@@ -1578,7 +1568,7 @@ uvm_swap_alloc(nslots, lessok)
*/
if (uvmexp.nswapdev < 1)
return 0;
-
+
/*
* lock data lock, convert slots into blocks, and enter loop
*/
@@ -1682,8 +1672,8 @@ uvm_swap_free(startslot, nslots)
}
/*
- * convert drum slot offset back to sdp, free the blocks
- * in the extent, and return. must hold pri lock to do
+ * convert drum slot offset back to sdp, free the blocks
+ * in the extent, and return. must hold pri lock to do
* lookup and access the extent.
*/
@@ -1765,7 +1755,7 @@ uvm_swap_get(page, swslot, flags)
uvmexp.swpgonly--;
simple_unlock(&uvm.swap_data_lock);
- result = uvm_swap_io(&page, swslot, 1, B_READ |
+ result = uvm_swap_io(&page, swslot, 1, B_READ |
((flags & PGO_SYNCIO) ? 0 : B_ASYNC));
if (result != 0) {
@@ -1906,7 +1896,7 @@ uvm_swap_io(pps, startslot, npages, flags)
}
#endif /* UVM_SWAP_ENCRYPT */
- /*
+ /*
* now allocate a buf for the i/o.
* [make sure we don't put the pagedaemon to sleep...]
*/
@@ -1962,9 +1952,8 @@ uvm_swap_io(pps, startslot, npages, flags)
splx(s);
bp->b_bufsize = bp->b_bcount = npages << PAGE_SHIFT;
- /*
- * for pageouts we must set "dirtyoff" [NFS client code needs it].
- * and we bump v_numoutput (counter of number of active outputs).
+ /*
+ * bump v_numoutput (counter of number of active outputs).
*/
if (write) {
#ifdef UVM_SWAP_ENCRYPT
diff --git a/sys/uvm/uvm_unix.c b/sys/uvm/uvm_unix.c
index a2fde83db35..a6debf6ff8d 100644
--- a/sys/uvm/uvm_unix.c
+++ b/sys/uvm/uvm_unix.c
@@ -1,9 +1,9 @@
-/* $OpenBSD: uvm_unix.c,v 1.18 2001/11/28 13:47:40 art Exp $ */
-/* $NetBSD: uvm_unix.c,v 1.20 2001/03/19 02:25:33 simonb Exp $ */
+/* $OpenBSD: uvm_unix.c,v 1.19 2001/11/28 19:28:15 art Exp $ */
+/* $NetBSD: uvm_unix.c,v 1.24 2001/06/06 21:28:51 mrg Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
- * Copyright (c) 1991, 1993 The Regents of the University of California.
+ * Copyright (c) 1991, 1993 The Regents of the University of California.
* Copyright (c) 1988 University of Utah.
*
* All rights reserved.
@@ -23,7 +23,7 @@
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Charles D. Cranor,
- * Washington University, the University of California, Berkeley and
+ * Washington University, the University of California, Berkeley and
* its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
@@ -77,38 +77,36 @@ sys_obreak(p, v, retval)
} */ *uap = v;
struct vmspace *vm = p->p_vmspace;
vaddr_t new, old;
- ssize_t diff;
int error;
old = (vaddr_t)vm->vm_daddr;
new = round_page((vaddr_t)SCARG(uap, nsize));
- if ((new - old) > p->p_rlimit[RLIMIT_DATA].rlim_cur)
+ if ((new - old) > p->p_rlimit[RLIMIT_DATA].rlim_cur && new > old)
return (ENOMEM);
old = round_page(old + ptoa(vm->vm_dsize));
- diff = new - old;
- if (diff == 0)
+ if (new == old)
return (0);
/*
* grow or shrink?
*/
- if (diff > 0) {
- error = uvm_map(&vm->vm_map, &old, diff, NULL,
+ if (new > old) {
+ error = uvm_map(&vm->vm_map, &old, new - old, NULL,
UVM_UNKNOWN_OFFSET, 0,
UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_COPY,
UVM_ADV_NORMAL, UVM_FLAG_AMAPPAD|UVM_FLAG_FIXED|
UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));
if (error) {
uprintf("sbrk: grow %ld failed, error = %d\n",
- (long)diff, error);
+ new - old, error);
return error;
}
- vm->vm_dsize += atop(diff);
+ vm->vm_dsize += atop(new - old);
} else {
- uvm_deallocate(&vm->vm_map, new, -diff);
- vm->vm_dsize -= atop(-diff);
+ uvm_deallocate(&vm->vm_map, new, old - new);
+ vm->vm_dsize -= atop(old - new);
}
return (0);
@@ -192,8 +190,8 @@ uvm_coredump(p, vp, cred, chdr)
struct core *chdr;
{
struct vmspace *vm = p->p_vmspace;
- vm_map_t map = &vm->vm_map;
- vm_map_entry_t entry;
+ struct vm_map *map = &vm->vm_map;
+ struct vm_map_entry *entry;
vaddr_t start, end, maxstack;
struct coreseg cseg;
off_t offset;
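
The sys_obreak() rework above drops the signed "diff" variable: with an unsigned vaddr_t, new - old wraps to a huge value whenever the break shrinks, so the RLIMIT_DATA check is gated on new > old and the grow/shrink branches use new - old and old - new directly. A small sketch of the wrap-around that extra test guards against (vaddr_t approximated by uintptr_t, all values illustrative):

	#include <stdio.h>
	#include <stdint.h>

	typedef uintptr_t vaddr_t;			/* unsigned, like the kernel type */

	int
	main(void)
	{
		vaddr_t old = 0x20000000;
		vaddr_t new = 0x10000000;		/* shrinking the break */
		vaddr_t limit = 0x08000000;		/* stand-in for RLIMIT_DATA */

		/* Without the "new > old" test the unsigned difference wraps
		 * and looks enormous, falsely tripping the limit check. */
		printf("new - old = %#lx\n", (unsigned long)(new - old));
		printf("over limit? %s\n",
		    ((new - old) > limit && new > old) ? "yes" : "no");
		return 0;
	}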
diff --git a/sys/uvm/uvm_user.c b/sys/uvm/uvm_user.c
index e6a6ba1d738..502d2aca440 100644
--- a/sys/uvm/uvm_user.c
+++ b/sys/uvm/uvm_user.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_user.c,v 1.7 2001/11/28 13:47:40 art Exp $ */
-/* $NetBSD: uvm_user.c,v 1.9 2001/03/15 06:10:58 chs Exp $ */
+/* $OpenBSD: uvm_user.c,v 1.8 2001/11/28 19:28:15 art Exp $ */
+/* $NetBSD: uvm_user.c,v 1.10 2001/06/02 18:09:27 chs Exp $ */
/*
*
@@ -52,7 +52,7 @@
void
uvm_deallocate(map, start, size)
- vm_map_t map;
+ struct vm_map *map;
vaddr_t start;
vsize_t size;
{
diff --git a/sys/uvm/uvm_vnode.c b/sys/uvm/uvm_vnode.c
index cef3499f281..ca8a2551493 100644
--- a/sys/uvm/uvm_vnode.c
+++ b/sys/uvm/uvm_vnode.c
@@ -1,10 +1,10 @@
-/* $OpenBSD: uvm_vnode.c,v 1.26 2001/11/28 13:47:40 art Exp $ */
-/* $NetBSD: uvm_vnode.c,v 1.48 2001/03/10 22:46:51 chs Exp $ */
+/* $OpenBSD: uvm_vnode.c,v 1.27 2001/11/28 19:28:15 art Exp $ */
+/* $NetBSD: uvm_vnode.c,v 1.50 2001/05/26 21:27:21 chs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* Copyright (c) 1991, 1993
- * The Regents of the University of California.
+ * The Regents of the University of California.
* Copyright (c) 1990 University of Utah.
*
* All rights reserved.
@@ -24,7 +24,7 @@
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Charles D. Cranor,
- * Washington University, the University of California, Berkeley and
+ * Washington University, the University of California, Berkeley and
* its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
@@ -79,10 +79,11 @@ static int uvn_findpage __P((struct uvm_object *, voff_t,
struct vm_page **, int));
boolean_t uvn_flush __P((struct uvm_object *, voff_t, voff_t,
int));
-static int uvn_get __P((struct uvm_object *, voff_t, vm_page_t *,
- int *, int, vm_prot_t, int, int));
-static int uvn_put __P((struct uvm_object *, vm_page_t *, int,
- boolean_t));
+static int uvn_get __P((struct uvm_object *, voff_t,
+ struct vm_page **, int *, int, vm_prot_t,
+ int, int));
+static int uvn_put __P((struct uvm_object *, struct vm_page **,
+ int, boolean_t));
static void uvn_reference __P((struct uvm_object *));
static boolean_t uvn_releasepg __P((struct vm_page *,
struct vm_page **));
@@ -226,10 +227,10 @@ uvn_attach(arg, accessprot)
* uvn_reference
*
* duplicate a reference to a VM object. Note that the reference
- * count must already be at least one (the passed in reference) so
+ * count must already be at least one (the passed in reference) so
* there is no chance of the uvn being killed or locked out here.
*
- * => caller must call with object unlocked.
+ * => caller must call with object unlocked.
* => caller must be using the same accessprot as was used at attach time
*/
@@ -278,7 +279,7 @@ uvn_releasepg(pg, nextpgp)
struct vm_page **nextpgp; /* OUT */
{
KASSERT(pg->flags & PG_RELEASED);
-
+
/*
* dispose of the page [caller handles PG_WANTED]
*/
@@ -354,9 +355,9 @@ uvn_releasepg(pg, nextpgp)
* in, then it can not be dirty (!PG_CLEAN) because no one has
* had a chance to modify it yet. if the PG_BUSY page is being
* paged out then it means that someone else has already started
- * cleaning the page for us (how nice!). in this case, if we
+ * cleaning the page for us (how nice!). in this case, if we
* have syncio specified, then after we make our pass through the
- * object we need to wait for the other PG_BUSY pages to clear
+ * object we need to wait for the other PG_BUSY pages to clear
* off (i.e. we need to do an iosync). also note that once a
* page is PG_BUSY it must stay in its object until it is un-busyed.
*
@@ -364,13 +365,13 @@ uvn_releasepg(pg, nextpgp)
* we can traverse the pages in an object either by going down the
* linked list in "uobj->memq", or we can go over the address range
* by page doing hash table lookups for each address. depending
- * on how many pages are in the object it may be cheaper to do one
+ * on how many pages are in the object it may be cheaper to do one
* or the other. we set "by_list" to true if we are using memq.
* if the cost of a hash lookup was equal to the cost of the list
* traversal we could compare the number of pages in the start->stop
* range to the total number of pages in the object. however, it
* seems that a hash table lookup is more expensive than the linked
- * list traversal, so we multiply the number of pages in the
+ * list traversal, so we multiply the number of pages in the
* start->stop range by a penalty which we define below.
*/
@@ -434,7 +435,7 @@ uvn_flush(uobj, start, stop, flags)
start = trunc_page(start);
stop = round_page(stop);
all = FALSE;
- by_list = (uobj->uo_npages <=
+ by_list = (uobj->uo_npages <=
((stop - start) >> PAGE_SHIFT) * UVN_HASH_PENALTY);
}
@@ -491,7 +492,7 @@ uvn_flush(uobj, start, stop, flags)
uvm_lock_pageq();
/* locked: both page queues and uobj */
- for ( ; (by_list && pp != NULL) ||
+ for ( ; (by_list && pp != NULL) ||
(!by_list && curoff < stop) ; pp = ppnext) {
if (by_list) {
if (!all &&
@@ -512,7 +513,7 @@ uvn_flush(uobj, start, stop, flags)
* handle case where we do not need to clean page (either
* because we are not clean or because page is not dirty or
* is busy):
- *
+ *
* NOTE: we are allowed to deactivate a non-wired active
* PG_BUSY page, but once a PG_BUSY page is on the inactive
* queue it must stay put until it is !PG_BUSY (so as not to
@@ -529,7 +530,7 @@ uvn_flush(uobj, start, stop, flags)
* freeing: nuke all mappings so we can sync
* PG_CLEAN bit with no race
*/
- if ((pp->flags & PG_CLEAN) != 0 &&
+ if ((pp->flags & PG_CLEAN) != 0 &&
(flags & PGO_FREE) != 0 &&
/* XXX ACTIVE|INACTIVE test unnecessary? */
(pp->pqflags & (PQ_ACTIVE|PQ_INACTIVE)) != 0)
@@ -590,14 +591,14 @@ uvn_flush(uobj, start, stop, flags)
npages = sizeof(pps) / sizeof(struct vm_page *);
/* locked: page queues, uobj */
- result = uvm_pager_put(uobj, pp, &ppsp, &npages,
+ result = uvm_pager_put(uobj, pp, &ppsp, &npages,
flags | PGO_DOACTCLUST, start, stop);
/* unlocked: page queues, uobj */
/*
* at this point nothing is locked. if we did an async I/O
- * it is remotely possible for the async i/o to complete and
- * the page "pp" be freed or what not before we get a chance
+ * it is remotely possible for the async i/o to complete and
+ * the page "pp" be freed or what not before we get a chance
* to relock the object. in order to detect this, we have
* saved the version number of the page in "pp_version".
*/
@@ -637,10 +638,10 @@ uvn_flush(uobj, start, stop, flags)
}
/*
- * need to look at each page of the I/O operation. we defer
- * processing "pp" until the last trip through this "for" loop
+ * need to look at each page of the I/O operation. we defer
+ * processing "pp" until the last trip through this "for" loop
* so that we can load "ppnext" for the main loop after we
- * play with the cluster pages [thus the "npages + 1" in the
+ * play with the cluster pages [thus the "npages + 1" in the
* loop below].
*/
@@ -714,7 +715,7 @@ uvn_flush(uobj, start, stop, flags)
}
}
}
-
+
/*
* dispose of page
*/
@@ -774,7 +775,7 @@ uvn_flush(uobj, start, stop, flags)
vp->v_bioflag |= VBIOWAIT;
UVM_UNLOCK_AND_WAIT(&vp->v_numoutput,
- &uvn->u_obj.vmobjlock,
+ &uvn->u_obj.vmobjlock,
FALSE, "uvn_flush",0);
simple_lock(&uvn->u_obj.vmobjlock);
}
@@ -840,7 +841,7 @@ uvn_put(uobj, pps, npages, flags)
* => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
* => NOTE: caller must check for released pages!!
*/
-
+
static int
uvn_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
struct uvm_object *uobj;
@@ -947,7 +948,7 @@ uvn_findpage(uobj, offset, pgp, flags)
simple_lock(&uobj->vmobjlock);
continue;
}
-
+
/* skip PG_RDONLY pages if requested */
if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
@@ -970,7 +971,7 @@ uvn_findpage(uobj, offset, pgp, flags)
* grow => just update size value
* shrink => toss un-needed pages
*
- * => we assume that the caller has a reference of some sort to the
+ * => we assume that the caller has a reference of some sort to the
* vnode in question so that it will not be yanked out from under
* us.
*