author     Owain Ainsworth <oga@cvs.openbsd.org>   2009-03-25 20:00:19 +0000
committer  Owain Ainsworth <oga@cvs.openbsd.org>   2009-03-25 20:00:19 +0000
commit     3b9207b010a465a159335b6f30f6467e258b7973
tree       f62be26e5f5b9df508e11cbcc08f32587d905de2 /sys/uvm
parent     8286cd18f68c14418cc7b15e91da56cd157a0c47
Move all of the pseudo-inline functions in uvm into C files.
By pseudo-inline, I mean that if a certain macro was defined, they would
be inlined. However, no architecture defines that, and none has for a
very very long time. Therefore mainly this just makes the code a damned
sight easier to read.

Some k&r -> ansi declarations while I'm in there.

"just commit it" art@. ok weingart@.
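For context, a minimal sketch of the pseudo-inline convention this commit
removes. The FOO names are illustrative placeholders, not identifiers from
the tree; the real guards (UVM_AMAP_INLINE, UVM_AMAP_C, and friends) appear
in the deleted headers below.

/*
 * foo_i.h: the old pseudo-inline pattern, reduced to one function.
 *
 * If FOO_INLINE is defined by the architecture, every file that
 * includes this header compiles its own static inline copy of
 * foo_op().  If it is not (the case everywhere, for a very long
 * time), the body is compiled exactly once: foo.c defines FOO_C
 * before including the header, and every other file sees only an
 * extern prototype.
 */
#ifdef FOO_INLINE
#define FOO_DECL	static __inline
#else
#define FOO_DECL	/* nothing: plain function, one copy in foo.c */
#endif

#if defined(FOO_INLINE) || defined(FOO_C)
FOO_DECL int
foo_op(int a, int b)
{
	return (a + b);
}
#endif

Since no architecture defines the inline switch, the commit deletes the
*_i.h headers entirely, moves each body into the matching .c file, and
leaves a plain prototype in the public header.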
Diffstat (limited to 'sys/uvm')
-rw-r--r--  sys/uvm/uvm.h          |  12
-rw-r--r--  sys/uvm/uvm_amap.c     | 212
-rw-r--r--  sys/uvm/uvm_amap.h     |  29
-rw-r--r--  sys/uvm/uvm_amap_i.h   | 261
-rw-r--r--  sys/uvm/uvm_fault.c    | 162
-rw-r--r--  sys/uvm/uvm_fault.h    |  16
-rw-r--r--  sys/uvm/uvm_fault_i.h  | 220
-rw-r--r--  sys/uvm/uvm_map.c      | 117
-rw-r--r--  sys/uvm/uvm_map.h      |  28
-rw-r--r--  sys/uvm/uvm_map_i.h    | 203
-rw-r--r--  sys/uvm/uvm_page.c     | 187
-rw-r--r--  sys/uvm/uvm_page.h     |  57
-rw-r--r--  sys/uvm/uvm_page_i.h   | 277
-rw-r--r--  sys/uvm/uvm_pager.c    |  63
-rw-r--r--  sys/uvm/uvm_pager.h    |  25
-rw-r--r--  sys/uvm/uvm_pager_i.h  |  73
16 files changed, 759 insertions, 1183 deletions
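The "k&r -> ansi" conversions in this commit change declaration style
only, not behavior. A sketch with a made-up function (real examples are
uvmfault_unlockmaps() and uvm_mk_pcluster() in the hunks below); the two
definitions are alternatives for the same function, so compile one or
the other:

/* K&R (old style): parameter types declared after the parameter list */
int
foo_op(a, b)
	int a, b;
{
	return (a + b);
}

/* ANSI C (new style): parameter types in the declarator itself */
int
foo_op(int a, int b)
{
	return (a + b);
}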
diff --git a/sys/uvm/uvm.h b/sys/uvm/uvm.h
index 45fd5139aa2..9f5e42cd289 100644
--- a/sys/uvm/uvm.h
+++ b/sys/uvm/uvm.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm.h,v 1.25 2009/01/27 22:14:13 miod Exp $ */
+/* $OpenBSD: uvm.h,v 1.26 2009/03/25 20:00:17 oga Exp $ */
/* $NetBSD: uvm.h,v 1.24 2000/11/27 08:40:02 chs Exp $ */
/*
@@ -168,16 +168,6 @@ do { \
#define UVM_PAGE_OWN(PG, TAG) /* nothing */
#endif /* UVM_PAGE_TRKOWN */
-/*
- * pull in inlines
- */
-
-#include <uvm/uvm_amap_i.h>
-#include <uvm/uvm_fault_i.h>
-#include <uvm/uvm_map_i.h>
-#include <uvm/uvm_page_i.h>
-#include <uvm/uvm_pager_i.h>
-
#endif /* _KERNEL */
#endif /* _UVM_UVM_H_ */
diff --git a/sys/uvm/uvm_amap.c b/sys/uvm/uvm_amap.c
index 4c79f7ee907..e2645412a3c 100644
--- a/sys/uvm/uvm_amap.c
+++ b/sys/uvm/uvm_amap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_amap.c,v 1.43 2008/10/08 08:41:18 art Exp $ */
+/* $OpenBSD: uvm_amap.c,v 1.44 2009/03/25 20:00:18 oga Exp $ */
/* $NetBSD: uvm_amap.c,v 1.27 2000/11/25 06:27:59 chs Exp $ */
/*
@@ -42,8 +42,6 @@
* uvm_amap.h for a brief explanation of the role of amaps in uvm.
*/
-#undef UVM_AMAP_INLINE /* enable/disable amap inlines */
-
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
@@ -51,7 +49,6 @@
#include <sys/kernel.h>
#include <sys/pool.h>
-#define UVM_AMAP_C /* ensure disabled inlines are in */
#include <uvm/uvm.h>
#include <uvm/uvm_swap.h>
@@ -1131,3 +1128,210 @@ next:
return rv;
}
+
+/*
+ * amap_lookup: look up a page in an amap
+ *
+ * => amap should be locked by caller.
+ */
+struct vm_anon *
+amap_lookup(struct vm_aref *aref, vaddr_t offset)
+{
+ int slot;
+ struct vm_amap *amap = aref->ar_amap;
+ UVMHIST_FUNC("amap_lookup"); UVMHIST_CALLED(maphist);
+
+ AMAP_B2SLOT(slot, offset);
+ slot += aref->ar_pageoff;
+
+ if (slot >= amap->am_nslot)
+ panic("amap_lookup: offset out of range");
+
+ UVMHIST_LOG(maphist, "<- done (amap=%p, offset=0x%lx, result=%p)",
+ amap, offset, amap->am_anon[slot], 0);
+ return(amap->am_anon[slot]);
+}
+
+/*
+ * amap_lookups: look up a range of pages in an amap
+ *
+ * => amap should be locked by caller.
+ * => XXXCDC: this interface is biased toward array-based amaps. fix.
+ */
+void
+amap_lookups(struct vm_aref *aref, vaddr_t offset,
+ struct vm_anon **anons, int npages)
+{
+ int slot;
+ struct vm_amap *amap = aref->ar_amap;
+ UVMHIST_FUNC("amap_lookups"); UVMHIST_CALLED(maphist);
+
+ AMAP_B2SLOT(slot, offset);
+ slot += aref->ar_pageoff;
+
+ UVMHIST_LOG(maphist, " slot=%ld, npages=%ld, nslot=%ld", slot, npages,
+ amap->am_nslot, 0);
+
+ if ((slot + (npages - 1)) >= amap->am_nslot)
+ panic("amap_lookups: offset out of range");
+
+ memcpy(anons, &amap->am_anon[slot], npages * sizeof(struct vm_anon *));
+
+ UVMHIST_LOG(maphist, "<- done", 0, 0, 0, 0);
+ return;
+}
+
+/*
+ * amap_add: add (or replace) a page to an amap
+ *
+ * => caller must lock amap.
+ * => if (replace) caller must lock anon because we might have to call
+ * pmap_page_protect on the anon's page.
+ * => returns an "offset" which is meaningful to amap_unadd().
+ */
+void
+amap_add(struct vm_aref *aref, vaddr_t offset, struct vm_anon *anon,
+ boolean_t replace)
+{
+ int slot;
+ struct vm_amap *amap = aref->ar_amap;
+ UVMHIST_FUNC("amap_add"); UVMHIST_CALLED(maphist);
+
+ AMAP_B2SLOT(slot, offset);
+ slot += aref->ar_pageoff;
+
+ if (slot >= amap->am_nslot)
+ panic("amap_add: offset out of range");
+
+ if (replace) {
+
+ if (amap->am_anon[slot] == NULL)
+ panic("amap_add: replacing null anon");
+ if (amap->am_anon[slot]->an_page != NULL &&
+ (amap->am_flags & AMAP_SHARED) != 0) {
+ pmap_page_protect(amap->am_anon[slot]->an_page,
+ VM_PROT_NONE);
+ /*
+ * XXX: suppose page is supposed to be wired somewhere?
+ */
+ }
+ } else { /* !replace */
+ if (amap->am_anon[slot] != NULL)
+ panic("amap_add: slot in use");
+
+ amap->am_bckptr[slot] = amap->am_nused;
+ amap->am_slots[amap->am_nused] = slot;
+ amap->am_nused++;
+ }
+ amap->am_anon[slot] = anon;
+ UVMHIST_LOG(maphist,
+ "<- done (amap=%p, offset=0x%lx, anon=%p, rep=%ld)",
+ amap, offset, anon, replace);
+}
+
+/*
+ * amap_unadd: remove a page from an amap
+ *
+ * => caller must lock amap
+ */
+void
+amap_unadd(struct vm_aref *aref, vaddr_t offset)
+{
+ int ptr, slot;
+ struct vm_amap *amap = aref->ar_amap;
+ UVMHIST_FUNC("amap_unadd"); UVMHIST_CALLED(maphist);
+
+ AMAP_B2SLOT(slot, offset);
+ slot += aref->ar_pageoff;
+
+ if (slot >= amap->am_nslot)
+ panic("amap_unadd: offset out of range");
+
+ if (amap->am_anon[slot] == NULL)
+ panic("amap_unadd: nothing there");
+
+ amap->am_anon[slot] = NULL;
+ ptr = amap->am_bckptr[slot];
+
+ if (ptr != (amap->am_nused - 1)) { /* swap to keep slots contig? */
+ amap->am_slots[ptr] = amap->am_slots[amap->am_nused - 1];
+ amap->am_bckptr[amap->am_slots[ptr]] = ptr; /* back link */
+ }
+ amap->am_nused--;
+ UVMHIST_LOG(maphist, "<- done (amap=%p, slot=%ld)", amap, slot,0, 0);
+}
+
+/*
+ * amap_ref: gain a reference to an amap
+ *
+ * => amap must not be locked (we will lock)
+ * => "offset" and "len" are in units of pages
+ * => called at fork time to gain the child's reference
+ */
+void
+amap_ref(struct vm_amap *amap, vaddr_t offset, vsize_t len, int flags)
+{
+ UVMHIST_FUNC("amap_ref"); UVMHIST_CALLED(maphist);
+
+ amap->am_ref++;
+ if (flags & AMAP_SHARED)
+ amap->am_flags |= AMAP_SHARED;
+#ifdef UVM_AMAP_PPREF
+ if (amap->am_ppref == NULL && (flags & AMAP_REFALL) == 0 &&
+ len != amap->am_nslot)
+ amap_pp_establish(amap);
+ if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
+ if (flags & AMAP_REFALL)
+ amap_pp_adjref(amap, 0, amap->am_nslot, 1);
+ else
+ amap_pp_adjref(amap, offset, len, 1);
+ }
+#endif
+ UVMHIST_LOG(maphist,"<- done! amap=%p", amap, 0, 0, 0);
+}
+
+/*
+ * amap_unref: remove a reference to an amap
+ *
+ * => caller must remove all pmap-level references to this amap before
+ * dropping the reference
+ * => called from uvm_unmap_detach [only] ... note that entry is no
+ * longer part of a map and thus has no need for locking
+ * => amap must be unlocked (we will lock it).
+ */
+void
+amap_unref(struct vm_amap *amap, vaddr_t offset, vsize_t len, boolean_t all)
+{
+ UVMHIST_FUNC("amap_unref"); UVMHIST_CALLED(maphist);
+
+ UVMHIST_LOG(maphist," amap=%p refs=%ld, nused=%ld",
+ amap, amap->am_ref, amap->am_nused, 0);
+
+ /*
+ * if we are the last reference, free the amap and return.
+ */
+
+ if (amap->am_ref-- == 1) {
+ amap_wipeout(amap); /* drops final ref and frees */
+ UVMHIST_LOG(maphist,"<- done (was last ref)!", 0, 0, 0, 0);
+ return; /* no need to unlock */
+ }
+
+ /*
+ * otherwise just drop the reference count(s)
+ */
+ if (amap->am_ref == 1 && (amap->am_flags & AMAP_SHARED) != 0)
+ amap->am_flags &= ~AMAP_SHARED; /* clear shared flag */
+#ifdef UVM_AMAP_PPREF
+ if (amap->am_ppref == NULL && all == 0 && len != amap->am_nslot)
+ amap_pp_establish(amap);
+ if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
+ if (all)
+ amap_pp_adjref(amap, 0, amap->am_nslot, -1);
+ else
+ amap_pp_adjref(amap, offset, len, -1);
+ }
+#endif
+
+ UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
+}
diff --git a/sys/uvm/uvm_amap.h b/sys/uvm/uvm_amap.h
index 6cdc2a9bae1..b598bdee8c0 100644
--- a/sys/uvm/uvm_amap.h
+++ b/sys/uvm/uvm_amap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_amap.h,v 1.17 2007/06/18 21:51:15 pedro Exp $ */
+/* $OpenBSD: uvm_amap.h,v 1.18 2009/03/25 20:00:18 oga Exp $ */
/* $NetBSD: uvm_amap.h,v 1.14 2001/02/18 21:19:08 chs Exp $ */
/*
@@ -66,23 +66,12 @@
struct vm_amap;
/*
- * handle inline options... we allow amap ops to be inline, but we also
- * provide a hook to turn this off. macros can also be used.
- */
-
-#ifdef UVM_AMAP_INLINE /* defined/undef'd in uvm_amap.c */
-#define AMAP_INLINE static __inline /* inline enabled */
-#else
-#define AMAP_INLINE /* inline disabled */
-#endif /* UVM_AMAP_INLINE */
-
-
-/*
* prototypes for the amap interface
*/
-AMAP_INLINE /* add an anon to an amap */
-void amap_add(struct vm_aref *, vaddr_t, struct vm_anon *, boolean_t);
+ /* add an anon to an amap */
+void amap_add(struct vm_aref *, vaddr_t, struct vm_anon *,
+ boolean_t);
/* allocate a new amap */
struct vm_amap *amap_alloc(vaddr_t, vaddr_t, int);
/* clear amap needs-copy flag */
@@ -98,11 +87,11 @@ int amap_flags(struct vm_amap *);
void amap_free(struct vm_amap *);
/* init amap module (at boot time) */
void amap_init(void);
-AMAP_INLINE /* lookup an anon @ offset in amap */
+ /* lookup an anon @ offset in amap */
struct vm_anon *amap_lookup(struct vm_aref *, vaddr_t);
-AMAP_INLINE /* lookup multiple anons */
+ /* lookup multiple anons */
void amap_lookups(struct vm_aref *, vaddr_t, struct vm_anon **, int);
-AMAP_INLINE /* add a reference to an amap */
+ /* add a reference to an amap */
void amap_ref(struct vm_amap *, vaddr_t, vsize_t, int);
/* get number of references of amap */
int amap_refs(struct vm_amap *);
@@ -110,9 +99,9 @@ int amap_refs(struct vm_amap *);
void amap_share_protect(vm_map_entry_t, vm_prot_t);
/* split reference to amap into two */
void amap_splitref(struct vm_aref *, struct vm_aref *, vaddr_t);
-AMAP_INLINE /* remove an anon from an amap */
+ /* remove an anon from an amap */
void amap_unadd(struct vm_aref *, vaddr_t);
-AMAP_INLINE /* drop reference to an amap */
+ /* drop reference to an amap */
void amap_unref(struct vm_amap *, vaddr_t, vsize_t, int);
/* remove all anons from amap */
void amap_wipeout(struct vm_amap *);
diff --git a/sys/uvm/uvm_amap_i.h b/sys/uvm/uvm_amap_i.h
deleted file mode 100644
index d71cab1f7be..00000000000
--- a/sys/uvm/uvm_amap_i.h
+++ /dev/null
@@ -1,261 +0,0 @@
-/* $OpenBSD: uvm_amap_i.h,v 1.19 2009/03/20 15:19:04 oga Exp $ */
-/* $NetBSD: uvm_amap_i.h,v 1.15 2000/11/25 06:27:59 chs Exp $ */
-
-/*
- *
- * Copyright (c) 1997 Charles D. Cranor and Washington University.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by Charles D. Cranor and
- * Washington University.
- * 4. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * from: Id: uvm_amap_i.h,v 1.1.2.4 1998/01/05 18:12:57 chuck Exp
- */
-
-#ifndef _UVM_UVM_AMAP_I_H_
-#define _UVM_UVM_AMAP_I_H_
-
-/*
- * uvm_amap_i.h
- */
-
-/*
- * if inlines are enabled always pull in these functions, otherwise
- * pull them in only once (when we are compiling uvm_amap.c).
- */
-
-#if defined(UVM_AMAP_INLINE) || defined(UVM_AMAP_C)
-
-/*
- * amap_lookup: look up a page in an amap
- *
- * => amap should be locked by caller.
- */
-AMAP_INLINE struct vm_anon *
-amap_lookup(struct vm_aref *aref, vaddr_t offset)
-{
- int slot;
- struct vm_amap *amap = aref->ar_amap;
- UVMHIST_FUNC("amap_lookup"); UVMHIST_CALLED(maphist);
-
- AMAP_B2SLOT(slot, offset);
- slot += aref->ar_pageoff;
-
- if (slot >= amap->am_nslot)
- panic("amap_lookup: offset out of range");
-
- UVMHIST_LOG(maphist, "<- done (amap=%p, offset=0x%lx, result=%p)",
- amap, offset, amap->am_anon[slot], 0);
- return(amap->am_anon[slot]);
-}
-
-/*
- * amap_lookups: look up a range of pages in an amap
- *
- * => amap should be locked by caller.
- * => XXXCDC: this interface is biased toward array-based amaps. fix.
- */
-AMAP_INLINE void
-amap_lookups(struct vm_aref *aref, vaddr_t offset,
- struct vm_anon **anons, int npages)
-{
- int slot;
- struct vm_amap *amap = aref->ar_amap;
- UVMHIST_FUNC("amap_lookups"); UVMHIST_CALLED(maphist);
-
- AMAP_B2SLOT(slot, offset);
- slot += aref->ar_pageoff;
-
- UVMHIST_LOG(maphist, " slot=%ld, npages=%ld, nslot=%ld", slot, npages,
- amap->am_nslot, 0);
-
- if ((slot + (npages - 1)) >= amap->am_nslot)
- panic("amap_lookups: offset out of range");
-
- memcpy(anons, &amap->am_anon[slot], npages * sizeof(struct vm_anon *));
-
- UVMHIST_LOG(maphist, "<- done", 0, 0, 0, 0);
- return;
-}
-
-/*
- * amap_add: add (or replace) a page to an amap
- *
- * => caller must lock amap.
- * => if (replace) caller must lock anon because we might have to call
- * pmap_page_protect on the anon's page.
- * => returns an "offset" which is meaningful to amap_unadd().
- */
-AMAP_INLINE void
-amap_add(struct vm_aref *aref, vaddr_t offset, struct vm_anon *anon,
- boolean_t replace)
-{
- int slot;
- struct vm_amap *amap = aref->ar_amap;
- UVMHIST_FUNC("amap_add"); UVMHIST_CALLED(maphist);
-
- AMAP_B2SLOT(slot, offset);
- slot += aref->ar_pageoff;
-
- if (slot >= amap->am_nslot)
- panic("amap_add: offset out of range");
-
- if (replace) {
-
- if (amap->am_anon[slot] == NULL)
- panic("amap_add: replacing null anon");
- if (amap->am_anon[slot]->an_page != NULL &&
- (amap->am_flags & AMAP_SHARED) != 0) {
- pmap_page_protect(amap->am_anon[slot]->an_page,
- VM_PROT_NONE);
- /*
- * XXX: suppose page is supposed to be wired somewhere?
- */
- }
- } else { /* !replace */
- if (amap->am_anon[slot] != NULL)
- panic("amap_add: slot in use");
-
- amap->am_bckptr[slot] = amap->am_nused;
- amap->am_slots[amap->am_nused] = slot;
- amap->am_nused++;
- }
- amap->am_anon[slot] = anon;
- UVMHIST_LOG(maphist,
- "<- done (amap=%p, offset=0x%lx, anon=%p, rep=%ld)",
- amap, offset, anon, replace);
-}
-
-/*
- * amap_unadd: remove a page from an amap
- *
- * => caller must lock amap
- */
-AMAP_INLINE void
-amap_unadd(struct vm_aref *aref, vaddr_t offset)
-{
- int ptr, slot;
- struct vm_amap *amap = aref->ar_amap;
- UVMHIST_FUNC("amap_unadd"); UVMHIST_CALLED(maphist);
-
- AMAP_B2SLOT(slot, offset);
- slot += aref->ar_pageoff;
-
- if (slot >= amap->am_nslot)
- panic("amap_unadd: offset out of range");
-
- if (amap->am_anon[slot] == NULL)
- panic("amap_unadd: nothing there");
-
- amap->am_anon[slot] = NULL;
- ptr = amap->am_bckptr[slot];
-
- if (ptr != (amap->am_nused - 1)) { /* swap to keep slots contig? */
- amap->am_slots[ptr] = amap->am_slots[amap->am_nused - 1];
- amap->am_bckptr[amap->am_slots[ptr]] = ptr; /* back link */
- }
- amap->am_nused--;
- UVMHIST_LOG(maphist, "<- done (amap=%p, slot=%ld)", amap, slot,0, 0);
-}
-
-/*
- * amap_ref: gain a reference to an amap
- *
- * => amap must not be locked (we will lock)
- * => "offset" and "len" are in units of pages
- * => called at fork time to gain the child's reference
- */
-AMAP_INLINE void
-amap_ref(struct vm_amap *amap, vaddr_t offset, vsize_t len, int flags)
-{
- UVMHIST_FUNC("amap_ref"); UVMHIST_CALLED(maphist);
-
- amap->am_ref++;
- if (flags & AMAP_SHARED)
- amap->am_flags |= AMAP_SHARED;
-#ifdef UVM_AMAP_PPREF
- if (amap->am_ppref == NULL && (flags & AMAP_REFALL) == 0 &&
- len != amap->am_nslot)
- amap_pp_establish(amap);
- if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
- if (flags & AMAP_REFALL)
- amap_pp_adjref(amap, 0, amap->am_nslot, 1);
- else
- amap_pp_adjref(amap, offset, len, 1);
- }
-#endif
- UVMHIST_LOG(maphist,"<- done! amap=%p", amap, 0, 0, 0);
-}
-
-/*
- * amap_unref: remove a reference to an amap
- *
- * => caller must remove all pmap-level references to this amap before
- * dropping the reference
- * => called from uvm_unmap_detach [only] ... note that entry is no
- * longer part of a map and thus has no need for locking
- * => amap must be unlocked (we will lock it).
- */
-AMAP_INLINE void
-amap_unref(struct vm_amap *amap, vaddr_t offset, vsize_t len, boolean_t all)
-{
- UVMHIST_FUNC("amap_unref"); UVMHIST_CALLED(maphist);
-
- UVMHIST_LOG(maphist," amap=%p refs=%ld, nused=%ld",
- amap, amap->am_ref, amap->am_nused, 0);
-
- /*
- * if we are the last reference, free the amap and return.
- */
-
- if (amap->am_ref-- == 1) {
- amap_wipeout(amap); /* drops final ref and frees */
- UVMHIST_LOG(maphist,"<- done (was last ref)!", 0, 0, 0, 0);
- return; /* no need to unlock */
- }
-
- /*
- * otherwise just drop the reference count(s)
- */
- if (amap->am_ref == 1 && (amap->am_flags & AMAP_SHARED) != 0)
- amap->am_flags &= ~AMAP_SHARED; /* clear shared flag */
-#ifdef UVM_AMAP_PPREF
- if (amap->am_ppref == NULL && all == 0 && len != amap->am_nslot)
- amap_pp_establish(amap);
- if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
- if (all)
- amap_pp_adjref(amap, 0, amap->am_nslot, -1);
- else
- amap_pp_adjref(amap, offset, len, -1);
- }
-#endif
-
- UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
-}
-
-#endif /* defined(UVM_AMAP_INLINE) || defined(UVM_AMAP_C) */
-
-#endif /* _UVM_UVM_AMAP_I_H_ */
diff --git a/sys/uvm/uvm_fault.c b/sys/uvm/uvm_fault.c
index be5e24d0cca..57d3fe322bf 100644
--- a/sys/uvm/uvm_fault.c
+++ b/sys/uvm/uvm_fault.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_fault.c,v 1.51 2009/03/20 15:19:04 oga Exp $ */
+/* $OpenBSD: uvm_fault.c,v 1.52 2009/03/25 20:00:18 oga Exp $ */
/* $NetBSD: uvm_fault.c,v 1.51 2000/08/06 00:22:53 thorpej Exp $ */
/*
@@ -177,6 +177,7 @@ static struct uvm_advice uvmadvice[] = {
static void uvmfault_amapcopy(struct uvm_faultinfo *);
static __inline void uvmfault_anonflush(struct vm_anon **, int);
+void uvmfault_unlockmaps(struct uvm_faultinfo *, boolean_t);
/*
* inline functions
@@ -1860,3 +1861,162 @@ uvm_fault_unwire_locked(vm_map_t map, vaddr_t start, vaddr_t end)
uvm_unlock_pageq();
}
+
+/*
+ * uvmfault_unlockmaps: unlock the maps
+ */
+void
+uvmfault_unlockmaps(struct uvm_faultinfo *ufi, boolean_t write_locked)
+{
+ /*
+ * ufi can be NULL when this isn't really a fault,
+ * but merely paging in anon data.
+ */
+
+ if (ufi == NULL) {
+ return;
+ }
+
+ if (write_locked) {
+ vm_map_unlock(ufi->map);
+ } else {
+ vm_map_unlock_read(ufi->map);
+ }
+}
+
+/*
+ * uvmfault_unlockall: unlock everything passed in.
+ *
+ * => maps must be read-locked (not write-locked).
+ */
+void
+uvmfault_unlockall(struct uvm_faultinfo *ufi, struct vm_amap *amap,
+ struct uvm_object *uobj, struct vm_anon *anon)
+{
+
+ if (anon)
+ simple_unlock(&anon->an_lock);
+ if (uobj)
+ simple_unlock(&uobj->vmobjlock);
+ uvmfault_unlockmaps(ufi, FALSE);
+}
+
+/*
+ * uvmfault_lookup: lookup a virtual address in a map
+ *
+ * => caller must provide a uvm_faultinfo structure with the IN
+ * params properly filled in
+ * => we will lookup the map entry (handling submaps) as we go
+ * => if the lookup is a success we will return with the maps locked
+ * => if "write_lock" is TRUE, we write_lock the map, otherwise we only
+ * get a read lock.
+ * => note that submaps can only appear in the kernel and they are
+ * required to use the same virtual addresses as the map they
+ * are referenced by (thus address translation between the main
+ * map and the submap is unnecessary).
+ */
+
+boolean_t
+uvmfault_lookup(struct uvm_faultinfo *ufi, boolean_t write_lock)
+{
+ vm_map_t tmpmap;
+
+ /*
+ * init ufi values for lookup.
+ */
+
+ ufi->map = ufi->orig_map;
+ ufi->size = ufi->orig_size;
+
+ /*
+ * keep going down levels until we are done. note that there can
+ * only be two levels so we won't loop very long.
+ */
+
+ while (1) {
+
+ /*
+ * lock map
+ */
+ if (write_lock) {
+ vm_map_lock(ufi->map);
+ } else {
+ vm_map_lock_read(ufi->map);
+ }
+
+ /*
+ * lookup
+ */
+ if (!uvm_map_lookup_entry(ufi->map, ufi->orig_rvaddr,
+ &ufi->entry)) {
+ uvmfault_unlockmaps(ufi, write_lock);
+ return(FALSE);
+ }
+
+ /*
+ * reduce size if necessary
+ */
+ if (ufi->entry->end - ufi->orig_rvaddr < ufi->size)
+ ufi->size = ufi->entry->end - ufi->orig_rvaddr;
+
+ /*
+ * submap? replace map with the submap and lookup again.
+ * note: VAs in submaps must match VAs in main map.
+ */
+ if (UVM_ET_ISSUBMAP(ufi->entry)) {
+ tmpmap = ufi->entry->object.sub_map;
+ if (write_lock) {
+ vm_map_unlock(ufi->map);
+ } else {
+ vm_map_unlock_read(ufi->map);
+ }
+ ufi->map = tmpmap;
+ continue;
+ }
+
+ /*
+ * got it!
+ */
+
+ ufi->mapv = ufi->map->timestamp;
+ return(TRUE);
+
+ } /* while loop */
+
+ /*NOTREACHED*/
+}
+
+/*
+ * uvmfault_relock: attempt to relock the same version of the map
+ *
+ * => fault data structures should be unlocked before calling.
+ * => if a success (TRUE) maps will be locked after call.
+ */
+boolean_t
+uvmfault_relock(struct uvm_faultinfo *ufi)
+{
+ /*
+ * ufi can be NULL when this isn't really a fault,
+ * but merely paging in anon data.
+ */
+
+ if (ufi == NULL) {
+ return TRUE;
+ }
+
+ uvmexp.fltrelck++;
+
+ /*
+ * relock map. fail if version mismatch (in which case nothing
+ * gets locked).
+ */
+
+ vm_map_lock_read(ufi->map);
+ if (ufi->mapv != ufi->map->timestamp) {
+ vm_map_unlock_read(ufi->map);
+ return(FALSE);
+ }
+
+ uvmexp.fltrelckok++;
+ return(TRUE); /* got it! */
+}
diff --git a/sys/uvm/uvm_fault.h b/sys/uvm/uvm_fault.h
index ddcb3e02c7e..dd19cdf9262 100644
--- a/sys/uvm/uvm_fault.h
+++ b/sys/uvm/uvm_fault.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_fault.h,v 1.12 2002/03/14 01:27:18 millert Exp $ */
+/* $OpenBSD: uvm_fault.h,v 1.13 2009/03/25 20:00:18 oga Exp $ */
/* $NetBSD: uvm_fault.h,v 1.14 2000/06/26 14:21:17 mrg Exp $ */
/*
@@ -73,12 +73,16 @@ struct uvm_faultinfo {
*/
-int uvmfault_anonget(struct uvm_faultinfo *, struct vm_amap *,
- struct vm_anon *);
+boolean_t uvmfault_lookup(struct uvm_faultinfo *, boolean_t);
+boolean_t uvmfault_relock(struct uvm_faultinfo *);
+void uvmfault_unlockall(struct uvm_faultinfo *, struct vm_amap *,
+ struct uvm_object *, struct vm_anon *);
+int uvmfault_anonget(struct uvm_faultinfo *, struct vm_amap *,
+ struct vm_anon *);
-int uvm_fault_wire(vm_map_t, vaddr_t, vaddr_t, vm_prot_t);
-void uvm_fault_unwire(vm_map_t, vaddr_t, vaddr_t);
-void uvm_fault_unwire_locked(vm_map_t, vaddr_t, vaddr_t);
+int uvm_fault_wire(vm_map_t, vaddr_t, vaddr_t, vm_prot_t);
+void uvm_fault_unwire(vm_map_t, vaddr_t, vaddr_t);
+void uvm_fault_unwire_locked(vm_map_t, vaddr_t, vaddr_t);
#endif /* _KERNEL */
diff --git a/sys/uvm/uvm_fault_i.h b/sys/uvm/uvm_fault_i.h
deleted file mode 100644
index 525c1e1c433..00000000000
--- a/sys/uvm/uvm_fault_i.h
+++ /dev/null
@@ -1,220 +0,0 @@
-/* $OpenBSD: uvm_fault_i.h,v 1.12 2007/05/31 21:20:30 thib Exp $ */
-/* $NetBSD: uvm_fault_i.h,v 1.11 2000/06/26 14:21:17 mrg Exp $ */
-
-/*
- *
- * Copyright (c) 1997 Charles D. Cranor and Washington University.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by Charles D. Cranor and
- * Washington University.
- * 4. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * from: Id: uvm_fault_i.h,v 1.1.6.1 1997/12/08 16:07:12 chuck Exp
- */
-
-#ifndef _UVM_UVM_FAULT_I_H_
-#define _UVM_UVM_FAULT_I_H_
-
-/*
- * uvm_fault_i.h: fault inline functions
- */
-static boolean_t uvmfault_lookup(struct uvm_faultinfo *, boolean_t);
-static boolean_t uvmfault_relock(struct uvm_faultinfo *);
-static void uvmfault_unlockall(struct uvm_faultinfo *, struct vm_amap *,
- struct uvm_object *, struct vm_anon *);
-static void uvmfault_unlockmaps(struct uvm_faultinfo *, boolean_t);
-
-/*
- * uvmfault_unlockmaps: unlock the maps
- */
-
-static __inline void
-uvmfault_unlockmaps(ufi, write_locked)
- struct uvm_faultinfo *ufi;
- boolean_t write_locked;
-{
- /*
- * ufi can be NULL when this isn't really a fault,
- * but merely paging in anon data.
- */
-
- if (ufi == NULL) {
- return;
- }
-
- if (write_locked) {
- vm_map_unlock(ufi->map);
- } else {
- vm_map_unlock_read(ufi->map);
- }
-}
-
-/*
- * uvmfault_unlockall: unlock everything passed in.
- *
- * => maps must be read-locked (not write-locked).
- */
-
-static __inline void
-uvmfault_unlockall(ufi, amap, uobj, anon)
- struct uvm_faultinfo *ufi;
- struct vm_amap *amap;
- struct uvm_object *uobj;
- struct vm_anon *anon;
-{
-
- if (anon)
- simple_unlock(&anon->an_lock);
- if (uobj)
- simple_unlock(&uobj->vmobjlock);
- uvmfault_unlockmaps(ufi, FALSE);
-}
-
-/*
- * uvmfault_lookup: lookup a virtual address in a map
- *
- * => caller must provide a uvm_faultinfo structure with the IN
- * params properly filled in
- * => we will lookup the map entry (handling submaps) as we go
- * => if the lookup is a success we will return with the maps locked
- * => if "write_lock" is TRUE, we write_lock the map, otherwise we only
- * get a read lock.
- * => note that submaps can only appear in the kernel and they are
- * required to use the same virtual addresses as the map they
- * are referenced by (thus address translation between the main
- * map and the submap is unnecessary).
- */
-
-static __inline boolean_t
-uvmfault_lookup(ufi, write_lock)
- struct uvm_faultinfo *ufi;
- boolean_t write_lock;
-{
- vm_map_t tmpmap;
-
- /*
- * init ufi values for lookup.
- */
-
- ufi->map = ufi->orig_map;
- ufi->size = ufi->orig_size;
-
- /*
- * keep going down levels until we are done. note that there can
- * only be two levels so we won't loop very long.
- */
-
- while (1) {
-
- /*
- * lock map
- */
- if (write_lock) {
- vm_map_lock(ufi->map);
- } else {
- vm_map_lock_read(ufi->map);
- }
-
- /*
- * lookup
- */
- if (!uvm_map_lookup_entry(ufi->map, ufi->orig_rvaddr,
- &ufi->entry)) {
- uvmfault_unlockmaps(ufi, write_lock);
- return(FALSE);
- }
-
- /*
- * reduce size if necessary
- */
- if (ufi->entry->end - ufi->orig_rvaddr < ufi->size)
- ufi->size = ufi->entry->end - ufi->orig_rvaddr;
-
- /*
- * submap? replace map with the submap and lookup again.
- * note: VAs in submaps must match VAs in main map.
- */
- if (UVM_ET_ISSUBMAP(ufi->entry)) {
- tmpmap = ufi->entry->object.sub_map;
- if (write_lock) {
- vm_map_unlock(ufi->map);
- } else {
- vm_map_unlock_read(ufi->map);
- }
- ufi->map = tmpmap;
- continue;
- }
-
- /*
- * got it!
- */
-
- ufi->mapv = ufi->map->timestamp;
- return(TRUE);
-
- } /* while loop */
-
- /*NOTREACHED*/
-}
-
-/*
- * uvmfault_relock: attempt to relock the same version of the map
- *
- * => fault data structures should be unlocked before calling.
- * => if a success (TRUE) maps will be locked after call.
- */
-
-static __inline boolean_t
-uvmfault_relock(ufi)
- struct uvm_faultinfo *ufi;
-{
- /*
- * ufi can be NULL when this isn't really a fault,
- * but merely paging in anon data.
- */
-
- if (ufi == NULL) {
- return TRUE;
- }
-
- uvmexp.fltrelck++;
-
- /*
- * relock map. fail if version mismatch (in which case nothing
- * gets locked).
- */
-
- vm_map_lock_read(ufi->map);
- if (ufi->mapv != ufi->map->timestamp) {
- vm_map_unlock_read(ufi->map);
- return(FALSE);
- }
-
- uvmexp.fltrelckok++;
- return(TRUE); /* got it! */
-}
-
-#endif /* _UVM_UVM_FAULT_I_H_ */
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index 9370d4a9925..75abac9c9f2 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map.c,v 1.108 2008/11/10 18:11:59 oga Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.109 2009/03/25 20:00:18 oga Exp $ */
/* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
/*
@@ -85,7 +85,6 @@
#include <sys/shm.h>
#endif
-#define UVM_MAP
#include <uvm/uvm.h>
#undef RB_AUGMENT
#define RB_AUGMENT(x) uvm_rb_augment(x)
@@ -1408,6 +1407,39 @@ uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length,
}
/*
+ * U N M A P - m a i n e n t r y p o i n t
+ */
+
+/*
+ * uvm_unmap: remove mappings from a vm_map (from "start" up to "stop")
+ *
+ * => caller must check alignment and size
+ * => map must be unlocked (we will lock it)
+ */
+void
+uvm_unmap_p(vm_map_t map, vaddr_t start, vaddr_t end, struct proc *p)
+{
+ vm_map_entry_t dead_entries;
+ UVMHIST_FUNC("uvm_unmap"); UVMHIST_CALLED(maphist);
+
+ UVMHIST_LOG(maphist, " (map=%p, start=0x%lx, end=0x%lx)",
+ map, start, end, 0);
+ /*
+ * work now done by helper functions. wipe the pmap's and then
+ * detach from the dead entries...
+ */
+ vm_map_lock(map);
+ uvm_unmap_remove(map, start, end, &dead_entries, p);
+ vm_map_unlock(map);
+
+ if (dead_entries != NULL)
+ uvm_unmap_detach(dead_entries, 0);
+
+ UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
+}
+
+
+/*
* U N M A P - m a i n h e l p e r f u n c t i o n s
*/
@@ -3336,6 +3368,87 @@ uvmspace_free(struct vmspace *vm)
}
/*
+ * uvm_map_create: create map
+ */
+vm_map_t
+uvm_map_create(pmap_t pmap, vaddr_t min, vaddr_t max, int flags)
+{
+ vm_map_t result;
+
+ result = malloc(sizeof(struct vm_map), M_VMMAP, M_WAITOK);
+ uvm_map_setup(result, min, max, flags);
+ result->pmap = pmap;
+ return(result);
+}
+
+/*
+ * uvm_map_setup: init map
+ *
+ * => map must not be in service yet.
+ */
+void
+uvm_map_setup(vm_map_t map, vaddr_t min, vaddr_t max, int flags)
+{
+
+ RB_INIT(&map->rbhead);
+ map->header.next = map->header.prev = &map->header;
+ map->nentries = 0;
+ map->size = 0;
+ map->ref_count = 1;
+ map->min_offset = min;
+ map->max_offset = max;
+ map->flags = flags;
+ map->first_free = &map->header;
+ map->hint = &map->header;
+ map->timestamp = 0;
+ rw_init(&map->lock, "vmmaplk");
+ simple_lock_init(&map->ref_lock);
+ simple_lock_init(&map->hint_lock);
+}
+
+
+
+/*
+ * uvm_map_reference: add reference to a map
+ *
+ * => map need not be locked (we use ref_lock).
+ */
+void
+uvm_map_reference(vm_map_t map)
+{
+ simple_lock(&map->ref_lock);
+ map->ref_count++;
+ simple_unlock(&map->ref_lock);
+}
+
+/*
+ * uvm_map_deallocate: drop reference to a map
+ *
+ * => caller must not lock map
+ * => we will zap map if ref count goes to zero
+ */
+void
+uvm_map_deallocate(vm_map_t map)
+{
+ int c;
+
+ simple_lock(&map->ref_lock);
+ c = --map->ref_count;
+ simple_unlock(&map->ref_lock);
+ if (c > 0) {
+ return;
+ }
+
+ /*
+ * all references gone. unmap and free.
+ */
+
+ uvm_unmap(map, map->min_offset, map->max_offset);
+ pmap_destroy(map->pmap);
+ free(map, M_VMMAP);
+}
+
+/*
* F O R K - m a i n e n t r y p o i n t
*/
/*
diff --git a/sys/uvm/uvm_map.h b/sys/uvm/uvm_map.h
index 456f9a5e516..cc5eae5db43 100644
--- a/sys/uvm/uvm_map.h
+++ b/sys/uvm/uvm_map.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map.h,v 1.39 2008/07/18 16:40:17 kurt Exp $ */
+/* $OpenBSD: uvm_map.h,v 1.40 2009/03/25 20:00:18 oga Exp $ */
/* $NetBSD: uvm_map.h,v 1.24 2001/02/18 21:19:08 chs Exp $ */
/*
@@ -259,16 +259,6 @@ struct vm_map_intrsafe {
};
/*
- * handle inline options
- */
-
-#ifdef UVM_MAP_INLINE
-#define MAP_INLINE static __inline
-#else
-#define MAP_INLINE /* nothing */
-#endif /* UVM_MAP_INLINE */
-
-/*
* globals:
*/
@@ -282,38 +272,34 @@ extern vaddr_t uvm_maxkaddr;
* protos: the following prototypes define the interface to vm_map
*/
-MAP_INLINE
void uvm_map_deallocate(vm_map_t);
int uvm_map_clean(vm_map_t, vaddr_t, vaddr_t, int);
void uvm_map_clip_start(vm_map_t, vm_map_entry_t, vaddr_t);
void uvm_map_clip_end(vm_map_t, vm_map_entry_t, vaddr_t);
-MAP_INLINE
vm_map_t uvm_map_create(pmap_t, vaddr_t, vaddr_t, int);
int uvm_map_extract(vm_map_t, vaddr_t, vsize_t,
- vm_map_t, vaddr_t *, int);
+ vm_map_t, vaddr_t *, int);
vm_map_entry_t uvm_map_findspace(vm_map_t, vaddr_t, vsize_t, vaddr_t *,
- struct uvm_object *, voff_t, vsize_t, int);
+ struct uvm_object *, voff_t, vsize_t, int);
vaddr_t uvm_map_pie(vaddr_t);
vaddr_t uvm_map_hint(struct proc *, vm_prot_t);
int uvm_map_inherit(vm_map_t, vaddr_t, vaddr_t, vm_inherit_t);
int uvm_map_advice(vm_map_t, vaddr_t, vaddr_t, int);
void uvm_map_init(void);
boolean_t uvm_map_lookup_entry(vm_map_t, vaddr_t, vm_map_entry_t *);
-MAP_INLINE
void uvm_map_reference(vm_map_t);
int uvm_map_replace(vm_map_t, vaddr_t, vaddr_t,
- vm_map_entry_t, int);
+ vm_map_entry_t, int);
int uvm_map_reserve(vm_map_t, vsize_t, vaddr_t, vsize_t,
- vaddr_t *);
+ vaddr_t *);
void uvm_map_setup(vm_map_t, vaddr_t, vaddr_t, int);
int uvm_map_submap(vm_map_t, vaddr_t, vaddr_t, vm_map_t);
#define uvm_unmap(_m, _s, _e) uvm_unmap_p(_m, _s, _e, 0)
-MAP_INLINE
void uvm_unmap_p(vm_map_t, vaddr_t, vaddr_t, struct proc *);
void uvm_unmap_detach(vm_map_entry_t,int);
-void uvm_unmap_remove(vm_map_t, vaddr_t, vaddr_t,
- vm_map_entry_t *, struct proc *);
+void uvm_unmap_remove(vm_map_t, vaddr_t, vaddr_t, vm_map_entry_t *,
+ struct proc *);
#endif /* _KERNEL */
diff --git a/sys/uvm/uvm_map_i.h b/sys/uvm/uvm_map_i.h
deleted file mode 100644
index 09fcc7b37c5..00000000000
--- a/sys/uvm/uvm_map_i.h
+++ /dev/null
@@ -1,203 +0,0 @@
-/* $OpenBSD: uvm_map_i.h,v 1.23 2009/03/20 15:19:04 oga Exp $ */
-/* $NetBSD: uvm_map_i.h,v 1.18 2000/11/27 08:40:04 chs Exp $ */
-
-/*
- * Copyright (c) 1997 Charles D. Cranor and Washington University.
- * Copyright (c) 1991, 1993, The Regents of the University of California.
- *
- * All rights reserved.
- *
- * This code is derived from software contributed to Berkeley by
- * The Mach Operating System project at Carnegie-Mellon University.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by Charles D. Cranor,
- * Washington University, the University of California, Berkeley and
- * its contributors.
- * 4. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * @(#)vm_map.c 8.3 (Berkeley) 1/12/94
- * from: Id: uvm_map_i.h,v 1.1.2.1 1997/08/14 19:10:50 chuck Exp
- *
- *
- * Copyright (c) 1987, 1990 Carnegie-Mellon University.
- * All rights reserved.
- *
- * Permission to use, copy, modify and distribute this software and
- * its documentation is hereby granted, provided that both the copyright
- * notice and this permission notice appear in all copies of the
- * software, derivative works or modified versions, and any portions
- * thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
- * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
- * Carnegie Mellon requests users of this software to return to
- *
- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
- * School of Computer Science
- * Carnegie Mellon University
- * Pittsburgh PA 15213-3890
- *
- * any improvements or extensions that they make and grant Carnegie the
- * rights to redistribute these changes.
- */
-
-#ifndef _UVM_UVM_MAP_I_H_
-#define _UVM_UVM_MAP_I_H_
-
-/*
- * uvm_map_i.h
- */
-
-/*
- * inline functions [maybe]
- */
-
-#if defined(UVM_MAP_INLINE) || defined(UVM_MAP)
-
-/*
- * uvm_map_create: create map
- */
-
-MAP_INLINE vm_map_t
-uvm_map_create(pmap_t pmap, vaddr_t min, vaddr_t max, int flags)
-{
- vm_map_t result;
-
- result = malloc(sizeof(struct vm_map), M_VMMAP, M_WAITOK);
- uvm_map_setup(result, min, max, flags);
- result->pmap = pmap;
- return(result);
-}
-
-/*
- * uvm_map_setup: init map
- *
- * => map must not be in service yet.
- */
-
-MAP_INLINE void
-uvm_map_setup(vm_map_t map, vaddr_t min, vaddr_t max, int flags)
-{
-
- RB_INIT(&map->rbhead);
- map->header.next = map->header.prev = &map->header;
- map->nentries = 0;
- map->size = 0;
- map->ref_count = 1;
- map->min_offset = min;
- map->max_offset = max;
- map->flags = flags;
- map->first_free = &map->header;
- map->hint = &map->header;
- map->timestamp = 0;
- rw_init(&map->lock, "vmmaplk");
- simple_lock_init(&map->ref_lock);
- simple_lock_init(&map->hint_lock);
-}
-
-
-/*
- * U N M A P - m a i n e n t r y p o i n t
- */
-
-/*
- * uvm_unmap: remove mappings from a vm_map (from "start" up to "stop")
- *
- * => caller must check alignment and size
- * => map must be unlocked (we will lock it)
- */
-
-MAP_INLINE void
-uvm_unmap_p(vm_map_t map, vaddr_t start, vaddr_t end, struct proc *p)
-{
- vm_map_entry_t dead_entries;
- UVMHIST_FUNC("uvm_unmap"); UVMHIST_CALLED(maphist);
-
- UVMHIST_LOG(maphist, " (map=%p, start=0x%lx, end=0x%lx)",
- map, start, end, 0);
- /*
- * work now done by helper functions. wipe the pmap's and then
- * detach from the dead entries...
- */
- vm_map_lock(map);
- uvm_unmap_remove(map, start, end, &dead_entries, p);
- vm_map_unlock(map);
-
- if (dead_entries != NULL)
- uvm_unmap_detach(dead_entries, 0);
-
- UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
-}
-
-
-/*
- * uvm_map_reference: add reference to a map
- *
- * => map need not be locked (we use ref_lock).
- */
-
-MAP_INLINE void
-uvm_map_reference(vm_map_t map)
-{
- simple_lock(&map->ref_lock);
- map->ref_count++;
- simple_unlock(&map->ref_lock);
-}
-
-/*
- * uvm_map_deallocate: drop reference to a map
- *
- * => caller must not lock map
- * => we will zap map if ref count goes to zero
- */
-
-MAP_INLINE void
-uvm_map_deallocate(vm_map_t map)
-{
- int c;
-
- simple_lock(&map->ref_lock);
- c = --map->ref_count;
- simple_unlock(&map->ref_lock);
- if (c > 0) {
- return;
- }
-
- /*
- * all references gone. unmap and free.
- */
-
- uvm_unmap(map, map->min_offset, map->max_offset);
- pmap_destroy(map->pmap);
- free(map, M_VMMAP);
-}
-
-#endif /* defined(UVM_MAP_INLINE) || defined(UVM_MAP) */
-
-#endif /* _UVM_UVM_MAP_I_H_ */
diff --git a/sys/uvm/uvm_page.c b/sys/uvm/uvm_page.c
index 08e8a5cb852..58b93ef499e 100644
--- a/sys/uvm/uvm_page.c
+++ b/sys/uvm/uvm_page.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_page.c,v 1.69 2009/03/24 16:29:42 oga Exp $ */
+/* $OpenBSD: uvm_page.c,v 1.70 2009/03/25 20:00:18 oga Exp $ */
/* $NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $ */
/*
@@ -71,7 +71,6 @@
* uvm_page.c: page ops.
*/
-#define UVM_PAGE /* pull in uvm_page.h functions */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
@@ -1495,3 +1494,187 @@ PHYS_TO_VM_PAGE(paddr_t pa)
return ((psi == -1) ? NULL : &vm_physmem[psi].pgs[off]);
}
+/*
+ * uvm_pagelookup: look up a page
+ *
+ * => caller should lock object to keep someone from pulling the page
+ * out from under it
+ */
+struct vm_page *
+uvm_pagelookup(struct uvm_object *obj, voff_t off)
+{
+ struct vm_page *pg;
+ struct pglist *buck;
+ int s;
+
+ buck = &uvm.page_hash[uvm_pagehash(obj,off)];
+
+ s = splvm();
+ simple_lock(&uvm.hashlock);
+ TAILQ_FOREACH(pg, buck, hashq) {
+ if (pg->uobject == obj && pg->offset == off) {
+ break;
+ }
+ }
+ simple_unlock(&uvm.hashlock);
+ splx(s);
+ return(pg);
+}
+
+/*
+ * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
+ *
+ * => caller must lock page queues
+ */
+void
+uvm_pagewire(struct vm_page *pg)
+{
+ if (pg->wire_count == 0) {
+ if (pg->pg_flags & PQ_ACTIVE) {
+ TAILQ_REMOVE(&uvm.page_active, pg, pageq);
+ atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
+ uvmexp.active--;
+ }
+ if (pg->pg_flags & PQ_INACTIVE) {
+ if (pg->pg_flags & PQ_SWAPBACKED)
+ TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
+ else
+ TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
+ atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
+ uvmexp.inactive--;
+ }
+ uvmexp.wired++;
+ }
+ pg->wire_count++;
+}
+
+/*
+ * uvm_pageunwire: unwire the page.
+ *
+ * => activate if wire count goes to zero.
+ * => caller must lock page queues
+ */
+void
+uvm_pageunwire(struct vm_page *pg)
+{
+ pg->wire_count--;
+ if (pg->wire_count == 0) {
+ TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
+ uvmexp.active++;
+ atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
+ uvmexp.wired--;
+ }
+}
+
+/*
+ * uvm_pagedeactivate: deactivate page -- no pmaps have access to page
+ *
+ * => caller must lock page queues
+ * => caller must check to make sure page is not wired
+ * => object that page belongs to must be locked (so we can adjust pg->flags)
+ */
+void
+uvm_pagedeactivate(struct vm_page *pg)
+{
+ if (pg->pg_flags & PQ_ACTIVE) {
+ TAILQ_REMOVE(&uvm.page_active, pg, pageq);
+ atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
+ uvmexp.active--;
+ }
+ if ((pg->pg_flags & PQ_INACTIVE) == 0) {
+ KASSERT(pg->wire_count == 0);
+ if (pg->pg_flags & PQ_SWAPBACKED)
+ TAILQ_INSERT_TAIL(&uvm.page_inactive_swp, pg, pageq);
+ else
+ TAILQ_INSERT_TAIL(&uvm.page_inactive_obj, pg, pageq);
+ atomic_setbits_int(&pg->pg_flags, PQ_INACTIVE);
+ uvmexp.inactive++;
+ pmap_clear_reference(pg);
+ /*
+ * update the "clean" bit. this isn't 100%
+ * accurate, and doesn't have to be. we'll
+ * re-sync it after we zap all mappings when
+ * scanning the inactive list.
+ */
+ if ((pg->pg_flags & PG_CLEAN) != 0 &&
+ pmap_is_modified(pg))
+ atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
+ }
+}
+
+/*
+ * uvm_pageactivate: activate page
+ *
+ * => caller must lock page queues
+ */
+void
+uvm_pageactivate(struct vm_page *pg)
+{
+ if (pg->pg_flags & PQ_INACTIVE) {
+ if (pg->pg_flags & PQ_SWAPBACKED)
+ TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
+ else
+ TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
+ atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
+ uvmexp.inactive--;
+ }
+ if (pg->wire_count == 0) {
+
+ /*
+ * if page is already active, remove it from list so we
+ * can put it at tail. if it wasn't active, then mark
+ * it active and bump active count
+ */
+ if (pg->pg_flags & PQ_ACTIVE)
+ TAILQ_REMOVE(&uvm.page_active, pg, pageq);
+ else {
+ atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
+ uvmexp.active++;
+ }
+
+ TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
+ }
+}
+
+/*
+ * uvm_pagezero: zero fill a page
+ *
+ * => if page is part of an object then the object should be locked
+ * to protect pg->flags.
+ */
+void
+uvm_pagezero(struct vm_page *pg)
+{
+ atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
+ pmap_zero_page(pg);
+}
+
+/*
+ * uvm_pagecopy: copy a page
+ *
+ * => if page is part of an object then the object should be locked
+ * to protect pg->flags.
+ */
+void
+uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
+{
+ atomic_clearbits_int(&dst->pg_flags, PG_CLEAN);
+ pmap_copy_page(src, dst);
+}
+
+/*
+ * uvm_page_lookup_freelist: look up the free list for the specified page
+ */
+int
+uvm_page_lookup_freelist(struct vm_page *pg)
+{
+#if VM_PHYSSEG_MAX == 1
+ return (vm_physmem[0].free_list);
+#else
+ int lcv;
+
+ lcv = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
+ KASSERT(lcv != -1);
+ return (vm_physmem[lcv].free_list);
+#endif
+}
diff --git a/sys/uvm/uvm_page.h b/sys/uvm/uvm_page.h
index c60caf0fabb..db0b870567e 100644
--- a/sys/uvm/uvm_page.h
+++ b/sys/uvm/uvm_page.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_page.h,v 1.28 2009/03/24 16:29:42 oga Exp $ */
+/* $OpenBSD: uvm_page.h,v 1.29 2009/03/25 20:00:18 oga Exp $ */
/* $NetBSD: uvm_page.h,v 1.19 2000/12/28 08:24:55 chs Exp $ */
/*
@@ -232,49 +232,36 @@ extern struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];
extern int vm_nphysseg;
/*
- * handle inline options
- */
-
-#ifdef UVM_PAGE_INLINE
-#define PAGE_INLINE static __inline
-#else
-#define PAGE_INLINE /* nothing */
-#endif /* UVM_PAGE_INLINE */
-
-/*
* prototypes: the following prototypes define the interface to pages
*/
-void uvm_page_init(vaddr_t *, vaddr_t *);
+void uvm_page_init(vaddr_t *, vaddr_t *);
#if defined(UVM_PAGE_TRKOWN)
-void uvm_page_own(struct vm_page *, char *);
+void uvm_page_own(struct vm_page *, char *);
#endif
#if !defined(PMAP_STEAL_MEMORY)
-boolean_t uvm_page_physget(paddr_t *);
+boolean_t uvm_page_physget(paddr_t *);
#endif
-void uvm_page_rehash(void);
-void uvm_pageidlezero(void);
-
-PAGE_INLINE int uvm_lock_fpageq(void);
-PAGE_INLINE void uvm_unlock_fpageq(int);
-
-PAGE_INLINE void uvm_pageactivate(struct vm_page *);
-vaddr_t uvm_pageboot_alloc(vsize_t);
-PAGE_INLINE void uvm_pagecopy(struct vm_page *, struct vm_page *);
-PAGE_INLINE void uvm_pagedeactivate(struct vm_page *);
-void uvm_pagefree(struct vm_page *);
-void uvm_page_unbusy(struct vm_page **, int);
-PAGE_INLINE struct vm_page *uvm_pagelookup(struct uvm_object *, voff_t);
-PAGE_INLINE void uvm_pageunwire(struct vm_page *);
-PAGE_INLINE void uvm_pagewait(struct vm_page *, int);
-PAGE_INLINE void uvm_pagewake(struct vm_page *);
-PAGE_INLINE void uvm_pagewire(struct vm_page *);
-PAGE_INLINE void uvm_pagezero(struct vm_page *);
-
-PAGE_INLINE int uvm_page_lookup_freelist(struct vm_page *);
+void uvm_page_rehash(void);
+void uvm_pageidlezero(void);
+
+void uvm_pageactivate(struct vm_page *);
+vaddr_t uvm_pageboot_alloc(vsize_t);
+void uvm_pagecopy(struct vm_page *, struct vm_page *);
+void uvm_pagedeactivate(struct vm_page *);
+void uvm_pagefree(struct vm_page *);
+void uvm_page_unbusy(struct vm_page **, int);
+struct vm_page *uvm_pagelookup(struct uvm_object *, voff_t);
+void uvm_pageunwire(struct vm_page *);
+void uvm_pagewait(struct vm_page *, int);
+void uvm_pagewake(struct vm_page *);
+void uvm_pagewire(struct vm_page *);
+void uvm_pagezero(struct vm_page *);
+
+int uvm_page_lookup_freelist(struct vm_page *);
struct vm_page *PHYS_TO_VM_PAGE(paddr_t);
-int vm_physseg_find(paddr_t, int *);
+int vm_physseg_find(paddr_t, int *);
/*
* macros
diff --git a/sys/uvm/uvm_page_i.h b/sys/uvm/uvm_page_i.h
deleted file mode 100644
index 204c76084fc..00000000000
--- a/sys/uvm/uvm_page_i.h
+++ /dev/null
@@ -1,277 +0,0 @@
-/* $OpenBSD: uvm_page_i.h,v 1.20 2008/01/09 17:42:17 miod Exp $ */
-/* $NetBSD: uvm_page_i.h,v 1.14 2000/11/27 07:47:42 chs Exp $ */
-
-/*
- * Copyright (c) 1997 Charles D. Cranor and Washington University.
- * Copyright (c) 1991, 1993, The Regents of the University of California.
- *
- * All rights reserved.
- *
- * This code is derived from software contributed to Berkeley by
- * The Mach Operating System project at Carnegie-Mellon University.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by Charles D. Cranor,
- * Washington University, the University of California, Berkeley and
- * its contributors.
- * 4. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * @(#)vm_page.c 8.3 (Berkeley) 3/21/94
- * from: Id: uvm_page_i.h,v 1.1.2.7 1998/01/05 00:26:02 chuck Exp
- *
- *
- * Copyright (c) 1987, 1990 Carnegie-Mellon University.
- * All rights reserved.
- *
- * Permission to use, copy, modify and distribute this software and
- * its documentation is hereby granted, provided that both the copyright
- * notice and this permission notice appear in all copies of the
- * software, derivative works or modified versions, and any portions
- * thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
- * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
- * Carnegie Mellon requests users of this software to return to
- *
- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
- * School of Computer Science
- * Carnegie Mellon University
- * Pittsburgh PA 15213-3890
- *
- * any improvements or extensions that they make and grant Carnegie the
- * rights to redistribute these changes.
- */
-
-#ifndef _UVM_UVM_PAGE_I_H_
-#define _UVM_UVM_PAGE_I_H_
-
-/*
- * uvm_page_i.h
- */
-
-/*
- * inline functions [maybe]
- */
-
-#if defined(UVM_PAGE_INLINE) || defined(UVM_PAGE)
-/*
- * uvm_pagelookup: look up a page
- *
- * => caller should lock object to keep someone from pulling the page
- * out from under it
- */
-
-struct vm_page *
-uvm_pagelookup(struct uvm_object *obj, voff_t off)
-{
- struct vm_page *pg;
- struct pglist *buck;
- int s;
-
- buck = &uvm.page_hash[uvm_pagehash(obj,off)];
-
- s = splvm();
- simple_lock(&uvm.hashlock);
- TAILQ_FOREACH(pg, buck, hashq) {
- if (pg->uobject == obj && pg->offset == off) {
- break;
- }
- }
- simple_unlock(&uvm.hashlock);
- splx(s);
- return(pg);
-}
-
-/*
- * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
- *
- * => caller must lock page queues
- */
-
-PAGE_INLINE void
-uvm_pagewire(struct vm_page *pg)
-{
- if (pg->wire_count == 0) {
- if (pg->pg_flags & PQ_ACTIVE) {
- TAILQ_REMOVE(&uvm.page_active, pg, pageq);
- atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
- uvmexp.active--;
- }
- if (pg->pg_flags & PQ_INACTIVE) {
- if (pg->pg_flags & PQ_SWAPBACKED)
- TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
- else
- TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
- atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
- uvmexp.inactive--;
- }
- uvmexp.wired++;
- }
- pg->wire_count++;
-}
-
-/*
- * uvm_pageunwire: unwire the page.
- *
- * => activate if wire count goes to zero.
- * => caller must lock page queues
- */
-
-PAGE_INLINE void
-uvm_pageunwire(struct vm_page *pg)
-{
- pg->wire_count--;
- if (pg->wire_count == 0) {
- TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
- uvmexp.active++;
- atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
- uvmexp.wired--;
- }
-}
-
-/*
- * uvm_pagedeactivate: deactivate page -- no pmaps have access to page
- *
- * => caller must lock page queues
- * => caller must check to make sure page is not wired
- * => object that page belongs to must be locked (so we can adjust pg->flags)
- */
-
-PAGE_INLINE void
-uvm_pagedeactivate(struct vm_page *pg)
-{
- if (pg->pg_flags & PQ_ACTIVE) {
- TAILQ_REMOVE(&uvm.page_active, pg, pageq);
- atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
- uvmexp.active--;
- }
- if ((pg->pg_flags & PQ_INACTIVE) == 0) {
- KASSERT(pg->wire_count == 0);
- if (pg->pg_flags & PQ_SWAPBACKED)
- TAILQ_INSERT_TAIL(&uvm.page_inactive_swp, pg, pageq);
- else
- TAILQ_INSERT_TAIL(&uvm.page_inactive_obj, pg, pageq);
- atomic_setbits_int(&pg->pg_flags, PQ_INACTIVE);
- uvmexp.inactive++;
- pmap_clear_reference(pg);
- /*
- * update the "clean" bit. this isn't 100%
- * accurate, and doesn't have to be. we'll
- * re-sync it after we zap all mappings when
- * scanning the inactive list.
- */
- if ((pg->pg_flags & PG_CLEAN) != 0 &&
- pmap_is_modified(pg))
- atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
- }
-}
-
-/*
- * uvm_pageactivate: activate page
- *
- * => caller must lock page queues
- */
-
-PAGE_INLINE void
-uvm_pageactivate(struct vm_page *pg)
-{
- if (pg->pg_flags & PQ_INACTIVE) {
- if (pg->pg_flags & PQ_SWAPBACKED)
- TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
- else
- TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
- atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
- uvmexp.inactive--;
- }
- if (pg->wire_count == 0) {
-
- /*
- * if page is already active, remove it from list so we
- * can put it at tail. if it wasn't active, then mark
- * it active and bump active count
- */
- if (pg->pg_flags & PQ_ACTIVE)
- TAILQ_REMOVE(&uvm.page_active, pg, pageq);
- else {
- atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
- uvmexp.active++;
- }
-
- TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
- }
-}
-
-/*
- * uvm_pagezero: zero fill a page
- *
- * => if page is part of an object then the object should be locked
- * to protect pg->flags.
- */
-
-PAGE_INLINE void
-uvm_pagezero(struct vm_page *pg)
-{
- atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
- pmap_zero_page(pg);
-}
-
-/*
- * uvm_pagecopy: copy a page
- *
- * => if page is part of an object then the object should be locked
- * to protect pg->flags.
- */
-
-PAGE_INLINE void
-uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
-{
- atomic_clearbits_int(&dst->pg_flags, PG_CLEAN);
- pmap_copy_page(src, dst);
-}
-
-/*
- * uvm_page_lookup_freelist: look up the free list for the specified page
- */
-
-PAGE_INLINE int
-uvm_page_lookup_freelist(struct vm_page *pg)
-{
-#if VM_PHYSSEG_MAX == 1
- return (vm_physmem[0].free_list);
-#else
- int lcv;
-
- lcv = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
- KASSERT(lcv != -1);
- return (vm_physmem[lcv].free_list);
-#endif
-}
-
-#endif /* defined(UVM_PAGE_INLINE) || defined(UVM_PAGE) */
-
-#endif /* _UVM_UVM_PAGE_I_H_ */
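(uvm_page_lookup_freelist() above compiles down to a constant load when the platform has a single physical segment; only multi-segment configurations pay for the vm_physseg_find() search. atop() is the traditional BSD address-to-page conversion, a right shift by PAGE_SHIFT. A hedged sketch of the multi-segment case, with a hypothetical seg array in place of vm_physmem and a linear scan in place of vm_physseg_find().)

struct seg {
        unsigned long   start, end;     /* page frame numbers, [start, end) */
        int             free_list;
};

/* find the segment holding frame pfn and report its free list */
static int
seg_freelist(unsigned long pfn, const struct seg *segs, int nsegs)
{
        int i;

        for (i = 0; i < nsegs; i++)
                if (pfn >= segs[i].start && pfn < segs[i].end)
                        return (segs[i].free_list);
        return (-1);            /* the caller KASSERTs this can't happen */
}
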
diff --git a/sys/uvm/uvm_pager.c b/sys/uvm/uvm_pager.c
index b5ddf391489..1fabb6391ed 100644
--- a/sys/uvm/uvm_pager.c
+++ b/sys/uvm/uvm_pager.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_pager.c,v 1.47 2009/03/20 15:19:04 oga Exp $ */
+/* $OpenBSD: uvm_pager.c,v 1.48 2009/03/25 20:00:18 oga Exp $ */
/* $NetBSD: uvm_pager.c,v 1.36 2000/11/27 18:26:41 chs Exp $ */
/*
@@ -39,7 +39,6 @@
* uvm_pager.c: generic functions used to assist the pagers.
*/
-#define UVM_PAGER
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
@@ -88,10 +87,12 @@ struct uvm_pseg psegs[PSEG_NUMSEGS];
#define UVM_PSEG_EMPTY(pseg) ((pseg)->use == 0)
#define UVM_PSEG_INUSE(pseg,id) (((pseg)->use & (1 << (id))) != 0)
-void uvm_pseg_init(struct uvm_pseg *);
-void uvm_pseg_destroy(struct uvm_pseg *);
-vaddr_t uvm_pseg_get(int);
-void uvm_pseg_release(vaddr_t);
+void uvm_pseg_init(struct uvm_pseg *);
+void uvm_pseg_destroy(struct uvm_pseg *);
+vaddr_t uvm_pseg_get(int);
+void uvm_pseg_release(vaddr_t);
+
+struct vm_page *uvm_pageratop(vaddr_t);
/*
* uvm_pager_init: init pagers (at boot time)
@@ -344,11 +345,8 @@ uvm_pagermapout(vaddr_t kva, int npages)
*/
struct vm_page **
-uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi)
- struct uvm_object *uobj; /* IN */
- struct vm_page **pps, *center; /* IN/OUT, IN */
- int *npages, flags; /* IN/OUT, IN */
- voff_t mlo, mhi; /* IN (if !PGO_ALLPAGES) */
+uvm_mk_pcluster(struct uvm_object *uobj, struct vm_page **pps, int *npages,
+ struct vm_page *center, int flags, voff_t mlo, voff_t mhi)
{
struct vm_page **ppsp, *pclust;
voff_t lo, hi, curoff;
@@ -504,12 +502,9 @@ uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi)
*/
int
-uvm_pager_put(uobj, pg, ppsp_ptr, npages, flags, start, stop)
- struct uvm_object *uobj; /* IN */
- struct vm_page *pg, ***ppsp_ptr;/* IN, IN/OUT */
- int *npages; /* IN/OUT */
- int flags; /* IN */
- voff_t start, stop; /* IN, IN */
+uvm_pager_put(struct uvm_object *uobj, struct vm_page *pg,
+ struct vm_page ***ppsp_ptr, int *npages, int flags,
+ voff_t start, voff_t stop)
{
int result;
daddr64_t swblk;
@@ -705,11 +700,8 @@ ReTry:
*/
void
-uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags)
- struct uvm_object *uobj; /* IN */
- struct vm_page *pg, **ppsp; /* IN, IN/OUT */
- int *npages; /* IN/OUT */
- int flags;
+uvm_pager_dropcluster(struct uvm_object *uobj, struct vm_page *pg,
+ struct vm_page **ppsp, int *npages, int flags)
{
int lcv;
boolean_t obj_is_alive;
@@ -828,8 +820,7 @@ uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags)
*/
void
-uvm_aio_biodone1(bp)
- struct buf *bp;
+uvm_aio_biodone1(struct buf *bp)
{
struct buf *mbp = bp->b_private;
@@ -856,8 +847,7 @@ uvm_aio_biodone1(bp)
*/
void
-uvm_aio_biodone(bp)
- struct buf *bp;
+uvm_aio_biodone(struct buf *bp)
{
splassert(IPL_BIO);
@@ -876,8 +866,7 @@ uvm_aio_biodone(bp)
*/
void
-uvm_aio_aiodone(bp)
- struct buf *bp;
+uvm_aio_aiodone(struct buf *bp)
{
int npages = bp->b_bufsize >> PAGE_SHIFT;
struct vm_page *pg, *pgs[MAXPHYS >> PAGE_SHIFT];
@@ -976,3 +965,21 @@ freed:
}
pool_put(&bufpool, bp);
}
+
+/*
+ * uvm_pageratop: convert KVAs in the pager map back to their page
+ * structures.
+ */
+struct vm_page *
+uvm_pageratop(vaddr_t kva)
+{
+ struct vm_page *pg;
+ paddr_t pa;
+ boolean_t rv;
+
+ rv = pmap_extract(pmap_kernel(), kva, &pa);
+ KASSERT(rv);
+ pg = PHYS_TO_VM_PAGE(pa);
+ KASSERT(pg != NULL);
+ return (pg);
+}
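(With the inline header gone, uvm_pageratop() is now private to uvm_pager.c, hence the file-local prototype added near the top of the file. Its job is the reverse of uvm_pagermapin(): a hedged usage sketch of that round trip, assuming pgs, npages and the index i are in scope; ptoa() converts the page index to a byte offset.)

/* map the pages, recover the i-th vm_page from its KVA, unmap */
vaddr_t kva = uvm_pagermapin(pgs, npages, UVMPAGER_MAPIN_WAITOK);
struct vm_page *pg = uvm_pageratop(kva + ptoa(i));      /* pg == pgs[i] */
uvm_pagermapout(kva, npages);
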
diff --git a/sys/uvm/uvm_pager.h b/sys/uvm/uvm_pager.h
index 2291225ddff..84b1e837f8d 100644
--- a/sys/uvm/uvm_pager.h
+++ b/sys/uvm/uvm_pager.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_pager.h,v 1.21 2008/11/24 19:55:33 thib Exp $ */
+/* $OpenBSD: uvm_pager.h,v 1.22 2009/03/25 20:00:18 oga Exp $ */
/* $NetBSD: uvm_pager.h,v 1.20 2000/11/27 08:40:05 chs Exp $ */
/*
@@ -136,34 +136,21 @@ struct uvm_pagerops {
#ifdef _KERNEL
/*
- * handle inline options
- */
-
-#ifdef UVM_PAGER_INLINE
-#define PAGER_INLINE static __inline
-#else
-#define PAGER_INLINE /* nothing */
-#endif /* UVM_PAGER_INLINE */
-
-/*
* prototypes
*/
-void uvm_pager_dropcluster(struct uvm_object *,
- struct vm_page *, struct vm_page **,
- int *, int);
+void uvm_pager_dropcluster(struct uvm_object *, struct vm_page *,
+ struct vm_page **, int *, int);
void uvm_pager_init(void);
int uvm_pager_put(struct uvm_object *, struct vm_page *,
- struct vm_page ***, int *, int,
- voff_t, voff_t);
+ struct vm_page ***, int *, int, voff_t, voff_t);
-PAGER_INLINE struct vm_page *uvm_pageratop(vaddr_t);
vaddr_t uvm_pagermapin(struct vm_page **, int, int);
void uvm_pagermapout(vaddr_t, int);
struct vm_page **uvm_mk_pcluster(struct uvm_object *, struct vm_page **,
- int *, struct vm_page *, int,
- voff_t, voff_t);
+ int *, struct vm_page *, int, voff_t, voff_t);
+
/* Flags to uvm_pagermapin() */
#define UVMPAGER_MAPIN_WAITOK 0x01 /* it's okay to wait */
#define UVMPAGER_MAPIN_READ 0x02 /* host <- device */
diff --git a/sys/uvm/uvm_pager_i.h b/sys/uvm/uvm_pager_i.h
deleted file mode 100644
index c027cd17fb2..00000000000
--- a/sys/uvm/uvm_pager_i.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/* $OpenBSD: uvm_pager_i.h,v 1.10 2001/12/19 08:58:07 art Exp $ */
-/* $NetBSD: uvm_pager_i.h,v 1.10 2000/11/25 06:28:00 chs Exp $ */
-
-/*
- *
- * Copyright (c) 1997 Charles D. Cranor and Washington University.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by Charles D. Cranor and
- * Washington University.
- * 4. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * from: Id: uvm_pager_i.h,v 1.1.2.2 1997/10/09 23:05:46 chuck Exp
- */
-
-#ifndef _UVM_UVM_PAGER_I_H_
-#define _UVM_UVM_PAGER_I_H_
-
-/*
- * uvm_pager_i.h
- */
-
-/*
- * inline functions [maybe]
- */
-
-#if defined(UVM_PAGER_INLINE) || defined(UVM_PAGER)
-
-/*
- * uvm_pageratop: convert KVAs in the pager map back to their page
- * structures.
- */
-
-PAGER_INLINE struct vm_page *
-uvm_pageratop(kva)
- vaddr_t kva;
-{
- struct vm_page *pg;
- paddr_t pa;
- boolean_t rv;
-
- rv = pmap_extract(pmap_kernel(), kva, &pa);
- KASSERT(rv);
- pg = PHYS_TO_VM_PAGE(pa);
- KASSERT(pg != NULL);
- return (pg);
-}
-
-#endif /* defined(UVM_PAGER_INLINE) || defined(UVM_PAGER) */
-
-#endif /* _UVM_UVM_PAGER_I_H_ */