-rw-r--r--   sys/uvm/uvm.h           4
-rw-r--r--   sys/uvm/uvm_amap.c     72
-rw-r--r--   sys/uvm/uvm_amap.h      4
-rw-r--r--   sys/uvm/uvm_anon.c     21
-rw-r--r--   sys/uvm/uvm_anon.h      9
-rw-r--r--   sys/uvm/uvm_aobj.c     65
-rw-r--r--   sys/uvm/uvm_device.c   11
-rw-r--r--   sys/uvm/uvm_fault.c   112
-rw-r--r--   sys/uvm/uvm_map.h       4
-rw-r--r--   sys/uvm/uvm_mmap.c      3
-rw-r--r--   sys/uvm/uvm_page.c     22
-rw-r--r--   sys/uvm/uvm_page.h     24
-rw-r--r--   sys/uvm/uvm_pager.c    37
-rw-r--r--   sys/uvm/uvm_pdaemon.c  28
-rw-r--r--   sys/uvm/uvm_vnode.c    86
15 files changed, 108 insertions(+), 394 deletions(-)
diff --git a/sys/uvm/uvm.h b/sys/uvm/uvm.h
index 5a68492a528..4f2810ae702 100644
--- a/sys/uvm/uvm.h
+++ b/sys/uvm/uvm.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm.h,v 1.49 2013/05/30 15:17:59 tedu Exp $ */
+/* $OpenBSD: uvm.h,v 1.50 2013/05/30 16:29:46 tedu Exp $ */
/* $NetBSD: uvm.h,v 1.24 2000/11/27 08:40:02 chs Exp $ */
/*
@@ -79,7 +79,7 @@ struct uvm {
struct pglist page_active; /* allocated pages, in use */
struct pglist page_inactive_swp;/* pages inactive (reclaim or free) */
struct pglist page_inactive_obj;/* pages inactive (reclaim or free) */
- /* Lock order: object lock, pageqlock, then fpageqlock. */
+ /* Lock order: pageqlock, then fpageqlock. */
struct mutex fpageqlock; /* lock for free page q + pdaemon */
boolean_t page_init_done; /* TRUE if uvm_page_init() finished */
boolean_t page_idle_zero; /* TRUE if we should try to zero
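
The surviving comment pins the queue-lock order: pageqlock before fpageqlock. For illustration only, a minimal sketch of a caller honoring that order (the helper name example_both_queues is hypothetical; uvm_lock_pageq(), mtx_enter() and uvm.fpageqlock all appear elsewhere in this commit):

#include <sys/param.h>
#include <sys/mutex.h>
#include <uvm/uvm.h>

/*
 * Hypothetical helper: do work that needs both queue locks while
 * honoring the documented order (pageqlock first, then fpageqlock).
 */
static void
example_both_queues(void)
{
        uvm_lock_pageq();               /* page queue lock first */
        mtx_enter(&uvm.fpageqlock);     /* then the free page queue lock */
        /* ... operate on the page queues and the free list ... */
        mtx_leave(&uvm.fpageqlock);
        uvm_unlock_pageq();
}
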
diff --git a/sys/uvm/uvm_amap.c b/sys/uvm/uvm_amap.c
index 0433096ccbe..813cb06a9a9 100644
--- a/sys/uvm/uvm_amap.c
+++ b/sys/uvm/uvm_amap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_amap.c,v 1.48 2013/05/30 15:17:59 tedu Exp $ */
+/* $OpenBSD: uvm_amap.c,v 1.49 2013/05/30 16:29:46 tedu Exp $ */
/* $NetBSD: uvm_amap.c,v 1.27 2000/11/25 06:27:59 chs Exp $ */
/*
@@ -124,8 +124,6 @@ static __inline void pp_setreflen(int *, int, int, int);
/*
* pp_getreflen: get the reference and length for a specific offset
- *
- * => ppref's amap must be locked
*/
static __inline void
pp_getreflen(int *ppref, int offset, int *refp, int *lenp)
@@ -142,8 +140,6 @@ pp_getreflen(int *ppref, int offset, int *refp, int *lenp)
/*
* pp_setreflen: set the reference and length for a specific offset
- *
- * => ppref's amap must be locked
*/
static __inline void
pp_setreflen(int *ppref, int offset, int ref, int len)
@@ -175,8 +171,6 @@ amap_init(void)
/*
* amap_alloc1: internal function that allocates an amap, but does not
* init the overlay.
- *
- * => lock on returned amap is init'd
*/
static inline struct vm_amap *
amap_alloc1(int slots, int padslots, int waitf)
@@ -222,7 +216,6 @@ fail1:
*
* => caller should ensure sz is a multiple of PAGE_SIZE
* => reference count to new amap is set to one
- * => new amap is returned unlocked
*/
struct vm_amap *
@@ -248,7 +241,6 @@ amap_alloc(vaddr_t sz, vaddr_t padsz, int waitf)
/*
* amap_free: free an amap
*
- * => the amap must be locked
* => the amap should have a zero reference count and be empty
*/
void
@@ -272,7 +264,6 @@ amap_free(struct vm_amap *amap)
*
* => called from uvm_map when we want to extend an amap to cover
* a new mapping (rather than allocate a new one)
- * => amap should be unlocked (we will lock it)
* => to safely extend an amap it should have a reference count of
* one (thus it can't be shared)
* => XXXCDC: support padding at this level?
@@ -441,8 +432,6 @@ amap_extend(struct vm_map_entry *entry, vsize_t addsize)
* mechanisms like map entry passing that may want to write-protect
* all mappings of a shared amap.] we traverse am_anon or am_slots
* depending on the current state of the amap.
- *
- * => entry's map and amap must be locked by the caller
*/
void
amap_share_protect(struct vm_map_entry *entry, vm_prot_t prot)
@@ -481,7 +470,6 @@ amap_share_protect(struct vm_map_entry *entry, vm_prot_t prot)
*
* => called from amap_unref when the final reference to an amap is
* discarded (i.e. when reference count == 1)
- * => the amap should be locked (by the caller)
*/
void
@@ -524,7 +512,7 @@ amap_wipeout(struct vm_amap *amap)
amap->am_ref = 0; /* ... was one */
amap->am_nused = 0;
- amap_free(amap); /* will unlock and free amap */
+ amap_free(amap); /* will free amap */
}
/*
@@ -532,8 +520,6 @@ amap_wipeout(struct vm_amap *amap)
* by copying the amap if necessary.
*
* => an entry with a null amap pointer will get a new (blank) one.
- * => the map that the map entry belongs to must be locked by caller.
- * => the amap currently attached to "entry" (if any) must be unlocked.
* => if canchunk is true, then we may clip the entry into a chunk
* => "startva" and "endva" are used only if canchunk is true. they are
* used to limit chunking (e.g. if you have a large space that you
@@ -584,11 +570,8 @@ amap_copy(struct vm_map *map, struct vm_map_entry *entry, int waitf,
/*
* first check and see if we are the only map entry
* referencing the amap we currently have. if so, then we can
- * just take it over rather than copying it. note that we are
- * reading am_ref with the amap unlocked... the value can only
- * be one if we have the only reference to the amap (via our
- * locked map). if we are greater than one we fall through to
- * the next case (where we double check the value).
+ * just take it over rather than copying it. the value can only
+ * be one if we have the only reference to the amap
*/
if (entry->aref.ar_amap->am_ref == 1) {
@@ -607,9 +590,8 @@ amap_copy(struct vm_map *map, struct vm_map_entry *entry, int waitf,
srcamap = entry->aref.ar_amap;
/*
- * need to double check reference count now that we've got the
- * src amap locked down. the reference count could have
- * changed while we were in malloc. if the reference count
+ * need to double check reference count now. the reference count
+ * could have changed while we were in malloc. if the reference count
* dropped down to one we take over the old map rather than
* copying the amap.
*/
@@ -639,7 +621,7 @@ amap_copy(struct vm_map *map, struct vm_map_entry *entry, int waitf,
(amap->am_maxslot - lcv) * sizeof(struct vm_anon *));
/*
- * drop our reference to the old amap (srcamap) and unlock.
+ * drop our reference to the old amap (srcamap).
* we know that the reference count on srcamap is greater than
* one (we checked above), so there is no way we could drop
* the count to zero. [and no need to worry about freeing it]
@@ -677,15 +659,9 @@ amap_copy(struct vm_map *map, struct vm_map_entry *entry, int waitf,
* => assume parent's entry was wired, thus all pages are resident.
* => assume pages that are loaned out (loan_count) are already mapped
* read-only in all maps, and thus no need for us to worry about them
- * => assume both parent and child vm_map's are locked
* => caller passes child's map/entry in to us
- * => if we run out of memory we will unlock the amap and sleep _with_ the
- * parent and child vm_map's locked(!). we have to do this since
- * we are in the middle of a fork(2) and we can't let the parent
- * map change until we are done copying all the map entries.
* => XXXCDC: out of memory should cause fork to fail, but there is
* currently no easy way to do this (needs fix)
- * => page queues must be unlocked (we may lock them)
*/
void
@@ -697,9 +673,9 @@ amap_cow_now(struct vm_map *map, struct vm_map_entry *entry)
struct vm_page *pg, *npg;
/*
- * note that if we unlock the amap then we must ReStart the "lcv" for
- * loop because some other process could reorder the anon's in the
- * am_anon[] array on us while the lock is dropped.
+ * note that if we wait, we must ReStart the "lcv" for loop because
+ * some other process could reorder the anon's in the
+ * am_anon[] array on us.
*/
ReStart:
for (lcv = 0 ; lcv < amap->am_nused ; lcv++) {
@@ -733,7 +709,7 @@ ReStart:
if (anon->an_ref > 1 && pg->loan_count == 0) {
/*
- * if the page is busy then we have to unlock, wait for
+ * if the page is busy then we have to wait for
* it and then restart.
*/
if (pg->pg_flags & PG_BUSY) {
@@ -774,7 +750,7 @@ ReStart:
amap->am_anon[slot] = nanon; /* replace */
/*
- * drop PG_BUSY on new page ... since we have had it's
+ * drop PG_BUSY on new page ... since we have had its
* owner locked the whole time it can't be
* PG_RELEASED | PG_WANTED.
*/
@@ -796,8 +772,6 @@ ReStart:
* amap_splitref: split a single reference into two separate references
*
* => called from uvm_map's clip routines
- * => origref's map should be locked
- * => origref->ar_amap should be unlocked (we will lock)
*/
void
amap_splitref(struct vm_aref *origref, struct vm_aref *splitref, vaddr_t offset)
@@ -809,7 +783,7 @@ amap_splitref(struct vm_aref *origref, struct vm_aref *splitref, vaddr_t offset)
panic("amap_splitref: split at zero offset");
/*
- * now: amap is locked and we have a valid am_mapped array.
+ * now: we have a valid am_mapped array.
*/
if (origref->ar_amap->am_nslot - origref->ar_pageoff - leftslots <= 0)
@@ -832,8 +806,6 @@ amap_splitref(struct vm_aref *origref, struct vm_aref *splitref, vaddr_t offset)
/*
* amap_pp_establish: add a ppref array to an amap, if possible
- *
- * => amap locked by caller
*/
void
amap_pp_establish(struct vm_amap *amap)
@@ -860,7 +832,6 @@ amap_pp_establish(struct vm_amap *amap)
* amap_pp_adjref: adjust reference count to a part of an amap using the
* per-page reference count array.
*
- * => map and amap locked by caller
* => caller must check that ppref != PPREF_NONE before calling
*/
void
@@ -932,8 +903,6 @@ amap_pp_adjref(struct vm_amap *amap, int curslot, vsize_t slotlen, int adjval)
/*
* amap_wiperange: wipe out a range of an amap
* [different from amap_wipeout because the amap is kept intact]
- *
- * => both map and amap must be locked by caller.
*/
void
amap_wiperange(struct vm_amap *amap, int slotoff, int slots)
@@ -1006,7 +975,6 @@ amap_wiperange(struct vm_amap *amap, int slotoff, int slots)
/*
* amap_swap_off: pagein anonymous pages in amaps and drop swap slots.
*
- * => called with swap_syscall_lock held.
* => note that we don't always traverse all anons.
* eg. amaps being wiped out, released anons.
* => return TRUE if failed.
@@ -1079,8 +1047,6 @@ next:
/*
* amap_lookup: look up a page in an amap
- *
- * => amap should be locked by caller.
*/
struct vm_anon *
amap_lookup(struct vm_aref *aref, vaddr_t offset)
@@ -1100,7 +1066,6 @@ amap_lookup(struct vm_aref *aref, vaddr_t offset)
/*
* amap_lookups: look up a range of pages in an amap
*
- * => amap should be locked by caller.
* => XXXCDC: this interface is biased toward array-based amaps. fix.
*/
void
@@ -1124,9 +1089,6 @@ amap_lookups(struct vm_aref *aref, vaddr_t offset,
/*
* amap_add: add (or replace) a page to an amap
*
- * => caller must lock amap.
- * => if (replace) caller must lock anon because we might have to call
- * pmap_page_protect on the anon's page.
* => returns an "offset" which is meaningful to amap_unadd().
*/
void
@@ -1167,8 +1129,6 @@ amap_add(struct vm_aref *aref, vaddr_t offset, struct vm_anon *anon,
/*
* amap_unadd: remove a page from an amap
- *
- * => caller must lock amap
*/
void
amap_unadd(struct vm_aref *aref, vaddr_t offset)
@@ -1198,7 +1158,6 @@ amap_unadd(struct vm_aref *aref, vaddr_t offset)
/*
* amap_ref: gain a reference to an amap
*
- * => amap must not be locked (we will lock)
* => "offset" and "len" are in units of pages
* => called at fork time to gain the child's reference
*/
@@ -1228,8 +1187,7 @@ amap_ref(struct vm_amap *amap, vaddr_t offset, vsize_t len, int flags)
* => caller must remove all pmap-level references to this amap before
* dropping the reference
* => called from uvm_unmap_detach [only] ... note that entry is no
- * longer part of a map and thus has no need for locking
- * => amap must be unlocked (we will lock it).
+ * longer part of a map
*/
void
amap_unref(struct vm_amap *amap, vaddr_t offset, vsize_t len, boolean_t all)
@@ -1241,7 +1199,7 @@ amap_unref(struct vm_amap *amap, vaddr_t offset, vsize_t len, boolean_t all)
if (amap->am_ref-- == 1) {
amap_wipeout(amap); /* drops final ref and frees */
- return; /* no need to unlock */
+ return;
}
/*
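
The amap interfaces above keep their calling convention while the locking notes go away: lookups take a vm_aref plus a byte offset relative to the owning map entry. A hedged sketch of that convention, mirroring the amap_lookup() call in the sys_mincore() hunk later in this diff (the wrapper name is hypothetical):

#include <sys/param.h>
#include <uvm/uvm.h>

/*
 * Hypothetical wrapper: find the anon backing virtual address "va"
 * within "entry", if the entry has an amap at all.
 */
static struct vm_anon *
example_amap_lookup(struct vm_map_entry *entry, vaddr_t va)
{
        if (entry->aref.ar_amap == NULL)
                return (NULL);
        return (amap_lookup(&entry->aref, va - entry->start));
}
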
diff --git a/sys/uvm/uvm_amap.h b/sys/uvm/uvm_amap.h
index b598bdee8c0..f838262f941 100644
--- a/sys/uvm/uvm_amap.h
+++ b/sys/uvm/uvm_amap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_amap.h,v 1.18 2009/03/25 20:00:18 oga Exp $ */
+/* $OpenBSD: uvm_amap.h,v 1.19 2013/05/30 16:29:46 tedu Exp $ */
/* $NetBSD: uvm_amap.h,v 1.14 2001/02/18 21:19:08 chs Exp $ */
/*
@@ -232,7 +232,7 @@ struct vm_amap {
}
/*
- * lock/unlock/refs/flags macros
+ * flags macros
*/
#define amap_flags(AMAP) ((AMAP)->am_flags)
diff --git a/sys/uvm/uvm_anon.c b/sys/uvm/uvm_anon.c
index 0c6e176f161..02e2132c1f3 100644
--- a/sys/uvm/uvm_anon.c
+++ b/sys/uvm/uvm_anon.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_anon.c,v 1.36 2013/05/30 15:17:59 tedu Exp $ */
+/* $OpenBSD: uvm_anon.c,v 1.37 2013/05/30 16:29:46 tedu Exp $ */
/* $NetBSD: uvm_anon.c,v 1.10 2000/11/25 06:27:59 chs Exp $ */
/*
@@ -82,7 +82,6 @@ uvm_analloc(void)
*
* => caller must remove anon from its amap before calling (if it was in
* an amap).
- * => anon must be unlocked and have a zero reference count.
* => we may lock the pageq's.
*/
void
@@ -113,7 +112,7 @@ uvm_anfree(struct vm_anon *anon)
if (pg) {
/*
- * if the page is owned by a uobject (now locked), then we must
+ * if the page is owned by a uobject, then we must
* kill the loan on the page rather than free it.
*/
@@ -168,8 +167,6 @@ uvm_anfree(struct vm_anon *anon)
/*
* uvm_anon_dropswap: release any swap resources from this anon.
- *
- * => anon must be locked or have a reference count of 0.
*/
void
uvm_anon_dropswap(struct vm_anon *anon)
@@ -185,10 +182,8 @@ uvm_anon_dropswap(struct vm_anon *anon)
/*
* uvm_anon_lockloanpg: given a locked anon, lock its resident page
*
- * => anon is locked by caller
- * => on return: anon is locked
+ * => on return:
* if there is a resident page:
- * if it has a uobject, it is locked by us
* if it is ownerless, we take over as owner
* we return the resident page (it can change during
* this function)
@@ -240,7 +235,6 @@ uvm_anon_lockloanpg(struct vm_anon *anon)
/*
* fetch an anon's page.
*
- * => anon must be locked, and is unlocked upon return.
* => returns TRUE if pagein was aborted due to lack of memory.
*/
@@ -251,12 +245,7 @@ uvm_anon_pagein(struct vm_anon *anon)
struct uvm_object *uobj;
int rv;
- /* locked: anon */
rv = uvmfault_anonget(NULL, NULL, anon);
- /*
- * if rv == VM_PAGER_OK, anon is still locked, else anon
- * is unlocked
- */
switch (rv) {
case VM_PAGER_OK:
@@ -302,9 +291,5 @@ uvm_anon_pagein(struct vm_anon *anon)
uvm_pagedeactivate(pg);
uvm_unlock_pageq();
- /*
- * unlock the anon and we're done.
- */
-
return FALSE;
}
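
uvm_anfree() above still expects a zero reference count and an anon already removed from its amap. As a hedged sketch of the usual drop pattern, matching what the amap code in this same commit does (the helper name is hypothetical):

#include <sys/param.h>
#include <uvm/uvm.h>

/*
 * Hypothetical helper: drop one reference to an anon; the final
 * reference frees it via uvm_anfree().
 */
static void
example_anon_drop(struct vm_anon *anon)
{
        /* caller has already removed the anon from any amap (see above) */
        if (--anon->an_ref == 0)
                uvm_anfree(anon);
}
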
diff --git a/sys/uvm/uvm_anon.h b/sys/uvm/uvm_anon.h
index effb7f4657a..5fcf974babb 100644
--- a/sys/uvm/uvm_anon.h
+++ b/sys/uvm/uvm_anon.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_anon.h,v 1.16 2013/05/30 15:17:59 tedu Exp $ */
+/* $OpenBSD: uvm_anon.h,v 1.17 2013/05/30 16:29:46 tedu Exp $ */
/* $NetBSD: uvm_anon.h,v 1.13 2000/12/27 09:17:04 chs Exp $ */
/*
@@ -45,12 +45,11 @@
*/
struct vm_anon {
- struct vm_page *an_page; /* if in RAM [an_lock] */
- int an_ref; /* reference count [an_lock] */
+ struct vm_page *an_page; /* if in RAM */
+ int an_ref; /* reference count */
/*
- * Drum swap slot # (if != 0) [an_lock or not, if we hold an_page
- * PG_BUSY]
+ * Drum swap slot # (if != 0) [if we hold an_page, PG_BUSY]
*/
int an_swslot;
};
diff --git a/sys/uvm/uvm_aobj.c b/sys/uvm/uvm_aobj.c
index f668980602d..824ac7b9445 100644
--- a/sys/uvm/uvm_aobj.c
+++ b/sys/uvm/uvm_aobj.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_aobj.c,v 1.56 2013/05/30 15:17:59 tedu Exp $ */
+/* $OpenBSD: uvm_aobj.c,v 1.57 2013/05/30 16:29:46 tedu Exp $ */
/* $NetBSD: uvm_aobj.c,v 1.39 2001/02/18 21:19:08 chs Exp $ */
/*
@@ -144,7 +144,7 @@ struct pool uao_swhash_elt_pool;
*/
struct uvm_aobj {
- struct uvm_object u_obj; /* has: lock, pgops, memt, #pages, #refs */
+ struct uvm_object u_obj; /* has: pgops, memt, #pages, #refs */
int u_pages; /* number of pages in entire object */
int u_flags; /* the flags (see uvm_aobj.h) */
/*
@@ -222,8 +222,6 @@ static struct mutex uao_list_lock = MUTEX_INITIALIZER(IPL_NONE);
/*
* uao_find_swhash_elt: find (or create) a hash table entry for a page
* offset.
- *
- * => the object should be locked by the caller
*/
static struct uao_swhash_elt *
@@ -261,8 +259,6 @@ uao_find_swhash_elt(struct uvm_aobj *aobj, int pageidx, boolean_t create)
/*
* uao_find_swslot: find the swap slot number for an aobj/pageidx
- *
- * => object must be locked by caller
*/
__inline static int
uao_find_swslot(struct uvm_aobj *aobj, int pageidx)
@@ -299,7 +295,6 @@ uao_find_swslot(struct uvm_aobj *aobj, int pageidx)
* uao_set_swslot: set the swap slot for a page in an aobj.
*
* => setting a slot to zero frees the slot
- * => object must be locked by caller
*/
int
uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot)
@@ -490,9 +485,6 @@ uao_create(vsize_t size, int flags)
/*
* allocate hash/array if necessary
- *
- * note: in the KERNSWAP case no need to worry about locking since
- * we are still booting we should be the only thread around.
*/
if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
@@ -562,9 +554,6 @@ uao_init(void)
/*
* uao_reference: add a ref to an aobj
- *
- * => aobj must be unlocked
- * => just lock it and call the locked version
*/
void
uao_reference(struct uvm_object *uobj)
@@ -573,12 +562,7 @@ uao_reference(struct uvm_object *uobj)
}
/*
- * uao_reference_locked: add a ref to an aobj that is already locked
- *
- * => aobj must be locked
- * this needs to be separate from the normal routine
- * since sometimes we need to add a reference to an aobj when
- * it's already locked.
+ * uao_reference_locked: add a ref to an aobj
*/
void
uao_reference_locked(struct uvm_object *uobj)
@@ -597,9 +581,6 @@ uao_reference_locked(struct uvm_object *uobj)
/*
* uao_detach: drop a reference to an aobj
- *
- * => aobj must be unlocked
- * => just lock it and call the locked version
*/
void
uao_detach(struct uvm_object *uobj)
@@ -611,10 +592,7 @@ uao_detach(struct uvm_object *uobj)
/*
* uao_detach_locked: drop a reference to an aobj
*
- * => aobj must be locked, and is unlocked (or freed) upon return.
- * this needs to be separate from the normal routine
- * since sometimes we need to detach from an aobj when
- * it's already locked.
+ * => aobj may be freed upon return.
*/
void
uao_detach_locked(struct uvm_object *uobj)
@@ -671,14 +649,7 @@ uao_detach_locked(struct uvm_object *uobj)
/*
* uao_flush: "flush" pages out of a uvm object
*
- * => object should be locked by caller. we may _unlock_ the object
- * if (and only if) we need to clean a page (PGO_CLEANIT).
- * XXXJRT Currently, however, we don't. In the case of cleaning
- * XXXJRT a page, we simply just deactivate it. Should probably
- * XXXJRT handle this better, in the future (although "flushing"
- * XXXJRT anonymous memory isn't terribly important).
- * => if PGO_CLEANIT is not set, then we will neither unlock the object
- * or block.
+ * => if PGO_CLEANIT is not set, then we will not block.
* => if PGO_ALLPAGE is set, then all pages in the object are valid targets
* for flushing.
* => NOTE: we are allowed to lock the page queues, so the caller
@@ -718,7 +689,6 @@ uao_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0)
return (TRUE);
- /* locked: uobj */
curoff = start;
for (;;) {
if (curoff < stop) {
@@ -810,8 +780,6 @@ uao_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
* so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
* then we will need to return VM_PAGER_UNLOCK.
*
- * => prefer map unlocked (not required)
- * => object must be locked! we will _unlock_ it before starting any I/O.
* => flags: PGO_ALLPAGES: get all of the pages
* PGO_LOCKED: fault data structures are locked
* => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
@@ -838,10 +806,8 @@ uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
if (flags & PGO_LOCKED) {
/*
- * step 1a: get pages that are already resident. only do
- * this if the data structures are locked (i.e. the first
- * time through).
- */
+ * step 1a: get pages that are already resident.
+ */
done = TRUE; /* be optimistic */
gotpages = 0; /* # of pages we got so far */
@@ -885,7 +851,7 @@ uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
}
/*
- * useful page: busy/lock it and plug it in our
+ * useful page: busy it and plug it in our
* result array
*/
/* caller must un-busy this page */
@@ -912,7 +878,7 @@ uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
/*
* step 2: get non-resident or busy pages.
- * object is locked. data structures are unlocked.
+ * data structures are unlocked.
*/
for (lcv = 0, current_offset = offset ; lcv < maxpages ;
@@ -1020,7 +986,6 @@ uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
} else {
/*
* page in the swapped-out page.
- * unlock object for i/o, relock when done.
*/
rv = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
@@ -1071,17 +1036,11 @@ uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
} /* lcv loop */
- /*
- * finally, unlock object and return.
- */
-
return(VM_PAGER_OK);
}
/*
* uao_dropswap: release any swap resources from this aobj page.
- *
- * => aobj must be locked or have a reference count of 0.
*/
int
@@ -1100,7 +1059,6 @@ uao_dropswap(struct uvm_object *uobj, int pageidx)
/*
* page in every page in every aobj that is paged-out to a range of swslots.
*
- * => nothing should be locked.
* => returns TRUE if pagein was aborted due to lack of memory.
*/
boolean_t
@@ -1176,7 +1134,6 @@ uao_swap_off(int startslot, int endslot)
/*
* page in any pages from aobj in the given range.
*
- * => aobj must be locked and is returned locked.
* => returns TRUE if pagein was aborted due to lack of memory.
*/
static boolean_t
@@ -1250,8 +1207,6 @@ restart:
/*
* page in a page from an aobj. used for swap_off.
* returns TRUE if pagein was aborted due to lack of memory.
- *
- * => aobj must be locked and is returned locked.
*/
static boolean_t
uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
@@ -1261,10 +1216,8 @@ uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
pg = NULL;
npages = 1;
- /* locked: aobj */
rv = uao_get(&aobj->u_obj, pageidx << PAGE_SHIFT,
&pg, &npages, 0, VM_PROT_READ|VM_PROT_WRITE, 0, 0);
- /* unlocked: aobj */
switch (rv) {
case VM_PAGER_OK:
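
The aobj entry points above keep their reference-counting contract: uao_create() hands back one reference, and the last uao_detach() frees the object. A hypothetical lifecycle sketch, not part of the commit:

#include <sys/param.h>
#include <uvm/uvm.h>

/*
 * Hypothetical lifecycle of an anonymous UVM object.
 */
static void
example_aobj_lifecycle(void)
{
        struct uvm_object *uobj;

        uobj = uao_create(4 * PAGE_SIZE, 0);    /* ref count starts at one */
        uao_reference(uobj);                    /* now two */
        uao_detach(uobj);                       /* back to one */
        uao_detach(uobj);                       /* final ref: aobj is freed */
}
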
diff --git a/sys/uvm/uvm_device.c b/sys/uvm/uvm_device.c
index a32bc5cefca..9d9a2cdac38 100644
--- a/sys/uvm/uvm_device.c
+++ b/sys/uvm/uvm_device.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_device.c,v 1.41 2013/05/30 15:17:59 tedu Exp $ */
+/* $OpenBSD: uvm_device.c,v 1.42 2013/05/30 16:29:46 tedu Exp $ */
/* $NetBSD: uvm_device.c,v 1.30 2000/11/25 06:27:59 chs Exp $ */
/*
@@ -94,8 +94,7 @@ struct uvm_pagerops uvm_deviceops = {
* get a VM object that is associated with a device. allocate a new
* one if needed.
*
- * => caller must _not_ already be holding the lock on the uvm_object.
- * => in fact, nothing should be locked so that we can sleep here.
+ * => nothing should be locked so that we can sleep here.
*
* The last two arguments (off and size) are only used for access checking.
*/
@@ -241,8 +240,6 @@ udv_attach(void *arg, vm_prot_t accessprot, voff_t off, vsize_t size)
* add a reference to a VM object. Note that the reference count must
* already be one (the passed in reference) so there is no chance of the
* udv being released or locked out here.
- *
- * => caller must call with object unlocked.
*/
static void
@@ -256,8 +253,6 @@ udv_reference(struct uvm_object *uobj)
* udv_detach
*
* remove a reference to a VM object.
- *
- * => caller must call with object unlocked and map locked.
*/
static void
@@ -321,8 +316,6 @@ udv_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
* => rather than having a "get" function, we have a fault routine
* since we don't return vm_pages we need full control over the
* pmap_enter map in
- * => all the usual fault data structured are locked by the caller
- * (i.e. maps(read), amap (if any), uobj)
* => on return, we unlock all fault data structures
* => flags: PGO_ALLPAGES: get all of the pages
* PGO_LOCKED: fault data structures are locked
diff --git a/sys/uvm/uvm_fault.c b/sys/uvm/uvm_fault.c
index b7d53d31197..2f802f99ab0 100644
--- a/sys/uvm/uvm_fault.c
+++ b/sys/uvm/uvm_fault.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_fault.c,v 1.66 2013/05/30 15:17:59 tedu Exp $ */
+/* $OpenBSD: uvm_fault.c,v 1.67 2013/05/30 16:29:46 tedu Exp $ */
/* $NetBSD: uvm_fault.c,v 1.51 2000/08/06 00:22:53 thorpej Exp $ */
/*
@@ -222,8 +222,6 @@ uvmfault_anonflush(struct vm_anon **anons, int n)
/*
* uvmfault_amapcopy: clear "needs_copy" in a map.
*
- * => called with VM data structures unlocked (usually, see below)
- * => we get a write lock on the maps and clear needs_copy for a VA
* => if we are out of RAM we sleep (waiting for more)
*/
@@ -253,7 +251,7 @@ uvmfault_amapcopy(struct uvm_faultinfo *ufi)
ufi->orig_rvaddr, ufi->orig_rvaddr + 1);
/*
- * didn't work? must be out of RAM. unlock and sleep.
+ * didn't work? must be out of RAM. sleep.
*/
if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
@@ -263,7 +261,7 @@ uvmfault_amapcopy(struct uvm_faultinfo *ufi)
}
/*
- * got it! unlock and return.
+ * got it!
*/
uvmfault_unlockmaps(ufi, TRUE);
@@ -276,16 +274,11 @@ uvmfault_amapcopy(struct uvm_faultinfo *ufi)
* uvmfault_anonget: get data in an anon into a non-busy, non-released
* page in that anon.
*
- * => maps, amap, and anon locked by caller.
- * => if we fail (result != VM_PAGER_OK) we unlock everything.
- * => if we are successful, we return with everything still locked.
* => we don't move the page on the queues [gets moved later]
* => if we allocate a new page [we_own], it gets put on the queues.
* either way, the result is that the page is on the queues at return time
* => for pages which are on loan from a uvm_object (and thus are not
* owned by the anon): if successful, we return with the owning object
- * locked. the caller must unlock this object when it unlocks everything
- * else.
*/
int
@@ -317,7 +310,7 @@ uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
/*
* if there is a resident page and it is loaned, then anon
* may not own it. call out to uvm_anon_lockpage() to ensure
- * the real owner of the page has been identified and locked.
+ * the real owner of the page has been identified.
*/
if (pg && pg->loan_count)
@@ -418,7 +411,6 @@ uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
if (we_own) {
if (pg->pg_flags & PG_WANTED) {
- /* still holding object lock */
wakeup(pg);
}
/* un-busy! */
@@ -619,7 +611,6 @@ ReFault:
if (uvmfault_lookup(&ufi, FALSE) == FALSE) {
return (EFAULT);
}
- /* locked: maps(read) */
#ifdef DIAGNOSTIC
if ((ufi.map->flags & VM_MAP_PAGEABLE) == 0)
@@ -649,10 +640,7 @@ ReFault:
access_type = enter_prot; /* full access for wired */
/*
- * handle "needs_copy" case. if we need to copy the amap we will
- * have to drop our readlock and relock it with a write lock. (we
- * need a write lock to change anything in a map entry [e.g.
- * needs_copy]).
+ * handle "needs_copy" case.
*/
if (UVM_ET_ISNEEDSCOPY(ufi.entry)) {
@@ -729,10 +717,8 @@ ReFault:
}
- /* locked: maps(read) */
-
/*
- * if we've got an amap, lock it and extract current anons.
+ * if we've got an amap, extract current anons.
*/
if (amap) {
@@ -743,8 +729,6 @@ ReFault:
anons = NULL; /* to be safe */
}
- /* locked: maps(read), amap(if there) */
-
/*
* for MADV_SEQUENTIAL mappings we want to deactivate the back pages
* now and then forget about them (for the rest of the fault).
@@ -771,8 +755,6 @@ ReFault:
centeridx = 0;
}
- /* locked: maps(read), amap(if there) */
-
/*
* map in the backpages and frontpages we found in the amap in hopes
* of preventing future faults. we also init the pages[] array as
@@ -835,12 +817,11 @@ ReFault:
pmap_update(ufi.orig_map->pmap);
}
- /* locked: maps(read), amap(if there) */
/* (shadowed == TRUE) if there is an anon at the faulting address */
/*
* note that if we are really short of RAM we could sleep in the above
- * call to pmap_enter with everything locked. bad?
+ * call to pmap_enter. bad?
*
* XXX Actually, that is bad; pmap_enter() should just fail in that
* XXX case. --thorpej
@@ -855,13 +836,10 @@ ReFault:
*/
if (uobj && shadowed == FALSE && uobj->pgops->pgo_fault != NULL) {
- /* locked: maps(read), amap (if there), uobj */
result = uobj->pgops->pgo_fault(&ufi, startva, pages, npages,
centeridx, fault_type, access_type,
PGO_LOCKED);
- /* locked: nothing, pgo_fault has unlocked everything */
-
if (result == VM_PAGER_OK)
return (0); /* pgo_fault did pmap enter */
else if (result == VM_PAGER_REFAULT)
@@ -881,11 +859,6 @@ ReFault:
*/
if (uobj && shadowed == FALSE) {
- /* locked (!shadowed): maps(read), amap (if there), uobj */
- /*
- * the following call to pgo_get does _not_ change locking state
- */
-
uvmexp.fltlget++;
gotpages = npages;
(void) uobj->pgops->pgo_get(uobj, ufi.entry->offset +
@@ -968,10 +941,6 @@ ReFault:
uobjpage = NULL;
}
- /* locked (shadowed): maps(read), amap */
- /* locked (!shadowed): maps(read), amap(if there),
- uobj(if !null), uobjpage(if !null) */
-
/*
* note that at this point we are done with any front or back pages.
* we are now going to focus on the center page (i.e. the one we've
@@ -994,16 +963,12 @@ ReFault:
if (shadowed == FALSE)
goto Case2;
- /* locked: maps(read), amap */
-
/*
* handle case 1: fault on an anon in our amap
*/
anon = anons[centeridx];
- /* locked: maps(read), amap, anon */
-
/*
* no matter if we have case 1A or case 1B we are going to need to
* have the anon's memory resident. ensure that now.
@@ -1011,11 +976,7 @@ ReFault:
/*
* let uvmfault_anonget do the dirty work.
- * if it fails (!OK) it will unlock everything for us.
- * if it succeeds, locks are still valid and locked.
* also, if it is OK, then the anon's page is on the queues.
- * if the page is on loan from a uvm_object, then anonget will
- * lock that object for us if it does not fail.
*/
result = uvmfault_anonget(&ufi, amap, anon);
@@ -1046,9 +1007,7 @@ ReFault:
* uobj is non null if the page is on loan from an object (i.e. uobj)
*/
- uobj = anon->an_page->uobject; /* locked by anonget if !NULL */
-
- /* locked: maps(read), amap, anon, uobj(if one) */
+ uobj = anon->an_page->uobject;
/*
* special handling for loaned pages
@@ -1092,8 +1051,7 @@ ReFault:
}
/*
- * copy data, kill loan, and drop uobj lock
- * (if any)
+ * copy data, kill loan
*/
/* copy old -> new */
uvm_pagecopy(anon->an_page, pg);
@@ -1136,14 +1094,14 @@ ReFault:
* it is > 1 and we are only dropping one ref.
*
* in the (hopefully very rare) case that we are out of RAM we
- * will unlock, wait for more RAM, and refault.
+ * will wait for more RAM, and refault.
*
* if we are out of anon VM we kill the process (XXX: could wait?).
*/
if ((access_type & VM_PROT_WRITE) != 0 && anon->an_ref > 1) {
uvmexp.flt_acow++;
- oanon = anon; /* oanon = old, locked anon */
+ oanon = anon; /* oanon = old */
anon = uvm_analloc();
if (anon) {
pg = uvm_pagealloc(NULL, 0, anon, 0);
@@ -1178,23 +1136,21 @@ ReFault:
oanon->an_ref--;
/*
- * note: oanon still locked. anon is _not_ locked, but we
- * have the sole references to in from amap which _is_ locked.
+ * note: anon is _not_ locked, but we have the sole reference
+ * to it from the amap.
* thus, no one can get at it until we are done with it.
*/
} else {
uvmexp.flt_anon++;
- oanon = anon; /* old, locked anon is same as anon */
+ oanon = anon;
pg = anon->an_page;
if (anon->an_ref > 1) /* disallow writes to ref > 1 anons */
enter_prot = enter_prot & ~VM_PROT_WRITE;
}
- /* locked: maps(read), amap, oanon */
-
/*
* now map the page in ...
* XXX: old fault unlocks object before pmap_enter. this seems
@@ -1262,11 +1218,6 @@ Case2:
*/
/*
- * locked:
- * maps(read), amap(if there), uobj(if !null), uobjpage(if !null)
- */
-
- /*
* note that uobjpage can not be PGO_DONTCARE at this point. we now
* set uobjpage to PGO_DONTCARE if we are doing a zero fill. if we
* have a backing object, check and see if we are going to promote
@@ -1286,7 +1237,7 @@ Case2:
* if uobjpage is not null then we do not need to do I/O to get the
* uobjpage.
*
- * if uobjpage is null, then we need to unlock and ask the pager to
+ * if uobjpage is null, then we need to ask the pager to
* get the data for us. once we have the data, we need to reverify
* the state the world. we are currently not holding any resources.
*/
@@ -1298,9 +1249,7 @@ Case2:
/* update rusage counters */
curproc->p_ru.ru_majflt++;
- /* locked: maps(read), amap(if there), uobj */
uvmfault_unlockall(&ufi, amap, NULL, NULL);
- /* locked: uobj */
uvmexp.fltget++;
gotpages = 1;
@@ -1309,8 +1258,6 @@ Case2:
0, access_type & MASK(ufi.entry), ufi.entry->advice,
PGO_SYNCIO);
- /* locked: uobjpage(if result OK) */
-
/*
* recover from I/O
*/
@@ -1326,21 +1273,15 @@ Case2:
return (EACCES); /* XXX i/o error */
}
- /* locked: uobjpage */
-
/*
- * re-verify the state of the world by first trying to relock
- * the maps. always relock the object.
+ * re-verify the state of the world.
*/
locked = uvmfault_relock(&ufi);
- /* locked(locked): maps(read), amap(if !null), uobj, uobjpage */
- /* locked(!locked): uobj, uobjpage */
-
/*
* Re-verify that amap slot is still free. if there is
- * a problem, we unlock and clean up.
+ * a problem, we clean up.
*/
if (locked && amap && amap_lookup(&ufi.entry->aref,
@@ -1372,19 +1313,12 @@ Case2:
}
/*
- * we have the data in uobjpage which is PG_BUSY and we are
- * holding object lock.
+ * we have the data in uobjpage which is PG_BUSY
*/
- /* locked: maps(read), amap(if !null), uobj, uobjpage */
}
/*
- * locked:
- * maps(read), amap(if !null), uobj(if !null), uobjpage(if uobj)
- */
-
- /*
* notes:
* - at this point uobjpage can not be NULL
* - at this point uobjpage could be PG_WANTED (handle later)
@@ -1462,7 +1396,6 @@ Case2:
pmap_page_protect(uobjpage, VM_PROT_NONE);
if (uobjpage->pg_flags & PG_WANTED)
wakeup(uobjpage);
- /* uobj still locked */
atomic_clearbits_int(&uobjpage->pg_flags,
PG_BUSY|PG_WANTED);
UVM_PAGE_OWN(uobjpage, NULL);
@@ -1523,7 +1456,6 @@ Case2:
*/
if (uobjpage != PGO_DONTCARE) {
if (uobjpage->pg_flags & PG_WANTED)
- /* still holding object lock */
wakeup(uobjpage);
uvm_lock_pageq();
@@ -1570,7 +1502,6 @@ Case2:
*/
if (uobjpage->pg_flags & PG_WANTED)
- /* still have the obj lock */
wakeup(uobjpage);
atomic_clearbits_int(&uobjpage->pg_flags,
PG_BUSY|PG_WANTED);
@@ -1592,9 +1523,6 @@ Case2:
}
/*
- * locked:
- * maps(read), amap(if !null), uobj(if !null), uobjpage(if uobj)
- *
* note: pg is either the uobjpage or the new page in the new anon
*/
@@ -1616,7 +1544,7 @@ Case2:
*/
if (pg->pg_flags & PG_WANTED)
- wakeup(pg); /* lock still held */
+ wakeup(pg);
atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE|PG_WANTED);
UVM_PAGE_OWN(pg, NULL);
@@ -1653,7 +1581,7 @@ Case2:
uvm_unlock_pageq();
if (pg->pg_flags & PG_WANTED)
- wakeup(pg); /* lock still held */
+ wakeup(pg);
atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE|PG_WANTED);
UVM_PAGE_OWN(pg, NULL);
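
The fault hunks above repeat one un-busy idiom: wake any PG_WANTED waiter, clear the busy bits, and drop page ownership. Condensed into a hypothetical helper (names and calls are taken from the hunks themselves, not new kernel API):

#include <sys/param.h>
#include <sys/systm.h>
#include <uvm/uvm.h>

/*
 * Hypothetical helper: un-busy a page after fault handling, waking
 * anyone who set PG_WANTED while it was busy.
 */
static void
example_unbusy(struct vm_page *pg)
{
        if (pg->pg_flags & PG_WANTED)
                wakeup(pg);
        atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE|PG_WANTED);
        UVM_PAGE_OWN(pg, NULL);
}
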
diff --git a/sys/uvm/uvm_map.h b/sys/uvm/uvm_map.h
index 236a533c46c..1a9d3a8e1d5 100644
--- a/sys/uvm/uvm_map.h
+++ b/sys/uvm/uvm_map.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map.h,v 1.49 2013/05/30 15:17:59 tedu Exp $ */
+/* $OpenBSD: uvm_map.h,v 1.50 2013/05/30 16:29:46 tedu Exp $ */
/* $NetBSD: uvm_map.h,v 1.24 2001/02/18 21:19:08 chs Exp $ */
/*
@@ -431,8 +431,6 @@ void uvm_unmap_remove(struct vm_map*, vaddr_t, vaddr_t,
*
* vm_map_unbusy: clear busy status on a map.
*
- * Note that "intrsafe" maps use only exclusive, spin locks. We simply
- * use the sleep lock's interlock for this.
*/
#ifdef _KERNEL
diff --git a/sys/uvm/uvm_mmap.c b/sys/uvm/uvm_mmap.c
index 8ad58fb4501..a8e6511b2b6 100644
--- a/sys/uvm/uvm_mmap.c
+++ b/sys/uvm/uvm_mmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_mmap.c,v 1.92 2013/05/30 15:17:59 tedu Exp $ */
+/* $OpenBSD: uvm_mmap.c,v 1.93 2013/05/30 16:29:46 tedu Exp $ */
/* $NetBSD: uvm_mmap.c,v 1.49 2001/02/18 21:19:08 chs Exp $ */
/*
@@ -264,7 +264,6 @@ sys_mincore(struct proc *p, void *v, register_t *retval)
/* Check the top layer first. */
anon = amap_lookup(&entry->aref,
start - entry->start);
- /* Don't need to lock anon here. */
if (anon != NULL && anon->an_page != NULL) {
/*
* Anon has the page for this entry
diff --git a/sys/uvm/uvm_page.c b/sys/uvm/uvm_page.c
index cdce265a4d0..27cd5ad0b1c 100644
--- a/sys/uvm/uvm_page.c
+++ b/sys/uvm/uvm_page.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_page.c,v 1.124 2013/05/30 15:17:59 tedu Exp $ */
+/* $OpenBSD: uvm_page.c,v 1.125 2013/05/30 16:29:46 tedu Exp $ */
/* $NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $ */
/*
@@ -142,7 +142,6 @@ static void uvm_pageremove(struct vm_page *);
/*
* uvm_pageinsert: insert a page in the object
*
- * => caller must lock object
* => caller must lock page queues XXX questionable
* => call should have already set pg's object and offset pointers
* and bumped the version counter
@@ -164,7 +163,6 @@ uvm_pageinsert(struct vm_page *pg)
/*
* uvm_page_remove: remove page from object
*
- * => caller must lock object
* => caller must lock page queues
*/
@@ -940,8 +938,6 @@ uvm_pagerealloc_multi(struct uvm_object *obj, voff_t off, vsize_t size,
*
* => return null if no pages free
* => wake up pagedaemon if number of free pages drops below low water mark
- * => if obj != NULL, obj must be locked (to put in tree)
- * => if anon != NULL, anon must be locked (to put in anon)
* => only one of obj or anon can be non-null
* => caller must activate/deactivate page if it is not wired.
*/
@@ -1009,8 +1005,6 @@ uvm_pagealloc(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
/*
* uvm_pagerealloc: reallocate a page from one object to another
- *
- * => both objects must be locked
*/
void
@@ -1043,7 +1037,6 @@ uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
*
* => erase page's identity (i.e. remove from object)
* => put page on free list
- * => caller must lock owning object (either anon or uvm_object)
* => caller must lock page queues
* => assumes all valid mappings of pg are gone
*/
@@ -1169,8 +1162,7 @@ uvm_pagefree(struct vm_page *pg)
* uvm_page_unbusy: unbusy an array of pages.
*
* => pages must either all belong to the same object, or all belong to anons.
- * => if pages are object-owned, object must be locked.
- * => if pages are anon-owned, anons must be unlockd and have 0 refcount.
+ * => if pages are anon-owned, anons must have 0 refcount.
*/
void
@@ -1219,7 +1211,6 @@ uvm_page_unbusy(struct vm_page **pgs, int npgs)
* => this is a debugging function that keeps track of who sets PG_BUSY
* and where they do it. it can be used to track down problems
* such a process setting "PG_BUSY" and never releasing it.
- * => page's object [if any] must be locked
* => if "tag" is NULL then we are releasing page ownership
*/
void
@@ -1416,9 +1407,6 @@ PHYS_TO_VM_PAGE(paddr_t pa)
/*
* uvm_pagelookup: look up a page
- *
- * => caller should lock object to keep someone from pulling the page
- * out from under it
*/
struct vm_page *
uvm_pagelookup(struct uvm_object *obj, voff_t off)
@@ -1547,9 +1535,6 @@ uvm_pageactivate(struct vm_page *pg)
/*
* uvm_pagezero: zero fill a page
- *
- * => if page is part of an object then the object should be locked
- * to protect pg->flags.
*/
void
uvm_pagezero(struct vm_page *pg)
@@ -1560,9 +1545,6 @@ uvm_pagezero(struct vm_page *pg)
/*
* uvm_pagecopy: copy a page
- *
- * => if page is part of an object then the object should be locked
- * to protect pg->flags.
*/
void
uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
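
uvm_pagealloc()'s contract above allows only one of obj or anon to be non-NULL. Two hedged call shapes, with hypothetical wrapper names and assuming the UVM_PGA_ZERO flag from uvm_extern.h:

#include <sys/param.h>
#include <uvm/uvm.h>

static struct vm_page *
example_anon_page(struct vm_anon *anon)
{
        /* page owned by an anon: the object argument must be NULL */
        return (uvm_pagealloc(NULL, 0, anon, 0));
}

static struct vm_page *
example_object_page(struct uvm_object *uobj, voff_t off)
{
        /* zeroed page hung off a uvm_object: the anon argument must be NULL */
        return (uvm_pagealloc(uobj, off, NULL, UVM_PGA_ZERO));
}
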
diff --git a/sys/uvm/uvm_page.h b/sys/uvm/uvm_page.h
index 428f82e3a5f..5bb3897b360 100644
--- a/sys/uvm/uvm_page.h
+++ b/sys/uvm/uvm_page.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_page.h,v 1.49 2013/05/30 15:17:59 tedu Exp $ */
+/* $OpenBSD: uvm_page.h,v 1.50 2013/05/30 16:29:46 tedu Exp $ */
/* $NetBSD: uvm_page.h,v 1.19 2000/12/28 08:24:55 chs Exp $ */
/*
@@ -90,9 +90,8 @@
* and offset to which this page belongs (for pageout),
* and sundry status bits.
*
- * Fields in this structure are locked either by the lock on the
- * object that the page belongs to (O) or by the lock on the page
- * queues (P) [or both].
+ * Fields in this structure are possibly locked by the lock on the page
+ * queues (P).
*/
#include <uvm/uvm_extern.h>
@@ -102,20 +101,20 @@ TAILQ_HEAD(pglist, vm_page);
struct vm_page {
TAILQ_ENTRY(vm_page) pageq; /* queue info for FIFO
* queue or free list (P) */
- RB_ENTRY(vm_page) objt; /* object tree (O)*/
+ RB_ENTRY(vm_page) objt; /* object tree */
- struct vm_anon *uanon; /* anon (O,P) */
- struct uvm_object *uobject; /* object (O,P) */
- voff_t offset; /* offset into object (O,P) */
+ struct vm_anon *uanon; /* anon (P) */
+ struct uvm_object *uobject; /* object (P) */
+ voff_t offset; /* offset into object (P) */
- u_int pg_flags; /* object flags [O or P] */
+ u_int pg_flags; /* object flags [P] */
- u_int pg_version; /* version count [O] */
+ u_int pg_version; /* version count */
u_int wire_count; /* wired down map refs [P] */
u_int loan_count; /* number of active loans
- * to read: [O or P]
- * to modify: [O _and_ P] */
+ * to read: [P]
+ * to modify: [P] */
paddr_t phys_addr; /* physical address of page */
psize_t fpgsz; /* free page range size */
@@ -137,7 +136,6 @@ struct vm_page {
/*
* locking rules:
- * PG_ ==> locked by object lock
* PQ_ ==> lock by page queue lock
* PQ_FREE is locked by free queue lock and is mutex with all other PQs
* pg_flags may only be changed using the atomic operations.
diff --git a/sys/uvm/uvm_pager.c b/sys/uvm/uvm_pager.c
index 84ef7cf5e88..10a4dc55777 100644
--- a/sys/uvm/uvm_pager.c
+++ b/sys/uvm/uvm_pager.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_pager.c,v 1.61 2013/05/30 15:17:59 tedu Exp $ */
+/* $OpenBSD: uvm_pager.c,v 1.62 2013/05/30 16:29:46 tedu Exp $ */
/* $NetBSD: uvm_pager.c,v 1.36 2000/11/27 18:26:41 chs Exp $ */
/*
@@ -448,7 +448,7 @@ uvm_mk_pcluster(struct uvm_object *uobj, struct vm_page **pps, int *npages,
*
* => page queues must be locked by caller
* => if page is not swap-backed, then "uobj" points to the object
- * backing it. this object should be locked by the caller.
+ * backing it.
* => if page is swap-backed, then "uobj" should be NULL.
* => "pg" should be PG_BUSY (by caller), and !PG_CLEAN
* for swap-backed memory, "pg" can be NULL if there is no page
@@ -466,17 +466,10 @@ uvm_mk_pcluster(struct uvm_object *uobj, struct vm_page **pps, int *npages,
* => return state:
* 1. we return the VM_PAGER status code of the pageout
* 2. we return with the page queues unlocked
- * 3. if (uobj != NULL) [!swap_backed] we return with
- * uobj locked _only_ if PGO_PDFREECLUST is set
- * AND result != VM_PAGER_PEND. in all other cases
- * we return with uobj unlocked. [this is a hack
- * that allows the pagedaemon to save one lock/unlock
- * pair in the !swap_backed case since we have to
- * lock the uobj to drop the cluster anyway]
- * 4. on errors we always drop the cluster. thus, if we return
+ * 3. on errors we always drop the cluster. thus, if we return
* !PEND, !OK, then the caller only has to worry about
* un-busying the main page (not the cluster pages).
- * 5. on success, if !PGO_PDFREECLUST, we return the cluster
+ * 4. on success, if !PGO_PDFREECLUST, we return the cluster
* with all pages busy (caller must un-busy and check
* wanted/released flags).
*/
@@ -538,14 +531,10 @@ uvm_pager_put(struct uvm_object *uobj, struct vm_page *pg,
ReTry:
if (uobj) {
- /* object is locked */
result = uobj->pgops->pgo_put(uobj, ppsp, *npages, flags);
- /* object is now unlocked */
} else {
- /* nothing locked */
/* XXX daddr64_t -> int */
result = uvm_swap_put(swblk, ppsp, *npages, flags);
- /* nothing locked */
}
/*
@@ -564,14 +553,11 @@ ReTry:
if (result == VM_PAGER_PEND || result == VM_PAGER_OK) {
if (result == VM_PAGER_OK && (flags & PGO_PDFREECLUST)) {
/*
- * drop cluster and relock object (only if I/O is
- * not pending)
+ * drop cluster
*/
if (*npages > 1 || pg == NULL)
uvm_pager_dropcluster(uobj, pg, ppsp, npages,
PGO_PDFREECLUST);
- /* if (uobj): object still locked, as per
- * return-state item #3 */
}
return (result);
}
@@ -651,9 +637,7 @@ ReTry:
* got an error, or, if PGO_PDFREECLUST we are un-busying the
* cluster pages on behalf of the pagedaemon).
*
- * => uobj, if non-null, is a non-swap-backed object that is
- * locked by the caller. we return with this object still
- * locked.
+ * => uobj, if non-null, is a non-swap-backed object
* => page queues are not locked
* => pg is our page of interest (the one we clustered around, can be null)
* => ppsp/npages is our current cluster
@@ -682,13 +666,9 @@ uvm_pager_dropcluster(struct uvm_object *uobj, struct vm_page *pg,
continue;
/*
- * if swap-backed, gain lock on object that owns page. note
- * that PQ_ANON bit can't change as long as we are holding
+ * Note that PQ_ANON bit can't change as long as we are holding
* the PG_BUSY bit (so there is no need to lock the page
* queues to test it).
- *
- * once we have the lock, dispose of the pointer to swap, if
- * requested
*/
if (!uobj) {
if (ppsp[lcv]->pg_flags & PQ_ANON) {
@@ -704,7 +684,6 @@ uvm_pager_dropcluster(struct uvm_object *uobj, struct vm_page *pg,
/* did someone want the page while we had it busy-locked? */
if (ppsp[lcv]->pg_flags & PG_WANTED) {
- /* still holding obj lock */
wakeup(ppsp[lcv]);
}
@@ -757,7 +736,7 @@ uvm_aio_biodone(struct buf *bp)
/* reset b_iodone for when this is a single-buf i/o. */
bp->b_iodone = uvm_aio_aiodone;
- mtx_enter(&uvm.aiodoned_lock); /* locks uvm.aio_done */
+ mtx_enter(&uvm.aiodoned_lock);
TAILQ_INSERT_TAIL(&uvm.aio_done, bp, b_freelist);
wakeup(&uvm.aiodoned);
mtx_leave(&uvm.aiodoned_lock);
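
uvm_aio_biodone() above queues completed buffers on uvm.aio_done under aiodoned_lock and wakes the aiodone daemon. A hypothetical sketch of the consumer side (not the actual daemon code), using only fields visible in this diff:

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/buf.h>
#include <uvm/uvm.h>

/*
 * Hypothetical consumer: pull completed buffers off uvm.aio_done
 * under aiodoned_lock and finish them outside the lock.
 */
static void
example_drain_aiodone(void)
{
        struct buf *bp;

        mtx_enter(&uvm.aiodoned_lock);
        while ((bp = TAILQ_FIRST(&uvm.aio_done)) != NULL) {
                TAILQ_REMOVE(&uvm.aio_done, bp, b_freelist);
                mtx_leave(&uvm.aiodoned_lock);
                (*bp->b_iodone)(bp);            /* e.g. uvm_aio_aiodone() */
                mtx_enter(&uvm.aiodoned_lock);
        }
        mtx_leave(&uvm.aiodoned_lock);
}
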
diff --git a/sys/uvm/uvm_pdaemon.c b/sys/uvm/uvm_pdaemon.c
index aadaa598b72..97b48cf0094 100644
--- a/sys/uvm/uvm_pdaemon.c
+++ b/sys/uvm/uvm_pdaemon.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_pdaemon.c,v 1.63 2013/05/30 15:17:59 tedu Exp $ */
+/* $OpenBSD: uvm_pdaemon.c,v 1.64 2013/05/30 16:29:46 tedu Exp $ */
/* $NetBSD: uvm_pdaemon.c,v 1.23 2000/08/20 10:24:14 bjh21 Exp $ */
/*
@@ -446,13 +446,6 @@ uvmpd_scan_inactive(struct pglist *pglst)
}
/*
- * first we attempt to lock the object that this page
- * belongs to. if our attempt fails we skip on to
- * the next page (no harm done). it is important to
- * "try" locking the object as we are locking in the
- * wrong order (pageq -> object) and we don't want to
- * deadlock.
- *
* the only time we expect to see an ownerless page
* (i.e. a page with no uobject and !PQ_ANON) is if an
* anon has loaned a page from a uvm_object and the
@@ -496,7 +489,7 @@ uvmpd_scan_inactive(struct pglist *pglst)
}
/*
- * we now have the object and the page queues locked.
+ * we now have the page queues locked.
* the page is not busy. if the page is clean we
* can free it now and continue.
*/
@@ -698,19 +691,14 @@ uvmpd_scan_inactive(struct pglist *pglst)
* with cluster pages at this level.
*
* note locking semantics of uvm_pager_put with PGO_PDFREECLUST:
- * IN: locked: uobj (if !swap_backed), page queues
- * OUT: locked: uobj (if !swap_backed && result !=VM_PAGER_PEND)
- * !locked: pageqs, uobj (if swap_backed || VM_PAGER_PEND)
- *
- * [the bit about VM_PAGER_PEND saves us one lock-unlock pair]
+ * IN: locked: page queues
+ * OUT: locked:
+ * !locked: pageqs
*/
- /* locked: uobj (if !swap_backed), page queues */
uvmexp.pdpageouts++;
result = uvm_pager_put(swap_backed ? NULL : uobj, p,
&ppsp, &npages, PGO_ALLPAGES|PGO_PDFREECLUST, start, 0);
- /* locked: uobj (if !swap_backed && result != PEND) */
- /* unlocked: pageqs, object (if swap_backed ||result == PEND) */
/*
* if we did i/o to swap, zero swslot to indicate that we are
@@ -793,7 +781,6 @@ uvmpd_scan_inactive(struct pglist *pglst)
/* handle PG_WANTED now */
if (p->pg_flags & PG_WANTED)
- /* still holding object lock */
wakeup(p);
atomic_clearbits_int(&p->pg_flags, PG_BUSY|PG_WANTED);
@@ -949,11 +936,8 @@ uvmpd_scan(void)
p = nextpg) {
nextpg = TAILQ_NEXT(p, pageq);
if (p->pg_flags & PG_BUSY)
- continue; /* quick check before trying to lock */
+ continue;
- /*
- * lock the page's owner.
- */
/* is page anon owned or ownerless? */
if ((p->pg_flags & PQ_ANON) || p->uobject == NULL) {
KASSERT(p->uanon != NULL);
diff --git a/sys/uvm/uvm_vnode.c b/sys/uvm/uvm_vnode.c
index 36691f59906..54d92263a86 100644
--- a/sys/uvm/uvm_vnode.c
+++ b/sys/uvm/uvm_vnode.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_vnode.c,v 1.77 2013/05/30 15:17:59 tedu Exp $ */
+/* $OpenBSD: uvm_vnode.c,v 1.78 2013/05/30 16:29:46 tedu Exp $ */
/* $NetBSD: uvm_vnode.c,v 1.36 2000/11/24 20:34:01 chs Exp $ */
/*
@@ -138,7 +138,6 @@ uvn_init(void)
* the "accessprot" tells the max access the attaching thread wants to
* our pages.
*
- * => caller must _not_ already be holding the lock on the uvm_object.
* => in fact, nothing should be locked so that we can sleep here.
* => note that uvm_object is first thing in vnode structure, so their
* pointers are equiv.
@@ -173,7 +172,7 @@ uvn_attach(void *arg, vm_prot_t accessprot)
}
/*
- * now we have lock and uvn must not be in a blocked state.
+ * now uvn must not be in a blocked state.
* first check to see if it is already active, in which case
* we can bump the reference count, check to see if we need to
* add it to the writeable list, and then return.
@@ -194,7 +193,6 @@ uvn_attach(void *arg, vm_prot_t accessprot)
uvn->u_flags |= UVM_VNODE_WRITEABLE;
}
- /* unlock and return */
return (&uvn->u_obj);
}
@@ -230,8 +228,6 @@ uvn_attach(void *arg, vm_prot_t accessprot)
used_vnode_size = vattr.va_size;
}
- /* relock object */
-
if (result != 0) {
if (uvn->u_flags & UVM_VNODE_WANTED)
wakeup(uvn);
@@ -281,9 +277,8 @@ uvn_attach(void *arg, vm_prot_t accessprot)
*
* duplicate a reference to a VM object. Note that the reference
* count must already be at least one (the passed in reference) so
- * there is no chance of the uvn being killed or locked out here.
+ * there is no chance of the uvn being killed out here.
*
- * => caller must call with object unlocked.
* => caller must be using the same accessprot as was used at attach time
*/
@@ -310,7 +305,7 @@ uvn_reference(struct uvm_object *uobj)
*
* remove a reference to a VM object.
*
- * => caller must call with object unlocked and map locked.
+ * => caller must call with map locked.
* => this starts the detach process, but doesn't have to finish it
* (async i/o could still be pending).
*/
@@ -446,7 +441,7 @@ uvm_vnp_terminate(struct vnode *vp)
int oldflags;
/*
- * lock object and check if it is valid
+ * check if it is valid
*/
if ((uvn->u_flags & UVM_VNODE_VALID) == 0) {
return;
@@ -483,8 +478,7 @@ uvm_vnp_terminate(struct vnode *vp)
/*
* block the uvn by setting the dying flag, and then flush the
- * pages. (note that flush may unlock object while doing I/O, but
- * it will re-lock it before it returns control here).
+ * pages.
*
* also, note that we tell I/O that we are already VOP_LOCK'd so
* that uvn_io doesn't attempt to VOP_LOCK again.
@@ -557,9 +551,7 @@ uvm_vnp_terminate(struct vnode *vp)
}
if (oldflags & UVM_VNODE_WANTED)
- wakeup(uvn); /* object lock still held */
-
-
+ wakeup(uvn);
}
/*
@@ -591,7 +583,6 @@ uvm_vnp_terminate(struct vnode *vp)
* => called with pageq's locked by the daemon.
*
* general outline:
- * - "try" to lock object. if fail, just return (will try again later)
* - drop "u_nio" (this req is done!)
* - if (object->iosync && u_naio == 0) { wakeup &uvn->u_naio }
* - get "page" structures (atop?).
@@ -603,14 +594,10 @@ uvm_vnp_terminate(struct vnode *vp)
/*
* uvn_flush: flush pages out of a uvm object.
*
- * => object should be locked by caller. we may _unlock_ the object
- * if (and only if) we need to clean a page (PGO_CLEANIT).
- * we return with the object locked.
* => if PGO_CLEANIT is set, we may block (due to I/O). thus, a caller
* might want to unlock higher level resources (e.g. vm_map)
* before calling flush.
- * => if PGO_CLEANIT is not set, then we will neither unlock the object
- * or block.
+ * => if PGO_CLEANIT is not set, then we will not block
* => if PGO_ALLPAGE is set, then all pages in the object are valid targets
* for flushing.
* => NOTE: we are allowed to lock the page queues, so the caller
@@ -680,8 +667,8 @@ uvn_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
}
ppsp = NULL; /* XXX: shut up gcc */
- uvm_lock_pageq(); /* page queues locked */
- /* locked: both page queues and uobj */
+ uvm_lock_pageq();
+ /* locked: both page queues */
for (curoff = start; curoff < stop; curoff += PAGE_SIZE) {
if ((pp = uvm_pagelookup(uobj, curoff)) == NULL)
continue;
@@ -749,12 +736,12 @@ uvn_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
}
/*
- * pp points to a page in the locked object that we are
+ * pp points to a page in the object that we are
* working on. if it is !PG_CLEAN,!PG_BUSY and we asked
* for cleaning (PGO_CLEANIT). we clean it now.
*
* let uvm_pager_put attempted a clustered page out.
- * note: locked: uobj and page queues.
+ * note: locked: page queues.
*/
atomic_setbits_int(&pp->pg_flags, PG_BUSY);
@@ -767,10 +754,8 @@ ReTry:
ppsp = pps;
npages = sizeof(pps) / sizeof(struct vm_page *);
- /* locked: page queues, uobj */
result = uvm_pager_put(uobj, pp, &ppsp, &npages,
flags | PGO_DOACTCLUST, start, stop);
- /* unlocked: page queues, uobj */
/*
* if we did an async I/O it is remotely possible for the
@@ -779,8 +764,6 @@ ReTry:
* we only touch it when it won't be freed, RELEASED took care
* of the rest.
*/
-
- /* relock! */
uvm_lock_pageq();
/*
@@ -793,8 +776,8 @@ ReTry:
if (result == VM_PAGER_AGAIN) {
/*
* it is unlikely, but page could have been released
- * while we had the object lock dropped. we ignore
- * this now and retry the I/O. we will detect and
+ * during the pageout. we ignore this now and retry the
+ * I/O. we will detect and
* handle the released page after the syncio I/O
* completes.
*/
@@ -834,8 +817,7 @@ ReTry:
ptmp = ppsp[lcv];
/*
- * verify the page didn't get moved while obj was
- * unlocked
+ * verify the page didn't get moved
*/
if (result == VM_PAGER_PEND && ptmp->uobject != uobj)
continue;
@@ -843,13 +825,12 @@ ReTry:
/*
* unbusy the page if I/O is done. note that for
* pending I/O it is possible that the I/O op
- * finished before we relocked the object (in
- * which case the page is no longer busy).
+ * finished (in which case the page is
+ * no longer busy).
*/
if (result != VM_PAGER_PEND) {
if (ptmp->pg_flags & PG_WANTED)
- /* still holding object lock */
wakeup(ptmp);
atomic_clearbits_int(&ptmp->pg_flags,
@@ -910,7 +891,6 @@ ReTry:
uvn->u_flags &= ~(UVM_VNODE_IOSYNC|UVM_VNODE_IOSYNCWANTED);
}
- /* return, with object locked! */
return(retval);
}
@@ -920,8 +900,6 @@ ReTry:
* we are about to do I/O in an object at offset. this function is called
* to establish a range of offsets around "offset" in which we can cluster
* I/O.
- *
- * - currently doesn't matter if obj locked or not.
*/
void
@@ -948,7 +926,6 @@ uvn_cluster(struct uvm_object *uobj, voff_t offset, voff_t *loffset,
* uvn_put: flush page data to backing store.
*
* => prefer map unlocked (not required)
- * => object must be locked! we will _unlock_ it before starting I/O.
* => flags: PGO_SYNCIO -- use sync. I/O
* => note: caller must set PG_CLEAN and pmap_clear_modify (if needed)
* => XXX: currently we use VOP_READ/VOP_WRITE which are only sync.
@@ -960,9 +937,7 @@ uvn_put(struct uvm_object *uobj, struct vm_page **pps, int npages, int flags)
{
int retval;
- /* note: object locked */
retval = uvn_io((struct uvm_vnode*)uobj, pps, npages, flags, UIO_WRITE);
- /* note: object unlocked */
return(retval);
}
@@ -972,7 +947,6 @@ uvn_put(struct uvm_object *uobj, struct vm_page **pps, int npages, int flags)
* uvn_get: get pages (synchronously) from backing store
*
* => prefer map unlocked (not required)
- * => object must be locked! we will _unlock_ it before starting any I/O.
* => flags: PGO_ALLPAGES: get all of the pages
* PGO_LOCKED: fault data structures are locked
* => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
@@ -1029,7 +1003,7 @@ uvn_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
}
/*
- * useful page: busy/lock it and plug it in our
+ * useful page: busy it and plug it in our
* result array
*/
atomic_setbits_int(&ptmp->pg_flags, PG_BUSY);
@@ -1060,13 +1034,12 @@ uvn_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
if (done)
return(VM_PAGER_OK); /* bingo! */
else
- /* EEK! Need to unlock and I/O */
return(VM_PAGER_UNLOCK);
}
/*
* step 2: get non-resident or busy pages.
- * object is locked. data structures are unlocked.
+ * data structures are unlocked.
*
* XXX: because we can't do async I/O at this level we get things
* page at a time (otherwise we'd chunk). the VOP_READ() will do
@@ -1154,23 +1127,19 @@ uvn_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
/*
* we have a "fake/busy/clean" page that we just allocated. do
- * I/O to fill it with valid data. note that object must be
- * locked going into uvn_io, but will be unlocked afterwards.
+ * I/O to fill it with valid data.
*/
result = uvn_io((struct uvm_vnode *) uobj, &ptmp, 1,
PGO_SYNCIO, UIO_READ);
/*
- * I/O done. object is unlocked (by uvn_io). because we used
- * syncio the result can not be PEND or AGAIN. we must relock
- * and check for errors.
+ * I/O done. because we used syncio the result can not be
+ * PEND or AGAIN.
*/
- /* lock object. check for errors. */
if (result != VM_PAGER_OK) {
if (ptmp->pg_flags & PG_WANTED)
- /* object lock still held */
wakeup(ptmp);
atomic_clearbits_int(&ptmp->pg_flags,
@@ -1200,10 +1169,6 @@ uvn_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
} /* lcv loop */
- /*
- * finally, unlock object and return.
- */
-
return (VM_PAGER_OK);
}
@@ -1211,7 +1176,6 @@ uvn_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
* uvn_io: do I/O to a vnode
*
* => prefer map unlocked (not required)
- * => object must be locked! we will _unlock_ it before starting I/O.
* => flags: PGO_SYNCIO -- use sync. I/O
* => XXX: currently we use VOP_READ/VOP_WRITE which are only sync.
* [thus we never do async i/o! see iodone comment]
@@ -1276,7 +1240,6 @@ uvn_io(struct uvm_vnode *uvn, vm_page_t *pps, int npages, int flags, int rw)
*/
uvn->u_nio++; /* we have an I/O in progress! */
- /* NOTE: object now unlocked */
if (kva == 0)
kva = uvm_pagermapin(pps, npages,
mapinflags | UVMPAGER_MAPIN_WAITOK);
@@ -1323,7 +1286,6 @@ uvn_io(struct uvm_vnode *uvn, vm_page_t *pps, int npages, int flags, int rw)
if (result == 0) {
/* NOTE: vnode now locked! */
-
if (rw == UIO_READ)
result = VOP_READ(vn, &uio, 0, curproc->p_ucred);
else
@@ -1361,18 +1323,14 @@ uvn_io(struct uvm_vnode *uvn, vm_page_t *pps, int npages, int flags, int rw)
* now clean up the object (i.e. drop I/O count)
*/
- /* NOTE: object now locked! */
-
uvn->u_nio--; /* I/O DONE! */
if ((uvn->u_flags & UVM_VNODE_IOSYNC) != 0 && uvn->u_nio == 0) {
wakeup(&uvn->u_nio);
}
- /* NOTE: object now unlocked! */
/*
* done!
*/
-
if (result == 0)
return(VM_PAGER_OK);
else