author     Ted Unangst <tedu@cvs.openbsd.org>    2014-04-13 23:14:16 +0000
committer  Ted Unangst <tedu@cvs.openbsd.org>    2014-04-13 23:14:16 +0000
commit     8c056ce52ed8926ad8c2f8c92d2f8fdfaaf5e2d9 (patch)
tree       63ebc99133093232a5ed420c9b08e8c960fcd642 /sys
parent     f7270b4ff1a063635837af701d72a8ec3b6c5b65 (diff)
compress code by turning four line comments into one line comments.
emphatic ok usual suspects, grudging ok miod
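The change is mechanical: block comments that carry a single short sentence are collapsed onto one line, with no change in code behavior. For example, in uvm_addr.c the four-line form

    /*
     * Calculate lowest address.
     */
    low_addr += before_gap;

becomes

    /* Calculate lowest address. */
    low_addr += before_gap;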
Diffstat (limited to 'sys')
-rw-r--r--  sys/uvm/uvm_addr.c       60
-rw-r--r--  sys/uvm/uvm_amap.c      125
-rw-r--r--  sys/uvm/uvm_anon.c       36
-rw-r--r--  sys/uvm/uvm_aobj.c      189
-rw-r--r--  sys/uvm/uvm_device.c     81
-rw-r--r--  sys/uvm/uvm_fault.c     312
-rw-r--r--  sys/uvm/uvm_glue.c        4
-rw-r--r--  sys/uvm/uvm_init.c       22
-rw-r--r--  sys/uvm/uvm_io.c         29
-rw-r--r--  sys/uvm/uvm_km.c         85
-rw-r--r--  sys/uvm/uvm_map.c       403
-rw-r--r--  sys/uvm/uvm_meter.c       9
-rw-r--r--  sys/uvm/uvm_mmap.c      117
-rw-r--r--  sys/uvm/uvm_page.c      118
-rw-r--r--  sys/uvm/uvm_pager.c      57
-rw-r--r--  sys/uvm/uvm_pdaemon.c    90
-rw-r--r--  sys/uvm/uvm_pmemrange.c  41
-rw-r--r--  sys/uvm/uvm_swap.c      116
-rw-r--r--  sys/uvm/uvm_unix.c       31
-rw-r--r--  sys/uvm/uvm_user.c        3
-rw-r--r--  sys/uvm/uvm_vnode.c     171
21 files changed, 393 insertions, 1706 deletions
diff --git a/sys/uvm/uvm_addr.c b/sys/uvm/uvm_addr.c
index 9d7ac06feb5..f4395b99bed 100644
--- a/sys/uvm/uvm_addr.c
+++ b/sys/uvm/uvm_addr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_addr.c,v 1.6 2014/02/04 01:04:03 tedu Exp $ */
+/* $OpenBSD: uvm_addr.c,v 1.7 2014/04/13 23:14:15 tedu Exp $ */
/*
* Copyright (c) 2011 Ariane van der Steldt <ariane@stack.nl>
@@ -70,9 +70,7 @@ struct uaddr_rnd_state {
TAILQ_HEAD(, vm_map_entry) ur_free;
};
-/*
- * Definition of a pivot in pivot selector.
- */
+/* Definition of a pivot in pivot selector. */
struct uaddr_pivot {
vaddr_t addr; /* End of prev. allocation. */
int expire;/* Best before date. */
@@ -106,11 +104,7 @@ uvm_mapent_fspace_cmp(struct vm_map_entry *e1, struct vm_map_entry *e2)
extern const struct uvm_addr_functions uaddr_kernel_functions;
struct uvm_addr_state uaddr_kbootstrap;
-
-/*
- * Support functions.
- */
-
+/* Support functions. */
#ifndef SMALL_KERNEL
struct vm_map_entry *uvm_addr_entrybyspace(struct uaddr_free_rbtree*,
vsize_t);
@@ -258,9 +252,7 @@ uvm_addr_fitspace(vaddr_t *min_result, vaddr_t *max_result,
if (fspace < sz + before_gap + after_gap)
return ENOMEM;
- /*
- * Calculate lowest address.
- */
+ /* Calculate lowest address. */
low_addr += before_gap;
low_addr = uvm_addr_align_forward(tmp = low_addr, align, offset);
if (low_addr < tmp) /* Overflow during alignment. */
@@ -268,9 +260,7 @@ uvm_addr_fitspace(vaddr_t *min_result, vaddr_t *max_result,
if (high_addr - after_gap - sz < low_addr)
return ENOMEM;
- /*
- * Calculate highest address.
- */
+ /* Calculate highest address. */
high_addr -= after_gap + sz;
high_addr = uvm_addr_align_backward(tmp = high_addr, align, offset);
if (high_addr > tmp) /* Overflow during alignment. */
@@ -369,9 +359,7 @@ uvm_addr_linsearch(struct vm_map *map, struct uvm_addr_state *uaddr,
(before_gap & PAGE_MASK) == 0 && (after_gap & PAGE_MASK) == 0);
KASSERT(high + sz > high); /* Check for overflow. */
- /*
- * Hint magic.
- */
+ /* Hint magic. */
if (hint == 0)
hint = (direction == 1 ? low : high);
else if (hint > high) {
@@ -492,7 +480,6 @@ uaddr_destroy(struct uvm_addr_state *uaddr)
* If hint is set, search will start at the hint position.
* Only searches forward.
*/
-
const struct uvm_addr_functions uaddr_lin_functions = {
.uaddr_select = &uaddr_lin_select,
.uaddr_destroy = &uaddr_destroy,
@@ -519,9 +506,7 @@ uaddr_lin_select(struct vm_map *map, struct uvm_addr_state *uaddr,
{
vaddr_t guard_sz;
- /*
- * Deal with guardpages: search for space with one extra page.
- */
+ /* Deal with guardpages: search for space with one extra page. */
guard_sz = ((map->flags & VM_MAP_GUARDPAGES) == 0 ? 0 : PAGE_SIZE);
if (uaddr->uaddr_maxaddr - uaddr->uaddr_minaddr < sz + guard_sz)
@@ -739,7 +724,6 @@ uaddr_rnd_print(struct uvm_addr_state *uaddr_p, boolean_t full,
*
* If no hint is given, the allocator refuses to allocate.
*/
-
const struct uvm_addr_functions uaddr_hint_functions = {
.uaddr_select = &uaddr_hint_select,
.uaddr_destroy = &uaddr_hint_destroy,
@@ -792,9 +776,7 @@ uaddr_hint_select(struct vm_map *map, struct uvm_addr_state *uaddr_param,
if (hint == 0)
return ENOMEM;
- /*
- * Calculate upper and lower bound for selected address.
- */
+ /* Calculate upper and lower bound for selected address. */
high = hint + uaddr->max_dist;
if (high < hint) /* overflow */
high = map->max_offset;
@@ -849,7 +831,6 @@ uaddr_hint_select(struct vm_map *map, struct uvm_addr_state *uaddr_param,
/*
* Kernel allocation bootstrap logic.
*/
-
const struct uvm_addr_functions uaddr_kernel_functions = {
.uaddr_select = &uaddr_kbootstrap_select,
.uaddr_destroy = &uaddr_kbootstrap_destroy,
@@ -971,9 +952,7 @@ uaddr_bestfit_select(struct vm_map *map, struct uvm_addr_state *uaddr_p,
if (entry == NULL)
return ENOMEM;
- /*
- * Walk the tree until we find an entry that fits.
- */
+ /* Walk the tree until we find an entry that fits. */
while (uvm_addr_fitspace(&min, &max,
VMMAP_FREE_START(entry), VMMAP_FREE_END(entry),
sz, align, offset, 0, guardsz) != 0) {
@@ -982,9 +961,7 @@ uaddr_bestfit_select(struct vm_map *map, struct uvm_addr_state *uaddr_p,
return ENOMEM;
}
- /*
- * Return the address that generates the least fragmentation.
- */
+ /* Return the address that generates the least fragmentation. */
*entry_out = entry;
*addr_out = (min - VMMAP_FREE_START(entry) <=
VMMAP_FREE_END(entry) - guardsz - sz - max ?
@@ -1257,9 +1234,7 @@ uaddr_pivot_select(struct vm_map *map, struct uvm_addr_state *uaddr_p,
if (pivot->addr == 0 || pivot->entry == NULL || pivot->expire == 0)
goto expired; /* Pivot is invalid (null or expired). */
- /*
- * Attempt to use the pivot to map the entry.
- */
+ /* Attempt to use the pivot to map the entry. */
entry = pivot->entry;
if (pivot->dir > 0) {
if (uvm_addr_fitspace(&min, &max,
@@ -1472,7 +1447,6 @@ struct uaddr_bs_strat {
* select which one (stack or brk area) to try. If the allocation fails,
* the other one is tested.
*/
-
const struct uvm_addr_functions uaddr_stack_brk_functions = {
.uaddr_select = &uaddr_stack_brk_select,
.uaddr_destroy = &uaddr_destroy,
@@ -1511,9 +1485,7 @@ uaddr_stack_brk_select(struct vm_map *map, struct uvm_addr_state *uaddr,
stack_idx = 1;
}
- /*
- * Set up stack search strategy.
- */
+ /* Set up stack search strategy. */
s = &strat[stack_idx];
s->start = MAX(map->s_start, uaddr->uaddr_minaddr);
s->end = MIN(map->s_end, uaddr->uaddr_maxaddr);
@@ -1523,17 +1495,13 @@ uaddr_stack_brk_select(struct vm_map *map, struct uvm_addr_state *uaddr,
s->dir = 1;
#endif
- /*
- * Set up brk search strategy.
- */
+ /* Set up brk search strategy. */
s = &strat[brk_idx];
s->start = MAX(map->b_start, uaddr->uaddr_minaddr);
s->end = MIN(map->b_end, uaddr->uaddr_maxaddr);
s->dir = -1; /* Opposite of brk() growth. */
- /*
- * Linear search for space.
- */
+ /* Linear search for space. */
for (s = &strat[0]; s < &strat[nitems(strat)]; s++) {
if (s->end - s->start < sz)
continue;
diff --git a/sys/uvm/uvm_amap.c b/sys/uvm/uvm_amap.c
index dd525d73c7a..95f6db01b2d 100644
--- a/sys/uvm/uvm_amap.c
+++ b/sys/uvm/uvm_amap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_amap.c,v 1.50 2013/05/30 16:39:26 tedu Exp $ */
+/* $OpenBSD: uvm_amap.c,v 1.51 2014/04/13 23:14:15 tedu Exp $ */
/* $NetBSD: uvm_amap.c,v 1.27 2000/11/25 06:27:59 chs Exp $ */
/*
@@ -35,9 +35,7 @@
/*
* uvm_amap.c: amap operations
- */
-
-/*
+ *
* this file contains functions that perform operations on amaps. see
* uvm_amap.h for a brief explanation of the role of amaps in uvm.
*/
@@ -160,9 +158,7 @@ pp_setreflen(int *ppref, int offset, int ref, int len)
void
amap_init(void)
{
- /*
- * Initialize the vm_amap pool.
- */
+ /* Initialize the vm_amap pool. */
pool_init(&uvm_amap_pool, sizeof(struct vm_amap), 0, 0, 0,
"amappl", &pool_allocator_nointr);
pool_sethiwat(&uvm_amap_pool, 4096);
@@ -217,7 +213,6 @@ fail1:
* => caller should ensure sz is a multiple of PAGE_SIZE
* => reference count to new amap is set to one
*/
-
struct vm_amap *
amap_alloc(vaddr_t sz, vaddr_t padsz, int waitf)
{
@@ -286,7 +281,6 @@ amap_extend(struct vm_map_entry *entry, vsize_t addsize)
* forget that ar_pageoff could be non-zero: this means that
* there are some unused slots before us in the amap.
*/
-
AMAP_B2SLOT(slotmapped, entry->end - entry->start); /* slots mapped */
AMAP_B2SLOT(slotadd, addsize); /* slots to add */
slotneed = slotoff + slotmapped + slotadd;
@@ -296,7 +290,6 @@ amap_extend(struct vm_map_entry *entry, vsize_t addsize)
* only need to bump the reference counts on the slots we are
* adding.
*/
-
if (amap->am_nslot >= slotneed) {
#ifdef UVM_AMAP_PPREF
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
@@ -310,7 +303,6 @@ amap_extend(struct vm_map_entry *entry, vsize_t addsize)
* case 2: we pre-allocated slots for use and we just need to
* bump nslot up to take account for these slots.
*/
-
if (amap->am_maxslot >= slotneed) {
#ifdef UVM_AMAP_PPREF
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
@@ -323,7 +315,6 @@ amap_extend(struct vm_map_entry *entry, vsize_t addsize)
}
#endif
amap->am_nslot = slotneed;
-
/*
* no need to zero am_anon since that was done at
* alloc time and we never shrink an allocation.
@@ -337,7 +328,6 @@ amap_extend(struct vm_map_entry *entry, vsize_t addsize)
*
* XXXCDC: could we take advantage of a kernel realloc()?
*/
-
if (slotneed >= UVM_AMAP_LARGE)
return E2BIG;
@@ -370,10 +360,7 @@ amap_extend(struct vm_map_entry *entry, vsize_t addsize)
sizeof(int));
KASSERT(amap->am_maxslot < slotneed);
- /*
- * now copy everything over to new malloc'd areas...
- */
-
+ /* now copy everything over to new malloc'd areas... */
slotadded = slotalloc - amap->am_nslot;
/* do am_slots */
@@ -481,9 +468,7 @@ amap_wipeout(struct vm_amap *amap)
KASSERT(amap->am_ref == 0);
if (__predict_false((amap->am_flags & AMAP_SWAPOFF) != 0)) {
- /*
- * amap_swap_off will call us again.
- */
+ /* amap_swap_off will call us again. */
return;
}
amap_list_remove(amap);
@@ -499,17 +484,12 @@ amap_wipeout(struct vm_amap *amap)
refs = --anon->an_ref;
if (refs == 0) {
- /*
- * we had the last reference to a vm_anon. free it.
- */
+ /* we had the last reference to a vm_anon. free it. */
uvm_anfree(anon);
}
}
- /*
- * now we free the map
- */
-
+ /* now we free the map */
amap->am_ref = 0; /* ... was one */
amap->am_nused = 0;
amap_free(amap); /* will free amap */
@@ -535,18 +515,13 @@ amap_copy(struct vm_map *map, struct vm_map_entry *entry, int waitf,
int slots, lcv;
vaddr_t chunksize;
- /*
- * is there a map to copy? if not, create one from scratch.
- */
-
+ /* is there a map to copy? if not, create one from scratch. */
if (entry->aref.ar_amap == NULL) {
-
/*
* check to see if we have a large amap that we can
* chunk. we align startva/endva to chunk-sized
* boundaries and then clip to them.
*/
-
if (canchunk && atop(entry->end - entry->start) >=
UVM_AMAP_LARGE) {
/* convert slots to bytes */
@@ -573,16 +548,12 @@ amap_copy(struct vm_map *map, struct vm_map_entry *entry, int waitf,
* just take it over rather than copying it. the value can only
* be one if we have the only reference to the amap
*/
-
if (entry->aref.ar_amap->am_ref == 1) {
entry->etype &= ~UVM_ET_NEEDSCOPY;
return;
}
- /*
- * looks like we need to copy the map.
- */
-
+ /* looks like we need to copy the map. */
AMAP_B2SLOT(slots, entry->end - entry->start);
amap = amap_alloc1(slots, 0, waitf);
if (amap == NULL)
@@ -595,7 +566,6 @@ amap_copy(struct vm_map *map, struct vm_map_entry *entry, int waitf,
* dropped down to one we take over the old map rather than
* copying the amap.
*/
-
if (srcamap->am_ref == 1) { /* take it over? */
entry->etype &= ~UVM_ET_NEEDSCOPY;
amap->am_ref--; /* drop final reference to map */
@@ -603,10 +573,7 @@ amap_copy(struct vm_map *map, struct vm_map_entry *entry, int waitf,
return;
}
- /*
- * we must copy it now.
- */
-
+ /* we must copy it now. */
for (lcv = 0 ; lcv < slots; lcv++) {
amap->am_anon[lcv] =
srcamap->am_anon[entry->aref.ar_pageoff + lcv];
@@ -626,7 +593,6 @@ amap_copy(struct vm_map *map, struct vm_map_entry *entry, int waitf,
* one (we checked above), so there is no way we could drop
* the count to zero. [and no need to worry about freeing it]
*/
-
srcamap->am_ref--;
if (srcamap->am_ref == 1 && (srcamap->am_flags & AMAP_SHARED) != 0)
srcamap->am_flags &= ~AMAP_SHARED; /* clear shared flag */
@@ -637,10 +603,7 @@ amap_copy(struct vm_map *map, struct vm_map_entry *entry, int waitf,
}
#endif
- /*
- * install new amap.
- */
-
+ /* install new amap. */
entry->aref.ar_pageoff = 0;
entry->aref.ar_amap = amap;
entry->etype &= ~UVM_ET_NEEDSCOPY;
@@ -679,22 +642,15 @@ amap_cow_now(struct vm_map *map, struct vm_map_entry *entry)
*/
ReStart:
for (lcv = 0 ; lcv < amap->am_nused ; lcv++) {
-
- /*
- * get the page
- */
-
+ /* get the page */
slot = amap->am_slots[lcv];
anon = amap->am_anon[slot];
pg = anon->an_page;
- /*
- * page must be resident since parent is wired
- */
-
+ /* page must be resident since parent is wired */
if (pg == NULL)
- panic("amap_cow_now: non-resident wired page in anon %p",
- anon);
+ panic("amap_cow_now: non-resident wired page"
+ " in anon %p", anon);
/*
* if the anon ref count is one and the page is not loaned,
@@ -705,9 +661,7 @@ ReStart:
* we only need to get involved when these are not true.
* [note: if loan_count == 0, then the anon must own the page]
*/
-
if (anon->an_ref > 1 && pg->loan_count == 0) {
-
/*
* if the page is busy then we have to wait for
* it and then restart.
@@ -718,9 +672,7 @@ ReStart:
goto ReStart;
}
- /*
- * ok, time to do a copy-on-write to a new anon
- */
+ /* ok, time to do a copy-on-write to a new anon */
nanon = uvm_analloc();
if (nanon) {
npg = uvm_pagealloc(NULL, 0, nanon, 0);
@@ -759,12 +711,7 @@ ReStart:
uvm_pageactivate(npg);
uvm_unlock_pageq();
}
-
- /*
- * done with this anon, next ...!
- */
-
- } /* end of 'for' loop */
+ }
}
/*
@@ -781,17 +728,12 @@ amap_splitref(struct vm_aref *origref, struct vm_aref *splitref, vaddr_t offset)
if (leftslots == 0)
panic("amap_splitref: split at zero offset");
- /*
- * now: we have a valid am_mapped array.
- */
-
+ /* now: we have a valid am_mapped array. */
if (origref->ar_amap->am_nslot - origref->ar_pageoff - leftslots <= 0)
panic("amap_splitref: map size check failed");
#ifdef UVM_AMAP_PPREF
- /*
- * establish ppref before we add a duplicate reference to the amap
- */
+ /* establish ppref before we add a duplicate reference to the amap */
if (origref->ar_amap->am_ppref == NULL)
amap_pp_establish(origref->ar_amap);
#endif
@@ -813,17 +755,13 @@ amap_pp_establish(struct vm_amap *amap)
amap->am_ppref = malloc(sizeof(int) * amap->am_maxslot,
M_UVMAMAP, M_NOWAIT|M_ZERO);
- /*
- * if we fail then we just won't use ppref for this amap
- */
+ /* if we fail then we just won't use ppref for this amap */
if (amap->am_ppref == NULL) {
amap->am_ppref = PPREF_NONE; /* not using it */
return;
}
- /*
- * init ppref
- */
+ /* init ppref */
pp_setreflen(amap->am_ppref, 0, amap->am_ref, amap->am_nslot);
}
@@ -847,7 +785,6 @@ amap_pp_adjref(struct vm_amap *amap, int curslot, vsize_t slotlen, int adjval)
* first advance to the correct place in the ppref array,
* fragment if needed.
*/
-
for (lcv = 0 ; lcv < curslot ; lcv += len) {
pp_getreflen(ppref, lcv, &ref, &len);
if (lcv + len > curslot) { /* goes past start? */
@@ -873,7 +810,6 @@ amap_pp_adjref(struct vm_amap *amap, int curslot, vsize_t slotlen, int adjval)
* now adjust reference counts in range. merge the first
* changed entry with the last unchanged entry if possible.
*/
-
if (lcv != curslot)
panic("amap_pp_adjref: overshot target");
@@ -913,7 +849,6 @@ amap_wiperange(struct vm_amap *amap, int slotoff, int slots)
* we can either traverse the amap by am_anon or by am_slots depending
* on which is cheaper. decide now.
*/
-
if (slots < amap->am_nused) {
byanon = TRUE;
lcv = slotoff;
@@ -942,9 +877,7 @@ amap_wiperange(struct vm_amap *amap, int slotoff, int slots)
}
anon = amap->am_anon[curslot];
- /*
- * remove it from the amap
- */
+ /* remove it from the amap */
amap->am_anon[curslot] = NULL;
ptr = amap->am_bckptr[curslot];
if (ptr != (amap->am_nused - 1)) {
@@ -955,9 +888,7 @@ amap_wiperange(struct vm_amap *amap, int slotoff, int slots)
}
amap->am_nused--;
- /*
- * drop anon reference count
- */
+ /* drop anon reference count */
refs = --anon->an_ref;
if (refs == 0) {
/*
@@ -1104,7 +1035,6 @@ amap_add(struct vm_aref *aref, vaddr_t offset, struct vm_anon *anon,
panic("amap_add: offset out of range");
if (replace) {
-
if (amap->am_anon[slot] == NULL)
panic("amap_add: replacing null anon");
if (amap->am_anon[slot]->an_page != NULL &&
@@ -1192,18 +1122,13 @@ void
amap_unref(struct vm_amap *amap, vaddr_t offset, vsize_t len, boolean_t all)
{
- /*
- * if we are the last reference, free the amap and return.
- */
-
+ /* if we are the last reference, free the amap and return. */
if (amap->am_ref-- == 1) {
amap_wipeout(amap); /* drops final ref and frees */
return;
}
- /*
- * otherwise just drop the reference count(s)
- */
+ /* otherwise just drop the reference count(s) */
if (amap->am_ref == 1 && (amap->am_flags & AMAP_SHARED) != 0)
amap->am_flags &= ~AMAP_SHARED; /* clear shared flag */
#ifdef UVM_AMAP_PPREF
diff --git a/sys/uvm/uvm_anon.c b/sys/uvm/uvm_anon.c
index 02e2132c1f3..5ad03a84e6e 100644
--- a/sys/uvm/uvm_anon.c
+++ b/sys/uvm/uvm_anon.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_anon.c,v 1.37 2013/05/30 16:29:46 tedu Exp $ */
+/* $OpenBSD: uvm_anon.c,v 1.38 2014/04/13 23:14:15 tedu Exp $ */
/* $NetBSD: uvm_anon.c,v 1.10 2000/11/25 06:27:59 chs Exp $ */
/*
@@ -89,10 +89,7 @@ uvm_anfree(struct vm_anon *anon)
{
struct vm_page *pg;
- /*
- * get page
- */
-
+ /* get page */
pg = anon->an_page;
/*
@@ -100,7 +97,6 @@ uvm_anfree(struct vm_anon *anon)
* own it. call out to uvm_anon_lockpage() to ensure the real owner
* of the page has been identified and locked.
*/
-
if (pg && pg->loan_count)
pg = uvm_anon_lockloanpg(anon);
@@ -108,14 +104,11 @@ uvm_anfree(struct vm_anon *anon)
* if we have a resident page, we must dispose of it before freeing
* the anon.
*/
-
if (pg) {
-
/*
* if the page is owned by a uobject, then we must
* kill the loan on the page rather than free it.
*/
-
if (pg->uobject) {
uvm_lock_pageq();
KASSERT(pg->loan_count > 0);
@@ -123,7 +116,6 @@ uvm_anfree(struct vm_anon *anon)
pg->uanon = NULL;
uvm_unlock_pageq();
} else {
-
/*
* page has no uobject, so we must be the owner of it.
*
@@ -132,7 +124,6 @@ uvm_anfree(struct vm_anon *anon)
* wake up). if the page is not busy then we can
* free it now.
*/
-
if ((pg->pg_flags & PG_BUSY) != 0) {
/* tell them to dump it when done */
atomic_setbits_int(&pg->pg_flags, PG_RELEASED);
@@ -150,9 +141,7 @@ uvm_anfree(struct vm_anon *anon)
uvmexp.swpgonly--;
}
- /*
- * free any swap resources.
- */
+ /* free any swap resources. */
uvm_anon_dropswap(anon);
/*
@@ -207,26 +196,17 @@ uvm_anon_lockloanpg(struct vm_anon *anon)
* result may cause us to do more work than we need to, but it will
* not produce an incorrect result.
*/
-
while (((pg = anon->an_page) != NULL) && pg->loan_count != 0) {
-
-
/*
* if page is un-owned [i.e. the object dropped its ownership],
* then we can take over as owner!
*/
-
if (pg->uobject == NULL && (pg->pg_flags & PQ_ANON) == 0) {
uvm_lock_pageq();
atomic_setbits_int(&pg->pg_flags, PQ_ANON);
pg->loan_count--; /* ... and drop our loan */
uvm_unlock_pageq();
}
-
- /*
- * we did it! break the loop
- */
-
break;
}
return(pg);
@@ -250,18 +230,14 @@ uvm_anon_pagein(struct vm_anon *anon)
switch (rv) {
case VM_PAGER_OK:
break;
-
case VM_PAGER_ERROR:
case VM_PAGER_REFAULT:
-
/*
* nothing more to do on errors.
* VM_PAGER_REFAULT can only mean that the anon was freed,
* so again there's nothing to do.
*/
-
return FALSE;
-
default:
#ifdef DIAGNOSTIC
panic("anon_pagein: uvmfault_anonget -> %d", rv);
@@ -274,17 +250,13 @@ uvm_anon_pagein(struct vm_anon *anon)
* ok, we've got the page now.
* mark it as dirty, clear its swslot and un-busy it.
*/
-
pg = anon->an_page;
uobj = pg->uobject;
uvm_swap_free(anon->an_swslot, 1);
anon->an_swslot = 0;
atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
- /*
- * deactivate the page (to put it on a page queue)
- */
-
+ /* deactivate the page (to put it on a page queue) */
pmap_clear_reference(pg);
pmap_page_protect(pg, VM_PROT_NONE);
uvm_lock_pageq();
diff --git a/sys/uvm/uvm_aobj.c b/sys/uvm/uvm_aobj.c
index 14768657ff8..5e900b6fdf0 100644
--- a/sys/uvm/uvm_aobj.c
+++ b/sys/uvm/uvm_aobj.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_aobj.c,v 1.60 2013/12/09 08:24:29 espie Exp $ */
+/* $OpenBSD: uvm_aobj.c,v 1.61 2014/04/13 23:14:15 tedu Exp $ */
/* $NetBSD: uvm_aobj.c,v 1.39 2001/02/18 21:19:08 chs Exp $ */
/*
@@ -71,7 +71,6 @@
* of UAO_SWHASH_CLUSTER_SIZE pages. we require the cluster size to
* be a power of two.
*/
-
#define UAO_SWHASH_CLUSTER_SHIFT 4
#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)
@@ -115,7 +114,6 @@
* uao_swhash_elt: when a hash table is being used, this structure defines
* the format of an entry in the bucket list.
*/
-
struct uao_swhash_elt {
LIST_ENTRY(uao_swhash_elt) list; /* the hash list */
voff_t tag; /* our 'tag' */
@@ -126,13 +124,11 @@ struct uao_swhash_elt {
/*
* uao_swhash: the swap hash table structure
*/
-
LIST_HEAD(uao_swhash, uao_swhash_elt);
/*
* uao_swhash_elt_pool: pool of uao_swhash_elt structures
*/
-
struct pool uao_swhash_elt_pool;
/*
@@ -142,7 +138,6 @@ struct pool uao_swhash_elt_pool;
* (struct uvm_aobj *) == (struct uvm_object *)
* => only one of u_swslots and u_swhash is used in any given aobj
*/
-
struct uvm_aobj {
struct uvm_object u_obj; /* has: pgops, memt, #pages, #refs */
int u_pages; /* number of pages in entire object */
@@ -164,13 +159,11 @@ struct uvm_aobj {
/*
* uvm_aobj_pool: pool of uvm_aobj structures
*/
-
struct pool uvm_aobj_pool;
/*
* local functions
*/
-
static struct uao_swhash_elt *uao_find_swhash_elt(struct uvm_aobj *, int,
boolean_t);
static int uao_find_swslot(struct uvm_aobj *, int);
@@ -198,7 +191,6 @@ int uao_grow_convert(struct uvm_object *, int);
*
* note that some functions (e.g. put) are handled elsewhere
*/
-
struct uvm_pagerops aobj_pager = {
NULL, /* init */
uao_reference, /* reference */
@@ -216,7 +208,6 @@ struct uvm_pagerops aobj_pager = {
* ordering is reversed. In that case we must use trylocking to prevent
* deadlock.
*/
-
static LIST_HEAD(aobjlist, uvm_aobj) uao_list = LIST_HEAD_INITIALIZER(uao_list);
static struct mutex uao_list_lock = MUTEX_INITIALIZER(IPL_NONE);
@@ -224,16 +215,13 @@ static struct mutex uao_list_lock = MUTEX_INITIALIZER(IPL_NONE);
/*
* functions
*/
-
/*
* hash table/array related functions
*/
-
/*
* uao_find_swhash_elt: find (or create) a hash table entry for a page
* offset.
*/
-
static struct uao_swhash_elt *
uao_find_swhash_elt(struct uvm_aobj *aobj, int pageidx, boolean_t create)
{
@@ -244,9 +232,7 @@ uao_find_swhash_elt(struct uvm_aobj *aobj, int pageidx, boolean_t create)
swhash = UAO_SWHASH_HASH(aobj, pageidx); /* first hash to get bucket */
page_tag = UAO_SWHASH_ELT_TAG(pageidx); /* tag to search for */
- /*
- * now search the bucket for the requested tag
- */
+ /* now search the bucket for the requested tag */
LIST_FOREACH(elt, swhash, list) {
if (elt->tag == page_tag)
return(elt);
@@ -256,10 +242,7 @@ uao_find_swhash_elt(struct uvm_aobj *aobj, int pageidx, boolean_t create)
if (!create)
return NULL;
-
- /*
- * allocate a new entry for the bucket and init/insert it in
- */
+ /* allocate a new entry for the bucket and init/insert it in */
elt = pool_get(&uao_swhash_elt_pool, PR_WAITOK | PR_ZERO);
LIST_INSERT_HEAD(swhash, elt, list);
elt->tag = page_tag;
@@ -274,17 +257,11 @@ __inline static int
uao_find_swslot(struct uvm_aobj *aobj, int pageidx)
{
- /*
- * if noswap flag is set, then we never return a slot
- */
-
+ /* if noswap flag is set, then we never return a slot */
if (aobj->u_flags & UAO_FLAG_NOSWAP)
return(0);
- /*
- * if hashing, look in hash table.
- */
-
+ /* if hashing, look in hash table. */
if (aobj->u_pages > UAO_SWHASH_THRESHOLD) {
struct uao_swhash_elt *elt =
uao_find_swhash_elt(aobj, pageidx, FALSE);
@@ -295,9 +272,7 @@ uao_find_swslot(struct uvm_aobj *aobj, int pageidx)
return(0);
}
- /*
- * otherwise, look in the array
- */
+ /* otherwise, look in the array */
return(aobj->u_swslots[pageidx]);
}
@@ -312,32 +287,24 @@ uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot)
struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
int oldslot;
- /*
- * if noswap flag is set, then we can't set a slot
- */
-
+ /* if noswap flag is set, then we can't set a slot */
if (aobj->u_flags & UAO_FLAG_NOSWAP) {
-
if (slot == 0)
return(0); /* a clear is ok */
/* but a set is not */
printf("uao_set_swslot: uobj = %p\n", uobj);
- panic("uao_set_swslot: attempt to set a slot on a NOSWAP object");
+ panic("uao_set_swslot: attempt to set a slot"
+ " on a NOSWAP object");
}
- /*
- * are we using a hash table? if so, add it in the hash.
- */
-
+ /* are we using a hash table? if so, add it in the hash. */
if (aobj->u_pages > UAO_SWHASH_THRESHOLD) {
-
/*
* Avoid allocating an entry just to free it again if
* the page had not swap slot in the first place, and
* we are freeing.
*/
-
struct uao_swhash_elt *elt =
uao_find_swhash_elt(aobj, pageidx, slot ? TRUE : FALSE);
if (elt == NULL) {
@@ -352,7 +319,6 @@ uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot)
* now adjust the elt's reference counter and free it if we've
* dropped it to zero.
*/
-
/* an allocation? */
if (slot) {
if (oldslot == 0)
@@ -373,7 +339,6 @@ uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot)
}
return (oldslot);
}
-
/*
* end of hash/array functions
*/
@@ -409,7 +374,6 @@ uao_free(struct uvm_aobj *aobj)
continue;
}
uvm_swap_free(slot, 1);
-
/*
* this page is no longer
* only in swap.
@@ -425,16 +389,12 @@ uao_free(struct uvm_aobj *aobj)
} else {
int i;
- /*
- * free the array
- */
-
+ /* free the array */
for (i = 0; i < aobj->u_pages; i++) {
int slot = aobj->u_swslots[i];
if (slot) {
uvm_swap_free(slot, 1);
-
/* this page is no longer only in swap. */
uvmexp.swpgonly--;
}
@@ -442,9 +402,7 @@ uao_free(struct uvm_aobj *aobj)
free(aobj->u_swslots, M_UVMAOBJ);
}
- /*
- * finally free the aobj itself
- */
+ /* finally free the aobj itself */
pool_put(&uvm_aobj_pool, aobj);
}
@@ -482,7 +440,6 @@ uao_shrink_hash(struct uvm_object *uobj, int pages)
* If the size of the hash table doesn't change, all we need to do is
* to adjust the page count.
*/
-
if (UAO_SWHASH_BUCKETS(aobj->u_pages) == UAO_SWHASH_BUCKETS(pages)) {
aobj->u_pages = pages;
return 0;
@@ -499,7 +456,6 @@ uao_shrink_hash(struct uvm_object *uobj, int pages)
* Even though the hash table size is changing, the hash of the buckets
* we are interested in copying should not change.
*/
-
for (i = 0; i < UAO_SWHASH_BUCKETS(aobj->u_pages); i++)
LIST_FIRST(&new_swhash[i]) = LIST_FIRST(&aobj->u_swhash[i]);
@@ -526,10 +482,7 @@ uao_shrink_convert(struct uvm_object *uobj, int pages)
uao_shrink_flush(uobj, pages, aobj->u_pages);
- /*
- * Convert swap slots from hash to array.
- */
-
+ /* Convert swap slots from hash to array. */
for (i = 0; i < pages; i++) {
elt = uao_find_swhash_elt(aobj, i, FALSE);
if (elt != NULL) {
@@ -588,7 +541,6 @@ uao_shrink(struct uvm_object *uobj, int pages)
* 2. aobj uses array and array size needs to be adjusted.
* 3. aobj uses hash and hash size needs to be adjusted.
*/
-
if (pages > UAO_SWHASH_THRESHOLD)
return uao_shrink_hash(uobj, pages); /* case 3 */
else if (aobj->u_pages > UAO_SWHASH_THRESHOLD)
@@ -605,7 +557,6 @@ uao_shrink(struct uvm_object *uobj, int pages)
* the caller of these functions does not allow faults to happen in case of
* growth error.
*/
-
int
uao_grow_array(struct uvm_object *uobj, int pages)
{
@@ -645,7 +596,6 @@ uao_grow_hash(struct uvm_object *uobj, int pages)
* If the size of the hash table doesn't change, all we need to do is
* to adjust the page count.
*/
-
if (UAO_SWHASH_BUCKETS(aobj->u_pages) == UAO_SWHASH_BUCKETS(pages)) {
aobj->u_pages = pages;
return 0;
@@ -690,10 +640,7 @@ uao_grow_convert(struct uvm_object *uobj, int pages)
if (new_swhash == NULL)
return ENOMEM;
- /*
- * Set these now, so we can use uao_find_swhash_elt().
- */
-
+ /* Set these now, so we can use uao_find_swhash_elt(). */
old_swslots = aobj->u_swslots;
aobj->u_swhash = new_swhash;
aobj->u_swhashmask = new_hashmask;
@@ -725,7 +672,6 @@ uao_grow(struct uvm_object *uobj, int pages)
* 2. aobj uses array and array size needs to be adjusted.
* 3. aobj uses array and must be converted to hash.
*/
-
if (pages <= UAO_SWHASH_THRESHOLD)
return uao_grow_array(uobj, pages); /* case 2 */
else if (aobj->u_pages > UAO_SWHASH_THRESHOLD)
@@ -752,9 +698,7 @@ uao_create(vsize_t size, int flags)
int mflags;
struct uvm_aobj *aobj;
- /*
- * malloc a new aobj unless we are asked for the kernel object
- */
+ /* malloc a new aobj unless we are asked for the kernel object */
if (flags & UAO_FLAG_KERNOBJ) { /* want kernel object? */
if (kobj_alloced)
panic("uao_create: kernel object already allocated");
@@ -776,9 +720,7 @@ uao_create(vsize_t size, int flags)
refs = 1; /* normal object so 1 ref */
}
- /*
- * allocate hash/array if necessary
- */
+ /* allocate hash/array if necessary */
if (flags == 0 || (flags & (UAO_FLAG_KERNSWAP | UAO_FLAG_CANFAIL))) {
if (flags)
mflags = M_NOWAIT;
@@ -817,16 +759,11 @@ uao_create(vsize_t size, int flags)
uvm_objinit(&aobj->u_obj, &aobj_pager, refs);
- /*
- * now that aobj is ready, add it to the global list
- */
+ /* now that aobj is ready, add it to the global list */
mtx_enter(&uao_list_lock);
LIST_INSERT_HEAD(&uao_list, aobj, u_list);
mtx_leave(&uao_list_lock);
- /*
- * done!
- */
return(&aobj->u_obj);
}
@@ -873,10 +810,7 @@ void
uao_reference_locked(struct uvm_object *uobj)
{
- /*
- * kernel_object already has plenty of references, leave it alone.
- */
-
+ /* kernel_object already has plenty of references, leave it alone. */
if (UVM_OBJ_IS_KERN_OBJECT(uobj))
return;
@@ -905,9 +839,7 @@ uao_detach_locked(struct uvm_object *uobj)
struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
struct vm_page *pg;
- /*
- * detaching from kernel_object is a noop.
- */
+ /* detaching from kernel_object is a noop. */
if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
return;
}
@@ -917,9 +849,7 @@ uao_detach_locked(struct uvm_object *uobj)
return;
}
- /*
- * remove the aobj from the global list.
- */
+ /* remove the aobj from the global list. */
mtx_enter(&uao_list_lock);
LIST_REMOVE(aobj, u_list);
mtx_leave(&uao_list_lock);
@@ -944,9 +874,7 @@ uao_detach_locked(struct uvm_object *uobj)
}
uvm_unlock_pageq();
- /*
- * finally, free the rest.
- */
+ /* finally, free the rest. */
uao_free(aobj);
}
@@ -1039,7 +967,6 @@ uao_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
uvm_unlock_pageq();
continue;
-
case PGO_FREE:
/*
* If there are multiple references to
@@ -1062,7 +989,6 @@ uao_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
uvm_unlock_pageq();
continue;
-
default:
panic("uao_flush: weird flags");
}
@@ -1098,19 +1024,12 @@ uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
int lcv, gotpages, maxpages, swslot, rv, pageidx;
boolean_t done;
- /*
- * get number of pages
- */
+ /* get number of pages */
maxpages = *npagesp;
- /*
- * step 1: handled the case where fault data structures are locked.
- */
-
+ /* step 1: handled the case where fault data structures are locked. */
if (flags & PGO_LOCKED) {
- /*
- * step 1a: get pages that are already resident.
- */
+ /* step 1a: get pages that are already resident. */
done = TRUE; /* be optimistic */
gotpages = 0; /* # of pages we got so far */
@@ -1141,9 +1060,7 @@ uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
}
}
- /*
- * to be useful must get a non-busy page
- */
+ /* to be useful must get a non-busy page */
if (ptmp == NULL ||
(ptmp->pg_flags & PG_BUSY) != 0) {
if (lcv == centeridx ||
@@ -1163,13 +1080,12 @@ uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
pps[lcv] = ptmp;
gotpages++;
- } /* "for" lcv loop */
+ }
/*
* step 1b: now we've either done everything needed or we
* to unlock and do some waiting or I/O.
*/
-
*npagesp = gotpages;
if (done)
/* bingo! */
@@ -1183,15 +1099,12 @@ uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
* step 2: get non-resident or busy pages.
* data structures are unlocked.
*/
-
for (lcv = 0, current_offset = offset ; lcv < maxpages ;
lcv++, current_offset += PAGE_SIZE) {
-
/*
* - skip over pages we've already gotten or don't want
* - skip over pages we don't _have_ to get
*/
-
if (pps[lcv] != NULL ||
(lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
continue;
@@ -1277,25 +1190,16 @@ uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
*/
swslot = uao_find_swslot(aobj, pageidx);
- /*
- * just zero the page if there's nothing in swap.
- */
+ /* just zero the page if there's nothing in swap. */
if (swslot == 0) {
- /*
- * page hasn't existed before, just zero it.
- */
+ /* page hasn't existed before, just zero it. */
uvm_pagezero(ptmp);
} else {
- /*
- * page in the swapped-out page.
- */
+ /* page in the swapped-out page. */
rv = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
- /*
- * I/O done. check for errors.
- */
- if (rv != VM_PAGER_OK)
- {
+ /* I/O done. check for errors. */
+ if (rv != VM_PAGER_OK) {
if (ptmp->pg_flags & PG_WANTED)
wakeup(ptmp);
@@ -1344,7 +1248,6 @@ uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
/*
* uao_dropswap: release any swap resources from this aobj page.
*/
-
int
uao_dropswap(struct uvm_object *uobj, int pageidx)
{
@@ -1357,7 +1260,6 @@ uao_dropswap(struct uvm_object *uobj, int pageidx)
return (slot);
}
-
/*
* page in every page in every aobj that is paged-out to a range of swslots.
*
@@ -1368,10 +1270,7 @@ uao_swap_off(int startslot, int endslot)
{
struct uvm_aobj *aobj, *nextaobj, *prevaobj = NULL;
- /*
- * walk the list of all aobjs.
- */
-
+ /* walk the list of all aobjs. */
mtx_enter(&uao_list_lock);
for (aobj = LIST_FIRST(&uao_list);
@@ -1422,9 +1321,7 @@ uao_swap_off(int startslot, int endslot)
prevaobj = aobj;
}
- /*
- * done with traversal, unlock the list
- */
+ /* done with traversal, unlock the list */
mtx_leave(&uao_list_lock);
if (prevaobj) {
uao_detach_locked(&prevaobj->u_obj);
@@ -1432,7 +1329,6 @@ uao_swap_off(int startslot, int endslot)
return FALSE;
}
-
/*
* page in any pages from aobj in the given range.
*
@@ -1457,9 +1353,7 @@ restart:
for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
int slot = elt->slots[i];
- /*
- * if the slot isn't in range, skip it.
- */
+ /* if slot isn't in range, skip it. */
if (slot < startslot ||
slot >= endslot) {
continue;
@@ -1486,16 +1380,12 @@ restart:
for (i = 0; i < aobj->u_pages; i++) {
int slot = aobj->u_swslots[i];
- /*
- * if the slot isn't in range, skip it
- */
+ /* if the slot isn't in range, skip it */
if (slot < startslot || slot >= endslot) {
continue;
}
- /*
- * process the page.
- */
+ /* process the page. */
rv = uao_pagein_page(aobj, i);
if (rv) {
return rv;
@@ -1533,7 +1423,6 @@ uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
* so again there's nothing to do.
*/
return FALSE;
-
}
/*
@@ -1545,9 +1434,7 @@ uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_CLEAN|PG_FAKE);
UVM_PAGE_OWN(pg, NULL);
- /*
- * deactivate the page (to put it on a page queue).
- */
+ /* deactivate the page (to put it on a page queue). */
pmap_clear_reference(pg);
uvm_lock_pageq();
uvm_pagedeactivate(pg);
@@ -1565,7 +1452,6 @@ uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
* => aobj must be locked and is returned locked.
* => start is inclusive. end is exclusive.
*/
-
void
uao_dropswap_range(struct uvm_object *uobj, voff_t start, voff_t end)
{
@@ -1657,7 +1543,6 @@ uao_dropswap_range(struct uvm_object *uobj, voff_t start, voff_t end)
* adjust the counter of pages only in swap for all
* the swap slots we've freed.
*/
-
if (swpgonlydelta > 0) {
simple_lock(&uvm.swap_data_lock);
KASSERT(uvmexp.swpgonly >= swpgonlydelta);
diff --git a/sys/uvm/uvm_device.c b/sys/uvm/uvm_device.c
index 9a5d93dec71..0ec469c9692 100644
--- a/sys/uvm/uvm_device.c
+++ b/sys/uvm/uvm_device.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_device.c,v 1.44 2013/08/13 06:56:41 kettenis Exp $ */
+/* $OpenBSD: uvm_device.c,v 1.45 2014/04/13 23:14:15 tedu Exp $ */
/* $NetBSD: uvm_device.c,v 1.30 2000/11/25 06:27:59 chs Exp $ */
/*
@@ -67,7 +67,6 @@ struct mutex udv_lock = MUTEX_INITIALIZER(IPL_NONE);
/*
* functions
*/
-
static void udv_reference(struct uvm_object *);
static void udv_detach(struct uvm_object *);
static int udv_fault(struct uvm_faultinfo *, vaddr_t,
@@ -79,7 +78,6 @@ static boolean_t udv_flush(struct uvm_object *, voff_t, voff_t,
/*
* master pager structure
*/
-
struct uvm_pagerops uvm_deviceops = {
NULL, /* inited statically */
udv_reference,
@@ -89,11 +87,6 @@ struct uvm_pagerops uvm_deviceops = {
};
/*
- * the ops.
- */
-
-
-/*
* udv_attach
*
* get a VM object that is associated with a device. allocate a new
@@ -113,20 +106,14 @@ udv_attach(void *arg, vm_prot_t accessprot, voff_t off, vsize_t size)
struct uvm_object *obj;
#endif
- /*
- * before we do anything, ensure this device supports mmap
- */
-
+ /* before we do anything, ensure this device supports mmap */
mapfn = cdevsw[major(device)].d_mmap;
if (mapfn == NULL ||
mapfn == (paddr_t (*)(dev_t, off_t, int)) enodev ||
mapfn == (paddr_t (*)(dev_t, off_t, int)) nullop)
return(NULL);
- /*
- * Negative offsets on the object are not allowed.
- */
-
+ /* Negative offsets on the object are not allowed. */
if (off < 0)
return(NULL);
@@ -143,41 +130,28 @@ udv_attach(void *arg, vm_prot_t accessprot, voff_t off, vsize_t size)
* XXX assumes VM_PROT_* == PROT_*
* XXX clobbers off and size, but nothing else here needs them.
*/
-
while (size != 0) {
if ((*mapfn)(device, off, accessprot) == -1)
return (NULL);
off += PAGE_SIZE; size -= PAGE_SIZE;
}
- /*
- * keep looping until we get it
- */
-
+ /* keep looping until we get it */
for (;;) {
-
- /*
- * first, attempt to find it on the main list
- */
-
+ /* first, attempt to find it on the main list */
mtx_enter(&udv_lock);
LIST_FOREACH(lcv, &udv_list, u_list) {
if (device == lcv->u_device)
break;
}
- /*
- * got it on main list. put a hold on it and unlock udv_lock.
- */
-
+ /* got it on main list. put a hold on it and unlock udv_lock. */
if (lcv) {
-
/*
* if someone else has a hold on it, sleep and start
* over again. Else, we need take HOLD flag so we
* don't have to re-order locking here.
*/
-
if (lcv->u_flags & UVM_DEVICE_HOLD) {
lcv->u_flags |= UVM_DEVICE_WANTED;
msleep(lcv, &udv_lock, PVM | PNORELOCK,
@@ -189,10 +163,7 @@ udv_attach(void *arg, vm_prot_t accessprot, voff_t off, vsize_t size)
lcv->u_flags |= UVM_DEVICE_HOLD;
mtx_leave(&udv_lock);
- /*
- * bump reference count, unhold, return.
- */
-
+ /* bump reference count, unhold, return. */
lcv->u_obj.uo_refs++;
mtx_enter(&udv_lock);
@@ -203,10 +174,7 @@ udv_attach(void *arg, vm_prot_t accessprot, voff_t off, vsize_t size)
return(&lcv->u_obj);
}
- /*
- * did not find it on main list. need to malloc a new one.
- */
-
+ /* did not find it on main list. need to malloc a new one. */
mtx_leave(&udv_lock);
/* NOTE: we could sleep in the following malloc() */
udv = malloc(sizeof(*udv), M_TEMP, M_WAITOK);
@@ -216,7 +184,6 @@ udv_attach(void *arg, vm_prot_t accessprot, voff_t off, vsize_t size)
* now we have to double check to make sure no one added it
* to the list while we were sleeping...
*/
-
LIST_FOREACH(lcv, &udv_list, u_list) {
if (device == lcv->u_device)
break;
@@ -226,7 +193,6 @@ udv_attach(void *arg, vm_prot_t accessprot, voff_t off, vsize_t size)
* did we lose a race to someone else?
* free our memory and retry.
*/
-
if (lcv) {
mtx_leave(&udv_lock);
free(udv, M_TEMP);
@@ -237,7 +203,6 @@ udv_attach(void *arg, vm_prot_t accessprot, voff_t off, vsize_t size)
* we have it! init the data structures, add to list
* and return.
*/
-
uvm_objinit(&udv->u_obj, &uvm_deviceops, 1);
udv->u_flags = 0;
udv->u_device = device;
@@ -255,7 +220,6 @@ udv_attach(void *arg, vm_prot_t accessprot, voff_t off, vsize_t size)
* already be one (the passed in reference) so there is no chance of the
* udv being released or locked out here.
*/
-
static void
udv_reference(struct uvm_object *uobj)
{
@@ -268,15 +232,12 @@ udv_reference(struct uvm_object *uobj)
*
* remove a reference to a VM object.
*/
-
static void
udv_detach(struct uvm_object *uobj)
{
struct uvm_device *udv = (struct uvm_device *)uobj;
- /*
- * loop until done
- */
+ /* loop until done */
again:
if (uobj->uo_refs > 1) {
uobj->uo_refs--;
@@ -284,10 +245,7 @@ again:
}
KASSERT(uobj->uo_npages == 0 && RB_EMPTY(&uobj->memt));
- /*
- * is it being held? if so, wait until others are done.
- */
-
+ /* is it being held? if so, wait until others are done. */
mtx_enter(&udv_lock);
if (udv->u_flags & UVM_DEVICE_HOLD) {
udv->u_flags |= UVM_DEVICE_WANTED;
@@ -299,10 +257,7 @@ again:
goto again;
}
- /*
- * got it! nuke it now.
- */
-
+ /* got it! nuke it now. */
LIST_REMOVE(udv, u_list);
if (udv->u_flags & UVM_DEVICE_WANTED)
wakeup(udv);
@@ -316,7 +271,6 @@ again:
*
* flush pages out of a uvm object. a no-op for devices.
*/
-
static boolean_t
udv_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
@@ -337,7 +291,6 @@ udv_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
* it as a flag
* => NOTE: vaddr is the VA of pps[0] in ufi->entry, _NOT_ pps[centeridx]
*/
-
static int
udv_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps, int npages,
int centeridx, vm_fault_t fault_type, vm_prot_t access_type, int flags)
@@ -357,16 +310,12 @@ udv_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps, int npages,
* we do not allow device mappings to be mapped copy-on-write
* so we kill any attempt to do so here.
*/
-
if (UVM_ET_ISCOPYONWRITE(entry)) {
uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
return(VM_PAGER_ERROR);
}
- /*
- * get device map function.
- */
-
+ /* get device map function. */
device = udv->u_device;
mapfn = cdevsw[major(device)].d_mmap;
@@ -376,16 +325,12 @@ udv_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps, int npages,
* for pmap_enter (even if we have a submap). since virtual
* addresses in a submap must match the main map, this is ok.
*/
-
/* udv offset = (offset from start of entry) + entry's offset */
curr_offset = entry->offset + (vaddr - entry->start);
/* pmap va = vaddr (virtual address of pps[0]) */
curr_va = vaddr;
- /*
- * loop over the page range entering in as needed
- */
-
+ /* loop over the page range entering in as needed */
retval = VM_PAGER_OK;
for (lcv = 0 ; lcv < npages ; lcv++, curr_offset += PAGE_SIZE,
curr_va += PAGE_SIZE) {
diff --git a/sys/uvm/uvm_fault.c b/sys/uvm/uvm_fault.c
index 08212b2136b..98e2ad24277 100644
--- a/sys/uvm/uvm_fault.c
+++ b/sys/uvm/uvm_fault.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_fault.c,v 1.71 2014/04/03 20:21:01 miod Exp $ */
+/* $OpenBSD: uvm_fault.c,v 1.72 2014/04/13 23:14:15 tedu Exp $ */
/* $NetBSD: uvm_fault.c,v 1.51 2000/08/06 00:22:53 thorpej Exp $ */
/*
@@ -150,7 +150,6 @@
/*
* local data structures
*/
-
struct uvm_advice {
int nback;
int nforw;
@@ -159,7 +158,6 @@ struct uvm_advice {
/*
* page range array: set up in uvmfault_init().
*/
-
static struct uvm_advice uvmadvice[UVM_ADV_MASK + 1];
#define UVM_MAXRANGE 16 /* must be max() of nback+nforw+1 */
@@ -167,7 +165,6 @@ static struct uvm_advice uvmadvice[UVM_ADV_MASK + 1];
/*
* private prototypes
*/
-
static void uvmfault_amapcopy(struct uvm_faultinfo *);
static __inline void uvmfault_anonflush(struct vm_anon **, int);
void uvmfault_unlockmaps(struct uvm_faultinfo *, boolean_t);
@@ -176,13 +173,11 @@ void uvmfault_update_stats(struct uvm_faultinfo *);
/*
* inline functions
*/
-
/*
* uvmfault_anonflush: try and deactivate pages in specified anons
*
* => does not have to deactivate page if it is busy
*/
-
static __inline void
uvmfault_anonflush(struct vm_anon **anons, int n)
{
@@ -211,11 +206,9 @@ uvmfault_anonflush(struct vm_anon **anons, int n)
/*
* normal functions
*/
-
/*
* uvmfault_init: compute proper values for the uvmadvice[] array.
*/
-
void
uvmfault_init()
{
@@ -241,46 +234,29 @@ uvmfault_init()
*
* => if we are out of RAM we sleep (waiting for more)
*/
-
static void
uvmfault_amapcopy(struct uvm_faultinfo *ufi)
{
- /*
- * while we haven't done the job
- */
-
+ /* while we haven't done the job */
while (1) {
-
- /*
- * no mapping? give up.
- */
-
+ /* no mapping? give up. */
if (uvmfault_lookup(ufi, TRUE) == FALSE)
return;
- /*
- * copy if needed.
- */
-
+ /* copy if needed. */
if (UVM_ET_ISNEEDSCOPY(ufi->entry))
amap_copy(ufi->map, ufi->entry, M_NOWAIT, TRUE,
ufi->orig_rvaddr, ufi->orig_rvaddr + 1);
- /*
- * didn't work? must be out of RAM. sleep.
- */
-
+ /* didn't work? must be out of RAM. sleep. */
if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
uvmfault_unlockmaps(ufi, TRUE);
uvm_wait("fltamapcopy");
continue;
}
- /*
- * got it!
- */
-
+ /* got it! */
uvmfault_unlockmaps(ufi, TRUE);
return;
}
@@ -297,7 +273,6 @@ uvmfault_amapcopy(struct uvm_faultinfo *ufi)
* => for pages which are on loan from a uvm_object (and thus are not
* owned by the anon): if successful, we return with the owning object
*/
-
int
uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
struct vm_anon *anon)
@@ -315,12 +290,8 @@ uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
else
curproc->p_ru.ru_majflt++;
- /*
- * loop until we get it, or fail.
- */
-
+ /* loop until we get it, or fail. */
while (1) {
-
we_own = FALSE; /* TRUE if we set PG_BUSY on a page */
pg = anon->an_page;
@@ -329,23 +300,17 @@ uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
* may not own it. call out to uvm_anon_lockpage() to ensure
* the real owner of the page has been identified.
*/
-
if (pg && pg->loan_count)
pg = uvm_anon_lockloanpg(anon);
- /*
- * page there? make sure it is not busy/released.
- */
-
+ /* page there? make sure it is not busy/released. */
if (pg) {
-
/*
* at this point, if the page has a uobject [meaning
* we have it on loan], then that uobject is locked
* by us! if the page is busy, we drop all the
* locks (including uobject) and try again.
*/
-
if ((pg->pg_flags & (PG_BUSY|PG_RELEASED)) == 0) {
return (VM_PAGER_OK);
}
@@ -365,23 +330,16 @@ uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
UVM_WAIT(pg, 0, "anonget2", 0);
}
/* ready to relock and try again */
-
} else {
-
- /*
- * no page, we must try and bring it in.
- */
+ /* no page, we must try and bring it in. */
pg = uvm_pagealloc(NULL, 0, anon, 0);
if (pg == NULL) { /* out of RAM. */
-
uvmfault_unlockall(ufi, amap, NULL, anon);
uvmexp.fltnoram++;
uvm_wait("flt_noram1");
/* ready to relock and try again */
-
} else {
-
/* we set the PG_BUSY bit */
we_own = TRUE;
uvmfault_unlockall(ufi, amap, NULL, anon);
@@ -405,10 +363,7 @@ uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
}
}
- /*
- * now relock and try again
- */
-
+ /* now relock and try again */
locked = uvmfault_relock(ufi);
/*
@@ -421,9 +376,7 @@ uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
* [3] I/O OK! activate the page and sync with the
* non-we_own case (i.e. drop anon lock if not locked).
*/
-
if (we_own) {
-
if (pg->pg_flags & PG_WANTED) {
wakeup(pg);
}
@@ -487,17 +440,11 @@ uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
uvm_unlock_pageq();
}
- /*
- * we were not able to relock. restart fault.
- */
-
+ /* we were not able to relock. restart fault. */
if (!locked)
return (VM_PAGER_REFAULT);
- /*
- * verify no one has touched the amap and moved the anon on us.
- */
-
+ /* verify no one touched the amap and moved the anon on us. */
if (ufi != NULL &&
amap_lookup(&ufi->entry->aref,
ufi->orig_rvaddr - ufi->entry->start) != anon) {
@@ -505,16 +452,12 @@ uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
uvmfault_unlockall(ufi, amap, NULL, anon);
return (VM_PAGER_REFAULT);
}
-
- /*
- * try it again!
- */
+ /* try it again! */
uvmexp.fltanretry++;
continue;
} /* while (1) */
-
/*NOTREACHED*/
}
@@ -534,9 +477,7 @@ uvmfault_update_stats(struct uvm_faultinfo *ufi)
map = ufi->orig_map;
- /*
- * Update the maxrss for the process.
- */
+ /* Update the maxrss for the process. */
if (map->flags & VM_MAP_ISVMSPACE) {
p = curproc;
KASSERT(p != NULL && &p->p_vmspace->vm_map == map);
@@ -575,10 +516,8 @@ uvmfault_update_stats(struct uvm_faultinfo *ufi)
* uvm_map_pageable). this should be avoided because it keeps
* the map locked off during I/O.
*/
-
#define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \
~VM_PROT_WRITE : VM_PROT_ALL)
-
int
uvm_fault(vm_map_t orig_map, vaddr_t vaddr, vm_fault_t fault_type,
vm_prot_t access_type)
@@ -600,10 +539,7 @@ uvm_fault(vm_map_t orig_map, vaddr_t vaddr, vm_fault_t fault_type,
uvmexp.faults++; /* XXX: locking? */
- /*
- * init the IN parameters in the ufi
- */
-
+ /* init the IN parameters in the ufi */
ufi.orig_map = orig_map;
ufi.orig_rvaddr = trunc_page(vaddr);
ufi.orig_size = PAGE_SIZE; /* can't get any smaller than this */
@@ -613,15 +549,9 @@ uvm_fault(vm_map_t orig_map, vaddr_t vaddr, vm_fault_t fault_type,
else
narrow = FALSE; /* normal fault */
- /*
- * "goto ReFault" means restart the page fault from ground zero.
- */
+ /* "goto ReFault" means restart the page fault from ground zero. */
ReFault:
-
- /*
- * lookup and lock the maps
- */
-
+ /* lookup and lock the maps */
if (uvmfault_lookup(&ufi, FALSE) == FALSE) {
return (EFAULT);
}
@@ -632,10 +562,7 @@ ReFault:
ufi.map, vaddr);
#endif
- /*
- * check protection
- */
-
+ /* check protection */
if ((ufi.entry->protection & access_type) != access_type) {
uvmfault_unlockmaps(&ufi, FALSE);
return (EACCES);
@@ -653,10 +580,7 @@ ReFault:
if (wired)
access_type = enter_prot; /* full access for wired */
- /*
- * handle "needs_copy" case.
- */
-
+ /* handle "needs_copy" case. */
if (UVM_ET_ISNEEDSCOPY(ufi.entry)) {
if ((access_type & VM_PROT_WRITE) ||
(ufi.entry->object.uvm_obj == NULL)) {
@@ -665,22 +589,16 @@ ReFault:
uvmfault_amapcopy(&ufi);
uvmexp.fltamcopy++;
goto ReFault;
-
} else {
-
/*
* ensure that we pmap_enter page R/O since
* needs_copy is still true
*/
enter_prot &= ~VM_PROT_WRITE;
-
}
}
- /*
- * identify the players
- */
-
+ /* identify the players */
amap = ufi.entry->aref.ar_amap; /* top layer */
uobj = ufi.entry->object.uvm_obj; /* bottom layer */
@@ -688,7 +606,6 @@ ReFault:
* check for a case 0 fault. if nothing backing the entry then
* error now.
*/
-
if (amap == NULL && uobj == NULL) {
uvmfault_unlockmaps(&ufi, FALSE);
return (EFAULT);
@@ -700,7 +617,6 @@ ReFault:
* to do this the first time through the fault. if we
* ReFault we will disable this by setting "narrow" to true.
*/
-
if (narrow == FALSE) {
/* wide fault (!narrow) */
@@ -718,21 +634,15 @@ ReFault:
centeridx = nback;
narrow = TRUE; /* ensure only once per-fault */
-
} else {
-
/* narrow fault! */
nback = nforw = 0;
startva = ufi.orig_rvaddr;
npages = 1;
centeridx = 0;
-
}
- /*
- * if we've got an amap, extract current anons.
- */
-
+ /* if we've got an amap, extract current anons. */
if (amap) {
anons = anons_store;
amap_lookups(&ufi.entry->aref, startva - ufi.entry->start,
@@ -745,9 +655,7 @@ ReFault:
* for MADV_SEQUENTIAL mappings we want to deactivate the back pages
* now and then forget about them (for the rest of the fault).
*/
-
if (ufi.entry->advice == MADV_SEQUENTIAL && nback != 0) {
-
/* flush back-page anons? */
if (amap)
uvmfault_anonflush(anons, nback);
@@ -772,11 +680,9 @@ ReFault:
* of preventing future faults. we also init the pages[] array as
* we go.
*/
-
currva = startva;
shadowed = FALSE;
for (lcv = 0 ; lcv < npages ; lcv++, currva += PAGE_SIZE) {
-
/*
* dont play with VAs that are already mapped
* except for center)
@@ -787,18 +693,13 @@ ReFault:
continue;
}
- /*
- * unmapped or center page. check if any anon at this level.
- */
+ /* unmapped or center page. check if any anon at this level. */
if (amap == NULL || anons[lcv] == NULL) {
pages[lcv] = NULL;
continue;
}
- /*
- * check for present page and map if possible. re-activate it.
- */
-
+ /* check for present page and map if possible. re-activate it. */
pages[lcv] = PGO_DONTCARE;
if (lcv == centeridx) { /* save center for later! */
shadowed = TRUE;
@@ -818,7 +719,6 @@ ReFault:
* ignore pmap_enter() failures; it's not critical
* that we enter these right now.
*/
-
(void) pmap_enter(ufi.orig_map->pmap, currva,
VM_PAGE_TO_PHYS(anon->an_page),
(anon->an_ref > 1) ? (enter_prot & ~VM_PROT_WRITE) :
@@ -831,7 +731,6 @@ ReFault:
pmap_update(ufi.orig_map->pmap);
/* (shadowed == TRUE) if there is an anon at the faulting address */
-
/*
* note that if we are really short of RAM we could sleep in the above
* call to pmap_enter. bad?
@@ -839,7 +738,6 @@ ReFault:
* XXX Actually, that is bad; pmap_enter() should just fail in that
* XXX case. --thorpej
*/
-
/*
* if the desired page is not shadowed by the amap and we have a
* backing object, then we check to see if the backing object would
@@ -847,7 +745,6 @@ ReFault:
* with the usual pgo_get hook). the backing object signals this by
* providing a pgo_fault routine.
*/
-
if (uobj && shadowed == FALSE && uobj->pgops->pgo_fault != NULL) {
result = uobj->pgops->pgo_fault(&ufi, startva, pages, npages,
centeridx, fault_type, access_type,
@@ -870,7 +767,6 @@ ReFault:
*
* ("get" has the option of doing a pmap_enter for us)
*/
-
if (uobj && shadowed == FALSE) {
uvmexp.fltlget++;
gotpages = npages;
@@ -880,17 +776,12 @@ ReFault:
access_type & MASK(ufi.entry),
ufi.entry->advice, PGO_LOCKED);
- /*
- * check for pages to map, if we got any
- */
-
+ /* check for pages to map, if we got any */
uobjpage = NULL;
-
if (gotpages) {
currva = startva;
for (lcv = 0 ; lcv < npages ;
lcv++, currva += PAGE_SIZE) {
-
if (pages[lcv] == NULL ||
pages[lcv] == PGO_DONTCARE)
continue;
@@ -904,7 +795,6 @@ ReFault:
* remember this page as "uobjpage."
* (for later use).
*/
-
if (lcv == centeridx) {
uobjpage = pages[lcv];
continue;
@@ -930,7 +820,6 @@ ReFault:
* failures; it's not critical that we
* enter these right now.
*/
-
(void) pmap_enter(ufi.orig_map->pmap, currva,
VM_PAGE_TO_PHYS(pages[lcv]),
enter_prot & MASK(ufi.entry),
@@ -942,7 +831,6 @@ ReFault:
* we've held the lock the whole time
* we've had the handle.
*/
-
atomic_clearbits_int(&pages[lcv]->pg_flags,
PG_BUSY);
UVM_PAGE_OWN(pages[lcv], NULL);
@@ -964,34 +852,24 @@ ReFault:
* then we've got a pointer to it as "uobjpage" and we've already
* made it BUSY.
*/
-
/*
* there are four possible cases we must address: 1A, 1B, 2A, and 2B
*/
-
- /*
- * redirect case 2: if we are not shadowed, go to case 2.
- */
-
+ /* redirect case 2: if we are not shadowed, go to case 2. */
if (shadowed == FALSE)
goto Case2;
- /*
- * handle case 1: fault on an anon in our amap
- */
-
+ /* handle case 1: fault on an anon in our amap */
anon = anons[centeridx];
/*
* no matter if we have case 1A or case 1B we are going to need to
* have the anon's memory resident. ensure that now.
*/
-
/*
* let uvmfault_anonget do the dirty work.
* also, if it is OK, then the anon's page is on the queues.
*/
-
result = uvmfault_anonget(&ufi, amap, anon);
switch (result) {
case VM_PAGER_OK:
@@ -1007,7 +885,6 @@ ReFault:
* now.
*/
return (EACCES); /* XXX */
-
default:
#ifdef DIAGNOSTIC
panic("uvm_fault: uvmfault_anonget -> %d", result);
@@ -1016,27 +893,17 @@ ReFault:
#endif
}
- /*
- * uobj is non null if the page is on loan from an object (i.e. uobj)
- */
-
+ /* uobj is non null if the page is on loan from an object (i.e. uobj) */
uobj = anon->an_page->uobject;
- /*
- * special handling for loaned pages
- */
-
+ /* special handling for loaned pages */
if (anon->an_page->loan_count) {
-
if ((access_type & VM_PROT_WRITE) == 0) {
-
/*
* for read faults on loaned pages we just cap the
* protection at read-only.
*/
-
enter_prot = enter_prot & ~VM_PROT_WRITE;
-
} else {
/*
* note that we can't allow writes into a loaned page!
@@ -1053,7 +920,6 @@ ReFault:
/* >1 case is already ok */
if (anon->an_ref == 1) {
-
/* get new un-owned replacement page */
pg = uvm_pagealloc(NULL, 0, NULL, 0);
if (pg == NULL) {
@@ -1063,9 +929,7 @@ ReFault:
goto ReFault;
}
- /*
- * copy data, kill loan
- */
+ /* copy data, kill loan */
/* copy old -> new */
uvm_pagecopy(anon->an_page, pg);
@@ -1093,8 +957,6 @@ ReFault:
atomic_clearbits_int(&pg->pg_flags,
PG_BUSY|PG_FAKE);
UVM_PAGE_OWN(pg, NULL);
-
- /* done! */
} /* ref == 1 */
} /* write fault */
} /* loan count */
@@ -1137,7 +999,6 @@ ReFault:
}
/* got all resources, replace anon with nanon */
-
uvm_pagecopy(oanon->an_page, pg); /* pg now !PG_CLEAN */
/* un-busy! new page */
atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE);
@@ -1153,15 +1014,12 @@ ReFault:
* to in from amap.
* thus, no one can get at it until we are done with it.
*/
-
} else {
-
uvmexp.flt_anon++;
oanon = anon;
pg = anon->an_page;
if (anon->an_ref > 1) /* disallow writes to ref > 1 anons */
enter_prot = enter_prot & ~VM_PROT_WRITE;
-
}
/*
@@ -1170,7 +1028,6 @@ ReFault:
* suspect since some other thread could blast the page out from
* under us between the unlock and the pmap_enter.
*/
-
if (pmap_enter(ufi.orig_map->pmap, ufi.orig_rvaddr, VM_PAGE_TO_PHYS(pg),
enter_prot, access_type | PMAP_CANFAIL | (wired ? PMAP_WIRED : 0))
!= 0) {
@@ -1192,15 +1049,11 @@ ReFault:
goto ReFault;
}
- /*
- * ... update the page queues.
- */
-
+ /* ... update the page queues. */
uvm_lock_pageq();
if (fault_type == VM_FAULT_WIRE) {
uvm_pagewire(pg);
-
/*
* since the now-wired page cannot be paged out,
* release its swap resources for others to use.
@@ -1216,27 +1069,20 @@ ReFault:
uvm_unlock_pageq();
- /*
- * done case 1! finish up by unlocking everything and returning success
- */
-
+ /* done case 1! finish up by unlocking everything and returning success */
uvmfault_unlockall(&ufi, amap, uobj, oanon);
pmap_update(ufi.orig_map->pmap);
return (0);
Case2:
- /*
- * handle case 2: faulting on backing object or zero fill
- */
-
+ /* handle case 2: faulting on backing object or zero fill */
/*
* note that uobjpage can not be PGO_DONTCARE at this point. we now
* set uobjpage to PGO_DONTCARE if we are doing a zero fill. if we
* have a backing object, check and see if we are going to promote
* the data up to an anon during the fault.
*/
-
if (uobj == NULL) {
uobjpage = PGO_DONTCARE;
promote = TRUE; /* always need anon here */
@@ -1254,7 +1100,6 @@ Case2:
* get the data for us. once we have the data, we need to reverify
* the state of the world. we are currently not holding any resources.
*/
-
if (uobjpage) {
/* update rusage counters */
curproc->p_ru.ru_minflt++;
@@ -1271,10 +1116,7 @@ Case2:
0, access_type & MASK(ufi.entry), ufi.entry->advice,
PGO_SYNCIO);
- /*
- * recover from I/O
- */
-
+ /* recover from I/O */
if (result != VM_PAGER_OK) {
KASSERT(result != VM_PAGER_PEND);
@@ -1286,17 +1128,13 @@ Case2:
return (EACCES); /* XXX i/o error */
}
- /*
- * re-verify the state of the world.
- */
-
+ /* re-verify the state of the world. */
locked = uvmfault_relock(&ufi);
/*
* Re-verify that amap slot is still free. if there is
* a problem, we clean up.
*/
-
if (locked && amap && amap_lookup(&ufi.entry->aref,
ufi.orig_rvaddr - ufi.entry->start)) {
if (locked)
@@ -1304,10 +1142,7 @@ Case2:
locked = FALSE;
}
- /*
- * didn't get the lock? release the page and retry.
- */
-
+ /* didn't get the lock? release the page and retry. */
if (locked == FALSE) {
if (uobjpage->pg_flags & PG_WANTED)
/* still holding object lock */
@@ -1328,7 +1163,6 @@ Case2:
/*
* we have the data in uobjpage which is PG_BUSY
*/
-
}
/*
@@ -1336,9 +1170,7 @@ Case2:
* - at this point uobjpage can not be NULL
* - at this point uobjpage could be PG_WANTED (handle later)
*/
-
if (promote == FALSE) {
-
/*
* we are not promoting. if the mapping is COW ensure that we
* don't give more access than we should (e.g. when doing a read
@@ -1347,7 +1179,6 @@ Case2:
*
* set "pg" to the page we want to map in (uobjpage, usually)
*/
-
uvmexp.flt_obj++;
if (UVM_ET_ISCOPYONWRITE(ufi.entry))
enter_prot &= ~VM_PROT_WRITE;
@@ -1367,7 +1198,6 @@ Case2:
enter_prot = enter_prot & ~VM_PROT_WRITE;
} else {
/* write fault: must break the loan here */
-
/* alloc new un-owned page */
pg = uvm_pagealloc(NULL, 0, NULL, 0);
@@ -1435,9 +1265,7 @@ Case2:
} /* write fault case */
} /* if loan_count */
-
} else {
-
/*
* if we are going to promote the data to an anon we
* allocate a blank anon here and plug it into our amap.
@@ -1463,10 +1291,7 @@ Case2:
* out of memory resources?
*/
if (anon == NULL || pg == NULL) {
-
- /*
- * arg! must unbusy our page and fail or sleep.
- */
+ /* arg! must unbusy our page and fail or sleep. */
if (uobjpage != PGO_DONTCARE) {
if (uobjpage->pg_flags & PG_WANTED)
wakeup(uobjpage);
@@ -1493,10 +1318,7 @@ Case2:
goto ReFault;
}
- /*
- * fill in the data
- */
-
+ /* fill in the data */
if (uobjpage != PGO_DONTCARE) {
uvmexp.flt_prcopy++;
/* copy page [pg now dirty] */
@@ -1510,10 +1332,7 @@ Case2:
pmap_page_protect(uobjpage, VM_PROT_NONE);
}
- /*
- * dispose of uobjpage. drop handle to uobj as well.
- */
-
+ /* dispose of uobjpage. drop handle to uobj as well. */
if (uobjpage->pg_flags & PG_WANTED)
wakeup(uobjpage);
atomic_clearbits_int(&uobjpage->pg_flags,
@@ -1535,19 +1354,14 @@ Case2:
anon, 0);
}
- /*
- * note: pg is either the uobjpage or the new page in the new anon
- */
-
+ /* note: pg is either the uobjpage or the new page in the new anon */
/*
* all resources are present. we can now map it in and free our
* resources.
*/
-
if (pmap_enter(ufi.orig_map->pmap, ufi.orig_rvaddr, VM_PAGE_TO_PHYS(pg),
enter_prot, access_type | PMAP_CANFAIL | (wired ? PMAP_WIRED : 0))
!= 0) {
-
/*
* No need to undo what we did; we can simply think of
* this as the pmap throwing away the mapping information.
@@ -1555,7 +1369,6 @@ Case2:
* We do, however, have to go through the ReFault path,
* as the map may change while we're asleep.
*/
-
if (pg->pg_flags & PG_WANTED)
wakeup(pg);
@@ -1577,7 +1390,6 @@ Case2:
if (fault_type == VM_FAULT_WIRE) {
uvm_pagewire(pg);
if (pg->pg_flags & PQ_AOBJ) {
-
/*
* since the now-wired page cannot be paged out,
* release its swap resources for others to use.
@@ -1613,7 +1425,6 @@ Case2:
* be write-locked in uvm_fault() must be taken care of by
* the caller. See uvm_map_pageable().
*/
-
int
uvm_fault_wire(vm_map_t map, vaddr_t start, vaddr_t end, vm_prot_t access_type)
{
@@ -1628,7 +1439,6 @@ uvm_fault_wire(vm_map_t map, vaddr_t start, vaddr_t end, vm_prot_t access_type)
* to undo what we have done. note that in uvm_fault VM_PROT_NONE
* is replaced with the max protection if fault_type is VM_FAULT_WIRE.
*/
-
for (va = start ; va < end ; va += PAGE_SIZE) {
rv = uvm_fault(map, va, VM_FAULT_WIRE, access_type);
if (rv) {
@@ -1645,7 +1455,6 @@ uvm_fault_wire(vm_map_t map, vaddr_t start, vaddr_t end, vm_prot_t access_type)
/*
* uvm_fault_unwire(): unwire range of virtual space.
*/
-
void
uvm_fault_unwire(vm_map_t map, vaddr_t start, vaddr_t end)
{
@@ -1660,7 +1469,6 @@ uvm_fault_unwire(vm_map_t map, vaddr_t start, vaddr_t end)
*
* => map must be at least read-locked.
*/
-
void
uvm_fault_unwire_locked(vm_map_t map, vaddr_t start, vaddr_t end)
{
@@ -1678,12 +1486,9 @@ uvm_fault_unwire_locked(vm_map_t map, vaddr_t start, vaddr_t end)
* the PAs from the pmap. we also lock out the page daemon so that
* we can call uvm_pageunwire.
*/
-
uvm_lock_pageq();
- /*
- * find the beginning map entry for the region.
- */
+ /* find the beginning map entry for the region. */
KASSERT(start >= vm_map_min(map) && end <= vm_map_max(map));
if (uvm_map_lookup_entry(map, start, &entry) == FALSE)
panic("uvm_fault_unwire_locked: address not in map");
@@ -1692,9 +1497,7 @@ uvm_fault_unwire_locked(vm_map_t map, vaddr_t start, vaddr_t end)
if (pmap_extract(pmap, va, &pa) == FALSE)
continue;
- /*
- * find the map entry for the current address.
- */
+ /* find the map entry for the current address. */
KASSERT(va >= entry->start);
while (va >= entry->end) {
next = RB_NEXT(uvm_map_addr, &map->addr, entry);
@@ -1702,9 +1505,7 @@ uvm_fault_unwire_locked(vm_map_t map, vaddr_t start, vaddr_t end)
entry = next;
}
- /*
- * if the entry is no longer wired, tell the pmap.
- */
+ /* if the entry is no longer wired, tell the pmap. */
if (VM_MAPENT_ISWIRED(entry) == 0)
pmap_unwire(pmap, va);
@@ -1726,7 +1527,6 @@ uvmfault_unlockmaps(struct uvm_faultinfo *ufi, boolean_t write_locked)
* ufi can be NULL when this isn't really a fault,
* but merely paging in anon data.
*/
-
if (ufi == NULL) {
return;
}
@@ -1772,10 +1572,7 @@ uvmfault_lookup(struct uvm_faultinfo *ufi, boolean_t write_lock)
{
vm_map_t tmpmap;
- /*
- * init ufi values for lookup.
- */
-
+ /* init ufi values for lookup. */
ufi->map = ufi->orig_map;
ufi->size = ufi->orig_size;
@@ -1783,33 +1580,26 @@ uvmfault_lookup(struct uvm_faultinfo *ufi, boolean_t write_lock)
* keep going down levels until we are done. note that there can
* only be two levels so we won't loop very long.
*/
-
while (1) {
if (ufi->orig_rvaddr < ufi->map->min_offset ||
ufi->orig_rvaddr >= ufi->map->max_offset)
return(FALSE);
- /*
- * lock map
- */
+ /* lock map */
if (write_lock) {
vm_map_lock(ufi->map);
} else {
vm_map_lock_read(ufi->map);
}
- /*
- * lookup
- */
+ /* lookup */
if (!uvm_map_lookup_entry(ufi->map, ufi->orig_rvaddr,
&ufi->entry)) {
uvmfault_unlockmaps(ufi, write_lock);
return(FALSE);
}
- /*
- * reduce size if necessary
- */
+ /* reduce size if necessary */
if (ufi->entry->end - ufi->orig_rvaddr < ufi->size)
ufi->size = ufi->entry->end - ufi->orig_rvaddr;
@@ -1824,15 +1614,11 @@ uvmfault_lookup(struct uvm_faultinfo *ufi, boolean_t write_lock)
continue;
}
- /*
- * got it!
- */
-
+ /* got it! */
ufi->mapv = ufi->map->timestamp;
return(TRUE);
- } /* while loop */
-
+ }
/*NOTREACHED*/
}
@@ -1849,7 +1635,6 @@ uvmfault_relock(struct uvm_faultinfo *ufi)
* ufi can be NULL when this isn't really a fault,
* but merely paging in anon data.
*/
-
if (ufi == NULL) {
return TRUE;
}
@@ -1860,7 +1645,6 @@ uvmfault_relock(struct uvm_faultinfo *ufi)
* relock map. fail if version mismatch (in which case nothing
* gets locked).
*/
-
vm_map_lock_read(ufi->map);
if (ufi->mapv != ufi->map->timestamp) {
vm_map_unlock_read(ufi->map);
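
The loaned-page handling above caps a read fault's mapping protection at read-only by clearing the write bit, while a write fault must break the loan instead. A minimal userland sketch of that masking, using stand-in PROT_R/PROT_W values rather than the kernel's VM_PROT_* flags:

#include <stdio.h>

#define PROT_R	0x1
#define PROT_W	0x2

/* read fault on a loaned page: drop write; a write fault must break the loan */
static unsigned int
cap_for_loaned_page(unsigned int enter_prot, unsigned int access_type)
{
	if ((access_type & PROT_W) == 0)
		return (enter_prot & ~PROT_W);
	return (enter_prot);
}

int
main(void)
{
	printf("capped prot = %#x\n",
	    cap_for_loaned_page(PROT_R | PROT_W, PROT_R));
	return (0);
}
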
diff --git a/sys/uvm/uvm_glue.c b/sys/uvm/uvm_glue.c
index 62c7d152527..ffb283f22c1 100644
--- a/sys/uvm/uvm_glue.c
+++ b/sys/uvm/uvm_glue.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_glue.c,v 1.61 2014/04/03 21:40:10 tedu Exp $ */
+/* $OpenBSD: uvm_glue.c,v 1.62 2014/04/13 23:14:15 tedu Exp $ */
/* $NetBSD: uvm_glue.c,v 1.44 2001/02/06 19:54:44 eeh Exp $ */
/*
@@ -89,7 +89,6 @@
*
* - called from malloc [DIAGNOSTIC], and /dev/kmem driver (mem.c)
*/
-
boolean_t
uvm_kernacc(caddr_t addr, size_t len, int rw)
{
@@ -366,7 +365,6 @@ uvm_init_limits(struct proc *p)
* This causes any single, large process to start random page
* replacement once it fills memory.
*/
-
p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
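
uvm_init_limits seeds the default soft/hard limits shown above (DFLSSIZ, MAXSSIZ, DFLDSIZ). The values a process ends up with can be observed from userland with getrlimit(2); a small sketch using only that standard call, nothing kernel-side:

#include <sys/time.h>
#include <sys/resource.h>
#include <stdio.h>

int
main(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_STACK, &rl) == -1) {
		perror("getrlimit");
		return (1);
	}
	printf("stack: soft=%lld hard=%lld\n",
	    (long long)rl.rlim_cur, (long long)rl.rlim_max);
	return (0);
}
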
diff --git a/sys/uvm/uvm_init.c b/sys/uvm/uvm_init.c
index e3ad18e233f..e5a8aee5cd5 100644
--- a/sys/uvm/uvm_init.c
+++ b/sys/uvm/uvm_init.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_init.c,v 1.31 2014/04/03 20:21:01 miod Exp $ */
+/* $OpenBSD: uvm_init.c,v 1.32 2014/04/13 23:14:15 tedu Exp $ */
/* $NetBSD: uvm_init.c,v 1.14 2000/06/27 17:29:23 mrg Exp $ */
/*
@@ -68,23 +68,17 @@ struct uvmexp uvmexp; /* decl */
/*
* uvm_init: init the VM system. called from kern/init_main.c.
*/
-
void
uvm_init(void)
{
vaddr_t kvm_start, kvm_end;
- /*
- * step 0: ensure that the hardware set the page size
- */
-
+ /* step 0: ensure that the hardware set the page size */
if (uvmexp.pagesize == 0) {
panic("uvm_init: page size not set");
}
- /*
- * step 1: set up stats.
- */
+ /* step 1: set up stats. */
averunnable.fscale = FSCALE;
/*
@@ -94,7 +88,6 @@ uvm_init(void)
* kvm_start and kvm_end will be set to the area of kernel virtual
* memory which is available for general use.
*/
-
uvm_page_init(&kvm_start, &kvm_end);
/*
@@ -102,7 +95,6 @@ uvm_init(void)
* vm_map_entry structures that are used for "special" kernel maps
* (e.g. kernel_map, kmem_map, etc...).
*/
-
uvm_map_init();
/*
@@ -116,21 +108,18 @@ uvm_init(void)
/*
* step 4.5: init (tune) the fault recovery code.
*/
-
uvmfault_init();
/*
* step 5: init the pmap module. the pmap module is free to allocate
* memory for its private use (e.g. pvlists).
*/
-
pmap_init();
/*
* step 6: init the kernel memory allocator. after this call the
* kernel memory allocator (malloc) can be used.
*/
-
kmeminit();
/*
@@ -141,14 +130,12 @@ uvm_init(void)
/*
* step 7: init all pagers and the pager_map.
*/
-
uvm_pager_init();
/*
* step 8: init anonymous memory system
*/
-
- amap_init(); /* init amap module */
+ amap_init();
/*
* step 9: init uvm_km_page allocator memory.
@@ -159,7 +146,6 @@ uvm_init(void)
* the VM system is now up! now that malloc is up we can
* enable paging of kernel objects.
*/
-
uao_create(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
UAO_FLAG_KERNSWAP);
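
Step 0 above refuses to proceed if the hardware-dependent code never set the page size. The same sanity check has a rough userland analogue against sysconf(_SC_PAGESIZE) (an analogy only, not what uvm_init calls):

#include <unistd.h>
#include <stdio.h>

int
main(void)
{
	long ps = sysconf(_SC_PAGESIZE);

	/* page size must be set and must be a power of two */
	if (ps <= 0 || (ps & (ps - 1)) != 0) {
		fprintf(stderr, "bogus page size %ld\n", ps);
		return (1);
	}
	printf("page size %ld\n", ps);
	return (0);
}
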
diff --git a/sys/uvm/uvm_io.c b/sys/uvm/uvm_io.c
index bfeea500ace..4971b1eff4f 100644
--- a/sys/uvm/uvm_io.c
+++ b/sys/uvm/uvm_io.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_io.c,v 1.20 2012/03/09 13:01:29 ariane Exp $ */
+/* $OpenBSD: uvm_io.c,v 1.21 2014/04/13 23:14:15 tedu Exp $ */
/* $NetBSD: uvm_io.c,v 1.12 2000/06/27 17:29:23 mrg Exp $ */
/*
@@ -72,7 +72,6 @@ uvm_io(vm_map_t map, struct uio *uio, int flags)
* large chunk size. if we have trouble finding vm space we will
* reduce it.
*/
-
if (uio->uio_resid == 0)
return(0);
togo = uio->uio_resid;
@@ -97,16 +96,9 @@ uvm_io(vm_map_t map, struct uio *uio, int flags)
if (flags & UVM_IO_FIXPROT)
extractflags |= UVM_EXTRACT_FIXPROT;
- /*
- * step 1: main loop... while we've got data to move
- */
-
+ /* step 1: main loop... while we've got data to move */
for (/*null*/; togo > 0 ; pageoffset = 0) {
-
- /*
- * step 2: extract mappings from the map into kernel_map
- */
-
+ /* step 2: extract mappings from the map into kernel_map */
error = uvm_map_extract(map, baseva, chunksz, &kva,
extractflags);
if (error) {
@@ -122,10 +114,7 @@ uvm_io(vm_map_t map, struct uio *uio, int flags)
break;
}
- /*
- * step 3: move a chunk of data
- */
-
+ /* step 3: move a chunk of data */
sz = chunksz - pageoffset;
if (sz > togo)
sz = togo;
@@ -133,11 +122,7 @@ uvm_io(vm_map_t map, struct uio *uio, int flags)
togo -= sz;
baseva += chunksz;
-
- /*
- * step 4: unmap the area of kernel memory
- */
-
+ /* step 4: unmap the area of kernel memory */
vm_map_lock(kernel_map);
TAILQ_INIT(&dead_entries);
uvm_unmap_remove(kernel_map, kva, kva+chunksz,
@@ -153,9 +138,5 @@ uvm_io(vm_map_t map, struct uio *uio, int flags)
break;
}
- /*
- * done
- */
-
return (error);
}
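
The uvm_io loop above works through the transfer one chunk at a time, with the initial page offset applying only to the first pass (the for loop resets pageoffset to 0 on every later iteration). A self-contained sketch of that loop shape, with memcpy standing in for uiomove and a made-up chunk size:

#include <stdio.h>
#include <string.h>

#define CHUNKSZ	4096		/* stand-in for the kernel's chunk size */

static size_t
copy_in_chunks(char *dst, const char *src, size_t togo, size_t pageoffset)
{
	size_t done = 0, sz;

	for (; togo > 0; pageoffset = 0) {
		sz = CHUNKSZ - pageoffset;	/* size of this chunk */
		if (sz > togo)
			sz = togo;
		memcpy(dst + done, src + done, sz);	/* stand-in for uiomove */
		togo -= sz;
		done += sz;
	}
	return (done);
}

int
main(void)
{
	static char src[10000], dst[10000];

	memset(src, 'x', sizeof(src));
	printf("copied %zu bytes\n",
	    copy_in_chunks(dst, src, sizeof(src), 100));
	return (0);
}
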
diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c
index 33c61e74b00..7a810b1e72f 100644
--- a/sys/uvm/uvm_km.c
+++ b/sys/uvm/uvm_km.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_km.c,v 1.111 2013/05/30 18:02:04 tedu Exp $ */
+/* $OpenBSD: uvm_km.c,v 1.112 2014/04/13 23:14:15 tedu Exp $ */
/* $NetBSD: uvm_km.c,v 1.42 2001/01/14 02:10:01 thorpej Exp $ */
/*
@@ -152,7 +152,6 @@ struct uvm_constraint_range no_constraint = { 0x0, (paddr_t)-1 };
/*
* local data structues
*/
-
static struct vm_map kernel_map_store;
/*
@@ -163,15 +162,12 @@ static struct vm_map kernel_map_store;
* we assume that [min -> start] has already been allocated and that
* "end" is the end.
*/
-
void
uvm_km_init(vaddr_t start, vaddr_t end)
{
vaddr_t base = VM_MIN_KERNEL_ADDRESS;
- /*
- * next, init kernel memory objects.
- */
+ /* next, init kernel memory objects. */
/* kernel_object: for pageable anonymous kernel memory */
uao_init();
@@ -196,10 +192,6 @@ uvm_km_init(vaddr_t start, vaddr_t end)
UVM_INH_NONE, UVM_ADV_RANDOM,UVM_FLAG_FIXED)) != 0)
panic("uvm_km_init: could not reserve space for kernel");
- /*
- * install!
- */
-
kernel_map = &kernel_map_store;
}
@@ -221,26 +213,17 @@ uvm_km_suballoc(struct vm_map *map, vaddr_t *min, vaddr_t *max, vsize_t size,
size = round_page(size); /* round up to pagesize */
- /*
- * first allocate a blank spot in the parent map
- */
-
+ /* first allocate a blank spot in the parent map */
if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET, 0,
UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
UVM_ADV_RANDOM, mapflags)) != 0) {
panic("uvm_km_suballoc: unable to allocate space in parent map");
}
- /*
- * set VM bounds (min is filled in by uvm_map)
- */
-
+ /* set VM bounds (min is filled in by uvm_map) */
*max = *min + size;
- /*
- * add references to pmap and create or init the submap
- */
-
+ /* add references to pmap and create or init the submap */
pmap_reference(vm_map_pmap(map));
if (submap == NULL) {
submap = uvm_map_create(vm_map_pmap(map), *min, *max, flags);
@@ -251,10 +234,7 @@ uvm_km_suballoc(struct vm_map *map, vaddr_t *min, vaddr_t *max, vsize_t size,
submap->pmap = vm_map_pmap(map);
}
- /*
- * now let uvm_map_submap plug in it...
- */
-
+ /* now let uvm_map_submap plug in it... */
if (uvm_map_submap(map, *min, *max, submap) != 0)
panic("uvm_km_suballoc: submap allocation failed");
@@ -309,7 +289,6 @@ uvm_km_pgremove(struct uvm_object *uobj, vaddr_t start, vaddr_t end)
* be on the active or inactive queues (because these objects are
* never allowed to "page").
*/
-
void
uvm_km_pgremove_intrsafe(vaddr_t start, vaddr_t end)
{
@@ -341,7 +320,6 @@ uvm_km_pgremove_intrsafe(vaddr_t start, vaddr_t end)
* to uvm_pglistalloc
* => flags: ZERO - correspond to uvm_pglistalloc flags
*/
-
vaddr_t
uvm_km_kmemalloc_pla(struct vm_map *map, struct uvm_object *obj, vsize_t size,
vsize_t valign, int flags, paddr_t low, paddr_t high, paddr_t alignment,
@@ -358,37 +336,25 @@ uvm_km_kmemalloc_pla(struct vm_map *map, struct uvm_object *obj, vsize_t size,
KASSERT(!(flags & UVM_KMF_VALLOC) ||
!(flags & UVM_KMF_ZERO));
- /*
- * setup for call
- */
-
+ /* setup for call */
size = round_page(size);
kva = vm_map_min(map); /* hint */
if (nsegs == 0)
nsegs = atop(size);
- /*
- * allocate some virtual space
- */
-
+ /* allocate some virtual space */
if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
valign, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK))) != 0)) {
return(0);
}
- /*
- * if all we wanted was VA, return now
- */
-
+ /* if all we wanted was VA, return now */
if (flags & UVM_KMF_VALLOC) {
return(kva);
}
- /*
- * recover object offset from virtual address
- */
-
+ /* recover object offset from virtual address */
if (obj != NULL)
offset = kva - vm_map_min(kernel_map);
else
@@ -428,7 +394,6 @@ uvm_km_kmemalloc_pla(struct vm_map *map, struct uvm_object *obj, vsize_t size,
* map it in: note that we call pmap_enter with the map and
* object unlocked in case we are kmem_map.
*/
-
if (obj == NULL) {
pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
UVM_PROT_RW);
@@ -449,7 +414,6 @@ uvm_km_kmemalloc_pla(struct vm_map *map, struct uvm_object *obj, vsize_t size,
/*
* uvm_km_free: free an area of kernel memory
*/
-
void
uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size)
{
@@ -462,7 +426,6 @@ uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size)
*
* => XXX: "wanted" bit + unlock&wait on other end?
*/
-
void
uvm_km_free_wakeup(struct vm_map *map, vaddr_t addr, vsize_t size)
{
@@ -483,7 +446,6 @@ uvm_km_free_wakeup(struct vm_map *map, vaddr_t addr, vsize_t size)
*
* => we can sleep if needed
*/
-
vaddr_t
uvm_km_alloc1(struct vm_map *map, vsize_t size, vsize_t align, boolean_t zeroit)
{
@@ -496,26 +458,17 @@ uvm_km_alloc1(struct vm_map *map, vsize_t size, vsize_t align, boolean_t zeroit)
size = round_page(size);
kva = vm_map_min(map); /* hint */
- /*
- * allocate some virtual space
- */
-
+ /* allocate some virtual space */
if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
UVM_UNKNOWN_OFFSET, align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
UVM_INH_NONE, UVM_ADV_RANDOM, 0)) != 0)) {
return(0);
}
- /*
- * recover object offset from virtual address
- */
-
+ /* recover object offset from virtual address */
offset = kva - vm_map_min(kernel_map);
- /*
- * now allocate the memory. we must be careful about released pages.
- */
-
+ /* now allocate the memory. we must be careful about released pages. */
loopva = kva;
while (size) {
/* allocate ram */
@@ -556,7 +509,6 @@ uvm_km_alloc1(struct vm_map *map, vsize_t size, vsize_t align, boolean_t zeroit)
* zero on request (note that "size" is now zero due to the above loop
* so we need to subtract kva from loopva to reconstruct the size).
*/
-
if (zeroit)
memset((caddr_t)kva, 0, loopva - kva);
@@ -591,9 +543,7 @@ uvm_km_valloc_align(struct vm_map *map, vsize_t size, vsize_t align, int flags)
size = round_page(size);
kva = vm_map_min(map); /* hint */
- /*
- * allocate some virtual space. will be demand filled by kernel_object.
- */
+ /* allocate some virtual space, demand filled by kernel_object. */
if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
UVM_UNKNOWN_OFFSET, align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
@@ -611,7 +561,6 @@ uvm_km_valloc_align(struct vm_map *map, vsize_t size, vsize_t align, int flags)
* => if no room in map, wait for space to free, unless requested size
* is larger than map (in which case we return 0)
*/
-
vaddr_t
uvm_km_valloc_prefer_wait(struct vm_map *map, vsize_t size, voff_t prefer)
{
@@ -630,17 +579,13 @@ uvm_km_valloc_prefer_wait(struct vm_map *map, vsize_t size, voff_t prefer)
* allocate some virtual space. will be demand filled
* by kernel_object.
*/
-
if (__predict_true(uvm_map(map, &kva, size, uvm.kernel_object,
prefer, 0, UVM_MAPFLAG(UVM_PROT_ALL,
UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, 0)) == 0)) {
return(kva);
}
- /*
- * failed. sleep for a while (on map)
- */
-
+ /* failed. sleep for a while (on map) */
tsleep(map, PVM, "vallocwait", 0);
}
/*NOTREACHED*/
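
Several of the allocators above begin with size = round_page(size). A rough illustration of that rounding with an assumed 4 KB page (the kernel's page size is set at boot, not a compile-time constant here):

#include <stdio.h>

#define PAGE_SIZE	4096UL		/* assumed for the sketch */
#define PAGE_MASK	(PAGE_SIZE - 1)
#define round_page(x)	(((x) + PAGE_MASK) & ~PAGE_MASK)

int
main(void)
{
	unsigned long sizes[] = { 1, 4096, 4097, 12288 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%lu -> %lu\n", sizes[i], round_page(sizes[i]));
	return (0);
}
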
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index a6bb8bd6c08..5d668e16c4e 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map.c,v 1.166 2014/04/10 01:40:04 tedu Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.167 2014/04/13 23:14:15 tedu Exp $ */
/* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
/*
@@ -200,7 +200,6 @@ void uvm_mapent_forkcopy(struct vmspace*, struct vm_map*,
/*
* Tree validation.
*/
-
#ifdef VMMAP_DEBUG
void uvm_tree_assert(struct vm_map*, int, char*,
char*, int);
@@ -615,9 +614,7 @@ uvm_map_sel_limits(vaddr_t *min, vaddr_t *max, vsize_t sz, int guardpg,
if (sel_min > sel_max)
return ENOMEM;
- /*
- * Correct for bias.
- */
+ /* Correct for bias. */
if (sel_max - sel_min > FSPACE_BIASGAP) {
if (bias > 0) {
sel_min = sel_max - FSPACE_BIASGAP;
@@ -821,9 +818,7 @@ uvm_map_isavail(struct vm_map *map, struct uvm_addr_state *uaddr,
} else
KASSERT(*end_ptr == uvm_map_entrybyaddr(atree, addr + sz - 1));
- /*
- * Validation.
- */
+ /* Validation. */
KDASSERT(*start_ptr != NULL && *end_ptr != NULL);
KDASSERT((*start_ptr)->start <= addr &&
VMMAP_FREE_END(*start_ptr) > addr &&
@@ -885,9 +880,7 @@ uvm_map_findspace(struct vm_map *map, struct vm_map_entry**first,
return 0;
}
- /*
- * Fall back to brk() and stack() address selectors.
- */
+ /* Fall back to brk() and stack() address selectors. */
uaddr = map->uaddr_brk_stack;
if (uvm_addr_invoke(map, uaddr, first, last,
addr, sz, pmap_align, pmap_offset, prot, hint) == 0)
@@ -990,21 +983,15 @@ uvm_map(struct vm_map *map, vaddr_t *addr, vsize_t sz,
if (align == 0 ||
(align <= pmap_align && (pmap_offset & (align - 1)) == 0)) {
- /*
- * pmap_offset satisfies align, no change.
- */
+ /* pmap_offset satisfies align, no change. */
} else {
- /*
- * Align takes precedence over pmap prefer.
- */
+ /* Align takes precedence over pmap prefer. */
pmap_align = align;
pmap_offset = 0;
}
}
- /*
- * Decode parameters.
- */
+ /* Decode parameters. */
prot = UVM_PROTECTION(flags);
maxprot = UVM_MAXPROTECTION(flags);
advice = UVM_ADVICE(flags);
@@ -1015,23 +1002,17 @@ uvm_map(struct vm_map *map, vaddr_t *addr, vsize_t sz,
KASSERT((sz & (vaddr_t)PAGE_MASK) == 0);
KASSERT((align & (align - 1)) == 0);
- /*
- * Holes are incompatible with other types of mappings.
- */
+ /* Holes are incompatible with other types of mappings. */
if (flags & UVM_FLAG_HOLE) {
KASSERT(uobj == NULL && (flags & UVM_FLAG_FIXED) &&
(flags & (UVM_FLAG_OVERLAY | UVM_FLAG_COPYONW)) == 0);
}
- /*
- * Unset hint for kernel_map non-fixed allocations.
- */
+ /* Unset hint for kernel_map non-fixed allocations. */
if (!(map->flags & VM_MAP_ISVMSPACE) && !(flags & UVM_FLAG_FIXED))
hint = 0;
- /*
- * Check protection.
- */
+ /* Check protection. */
if ((prot & maxprot) != prot)
return EACCES;
@@ -1072,9 +1053,7 @@ uvm_map(struct vm_map *map, vaddr_t *addr, vsize_t sz,
*addr + sz - uvm_maxkaddr, flags);
}
- /*
- * Check that the space is available.
- */
+ /* Check that the space is available. */
if (!uvm_map_isavail(map, NULL, &first, &last, *addr, sz)) {
error = ENOMEM;
goto unlock;
@@ -1091,15 +1070,11 @@ uvm_map(struct vm_map *map, vaddr_t *addr, vsize_t sz,
*/
} else if ((maxprot & VM_PROT_EXECUTE) != 0 &&
map->uaddr_exe != NULL) {
- /*
- * Run selection algorithm for executables.
- */
+ /* Run selection algorithm for executables. */
error = uvm_addr_invoke(map, map->uaddr_exe, &first, &last,
addr, sz, pmap_align, pmap_offset, prot, hint);
- /*
- * Grow kernel memory and try again.
- */
+ /* Grow kernel memory and try again. */
if (error != 0 && (map->flags & VM_MAP_ISVMSPACE) == 0) {
uvm_map_kmem_grow(map, &dead, sz, flags);
@@ -1111,18 +1086,14 @@ uvm_map(struct vm_map *map, vaddr_t *addr, vsize_t sz,
if (error != 0)
goto unlock;
} else {
- /*
- * Update freelists from vmspace.
- */
+ /* Update freelists from vmspace. */
if (map->flags & VM_MAP_ISVMSPACE)
uvm_map_vmspace_update(map, &dead, flags);
error = uvm_map_findspace(map, &first, &last, addr, sz,
pmap_align, pmap_offset, prot, hint);
- /*
- * Grow kernel memory and try again.
- */
+ /* Grow kernel memory and try again. */
if (error != 0 && (map->flags & VM_MAP_ISVMSPACE) == 0) {
uvm_map_kmem_grow(map, &dead, sz, flags);
@@ -1137,9 +1108,7 @@ uvm_map(struct vm_map *map, vaddr_t *addr, vsize_t sz,
KASSERT((map->flags & VM_MAP_ISVMSPACE) == VM_MAP_ISVMSPACE ||
uvm_maxkaddr >= *addr + sz);
- /*
- * If we only want a query, return now.
- */
+ /* If we only want a query, return now. */
if (flags & UVM_FLAG_QUERY) {
error = 0;
goto unlock;
@@ -1187,9 +1156,7 @@ uvm_map(struct vm_map *map, vaddr_t *addr, vsize_t sz,
M_WAITOK);
}
- /*
- * Update map and process statistics.
- */
+ /* Update map and process statistics. */
if (!(flags & UVM_FLAG_HOLE)) {
map->size += sz;
if ((map->flags & VM_MAP_ISVMSPACE) && uobj == NULL) {
@@ -1235,36 +1202,26 @@ uvm_mapent_isjoinable(struct vm_map *map, struct vm_map_entry *e1,
{
KDASSERT(e1 != NULL && e2 != NULL);
- /*
- * Must be the same entry type and not have free memory between.
- */
+ /* Must be the same entry type and not have free memory between. */
if (e1->etype != e2->etype || e1->end != e2->start)
return 0;
- /*
- * Submaps are never joined.
- */
+ /* Submaps are never joined. */
if (UVM_ET_ISSUBMAP(e1))
return 0;
- /*
- * Never merge wired memory.
- */
+ /* Never merge wired memory. */
if (VM_MAPENT_ISWIRED(e1) || VM_MAPENT_ISWIRED(e2))
return 0;
- /*
- * Protection, inheritance and advice must be equal.
- */
+ /* Protection, inheritance and advice must be equal. */
if (e1->protection != e2->protection ||
e1->max_protection != e2->max_protection ||
e1->inheritance != e2->inheritance ||
e1->advice != e2->advice)
return 0;
- /*
- * If uvm_object: objects itself and offsets within object must match.
- */
+ /* If uvm_object: object itself and offsets within object must match. */
if (UVM_ET_ISOBJ(e1)) {
if (e1->object.uvm_obj != e2->object.uvm_obj)
return 0;
@@ -1283,9 +1240,7 @@ uvm_mapent_isjoinable(struct vm_map *map, struct vm_map_entry *e1,
if (e2->aref.ar_amap && amap_refs(e2->aref.ar_amap) != 1)
return 0;
- /*
- * Apprently, e1 and e2 match.
- */
+ /* Apparently, e1 and e2 match. */
return 1;
}
@@ -1315,7 +1270,6 @@ uvm_mapent_merge(struct vm_map *map, struct vm_map_entry *e1,
* Don't drop obj reference:
* uvm_unmap_detach will do this for us.
*/
-
free = uvm_map_uaddr_e(map, e1);
uvm_mapent_free_remove(map, free, e1);
@@ -1345,9 +1299,7 @@ uvm_mapent_tryjoin(struct vm_map *map, struct vm_map_entry *entry,
struct vm_map_entry *other;
struct vm_map_entry *merged;
- /*
- * Merge with previous entry.
- */
+ /* Merge with previous entry. */
other = RB_PREV(uvm_map_addr, &map->addr, entry);
if (other && uvm_mapent_isjoinable(map, other, entry)) {
merged = uvm_mapent_merge(map, other, entry, dead);
@@ -1387,18 +1339,14 @@ uvm_unmap_detach(struct uvm_map_deadq *deadq, int flags)
while ((entry = TAILQ_FIRST(deadq)) != NULL) {
if (waitok)
uvm_pause();
- /*
- * Drop reference to amap, if we've got one.
- */
+ /* Drop reference to amap, if we've got one. */
if (entry->aref.ar_amap)
amap_unref(entry->aref.ar_amap,
entry->aref.ar_pageoff,
atop(entry->end - entry->start),
flags & AMAP_REFALL);
- /*
- * Drop reference to our backing object, if we've got one.
- */
+ /* Drop reference to our backing object, if we've got one. */
if (UVM_ET_ISSUBMAP(entry)) {
/* ... unlikely to happen, but play it safe */
uvm_map_deallocate(entry->object.sub_map);
@@ -1408,9 +1356,7 @@ uvm_unmap_detach(struct uvm_map_deadq *deadq, int flags)
entry->object.uvm_obj);
}
- /*
- * Step to next.
- */
+ /* Step to next. */
TAILQ_REMOVE(deadq, entry, dfree.deadq);
uvm_mapent_free(entry);
}
@@ -1445,9 +1391,7 @@ uvm_map_mkentry(struct vm_map *map, struct vm_map_entry *first,
min = addr + sz;
max = VMMAP_FREE_END(last);
- /*
- * Initialize new entry.
- */
+ /* Initialize new entry. */
if (new == NULL)
entry = uvm_mapent_alloc(map, flags);
else
@@ -1465,9 +1409,7 @@ uvm_map_mkentry(struct vm_map *map, struct vm_map_entry *first,
entry->guard = 0;
entry->fspace = 0;
- /*
- * Reset free space in first.
- */
+ /* Reset free space in first. */
free = uvm_map_uaddr_e(map, first);
uvm_mapent_free_remove(map, free, first);
first->guard = 0;
@@ -1486,9 +1428,7 @@ uvm_map_mkentry(struct vm_map *map, struct vm_map_entry *first,
uvm_mapent_addr_remove(map, last);
DEAD_ENTRY_PUSH(dead, last);
}
- /*
- * Remove first if it is entirely inside <addr, addr+sz>.
- */
+ /* Remove first if it is entirely inside <addr, addr+sz>. */
if (first->start == addr) {
uvm_mapent_addr_remove(map, first);
DEAD_ENTRY_PUSH(dead, first);
@@ -1497,9 +1437,7 @@ uvm_map_mkentry(struct vm_map *map, struct vm_map_entry *first,
addr, flags);
}
- /*
- * Finally, link in entry.
- */
+ /* Finally, link in entry. */
uvm_mapent_addr_insert(map, entry);
uvm_map_fix_space(map, entry, min, max, flags);
@@ -1689,9 +1627,8 @@ uvm_mapent_mkfree(struct vm_map *map, struct vm_map_entry *entry,
if (prev == NULL ||
VMMAP_FREE_END(prev) != entry->start)
prev = RB_PREV(uvm_map_addr, &map->addr, entry);
- /*
- * Entry is describing only free memory and has nothing to drain into.
- */
+
+ /* Entry is describing only free memory and has nothing to drain into. */
if (prev == NULL && entry->start == entry->end && markfree) {
*prev_ptr = entry;
return;
@@ -1719,21 +1656,15 @@ uvm_mapent_mkfree(struct vm_map *map, struct vm_map_entry *entry,
void
uvm_unmap_kill_entry(struct vm_map *map, struct vm_map_entry *entry)
{
- /*
- * Unwire removed map entry.
- */
+ /* Unwire removed map entry. */
if (VM_MAPENT_ISWIRED(entry)) {
entry->wired_count = 0;
uvm_fault_unwire_locked(map, entry->start, entry->end);
}
- /*
- * Entry-type specific code.
- */
+ /* Entry-type specific code. */
if (UVM_ET_ISHOLE(entry)) {
- /*
- * Nothing to be done for holes.
- */
+ /* Nothing to be done for holes. */
} else if (map->flags & VM_MAP_INTRSAFE) {
KASSERT(vm_map_pmap(map) == pmap_kernel());
uvm_km_pgremove_intrsafe(entry->start, entry->end);
@@ -1741,7 +1672,6 @@ uvm_unmap_kill_entry(struct vm_map *map, struct vm_map_entry *entry)
} else if (UVM_ET_ISOBJ(entry) &&
UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
KASSERT(vm_map_pmap(map) == pmap_kernel());
-
/*
* Note: kernel object mappings are currently used in
* two ways:
@@ -1771,7 +1701,6 @@ uvm_unmap_kill_entry(struct vm_map *map, struct vm_map_entry *entry)
* note there is version of uvm_km_pgremove() that
* is used for "intrsafe" objects.
*/
-
/*
* remove mappings from pmap and drop the pages
* from the object. offsets are always relative
@@ -1789,9 +1718,7 @@ uvm_unmap_kill_entry(struct vm_map *map, struct vm_map_entry *entry)
entry->etype &= ~UVM_ET_OBJ;
entry->object.uvm_obj = NULL; /* to be safe */
} else {
- /*
- * remove mappings the standard way.
- */
+ /* remove mappings the standard way. */
pmap_remove(map->pmap, entry->start, entry->end);
}
}
@@ -1820,9 +1747,7 @@ uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
else
splassert(IPL_VM);
- /*
- * Find first affected entry.
- */
+ /* Find first affected entry. */
entry = uvm_map_entrybyaddr(&map->addr, start);
KDASSERT(entry != NULL && entry->start <= start);
if (entry->end <= start && markfree)
@@ -1853,9 +1778,7 @@ uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
/* Kill entry. */
uvm_unmap_kill_entry(map, entry);
- /*
- * Update space usage.
- */
+ /* Update space usage. */
if ((map->flags & VM_MAP_ISVMSPACE) &&
entry->object.uvm_obj == NULL &&
!UVM_ET_ISHOLE(entry)) {
@@ -1865,9 +1788,7 @@ uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
if (!UVM_ET_ISHOLE(entry))
map->size -= entry->end - entry->start;
- /*
- * Actual removal of entry.
- */
+ /* Actual removal of entry. */
uvm_mapent_mkfree(map, entry, &prev_hint, dead, markfree);
}
@@ -2020,9 +1941,7 @@ uvm_map_pageable_wire(struct vm_map *map, struct vm_map_entry *first,
}
}
- /*
- * decrease counter in the rest of the entries
- */
+ /* decrease counter in the rest of the entries */
for (; iter != end;
iter = RB_NEXT(uvm_map_addr, &map->addr, iter)) {
if (UVM_ET_ISHOLE(iter) || iter->start == iter->end ||
@@ -2037,9 +1956,7 @@ uvm_map_pageable_wire(struct vm_map *map, struct vm_map_entry *first,
return error;
}
- /*
- * We are currently holding a read lock.
- */
+ /* We are currently holding a read lock. */
if ((lockflags & UVM_LK_EXIT) == 0) {
vm_map_unbusy(map);
vm_map_unlock_read(map);
@@ -2102,9 +2019,7 @@ uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
goto out;
}
- /*
- * Check that the range has no holes.
- */
+ /* Check that the range has no holes. */
for (last = first; last != NULL && last->start < end;
last = RB_NEXT(uvm_map_addr, &map->addr, last)) {
if (UVM_ET_ISHOLE(last) ||
@@ -2135,9 +2050,7 @@ uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
last = RB_PREV(uvm_map_addr, &map->addr, last);
}
- /*
- * Wire/unwire pages here.
- */
+ /* Wire/unwire pages here. */
if (new_pageable) {
/*
* Mark pageable.
@@ -2294,9 +2207,7 @@ uvm_map_setup(struct vm_map *map, vaddr_t min, vaddr_t max, int flags)
map->timestamp = 0;
rw_init(&map->lock, "vmmaplk");
- /*
- * Configure the allocators.
- */
+ /* Configure the allocators. */
if (flags & VM_MAP_ISVMSPACE)
uvm_map_setup_md(map);
else
@@ -2339,9 +2250,7 @@ uvm_map_teardown(struct vm_map *map)
panic("uvm_map_teardown: rw_enter failed on free map");
}
- /*
- * Remove address selectors.
- */
+ /* Remove address selectors. */
uvm_addr_destroy(map->uaddr_exe);
map->uaddr_exe = NULL;
for (i = 0; i < nitems(map->uaddr_any); i++) {
@@ -2539,21 +2448,16 @@ uvm_tree_sanity(struct vm_map *map, char *file, int line)
*/
UVM_ASSERT(map, iter->end >= iter->start, file, line);
UVM_ASSERT(map, VMMAP_FREE_END(iter) >= iter->end, file, line);
- /*
- * May not be empty.
- */
+
+ /* May not be empty. */
UVM_ASSERT(map, iter->start < VMMAP_FREE_END(iter),
file, line);
- /*
- * Addresses for entry must lie within map boundaries.
- */
+ /* Addresses for entry must lie within map boundaries. */
UVM_ASSERT(map, iter->start >= vm_map_min(map) &&
VMMAP_FREE_END(iter) <= vm_map_max(map), file, line);
- /*
- * Tree may not have gaps.
- */
+ /* Tree may not have gaps. */
UVM_ASSERT(map, iter->start == addr, file, line);
addr = VMMAP_FREE_END(iter);
@@ -2677,10 +2581,7 @@ uvm_map_init(void)
static struct vm_map_entry kernel_map_entry[MAX_KMAPENT];
int lcv;
- /*
- * now set up static pool of kernel map entries ...
- */
-
+ /* now set up static pool of kernel map entries ... */
uvm.kentry_free = NULL;
for (lcv = 0 ; lcv < MAX_KMAPENT ; lcv++) {
RB_LEFT(&kernel_map_entry[lcv], daddrs.addr_entry) =
@@ -2688,9 +2589,7 @@ uvm_map_init(void)
uvm.kentry_free = &kernel_map_entry[lcv];
}
- /*
- * initialize the map-related pools.
- */
+ /* initialize the map-related pools. */
pool_init(&uvm_vmspace_pool, sizeof(struct vmspace),
0, 0, 0, "vmsppl", &pool_allocator_nointr);
pool_init(&uvm_map_entry_pool, sizeof(struct vm_map_entry),
@@ -2737,9 +2636,7 @@ uvm_map_printit(struct vm_map *map, boolean_t full,
(*pr)("\tpmap=%p(resident=<<NOT SUPPORTED!!!>>)\n", map->pmap);
#endif
- /*
- * struct vmspace handling.
- */
+ /* struct vmspace handling. */
if (map->flags & VM_MAP_ISVMSPACE) {
vm = (struct vmspace *)map;
@@ -2959,9 +2856,7 @@ uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
if (first->end < start)
first = RB_NEXT(uvm_map_addr, &map->addr, first);
- /*
- * First, check for protection violations.
- */
+ /* First, check for protection violations. */
for (iter = first; iter != NULL && iter->start < end;
iter = RB_NEXT(uvm_map_addr, &map->addr, iter)) {
/* Treat memory holes as free space. */
@@ -2978,9 +2873,7 @@ uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
}
}
- /*
- * Fix protections.
- */
+ /* Fix protections. */
for (iter = first; iter != NULL && iter->start < end;
iter = RB_NEXT(uvm_map_addr, &map->addr, iter)) {
/* Treat memory holes as free space. */
@@ -3153,10 +3046,7 @@ uvmspace_exec(struct proc *p, vaddr_t start, vaddr_t end)
pmap_unuse_final(p); /* before stack addresses go away */
TAILQ_INIT(&dead_entries);
- /*
- * see if more than one process is using this vmspace...
- */
-
+ /* see if more than one process is using this vmspace... */
if (ovm->vm_refcnt == 1) {
/*
* if p is the only process using its vmspace then we can safely
@@ -3193,9 +3083,7 @@ uvmspace_exec(struct proc *p, vaddr_t start, vaddr_t end)
KDASSERT(RB_EMPTY(&map->addr));
- /*
- * Nuke statistics and boundaries.
- */
+ /* Nuke statistics and boundaries. */
bzero(&ovm->vm_startcopy,
(caddr_t) (ovm + 1) - (caddr_t) &ovm->vm_startcopy);
@@ -3206,21 +3094,15 @@ uvmspace_exec(struct proc *p, vaddr_t start, vaddr_t end)
end -= PAGE_SIZE;
}
- /*
- * Setup new boundaries and populate map with entries.
- */
+ /* Setup new boundaries and populate map with entries. */
map->min_offset = start;
map->max_offset = end;
uvm_map_setup_entries(map);
vm_map_unlock(map);
- /*
- * but keep MMU holes unavailable
- */
+ /* but keep MMU holes unavailable */
pmap_remove_holes(map);
-
} else {
-
/*
* p's vmspace is being shared, so we can't reuse it for p since
* it is still being used for others. allocate a new vmspace
@@ -3229,10 +3111,7 @@ uvmspace_exec(struct proc *p, vaddr_t start, vaddr_t end)
nvm = uvmspace_alloc(start, end,
(map->flags & VM_MAP_PAGEABLE) ? TRUE : FALSE, TRUE);
- /*
- * install new vmspace and drop our ref to the old one.
- */
-
+ /* install new vmspace and drop our ref to the old one. */
pmap_deactivate(p);
p->p_vmspace = nvm;
pmap_activate(p);
@@ -3240,9 +3119,7 @@ uvmspace_exec(struct proc *p, vaddr_t start, vaddr_t end)
uvmspace_free(ovm);
}
- /*
- * Release dead entries
- */
+ /* Release dead entries */
uvm_unmap_detach(&dead_entries, 0);
}
@@ -3251,7 +3128,6 @@ uvmspace_exec(struct proc *p, vaddr_t start, vaddr_t end)
*
* - XXX: no locking on vmspace
*/
-
void
uvmspace_free(struct vmspace *vm)
{
@@ -3288,10 +3164,7 @@ uvm_mapent_clone(struct vm_map *dstmap, vaddr_t dstaddr, vsize_t dstlen,
KDASSERT(!UVM_ET_ISSUBMAP(old_entry));
- /*
- * Create new entry (linked in on creation).
- * Fill in first, last.
- */
+ /* Create new entry (linked in on creation). Fill in first, last. */
first = last = NULL;
if (!uvm_map_isavail(dstmap, NULL, &first, &last, dstaddr, dstlen)) {
panic("uvmspace_fork: no space in map for "
@@ -3311,10 +3184,7 @@ uvm_mapent_clone(struct vm_map *dstmap, vaddr_t dstaddr, vsize_t dstlen,
new_entry->inheritance = old_entry->inheritance;
new_entry->advice = old_entry->advice;
- /*
- * gain reference to object backing the map (can't
- * be a submap).
- */
+ /* gain reference to object backing the map (can't be a submap). */
if (new_entry->aref.ar_amap) {
new_entry->aref.ar_pageoff += off >> PAGE_SHIFT;
amap_ref(new_entry->aref.ar_amap, new_entry->aref.ar_pageoff,
@@ -3370,9 +3240,7 @@ uvm_mapent_forkshared(struct vmspace *new_vm, struct vm_map *new_map,
pmap_copy(new_map->pmap, old_map->pmap, new_entry->start,
(new_entry->end - new_entry->start), new_entry->start);
- /*
- * Update process statistics.
- */
+ /* Update process statistics. */
if (!UVM_ET_ISHOLE(new_entry))
new_map->size += new_entry->end - new_entry->start;
if (!UVM_ET_ISOBJ(new_entry) && !UVM_ET_ISHOLE(new_entry)) {
@@ -3434,7 +3302,6 @@ uvm_mapent_forkcopy(struct vmspace *new_vm, struct vm_map *new_map,
* clear it here as well.
*
*/
-
if (old_entry->aref.ar_amap != NULL &&
((amap_flags(old_entry->aref.ar_amap) &
AMAP_SHARED) != 0 ||
@@ -3454,9 +3321,7 @@ uvm_mapent_forkcopy(struct vmspace *new_vm, struct vm_map *new_map,
* amap_cow_now. note that we have already
* allocated any needed amap (above).
*/
-
if (VM_MAPENT_ISWIRED(old_entry)) {
-
/*
* resolve all copy-on-write faults now
* (note that there is nothing to do if
@@ -3466,10 +3331,8 @@ uvm_mapent_forkcopy(struct vmspace *new_vm, struct vm_map *new_map,
*/
if (old_entry->aref.ar_amap)
amap_cow_now(new_map, new_entry);
-
} else {
if (old_entry->aref.ar_amap) {
-
/*
* setup mappings to trigger copy-on-write faults
* we must write-protect the parent if it has
@@ -3499,12 +3362,9 @@ uvm_mapent_forkcopy(struct vmspace *new_vm, struct vm_map *new_map,
old_entry->etype |= UVM_ET_NEEDSCOPY;
}
- /*
- * parent must now be write-protected
- */
+ /* parent must now be write-protected */
protect_child = FALSE;
} else {
-
/*
* we only need to protect the child if the
* parent has write access.
@@ -3513,23 +3373,18 @@ uvm_mapent_forkcopy(struct vmspace *new_vm, struct vm_map *new_map,
protect_child = TRUE;
else
protect_child = FALSE;
-
}
-
/*
* copy the mappings
* XXX: need a way to tell if this does anything
*/
-
if (!UVM_ET_ISHOLE(new_entry))
pmap_copy(new_map->pmap, old_map->pmap,
new_entry->start,
(old_entry->end - old_entry->start),
old_entry->start);
- /*
- * protect the child's mappings if necessary
- */
+ /* protect the child's mappings if necessary */
if (protect_child) {
pmap_protect(new_map->pmap, new_entry->start,
new_entry->end,
@@ -3538,9 +3393,7 @@ uvm_mapent_forkcopy(struct vmspace *new_vm, struct vm_map *new_map,
}
}
- /*
- * Update process statistics.
- */
+ /* Update process statistics. */
if (!UVM_ET_ISHOLE(new_entry))
new_map->size += new_entry->end - new_entry->start;
if (!UVM_ET_ISOBJ(new_entry) && !UVM_ET_ISHOLE(new_entry)) {
@@ -3574,18 +3427,13 @@ uvmspace_fork(struct vmspace *vm1)
new_map = &vm2->vm_map;
vm_map_lock(new_map);
- /*
- * go entry-by-entry
- */
-
+ /* go entry-by-entry */
TAILQ_INIT(&dead);
RB_FOREACH(old_entry, uvm_map_addr, &old_map->addr) {
if (old_entry->start == old_entry->end)
continue;
- /*
- * first, some sanity checks on the old entry
- */
+ /* first, some sanity checks on the old entry */
if (UVM_ET_ISSUBMAP(old_entry)) {
panic("fork: encountered a submap during fork "
"(illegal)");
@@ -3597,9 +3445,7 @@ uvmspace_fork(struct vmspace *vm1)
"needs_copy (illegal)");
}
- /*
- * Apply inheritance.
- */
+ /* Apply inheritance. */
if (old_entry->inheritance == MAP_INHERIT_SHARE) {
uvm_mapent_forkshared(vm2, new_map,
old_map, old_entry, &dead);
@@ -3744,16 +3590,12 @@ uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
for (entry = uvm_map_entrybyaddr(&map->addr, start);
entry != NULL && entry->start < end;
entry = RB_NEXT(uvm_map_addr, &map->addr, entry)) {
- /*
- * Fail if a hole is found.
- */
+ /* Fail if a hole is found. */
if (UVM_ET_ISHOLE(entry) ||
(entry->end < end && entry->end != VMMAP_FREE_END(entry)))
return FALSE;
- /*
- * Check protection.
- */
+ /* Check protection. */
if ((entry->protection & protection) != protection)
return FALSE;
}
@@ -3796,7 +3638,6 @@ uvm_map_deallocate(vm_map_t map)
*
* No lock required: we are only one to access this map.
*/
-
TAILQ_INIT(&dead);
uvm_tree_sanity(map, __FILE__, __LINE__);
uvm_unmap_remove(map, map->min_offset, map->max_offset, &dead,
@@ -3892,7 +3733,6 @@ uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice)
/*
* XXXJRT: disallow holes?
*/
-
while (entry != NULL && entry->start < end) {
UVM_MAP_CLIP_END(map, entry, end);
entry->advice = new_advice;
@@ -3935,34 +3775,23 @@ uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
* Also, since the mapping may not contain gaps, error out if the
* mapped area is not in source map.
*/
-
if ((start & (vaddr_t)PAGE_MASK) != 0 ||
(end & (vaddr_t)PAGE_MASK) != 0 || end < start)
return EINVAL;
if (start < srcmap->min_offset || end > srcmap->max_offset)
return EINVAL;
- /*
- * Initialize dead entries.
- * Handle len == 0 case.
- */
-
+ /* Initialize dead entries. Handle len == 0 case. */
if (len == 0)
return 0;
- /*
- * Acquire lock on srcmap.
- */
+ /* Acquire lock on srcmap. */
vm_map_lock(srcmap);
- /*
- * Lock srcmap, lookup first and last entry in <start,len>.
- */
+ /* Lock srcmap, lookup first and last entry in <start,len>. */
first = uvm_map_entrybyaddr(&srcmap->addr, start);
- /*
- * Check that the range is contiguous.
- */
+ /* Check that the range is contiguous. */
for (entry = first; entry != NULL && entry->end < end;
entry = RB_NEXT(uvm_map_addr, &map->addr, entry)) {
if (VMMAP_FREE_END(entry) != entry->end ||
@@ -3996,9 +3825,7 @@ uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
}
}
- /*
- * Lock destination map (kernel_map).
- */
+ /* Lock destination map (kernel_map). */
vm_map_lock(kernel_map);
if (uvm_map_findspace(kernel_map, &tmp1, &tmp2, &dstaddr, len,
@@ -4013,19 +3840,14 @@ uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
* We now have srcmap and kernel_map locked.
* dstaddr contains the destination offset in dstmap.
*/
-
- /*
- * step 1: start looping through map entries, performing extraction.
- */
+ /* step 1: start looping through map entries, performing extraction. */
for (entry = first; entry != NULL && entry->start < end;
entry = RB_NEXT(uvm_map_addr, &map->addr, entry)) {
KDASSERT(!UVM_ET_ISNEEDSCOPY(entry));
if (UVM_ET_ISHOLE(entry))
continue;
- /*
- * Calculate uvm_mapent_clone parameters.
- */
+ /* Calculate uvm_mapent_clone parameters. */
cp_start = entry->start;
if (cp_start < start) {
cp_off = start - cp_start;
@@ -4056,18 +3878,14 @@ uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
error = 0;
- /*
- * Unmap copied entries on failure.
- */
+ /* Unmap copied entries on failure. */
fail2_unmap:
if (error) {
uvm_unmap_remove(kernel_map, dstaddr, dstaddr + len, &dead,
FALSE, TRUE);
}
- /*
- * Release maps, release dead entries.
- */
+ /* Release maps, release dead entries. */
fail2:
vm_map_unlock(kernel_map);
@@ -4094,7 +3912,6 @@ fail:
* => caller must not write-lock map (read OK).
* => we may sleep while cleaning if SYNCIO [with map read-locked]
*/
-
int amap_clean_works = 1; /* XXX for now, just in case... */
int
@@ -4119,9 +3936,7 @@ uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
vm_map_lock_read(map);
first = uvm_map_entrybyaddr(&map->addr, start);
- /*
- * Make a first pass to check for holes.
- */
+ /* Make a first pass to check for holes. */
for (entry = first; entry->start < end;
entry = RB_NEXT(uvm_map_addr, &map->addr, entry)) {
if (UVM_ET_ISSUBMAP(entry)) {
@@ -4209,9 +4024,7 @@ deactivate_it:
uvm_unlock_pageq();
break;
-
case PGO_FREE:
-
/*
* If there are multiple references to
* the amap, just deactivate the page.
@@ -4229,7 +4042,6 @@ deactivate_it:
if (refs == 0)
uvm_anfree(anon);
break;
-
default:
panic("uvm_map_clean: weird flags");
}
@@ -4274,9 +4086,7 @@ uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t addr)
KASSERT(entry->start < addr && VMMAP_FREE_END(entry) > addr);
tmp = uvm_mapent_alloc(map, 0);
- /*
- * Invoke splitentry.
- */
+ /* Invoke splitentry. */
uvm_map_splitentry(map, entry, tmp, addr);
}
@@ -4507,9 +4317,7 @@ uvm_map_kmem_grow(struct vm_map *map, struct uvm_map_deadq *dead,
/* Destroy free list. */
uvm_map_freelist_update_clear(map, dead);
- /*
- * Include the guard page in the hard minimum requirement of alloc_sz.
- */
+ /* Include the guard page in the hard minimum requirement of alloc_sz. */
if (map->flags & VM_MAP_GUARDPAGES)
alloc_sz += PAGE_SIZE;
@@ -4669,9 +4477,7 @@ uvm_map_fix_space(struct vm_map *map, struct vm_map_entry *entry,
uvm_map_uaddr_e(map, entry));
while (min != max) {
- /*
- * Claim guard page for entry.
- */
+ /* Claim guard page for entry. */
if ((map->flags & VM_MAP_GUARDPAGES) && entry != NULL &&
VMMAP_FREE_END(entry) == entry->end &&
entry->start != entry->end) {
@@ -4776,9 +4582,7 @@ uvm_map_mquery(struct vm_map *map, vaddr_t *addr_p, vsize_t sz, voff_t offset,
addr = *addr_p;
vm_map_lock_read(map);
- /*
- * Configure pmap prefer.
- */
+ /* Configure pmap prefer. */
if (offset != UVM_UNKNOWN_OFFSET) {
pmap_align = MAX(PAGE_SIZE, PMAP_PREFER_ALIGN());
pmap_offset = PMAP_PREFER_OFFSET(offset);
@@ -4787,9 +4591,7 @@ uvm_map_mquery(struct vm_map *map, vaddr_t *addr_p, vsize_t sz, voff_t offset,
pmap_offset = 0;
}
- /*
- * Align address to pmap_prefer unless FLAG_FIXED is set.
- */
+ /* Align address to pmap_prefer unless FLAG_FIXED is set. */
if (!(flags & UVM_FLAG_FIXED) && offset != UVM_UNKNOWN_OFFSET) {
tmp = (addr & ~(pmap_align - 1)) | pmap_offset;
if (tmp < addr)
@@ -4797,9 +4599,7 @@ uvm_map_mquery(struct vm_map *map, vaddr_t *addr_p, vsize_t sz, voff_t offset,
addr = tmp;
}
- /*
- * First, check if the requested range is fully available.
- */
+ /* First, check if the requested range is fully available. */
entry = uvm_map_entrybyaddr(&map->addr, addr);
last = NULL;
if (uvm_map_isavail(map, NULL, &entry, &last, addr, sz)) {
@@ -4824,7 +4624,6 @@ uvm_map_mquery(struct vm_map *map, vaddr_t *addr_p, vsize_t sz, voff_t offset,
* Note that for case [2], the forward moving is handled by the
* for loop below.
*/
-
if (entry == NULL) {
/* [1] Outside the map. */
if (addr >= map->max_offset)
@@ -4836,9 +4635,7 @@ uvm_map_mquery(struct vm_map *map, vaddr_t *addr_p, vsize_t sz, voff_t offset,
entry = RB_NEXT(uvm_map_addr, &map->addr, entry);
}
- /*
- * Test if the next entry is sufficient for the allocation.
- */
+ /* Test if the next entry is sufficient for the allocation. */
for (; entry != NULL;
entry = RB_NEXT(uvm_map_addr, &map->addr, entry)) {
if (entry->fspace == 0)
@@ -4846,7 +4643,6 @@ uvm_map_mquery(struct vm_map *map, vaddr_t *addr_p, vsize_t sz, voff_t offset,
addr = VMMAP_FREE_START(entry);
restart: /* Restart address checks on address change. */
-
tmp = (addr & ~(pmap_align - 1)) | pmap_offset;
if (tmp < addr)
tmp += pmap_align;
@@ -4854,9 +4650,7 @@ restart: /* Restart address checks on address change. */
if (addr >= VMMAP_FREE_END(entry))
continue;
- /*
- * Skip brk() allocation addresses.
- */
+ /* Skip brk() allocation addresses. */
if (addr + sz > map->b_start && addr < map->b_end) {
if (VMMAP_FREE_END(entry) > map->b_end) {
addr = map->b_end;
@@ -4864,9 +4658,7 @@ restart: /* Restart address checks on address change. */
} else
continue;
}
- /*
- * Skip stack allocation addresses.
- */
+ /* Skip stack allocation addresses. */
if (addr + sz > map->s_start && addr < map->s_end) {
if (VMMAP_FREE_END(entry) > map->s_end) {
addr = map->s_end;
@@ -4905,14 +4697,10 @@ uvm_mapent_bias(struct vm_map *map, struct vm_map_entry *entry)
start = VMMAP_FREE_START(entry);
end = VMMAP_FREE_END(entry);
- /*
- * Stay at the top of brk() area.
- */
+ /* Stay at the top of brk() area. */
if (end >= map->b_start && start < map->b_end)
return 1;
- /*
- * Stay at the far end of the stack area.
- */
+ /* Stay at the far end of the stack area. */
if (end >= map->s_start && start < map->s_end) {
#ifdef MACHINE_STACK_GROWS_UP
return 1;
@@ -4921,9 +4709,7 @@ uvm_mapent_bias(struct vm_map *map, struct vm_map_entry *entry)
#endif
}
- /*
- * No bias, this area is meant for us.
- */
+ /* No bias, this area is meant for us. */
return 0;
}
@@ -5054,7 +4840,6 @@ RB_GENERATE(uvm_map_addr, vm_map_entry, daddrs.addr_entry,
* MD code: vmspace allocator setup.
*/
-
#ifdef __i386__
void
uvm_map_setup_md(struct vm_map *map)
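
uvm_mapent_isjoinable above only merges entries that are adjacent, unwired, and agree on protection, inheritance and advice. A toy version of that test on a simplified struct of my own (the real check also compares backing objects, offsets and amap references):

#include <stdio.h>

/* toy map entry: only the fields the joinability test looks at here */
struct toy_entry {
	unsigned long start, end;
	int prot, maxprot, inherit, advice;
	int wired;
};

static int
toy_isjoinable(const struct toy_entry *e1, const struct toy_entry *e2)
{
	if (e1->end != e2->start)		/* must be adjacent, no gap */
		return (0);
	if (e1->wired || e2->wired)		/* never merge wired memory */
		return (0);
	return (e1->prot == e2->prot &&		/* attributes must match */
	    e1->maxprot == e2->maxprot &&
	    e1->inherit == e2->inherit &&
	    e1->advice == e2->advice);
}

int
main(void)
{
	struct toy_entry a = { 0x1000, 0x2000, 3, 7, 0, 0, 0 };
	struct toy_entry b = { 0x2000, 0x3000, 3, 7, 0, 0, 0 };

	printf("joinable: %d\n", toy_isjoinable(&a, &b));
	return (0);
}
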
diff --git a/sys/uvm/uvm_meter.c b/sys/uvm/uvm_meter.c
index f592c8277e3..a4ea174aa40 100644
--- a/sys/uvm/uvm_meter.c
+++ b/sys/uvm/uvm_meter.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_meter.c,v 1.30 2013/03/23 16:12:31 deraadt Exp $ */
+/* $OpenBSD: uvm_meter.c,v 1.31 2014/04/13 23:14:15 tedu Exp $ */
/* $NetBSD: uvm_meter.c,v 1.21 2001/07/14 06:36:03 matt Exp $ */
/*
@@ -82,7 +82,6 @@ static fixpt_t cexp[3] = {
/*
* prototypes
*/
-
static void uvm_loadav(struct loadavg *);
/*
@@ -258,10 +257,7 @@ uvm_total(struct vmtotal *totalp)
memset(totalp, 0, sizeof *totalp);
- /*
- * calculate process statistics
- */
-
+ /* calculate process statistics */
LIST_FOREACH(p, &allproc, p_list) {
if (p->p_flag & P_SYSTEM)
continue;
@@ -278,7 +274,6 @@ uvm_total(struct vmtotal *totalp)
if (p->p_slptime >= maxslp)
continue;
break;
-
case SRUN:
case SIDL:
case SONPROC:
diff --git a/sys/uvm/uvm_mmap.c b/sys/uvm/uvm_mmap.c
index a8e6511b2b6..d6d3899102c 100644
--- a/sys/uvm/uvm_mmap.c
+++ b/sys/uvm/uvm_mmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_mmap.c,v 1.93 2013/05/30 16:29:46 tedu Exp $ */
+/* $OpenBSD: uvm_mmap.c,v 1.94 2014/04/13 23:14:15 tedu Exp $ */
/* $NetBSD: uvm_mmap.c,v 1.49 2001/02/18 21:19:08 chs Exp $ */
/*
@@ -100,7 +100,6 @@
* fd: fd of the file we want to map
* off: offset within the file
*/
-
int
sys_mquery(struct proc *p, void *v, register_t *retval)
{
@@ -161,7 +160,6 @@ sys_mquery(struct proc *p, void *v, register_t *retval)
/*
* sys_mincore: determine if pages are in core or not.
*/
-
/* ARGSUSED */
int
sys_mincore(struct proc *p, void *v, register_t *retval)
@@ -306,7 +304,6 @@ sys_mincore(struct proc *p, void *v, register_t *retval)
* - if address isn't page aligned the mapping starts at trunc_page(addr)
* and the return value is adjusted up by the page offset.
*/
-
int
sys_mmap(struct proc *p, void *v, register_t *retval)
{
@@ -332,10 +329,7 @@ sys_mmap(struct proc *p, void *v, register_t *retval)
caddr_t handle;
int error;
- /*
- * first, extract syscall args from the uap.
- */
-
+ /* first, extract syscall args from the uap. */
addr = (vaddr_t) SCARG(uap, addr);
size = (vsize_t) SCARG(uap, len);
prot = SCARG(uap, prot);
@@ -360,17 +354,11 @@ sys_mmap(struct proc *p, void *v, register_t *retval)
if (size == 0)
return (EINVAL);
- /*
- * align file position and save offset. adjust size.
- */
+ /* align file position and save offset. adjust size. */
ALIGN_ADDR(pos, size, pageoff);
- /*
- * now check (MAP_FIXED) or get (!MAP_FIXED) the "addr"
- */
-
+ /* now check (MAP_FIXED) or get (!MAP_FIXED) the "addr" */
if (flags & MAP_FIXED) {
-
/* adjust address by the same amount as we did the offset */
addr -= pageoff;
if (addr & PAGE_MASK)
@@ -386,11 +374,8 @@ sys_mmap(struct proc *p, void *v, register_t *retval)
}
- /*
- * check for file mappings (i.e. not anonymous) and verify file.
- */
+ /* check for file mappings (i.e. not anonymous) and verify file. */
if ((flags & MAP_ANON) == 0) {
-
if ((fp = fd_getfile(fdp, fd)) == NULL)
return (EBADF);
@@ -449,10 +434,7 @@ sys_mmap(struct proc *p, void *v, register_t *retval)
flags = (flags & ~MAP_PRIVATE) | MAP_SHARED;
}
- /*
- * now check protection
- */
-
+ /* now check protection */
maxprot = VM_PROT_EXECUTE;
/* check read access */
@@ -490,12 +472,8 @@ sys_mmap(struct proc *p, void *v, register_t *retval)
maxprot |= VM_PROT_WRITE;
}
- /*
- * set handle to vnode
- */
-
+ /* set handle to vnode */
handle = (caddr_t)vp;
-
} else { /* MAP_ANON case */
/*
* XXX What do we do about (MAP_SHARED|MAP_PRIVATE) == 0?
@@ -505,7 +483,7 @@ sys_mmap(struct proc *p, void *v, register_t *retval)
goto out;
}
- is_anon: /* label for SunOS style /dev/zero */
+is_anon: /* label for SunOS style /dev/zero */
handle = NULL;
maxprot = VM_PROT_ALL;
pos = 0;
@@ -520,10 +498,7 @@ sys_mmap(struct proc *p, void *v, register_t *retval)
}
}
- /*
- * now let kernel internal function uvm_mmap do the work.
- */
-
+ /* now let kernel internal function uvm_mmap do the work. */
error = uvm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
flags, handle, pos, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur, p);
@@ -554,10 +529,7 @@ sys_msync(struct proc *p, void *v, register_t *retval)
vm_map_t map;
int flags, uvmflags;
- /*
- * extract syscall args from the uap
- */
-
+ /* extract syscall args from the uap */
addr = (vaddr_t)SCARG(uap, addr);
size = (vsize_t)SCARG(uap, len);
flags = SCARG(uap, flags);
@@ -570,22 +542,15 @@ sys_msync(struct proc *p, void *v, register_t *retval)
if ((flags & (MS_ASYNC | MS_SYNC)) == 0)
flags |= MS_SYNC;
- /*
- * align the address to a page boundary, and adjust the size accordingly
- */
+ /* align the address to a page boundary, and adjust the size accordingly */
ALIGN_ADDR(addr, size, pageoff);
if (addr > SIZE_MAX - size)
return (EINVAL); /* disallow wrap-around. */
- /*
- * get map
- */
-
+ /* get map */
map = &p->p_vmspace->vm_map;
- /*
- * translate MS_ flags into PGO_ flags
- */
+ /* translate MS_ flags into PGO_ flags */
uvmflags = PGO_CLEANIT;
if (flags & MS_INVALIDATE)
uvmflags |= PGO_FREE;
@@ -600,7 +565,6 @@ sys_msync(struct proc *p, void *v, register_t *retval)
/*
* sys_munmap: unmap a users memory
*/
-
int
sys_munmap(struct proc *p, void *v, register_t *retval)
{
@@ -614,16 +578,11 @@ sys_munmap(struct proc *p, void *v, register_t *retval)
vaddr_t vm_min_address = VM_MIN_ADDRESS;
struct uvm_map_deadq dead_entries;
- /*
- * get syscall args...
- */
-
+ /* get syscall args... */
addr = (vaddr_t) SCARG(uap, addr);
size = (vsize_t) SCARG(uap, len);
- /*
- * align the address to a page boundary, and adjust the size accordingly
- */
+ /* align address to a page boundary, and adjust size accordingly */
ALIGN_ADDR(addr, size, pageoff);
/*
@@ -645,15 +604,11 @@ sys_munmap(struct proc *p, void *v, register_t *retval)
* interesting system call semantic: make sure entire range is
* allocated before allowing an unmap.
*/
-
if (!uvm_map_checkprot(map, addr, addr + size, VM_PROT_NONE)) {
vm_map_unlock(map);
return (EINVAL);
}
- /*
- * doit!
- */
TAILQ_INIT(&dead_entries);
uvm_unmap_remove(map, addr, addr + size, &dead_entries, FALSE, TRUE);
@@ -667,7 +622,6 @@ sys_munmap(struct proc *p, void *v, register_t *retval)
/*
* sys_mprotect: the mprotect system call
*/
-
int
sys_mprotect(struct proc *p, void *v, register_t *retval)
{
@@ -705,7 +659,6 @@ sys_mprotect(struct proc *p, void *v, register_t *retval)
/*
* sys_minherit: the minherit system call
*/
-
int
sys_minherit(struct proc *p, void *v, register_t *retval)
{
@@ -736,7 +689,6 @@ sys_minherit(struct proc *p, void *v, register_t *retval)
/*
* sys_madvise: give advice about memory usage.
*/
-
/* ARGSUSED */
int
sys_madvise(struct proc *p, void *v, register_t *retval)
@@ -836,15 +788,11 @@ sys_mlock(struct proc *p, void *v, register_t *retval)
vsize_t size, pageoff;
int error;
- /*
- * extract syscall args from uap
- */
+ /* extract syscall args from uap */
addr = (vaddr_t)SCARG(uap, addr);
size = (vsize_t)SCARG(uap, len);
- /*
- * align the address to a page boundary and adjust the size accordingly
- */
+ /* align address to a page boundary and adjust size accordingly */
ALIGN_ADDR(addr, size, pageoff);
if (addr > SIZE_MAX - size)
return (EINVAL); /* disallow wrap-around. */
@@ -881,16 +829,11 @@ sys_munlock(struct proc *p, void *v, register_t *retval)
vsize_t size, pageoff;
int error;
- /*
- * extract syscall args from uap
- */
-
+ /* extract syscall args from uap */
addr = (vaddr_t)SCARG(uap, addr);
size = (vsize_t)SCARG(uap, len);
- /*
- * align the address to a page boundary, and adjust the size accordingly
- */
+ /* align address to a page boundary, and adjust size accordingly */
ALIGN_ADDR(addr, size, pageoff);
if (addr > SIZE_MAX - size)
return (EINVAL); /* disallow wrap-around. */
@@ -908,7 +851,6 @@ sys_munlock(struct proc *p, void *v, register_t *retval)
/*
* sys_mlockall: lock all pages mapped into an address space.
*/
-
int
sys_mlockall(struct proc *p, void *v, register_t *retval)
{
@@ -938,7 +880,6 @@ sys_mlockall(struct proc *p, void *v, register_t *retval)
/*
* sys_munlockall: unlock all pages mapped into an address space.
*/
-
int
sys_munlockall(struct proc *p, void *v, register_t *retval)
{
@@ -955,7 +896,6 @@ sys_munlockall(struct proc *p, void *v, register_t *retval)
* sysv shm uses "named anonymous memory")
* - caller must page-align the file offset
*/
-
int
uvm_mmap(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
vm_prot_t maxprot, int flags, caddr_t handle, voff_t foff,
@@ -968,10 +908,7 @@ uvm_mmap(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
uvm_flag_t uvmflag = 0;
vsize_t align = 0; /* userland page size */
- /*
- * check params
- */
-
+ /* check params */
if (size == 0)
return(0);
if (foff & PAGE_MASK)
@@ -983,7 +920,6 @@ uvm_mmap(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
* for non-fixed mappings, round off the suggested address.
* for fixed mappings, check alignment and zap old mappings.
*/
-
if ((flags & MAP_FIXED) == 0) {
*addr = round_page(*addr); /* round */
} else {
@@ -999,7 +935,6 @@ uvm_mmap(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
* handle anon vs. non-anon mappings. for non-anon mappings attach
* to underlying vm object.
*/
-
if (flags & MAP_ANON) {
if ((flags & MAP_FIXED) == 0 && size >= __LDPGSZ)
align = __LDPGSZ;
@@ -1011,9 +946,7 @@ uvm_mmap(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
else
/* shared: create amap now */
uvmflag |= UVM_FLAG_OVERLAY;
-
} else {
-
vp = (struct vnode *) handle; /* get vnode */
if (vp->v_type != VCHR) {
uobj = uvn_attach((void *) vp, (flags & MAP_SHARED) ?
@@ -1078,10 +1011,7 @@ uvm_mmap(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
uvmflag |= UVM_FLAG_COPYONW;
}
- /*
- * set up mapping flags
- */
-
+ /* set up mapping flags */
uvmflag = UVM_MAPFLAG(prot, maxprot,
(flags & MAP_SHARED) ? UVM_INH_SHARE : UVM_INH_COPY,
advice, uvmflag);
@@ -1135,10 +1065,7 @@ uvm_mmap(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
return (0);
}
- /*
- * errors: first detach from the uobj, if any.
- */
-
+ /* errors: first detach from the uobj, if any. */
if (uobj)
uobj->pgops->pgo_detach(uobj);
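The sys_msync(), sys_munmap(), sys_mlock() and sys_munlock() paths above all round the request to page boundaries and then reject ranges that would wrap before touching the map. A minimal user-space sketch of that pattern follows; the helper name, the fixed PAGE_SIZE and folding the wrap check into the helper are assumptions for this illustration, not code from the tree.

#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(PAGE_SIZE - 1)

/*
 * Truncate addr to a page boundary, grow size by the cut-off offset,
 * round size up to whole pages, then refuse ranges that wrap.
 */
static int
align_addr_range(uintptr_t *addr, size_t *size)
{
	size_t pageoff = *addr & PAGE_MASK;

	*addr -= pageoff;
	*size = (*size + pageoff + PAGE_MASK) & ~PAGE_MASK;
	if (*addr > SIZE_MAX - *size)
		return -1;		/* disallow wrap-around */
	return 0;
}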
diff --git a/sys/uvm/uvm_page.c b/sys/uvm/uvm_page.c
index de09dfc1bd3..90fb3f02674 100644
--- a/sys/uvm/uvm_page.c
+++ b/sys/uvm/uvm_page.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_page.c,v 1.129 2014/01/23 22:06:30 miod Exp $ */
+/* $OpenBSD: uvm_page.c,v 1.130 2014/04/13 23:14:15 tedu Exp $ */
/* $NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $ */
/*
@@ -95,11 +95,9 @@ uvm_pagecmp(struct vm_page *a, struct vm_page *b)
/*
* global vars... XXXCDC: move to uvm. structure.
*/
-
/*
* physical memory config is stored in vm_physmem.
*/
-
struct vm_physseg vm_physmem[VM_PHYSSEG_MAX]; /* XXXCDC: uvm.physmem */
int vm_nphysseg = 0; /* XXXCDC: uvm.nphysseg */
@@ -108,7 +106,6 @@ int vm_nphysseg = 0; /* XXXCDC: uvm.nphysseg */
* of the things necessary to do idle page zero'ing efficiently.
* We therefore provide a way to disable it from machdep code here.
*/
-
/*
* XXX disabled until we can find a way to do this without causing
* problems for either cpu caches or DMA latency.
@@ -118,27 +115,23 @@ boolean_t vm_page_zero_enable = FALSE;
/*
* local variables
*/
-
/*
* these variables record the values returned by vm_page_bootstrap,
* for debugging purposes. The implementation of uvm_pageboot_alloc
* and pmap_startup here also uses them internally.
*/
-
static vaddr_t virtual_space_start;
static vaddr_t virtual_space_end;
/*
* local prototypes
*/
-
static void uvm_pageinsert(struct vm_page *);
static void uvm_pageremove(struct vm_page *);
/*
* inline functions
*/
-
/*
* uvm_pageinsert: insert a page in the object
*
@@ -146,7 +139,6 @@ static void uvm_pageremove(struct vm_page *);
* => call should have already set pg's object and offset pointers
* and bumped the version counter
*/
-
__inline static void
uvm_pageinsert(struct vm_page *pg)
{
@@ -165,7 +157,6 @@ uvm_pageinsert(struct vm_page *pg)
*
* => caller must lock page queues
*/
-
static __inline void
uvm_pageremove(struct vm_page *pg)
{
@@ -184,7 +175,6 @@ uvm_pageremove(struct vm_page *pg)
*
* => we return the range of kernel virtual memory in kvm_startp/kvm_endp
*/
-
void
uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
{
@@ -246,10 +236,7 @@ uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
sizeof(struct vm_page));
memset(pagearray, 0, pagecount * sizeof(struct vm_page));
- /*
- * init the vm_page structures and put them in the correct place.
- */
-
+ /* init the vm_page structures and put them in the correct place. */
for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++) {
n = seg->end - seg->start;
if (n > pagecount) {
@@ -278,9 +265,7 @@ uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
}
}
- /*
- * Add pages to free pool.
- */
+ /* Add pages to free pool. */
uvm_pmr_freepages(&seg->pgs[seg->avail_start - seg->start],
seg->avail_end - seg->avail_start);
}
@@ -294,9 +279,7 @@ uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
*kvm_startp = round_page(virtual_space_start);
*kvm_endp = trunc_page(virtual_space_end);
- /*
- * init locks for kernel threads
- */
+ /* init locks for kernel threads */
mtx_init(&uvm.aiodoned_lock, IPL_BIO);
/*
@@ -312,16 +295,9 @@ uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
uvmexp.vnodemin = uvmexp.vnodeminpct * 256 / 100;
uvmexp.vtextmin = uvmexp.vtextminpct * 256 / 100;
- /*
- * determine if we should zero pages in the idle loop.
- */
-
+ /* determine if we should zero pages in the idle loop. */
uvm.page_idle_zero = vm_page_zero_enable;
- /*
- * done!
- */
-
uvm.page_init_done = TRUE;
}
@@ -330,7 +306,6 @@ uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
*
* => sets page_shift and page_mask from uvmexp.pagesize.
*/
-
void
uvm_setpagesize(void)
{
@@ -347,7 +322,6 @@ uvm_setpagesize(void)
/*
* uvm_pageboot_alloc: steal memory from physmem for bootstrapping
*/
-
vaddr_t
uvm_pageboot_alloc(vsize_t size)
{
@@ -374,9 +348,7 @@ uvm_pageboot_alloc(vsize_t size)
/* round to page size */
size = round_page(size);
- /*
- * on first call to this function, initialize ourselves.
- */
+ /* on first call to this function, initialize ourselves. */
if (initialized == FALSE) {
pmap_virtual_space(&virtual_space_start, &virtual_space_end);
@@ -387,9 +359,7 @@ uvm_pageboot_alloc(vsize_t size)
initialized = TRUE;
}
- /*
- * allocate virtual memory for this request
- */
+ /* allocate virtual memory for this request */
if (virtual_space_start == virtual_space_end ||
(virtual_space_end - virtual_space_start) < size)
panic("uvm_pageboot_alloc: out of virtual space");
@@ -410,13 +380,9 @@ uvm_pageboot_alloc(vsize_t size)
virtual_space_start += size;
- /*
- * allocate and mapin physical pages to back new virtual pages
- */
-
+ /* allocate and mapin physical pages to back new virtual pages */
for (vaddr = round_page(addr) ; vaddr < addr + size ;
vaddr += PAGE_SIZE) {
-
if (!uvm_page_physget(&paddr))
panic("uvm_pageboot_alloc: out of memory");
@@ -559,9 +525,7 @@ uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
panic("uvm_page_physload: start >= end");
#endif
- /*
- * do we have room?
- */
+ /* do we have room? */
if (vm_nphysseg == VM_PHYSSEG_MAX) {
printf("uvm_page_physload: unable to load physical memory "
"segment\n");
@@ -581,9 +545,7 @@ uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
}
preload = (lcv == vm_nphysseg);
- /*
- * if VM is already running, attempt to malloc() vm_page structures
- */
+ /* if VM is already running, attempt to malloc() vm_page structures */
if (!preload) {
/*
* XXXCDC: need some sort of lockout for this case
@@ -620,9 +582,7 @@ uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
}
}
- /*
- * Add pages to free pool.
- */
+ /* Add pages to free pool. */
if ((flags & PHYSLOAD_DEVICE) == 0) {
uvm_pmr_freepages(&pgs[avail_start - start],
avail_end - avail_start);
@@ -630,24 +590,17 @@ uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
/* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
} else {
-
/* gcc complains if these don't get init'd */
pgs = NULL;
npages = 0;
}
- /*
- * now insert us in the proper place in vm_physmem[]
- */
-
+ /* now insert us in the proper place in vm_physmem[] */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
-
/* random: put it at the end (easy!) */
ps = &vm_physmem[vm_nphysseg];
-
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
-
{
int x;
/* sort by address for binary search */
@@ -661,9 +614,7 @@ uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
/* structure copy */
seg[1] = seg[0];
}
-
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
-
{
int x;
/* sort by largest segment first */
@@ -678,11 +629,8 @@ uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
/* structure copy */
seg[1] = seg[0];
}
-
#else
-
panic("uvm_page_physload: unknown physseg strategy selected!");
-
#endif
ps->start = start;
@@ -697,10 +645,6 @@ uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
}
vm_nphysseg++;
- /*
- * done!
- */
-
return;
}
@@ -967,7 +911,6 @@ uvm_pagealloc(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
* [3] only pagedaemon "reserved" pages remain and
* the requestor isn't the pagedaemon.
*/
-
use_reserve = (flags & UVM_PGA_USERESERVE) ||
(obj && UVM_OBJ_IS_KERN_OBJECT(obj));
if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) ||
@@ -995,7 +938,7 @@ uvm_pagealloc(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
return(pg);
- fail:
+fail:
return (NULL);
}
@@ -1007,18 +950,12 @@ void
uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
{
- /*
- * remove it from the old object
- */
-
+ /* remove it from the old object */
if (pg->uobject) {
uvm_pageremove(pg);
}
- /*
- * put it in the new object
- */
-
+ /* put it in the new object */
if (newobj) {
pg->uobject = newobj;
pg->offset = newoff;
@@ -1036,7 +973,6 @@ uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
* => caller must lock page queues
* => assumes all valid mappings of pg are gone
*/
-
void
uvm_pagefree(struct vm_page *pg)
{
@@ -1056,9 +992,7 @@ uvm_pagefree(struct vm_page *pg)
* if the page was an object page (and thus "TABLED"), remove it
* from the object.
*/
-
if (pg->pg_flags & PG_TABLED) {
-
/*
* if the object page is on loan we are going to drop ownership.
* it is possible that an anon will take over as owner for this
@@ -1080,7 +1014,6 @@ uvm_pagefree(struct vm_page *pg)
* return (when the last loan is dropped, then the page can be
* freed by whatever was holding the last loan).
*/
-
if (saved_loan_count)
return;
} else if (saved_loan_count && pg->uanon) {
@@ -1098,10 +1031,7 @@ uvm_pagefree(struct vm_page *pg)
}
KASSERT(saved_loan_count == 0);
- /*
- * now remove the page from the queues
- */
-
+ /* now remove the page from the queues */
if (pg->pg_flags & PQ_ACTIVE) {
TAILQ_REMOVE(&uvm.page_active, pg, pageq);
flags_to_clear |= PQ_ACTIVE;
@@ -1116,10 +1046,7 @@ uvm_pagefree(struct vm_page *pg)
uvmexp.inactive--;
}
- /*
- * if the page was wired, unwire it now.
- */
-
+ /* if the page was wired, unwire it now. */
if (pg->wire_count) {
pg->wire_count = 0;
uvmexp.wired--;
@@ -1130,18 +1057,13 @@ uvm_pagefree(struct vm_page *pg)
flags_to_clear |= PQ_ANON;
}
- /*
- * Clean page state bits.
- */
+ /* Clean page state bits. */
flags_to_clear |= PQ_AOBJ; /* XXX: find culprit */
flags_to_clear |= PQ_ENCRYPT|PG_ZERO|PG_FAKE|PG_BUSY|PG_RELEASED|
PG_CLEAN|PG_CLEANCHK;
atomic_clearbits_int(&pg->pg_flags, flags_to_clear);
- /*
- * and put on free queue
- */
-
+ /* and put on free queue */
#ifdef DEBUG
pg->uobject = (void *)0xdeadbeef;
pg->offset = 0xdeadbeef;
@@ -1160,7 +1082,6 @@ uvm_pagefree(struct vm_page *pg)
* => pages must either all belong to the same object, or all belong to anons.
* => if pages are anon-owned, anons must have 0 refcount.
*/
-
void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{
@@ -1512,7 +1433,6 @@ uvm_pageactivate(struct vm_page *pg)
uvmexp.inactive--;
}
if (pg->wire_count == 0) {
-
/*
* if page is already active, remove it from list so we
* can put it at tail. if it wasn't active, then mark
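The VM_PSTRAT_BSEARCH branch of uvm_page_physload() above keeps vm_physmem[] sorted by start address and opens a slot by copying later entries up one position. A reduced sketch of that insertion; the segment type, NSEG_MAX and the helper name are simplified stand-ins for struct vm_physseg, VM_PHYSSEG_MAX and the in-line loop in the real function.

struct seg {
	unsigned long start, end;
};

#define NSEG_MAX	8		/* stand-in for VM_PHYSSEG_MAX */

static struct seg segs[NSEG_MAX];
static int nsegs;

/* Insert a segment, keeping the array sorted by start address. */
static struct seg *
seg_insert_sorted(unsigned long start, unsigned long end)
{
	struct seg *s;
	int x;

	if (nsegs == NSEG_MAX)
		return NULL;			/* no room */
	for (x = 0; x < nsegs; x++)		/* find first later segment */
		if (start < segs[x].start)
			break;
	for (s = &segs[nsegs]; s > &segs[x]; s--)
		s[0] = s[-1];			/* structure copy, shift up */
	s->start = start;
	s->end = end;
	nsegs++;
	return s;
}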
diff --git a/sys/uvm/uvm_pager.c b/sys/uvm/uvm_pager.c
index f5f86813ab0..4e4918fd0e7 100644
--- a/sys/uvm/uvm_pager.c
+++ b/sys/uvm/uvm_pager.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_pager.c,v 1.64 2013/11/02 00:08:17 krw Exp $ */
+/* $OpenBSD: uvm_pager.c,v 1.65 2014/04/13 23:14:15 tedu Exp $ */
/* $NetBSD: uvm_pager.c,v 1.36 2000/11/27 18:26:41 chs Exp $ */
/*
@@ -94,28 +94,19 @@ void uvm_pseg_release(vaddr_t);
/*
* uvm_pager_init: init pagers (at boot time)
*/
-
void
uvm_pager_init(void)
{
int lcv;
- /*
- * init pager map
- */
-
+ /* init pager map */
uvm_pseg_init(&psegs[0]);
mtx_init(&uvm_pseg_lck, IPL_VM);
- /*
- * init ASYNC I/O queue
- */
-
+ /* init ASYNC I/O queue */
TAILQ_INIT(&uvm.aio_done);
- /*
- * call pager init functions
- */
+ /* call pager init functions */
for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
lcv++) {
if (uvmpagerops[lcv]->pgo_init)
@@ -332,11 +323,7 @@ uvm_mk_pcluster(struct uvm_object *uobj, struct vm_page **pps, int *npages,
* pages in the middle of an I/O. (consider an msync()). let's
* lock it for now (better to delay than corrupt data?).
*/
-
- /*
- * get cluster boundaries, check sanity, and apply our limits as well.
- */
-
+ /* get cluster boundaries, check sanity, and apply our limits as well.*/
uobj->pgops->pgo_cluster(uobj, center->offset, &lo, &hi);
if ((flags & PGO_ALLPAGES) == 0) {
if (lo < mlo)
@@ -350,11 +337,7 @@ uvm_mk_pcluster(struct uvm_object *uobj, struct vm_page **pps, int *npages,
return(pps);
}
- /*
- * now determine the center and attempt to cluster around the
- * edges
- */
-
+ /* now determine the center and attempt to cluster around the edges */
center_idx = (center->offset - lo) >> PAGE_SHIFT;
pps[center_idx] = center; /* plug in the center page */
ppsp = &pps[center_idx];
@@ -369,7 +352,6 @@ uvm_mk_pcluster(struct uvm_object *uobj, struct vm_page **pps, int *npages,
* [i.e. there is no need to query the pmap with pmap_is_modified
* since there are no mappings].
*/
-
for (forward = 0 ; forward <= 1 ; forward++) {
incr = forward ? PAGE_SIZE : -PAGE_SIZE;
curoff = center->offset + incr;
@@ -436,7 +418,6 @@ uvm_mk_pcluster(struct uvm_object *uobj, struct vm_page **pps, int *npages,
/*
* done! return the cluster array to the caller!!!
*/
-
return(ppsp);
}
@@ -473,7 +454,6 @@ uvm_mk_pcluster(struct uvm_object *uobj, struct vm_page **pps, int *npages,
* with all pages busy (caller must un-busy and check
* wanted/released flags).
*/
-
int
uvm_pager_put(struct uvm_object *uobj, struct vm_page *pg,
struct vm_page ***ppsp_ptr, int *npages, int flags,
@@ -488,14 +468,11 @@ uvm_pager_put(struct uvm_object *uobj, struct vm_page *pg,
* note that uobj is !null if we are doing normal object pageout.
* note that the page queues must be locked to cluster.
*/
-
if (uobj) { /* if !swap-backed */
-
/*
* attempt to build a cluster for pageout using its
* make-put-cluster function (if it has one).
*/
-
if (uobj->pgops->pgo_mk_pcluster) {
ppsp = uobj->pgops->pgo_mk_pcluster(uobj, ppsp,
npages, pg, flags, start, stop);
@@ -506,9 +483,7 @@ uvm_pager_put(struct uvm_object *uobj, struct vm_page *pg,
}
swblk = 0; /* XXX: keep gcc happy */
-
} else {
-
/*
* for swap-backed pageout, the caller (the pagedaemon) has
* already built the cluster for us. the starting swap
@@ -528,7 +503,6 @@ uvm_pager_put(struct uvm_object *uobj, struct vm_page *pg,
* now attempt the I/O. if we have a failure and we are
* clustered, we will drop the cluster and try again.
*/
-
ReTry:
if (uobj) {
result = uobj->pgops->pgo_put(uobj, ppsp, *npages, flags);
@@ -549,12 +523,9 @@ ReTry:
* [in this case the async i/o done function must clean up when
* i/o is done...]
*/
-
if (result == VM_PAGER_PEND || result == VM_PAGER_OK) {
if (result == VM_PAGER_OK && (flags & PGO_PDFREECLUST)) {
- /*
- * drop cluster
- */
+ /* drop cluster */
if (*npages > 1 || pg == NULL)
uvm_pager_dropcluster(uobj, pg, ppsp, npages,
PGO_PDFREECLUST);
@@ -567,7 +538,6 @@ ReTry:
* was one). give up! the caller only has one page ("pg")
* to worry about.
*/
-
if (*npages > 1 || pg == NULL) {
uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP);
@@ -577,7 +547,6 @@ ReTry:
* "swblk" (for transient errors, so we can retry),
* or 0 (for hard errors).
*/
-
if (uobj == NULL && pg != NULL) {
/* XXX daddr_t -> int */
int nswblk = (result == VM_PAGER_AGAIN) ? swblk : 0;
@@ -590,12 +559,10 @@ ReTry:
}
}
if (result == VM_PAGER_AGAIN) {
-
/*
* for transient failures, free all the swslots that
* we're not going to retry with.
*/
-
if (uobj == NULL) {
if (pg) {
/* XXX daddr_t -> int */
@@ -611,13 +578,11 @@ ReTry:
goto ReTry;
}
} else if (uobj == NULL) {
-
/*
* for hard errors on swap-backed pageouts,
* mark the swslots as bad. note that we do not
* free swslots that we mark bad.
*/
-
/* XXX daddr_t -> int */
uvm_swap_markbad(swblk, *npages);
}
@@ -655,12 +620,8 @@ uvm_pager_dropcluster(struct uvm_object *uobj, struct vm_page *pg,
{
int lcv;
- /*
- * drop all pages but "pg"
- */
-
+ /* drop all pages but "pg" */
for (lcv = 0 ; lcv < *npages ; lcv++) {
-
/* skip "pg" or empty slot */
if (ppsp[lcv] == pg || ppsp[lcv] == NULL)
continue;
@@ -746,7 +707,6 @@ uvm_aio_biodone(struct buf *bp)
* uvm_aio_aiodone: do iodone processing for async i/os.
* this should be called in thread context, not interrupt context.
*/
-
void
uvm_aio_aiodone(struct buf *bp)
{
@@ -801,7 +761,6 @@ uvm_aio_aiodone(struct buf *bp)
* or this was a successful write,
* mark the page PG_CLEAN and not PG_FAKE.
*/
-
if ((pgs[i]->pg_flags & PG_FAKE) || (write && error != ENOMEM)) {
pmap_clear_reference(pgs[i]);
pmap_clear_modify(pgs[i]);
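uvm_mk_pcluster() above grows a cluster by probing one page at a time on each side of the center page and stopping at the first page it cannot use. The sketch below captures only that two-direction walk; the probe callback is a stand-in for the uvm_pagelookup()-plus-flag checks in the real code, and the bounds handling is simplified.

#define PAGE_SIZE	4096L

typedef int (*page_probe_t)(long offset);	/* nonzero if page is usable */

/*
 * Walk backward then forward from center, within [lo, hi); return the
 * number of pages in the resulting contiguous cluster.
 */
static long
cluster_walk(long center, long lo, long hi, page_probe_t usable,
    long *first, long *last)
{
	long incr, curoff;
	int forward;

	*first = *last = center;
	for (forward = 0; forward <= 1; forward++) {
		incr = forward ? PAGE_SIZE : -PAGE_SIZE;
		for (curoff = center + incr;
		    forward ? curoff < hi : curoff >= lo;
		    curoff += incr) {
			if (!usable(curoff))
				break;		/* first gap ends this side */
			if (forward)
				*last = curoff;
			else
				*first = curoff;
		}
	}
	return (*last - *first) / PAGE_SIZE + 1;
}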
diff --git a/sys/uvm/uvm_pdaemon.c b/sys/uvm/uvm_pdaemon.c
index 42fbc56018c..5fc88e8c8dd 100644
--- a/sys/uvm/uvm_pdaemon.c
+++ b/sys/uvm/uvm_pdaemon.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_pdaemon.c,v 1.66 2014/02/06 16:40:40 tedu Exp $ */
+/* $OpenBSD: uvm_pdaemon.c,v 1.67 2014/04/13 23:14:15 tedu Exp $ */
/* $NetBSD: uvm_pdaemon.c,v 1.23 2000/08/20 10:24:14 bjh21 Exp $ */
/*
@@ -112,10 +112,7 @@ uvm_wait(const char *wmsg)
{
int timo = 0;
- /*
- * check for page daemon going to sleep (waiting for itself)
- */
-
+ /* check for page daemon going to sleep (waiting for itself) */
if (curproc == uvm.pagedaemon_proc) {
printf("uvm_wait emergency bufbackoff\n");
if (bufbackoff(NULL, 4) == 0)
@@ -186,7 +183,6 @@ uvmpd_tune(void)
/*
* uvm_pageout: the main loop for the pagedaemon
*/
-
void
uvm_pageout(void *arg)
{
@@ -195,10 +191,7 @@ uvm_pageout(void *arg)
int work_done;
int npages = 0;
- /*
- * ensure correct priority and set paging parameters...
- */
-
+ /* ensure correct priority and set paging parameters... */
uvm.pagedaemon_proc = curproc;
(void) spl0();
uvm_lock_pageq();
@@ -206,10 +199,6 @@ uvm_pageout(void *arg)
uvmpd_tune();
uvm_unlock_pageq();
- /*
- * main loop
- */
-
for (;;) {
long size;
work_done = 0; /* No work done this iteration. */
@@ -230,10 +219,7 @@ uvm_pageout(void *arg)
uvm_unlock_fpageq();
- /*
- * now lock page queues and recompute inactive count
- */
-
+ /* now lock page queues and recompute inactive count */
uvm_lock_pageq();
if (npages != uvmexp.npages) { /* check for new pages? */
npages = uvmexp.npages;
@@ -245,9 +231,7 @@ uvm_pageout(void *arg)
uvmexp.inactarg = uvmexp.freetarg + 1;
}
- /*
- * Reclaim pages from the buffer cache if possible.
- */
+ /* Reclaim pages from the buffer cache if possible. */
size = 0;
if (pma != NULL)
size += pma->pm_size >> PAGE_SHIFT;
@@ -256,9 +240,7 @@ uvm_pageout(void *arg)
BUFPAGES_DEFICIT);
(void) bufbackoff(&constraint, size * 2);
- /*
- * Scan if needed to meet our targets.
- */
+ /* Scan if needed to meet our targets. */
if (pma != NULL ||
((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg) ||
((uvmexp.inactive + BUFPAGES_INACT) < uvmexp.inactarg)) {
@@ -289,10 +271,7 @@ uvm_pageout(void *arg)
}
uvm_unlock_fpageq();
- /*
- * scan done. unlock page queues (the only lock we are holding)
- */
-
+ /* scan done. unlock page queues (only lock we are holding) */
uvm_unlock_pageq();
}
/*NOTREACHED*/
@@ -302,7 +281,6 @@ uvm_pageout(void *arg)
/*
* uvm_aiodone_daemon: main loop for the aiodone daemon.
*/
-
void
uvm_aiodone_daemon(void *arg)
{
@@ -312,7 +290,6 @@ uvm_aiodone_daemon(void *arg)
uvm.aiodoned_proc = curproc;
for (;;) {
-
/*
* Check for done aio structures. If we've got structures to
* process, do so. Otherwise sleep while avoiding races.
@@ -325,10 +302,7 @@ uvm_aiodone_daemon(void *arg)
TAILQ_INIT(&uvm.aio_done);
mtx_leave(&uvm.aiodoned_lock);
- /*
- * process each i/o that's done.
- */
-
+ /* process each i/o that's done. */
free = uvmexp.free;
while (bp != NULL) {
if (bp->b_flags & B_PDAEMON) {
@@ -383,30 +357,25 @@ uvmpd_scan_inactive(struct pglist *pglst)
* swap cluster in "swpps" rather than in pps (allows us to mix
* clustering types in the event of a mixed inactive queue).
*/
-
/*
* swslot is non-zero if we are building a swap cluster. we want
* to stay in the loop while we have a page to scan or we have
* a swap-cluster to build.
*/
-
swslot = 0;
swnpages = swcpages = 0;
free = 0;
dirtyreacts = 0;
for (p = TAILQ_FIRST(pglst); p != NULL || swslot != 0; p = nextpg) {
-
/*
* note that p can be NULL iff we have traversed the whole
* list and need to do one final swap-backed clustered pageout.
*/
-
uobj = NULL;
anon = NULL;
if (p) {
-
/*
* update our copy of "free" and see if we've met
* our target
@@ -428,7 +397,6 @@ uvmpd_scan_inactive(struct pglist *pglst)
}
if (p) { /* if (we have a new page to consider) */
-
/*
* we are below target and have a new page to consider.
*/
@@ -466,7 +434,6 @@ uvmpd_scan_inactive(struct pglist *pglst)
* if the page is ownerless, claim it in the
* name of "anon"!
*/
-
if ((p->pg_flags & PQ_ANON) == 0) {
KASSERT(p->loan_count > 0);
p->loan_count--;
@@ -496,7 +463,6 @@ uvmpd_scan_inactive(struct pglist *pglst)
* the page is not busy. if the page is clean we
* can free it now and continue.
*/
-
if (p->pg_flags & PG_CLEAN) {
if (p->pg_flags & PQ_SWAPBACKED) {
/* this page now lives only in swap */
@@ -527,7 +493,6 @@ uvmpd_scan_inactive(struct pglist *pglst)
* this page is dirty, skip it if we'll have met our
* free target when all the current pageouts complete.
*/
-
if (free + uvmexp.paging > uvmexp.freetarg << 2) {
continue;
}
@@ -538,7 +503,6 @@ uvmpd_scan_inactive(struct pglist *pglst)
* reactivate it so that we eventually cycle
* all pages thru the inactive queue.
*/
-
KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
if ((p->pg_flags & PQ_SWAPBACKED) &&
uvmexp.swpgonly == uvmexp.swpages) {
@@ -552,7 +516,6 @@ uvmpd_scan_inactive(struct pglist *pglst)
* is full, free any swap allocated to the page
* so that other pages can be paged out.
*/
-
KASSERT(uvmexp.swpginuse <= uvmexp.swpages);
if ((p->pg_flags & PQ_SWAPBACKED) &&
uvmexp.swpginuse == uvmexp.swpages) {
@@ -587,13 +550,8 @@ uvmpd_scan_inactive(struct pglist *pglst)
* for swap-backed pages we need to (re)allocate
* swap space.
*/
-
if (swap_backed) {
-
- /*
- * free old swap slot (if any)
- */
-
+ /* free old swap slot (if any) */
if (anon) {
if (anon->an_swslot) {
uvm_swap_free(anon->an_swslot,
@@ -605,10 +563,7 @@ uvmpd_scan_inactive(struct pglist *pglst)
p->offset >> PAGE_SHIFT);
}
- /*
- * start new cluster (if necessary)
- */
-
+ /* start new cluster (if necessary) */
if (swslot == 0) {
swnpages = MAXBSIZE >> PAGE_SHIFT;
swslot = uvm_swap_alloc(&swnpages,
@@ -624,10 +579,7 @@ uvmpd_scan_inactive(struct pglist *pglst)
swcpages = 0; /* cluster is empty */
}
- /*
- * add block to cluster
- */
-
+ /* add block to cluster */
swpps[swcpages] = p;
if (anon)
anon->an_swslot = swslot + swcpages;
@@ -638,7 +590,6 @@ uvmpd_scan_inactive(struct pglist *pglst)
swcpages++;
}
} else {
-
/* if p == NULL we must be doing a last swap i/o */
swap_backed = TRUE;
}
@@ -652,7 +603,6 @@ uvmpd_scan_inactive(struct pglist *pglst)
*
* for object pages, we always do the pageout.
*/
-
if (swap_backed) {
if (p) { /* if we just added a page to cluster */
/* cluster not full yet? */
@@ -749,10 +699,7 @@ uvmpd_scan_inactive(struct pglist *pglst)
}
#endif
- /*
- * clean up "p" if we have one
- */
-
+ /* clean up "p" if we have one */
if (p) {
/*
* the I/O request to "p" is done and uvm_pager_put
@@ -838,19 +785,16 @@ uvmpd_scan_inactive(struct pglist *pglst)
nextpg = TAILQ_FIRST(pglst); /* reload! */
}
} else {
-
/*
* if p is null in this loop, make sure it stays null
* in the next loop.
*/
-
nextpg = NULL;
/*
* lock page queues here just so they're always locked
* at the end of the loop.
*/
-
uvm_lock_pageq();
}
}
@@ -903,7 +847,6 @@ uvmpd_scan(void)
* alternate starting queue between swap and object based on the
* low bit of uvmexp.pdrevs (which we bump by one each call).
*/
-
got_it = FALSE;
pages_freed = uvmexp.pdfreed; /* XXX - int */
if ((uvmexp.pdrevs & 1) != 0 && uvmexp.nswapdev != 0)
@@ -918,14 +861,12 @@ uvmpd_scan(void)
* we have done the scan to get free pages. now we work on meeting
* our inactive target.
*/
-
inactive_shortage = uvmexp.inactarg - uvmexp.inactive - BUFPAGES_INACT;
/*
* detect if we're not going to be able to page anything out
* until we free some swap resources from active pages.
*/
-
swap_shortage = 0;
if (uvmexp.free < uvmexp.freetarg &&
uvmexp.swpginuse == uvmexp.swpages &&
@@ -953,10 +894,7 @@ uvmpd_scan(void)
}
}
- /*
- * skip this page if it's busy.
- */
-
+ /* skip this page if it's busy. */
if ((p->pg_flags & PG_BUSY) != 0) {
continue;
}
@@ -965,7 +903,6 @@ uvmpd_scan(void)
* if there's a shortage of swap, free any swap allocated
* to this page so that other pages can be paged out.
*/
-
if (swap_shortage > 0) {
if ((p->pg_flags & PQ_ANON) && p->uanon->an_swslot) {
uvm_swap_free(p->uanon->an_swslot, 1);
@@ -989,7 +926,6 @@ uvmpd_scan(void)
* deactivate this page if there's a shortage of
* inactive pages.
*/
-
if (inactive_shortage > 0) {
pmap_page_protect(p, VM_PROT_NONE);
/* no need to check wire_count as pg is "active" */
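The main loop of uvm_pageout() above decides whether to scan based on a pending allocation request and the free and inactive targets, after allowing for pages the buffer cache could give back. A compact sketch of that decision; the plain struct stands in for uvmexp and the BUFPAGES_* estimates, and the function itself is an illustration, not kernel code.

struct pd_counters {
	long free, freetarg;		/* free pages vs. free target */
	long inactive, inactarg;	/* inactive pages vs. target */
	long buf_deficit, buf_inact;	/* buffer-cache adjustments */
};

/* Return nonzero when the pagedaemon should run a scan pass. */
static int
pagedaemon_should_scan(const struct pd_counters *c, int request_pending)
{
	if (request_pending)		/* pma != NULL in the real loop */
		return 1;
	if (c->free - c->buf_deficit < c->freetarg)
		return 1;
	if (c->inactive + c->buf_inact < c->inactarg)
		return 1;
	return 0;
}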
diff --git a/sys/uvm/uvm_pmemrange.c b/sys/uvm/uvm_pmemrange.c
index 3eb24f4b5f8..c0de6ad90e4 100644
--- a/sys/uvm/uvm_pmemrange.c
+++ b/sys/uvm/uvm_pmemrange.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_pmemrange.c,v 1.39 2014/04/05 17:18:00 miod Exp $ */
+/* $OpenBSD: uvm_pmemrange.c,v 1.40 2014/04/13 23:14:15 tedu Exp $ */
/*
* Copyright (c) 2009, 2010 Ariane van der Steldt <ariane@stack.nl>
@@ -940,9 +940,7 @@ drain_found:
}
}
- /*
- * Try a smaller search now.
- */
+ /* Try a smaller search now. */
if (++try < nitems(search))
goto rescan;
@@ -1005,10 +1003,7 @@ drain_found:
}
fail:
- /*
- * Allocation failed.
- */
-
+ /* Allocation failed. */
/* XXX: claim from memory reserve here */
while (!TAILQ_EMPTY(result))
@@ -1026,11 +1021,7 @@ fail:
return ENOMEM;
out:
-
- /*
- * Allocation succesful.
- */
-
+ /* Allocation successful. */
uvmexp.free -= fcount;
uvm_unlock_fpageq();
@@ -1400,9 +1391,7 @@ uvm_pmr_split(paddr_t pageno)
uvm_pmr_insert(drain, prev + before, 1);
}
- /*
- * Move free chunks that no longer fall in the range.
- */
+ /* Move free chunks that no longer fall in the range. */
for (; rebuild != NULL; rebuild = next) {
next = RB_NEXT(uvm_pmr_addr, &pmr->addr, rebuild);
@@ -1591,9 +1580,7 @@ uvm_pmr_rootupdate(struct uvm_pmemrange *pmr, struct vm_page *init_root,
KDASSERT(pmr != NULL && init_root != NULL);
root = init_root;
- /*
- * Which direction to use for searching.
- */
+ /* Which direction to use for searching. */
if (start != 0 && atop(VM_PAGE_TO_PHYS(root)) + root->fpgsz <= start)
direction = 1;
else if (end != 0 && atop(VM_PAGE_TO_PHYS(root)) >= end)
@@ -1601,9 +1588,7 @@ uvm_pmr_rootupdate(struct uvm_pmemrange *pmr, struct vm_page *init_root,
else /* nothing to do */
return root;
- /*
- * First, update root to fall within the chosen range.
- */
+ /* First, update root to fall within the chosen range. */
while (root && !PMR_INTERSECTS_WITH(
atop(VM_PAGE_TO_PHYS(root)),
atop(VM_PAGE_TO_PHYS(root)) + root->fpgsz,
@@ -1659,9 +1644,7 @@ uvm_pmr_rootupdate(struct uvm_pmemrange *pmr, struct vm_page *init_root,
if (low == high)
return NULL;
- /*
- * Ack, no hits. Walk the address tree until to find something usable.
- */
+ /* No hits. Walk the address tree until we find something usable. */
for (low = RB_NEXT(uvm_pmr_addr, &pmr->addr, low);
low != high;
low = RB_NEXT(uvm_pmr_addr, &pmr->addr, low)) {
@@ -1672,9 +1655,7 @@ uvm_pmr_rootupdate(struct uvm_pmemrange *pmr, struct vm_page *init_root,
return low;
}
- /*
- * Nothing found.
- */
+ /* Nothing found. */
return NULL;
}
@@ -1946,9 +1927,7 @@ uvm_wakeup_pla(paddr_t low, psize_t len)
high = low + len;
- /*
- * Wake specific allocations waiting for this memory.
- */
+ /* Wake specific allocations waiting for this memory. */
for (pma = TAILQ_FIRST(&uvm.pmr_control.allocs); pma != NULL;
pma = pma_next) {
pma_next = TAILQ_NEXT(pma, pmq);
diff --git a/sys/uvm/uvm_swap.c b/sys/uvm/uvm_swap.c
index 8e2a0d3fe8e..06d8b5c1dce 100644
--- a/sys/uvm/uvm_swap.c
+++ b/sys/uvm/uvm_swap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_swap.c,v 1.124 2013/11/24 15:44:26 jsing Exp $ */
+/* $OpenBSD: uvm_swap.c,v 1.125 2014/04/13 23:14:15 tedu Exp $ */
/* $NetBSD: uvm_swap.c,v 1.40 2000/11/17 11:39:39 mrg Exp $ */
/*
@@ -196,7 +196,6 @@ struct vndbuf {
struct task vb_task;
};
-
/*
* We keep a pool of vndbuf's and vndxfer structures.
*/
@@ -280,7 +279,6 @@ uvm_swap_init(void)
* then get a handle on the vnode for /dev/drum by using
* the its dev_t number ("swapdev", from MD conf.c).
*/
-
LIST_INIT(&swap_priority);
uvmexp.nswapdev = 0;
@@ -298,25 +296,14 @@ uvm_swap_init(void)
if (swapmap == 0)
panic("uvm_swap_init: extent_create failed");
- /*
- * allocate pools for structures used for swapping to files.
- */
-
-
+ /* allocate pools for structures used for swapping to files. */
pool_init(&vndxfer_pool, sizeof(struct vndxfer), 0, 0, 0, "swp vnx",
NULL);
-
pool_init(&vndbuf_pool, sizeof(struct vndbuf), 0, 0, 0, "swp vnd",
NULL);
- /*
- * Setup the initial swap partition
- */
+ /* Setup the initial swap partition */
swapmount();
-
- /*
- * done!
- */
}
#ifdef UVM_SWAP_ENCRYPT
@@ -698,9 +685,7 @@ sys_swapctl(struct proc *p, void *v, register_t *retval)
goto out;
}
- /*
- * all other requests require superuser privs. verify.
- */
+ /* all other requests require superuser privs. verify. */
if ((error = suser(p, 0)))
goto out;
@@ -721,7 +706,6 @@ sys_swapctl(struct proc *p, void *v, register_t *retval)
error = 0; /* assume no error */
switch(SCARG(uap, cmd)) {
-
case SWAP_DUMPDEV:
if (vp->v_type != VBLK) {
error = ENOTBLK;
@@ -729,7 +713,6 @@ sys_swapctl(struct proc *p, void *v, register_t *retval)
}
dumpdev = vp->v_rdev;
break;
-
case SWAP_CTL:
/*
* get new priority, remove old entry (if any) and then
@@ -747,16 +730,13 @@ sys_swapctl(struct proc *p, void *v, register_t *retval)
if (error)
free(spp, M_VMSWAP);
break;
-
case SWAP_ON:
-
/*
* check for duplicates. if none found, then insert a
* dummy entry on the list to prevent someone else from
* trying to enable this device while we are working on
* it.
*/
-
priority = SCARG(uap, misc);
if ((sdp = swaplist_find(vp, 0)) != NULL) {
error = EBUSY;
@@ -800,7 +780,6 @@ sys_swapctl(struct proc *p, void *v, register_t *retval)
break;
}
break;
-
case SWAP_OFF:
if ((sdp = swaplist_find(vp, 0)) == NULL) {
error = ENXIO;
@@ -821,14 +800,11 @@ sys_swapctl(struct proc *p, void *v, register_t *retval)
*/
error = swap_off(p, sdp);
break;
-
default:
error = EINVAL;
}
- /*
- * done! release the ref gained by namei() and unlock.
- */
+ /* done! release the ref gained by namei() and unlock. */
vput(vp);
out:
@@ -994,18 +970,14 @@ swap_on(struct proc *p, struct swapdev *sdp)
}
#endif
- /*
- * add a ref to vp to reflect usage as a swap device.
- */
+ /* add a ref to vp to reflect usage as a swap device. */
vref(vp);
#ifdef UVM_SWAP_ENCRYPT
if (uvm_doswapencrypt)
uvm_swap_initcrypt(sdp, npages);
#endif
- /*
- * now add the new swapdev to the drum and enable.
- */
+ /* now add the new swapdev to the drum and enable. */
swapdrum_add(sdp, npages);
sdp->swd_npages = size;
sdp->swd_flags &= ~SWF_FAKE; /* going live */
@@ -1014,9 +986,7 @@ swap_on(struct proc *p, struct swapdev *sdp)
return (0);
bad:
- /*
- * failure: close device if necessary and return error.
- */
+ /* failure: close device if necessary and return error. */
if (vp != rootvp)
(void)VOP_CLOSE(vp, FREAD|FWRITE, p->p_ucred, p);
return (error);
@@ -1117,10 +1087,7 @@ swstrategy(struct buf *bp)
return;
}
- /*
- * convert drum page number to block number on this swapdev.
- */
-
+ /* convert drum page number to block number on this swapdev. */
pageno -= sdp->swd_drumoffset; /* page # on swapdev */
bn = btodb((u_int64_t)pageno << PAGE_SHIFT); /* convert to diskblock */
@@ -1129,13 +1096,10 @@ swstrategy(struct buf *bp)
* for regular files we have to do more work which we delegate
* to sw_reg_strategy().
*/
-
switch (sdp->swd_vp->v_type) {
default:
panic("swstrategy: vnode type 0x%x", sdp->swd_vp->v_type);
-
case VBLK:
-
/*
* must convert "bp" from an I/O on /dev/drum to an I/O
* on the swapdev (sdp).
@@ -1147,11 +1111,8 @@ swstrategy(struct buf *bp)
splx(s);
VOP_STRATEGY(bp);
return;
-
case VREG:
- /*
- * delegate to sw_reg_strategy function.
- */
+ /* delegate to sw_reg_strategy function. */
sw_reg_strategy(sdp, bp, bn);
return;
}
@@ -1193,7 +1154,6 @@ sw_reg_strategy(struct swapdev *sdp, struct buf *bp, int bn)
for (resid = bp->b_resid; resid; resid -= sz) {
struct vndbuf *nbp;
-
/*
* translate byteoffset into block number. return values:
* vp = vnode of underlying device
@@ -1389,16 +1349,12 @@ sw_reg_iodone_internal(void *xvbp, void *xvnx)
if (vbp->vb_buf.b_error)
vnx->vx_error = vbp->vb_buf.b_error;
- /*
- * disassociate this buffer from the vnode (if any).
- */
+ /* disassociate this buffer from the vnode (if any). */
if (vbp->vb_buf.b_vp != NULL) {
brelvp(&vbp->vb_buf);
}
- /*
- * kill vbp structure
- */
+ /* kill vbp structure */
putvndbuf(vbp);
/*
@@ -1607,18 +1563,14 @@ uvm_swap_get(struct vm_page *page, int swslot, int flags)
return VM_PAGER_ERROR;
}
- /*
- * this page is (about to be) no longer only in swap.
- */
+ /* this page is (about to be) no longer only in swap. */
uvmexp.swpgonly--;
result = uvm_swap_io(&page, swslot, 1, B_READ |
((flags & PGO_SYNCIO) ? 0 : B_ASYNC));
if (result != VM_PAGER_OK && result != VM_PAGER_PEND) {
- /*
- * oops, the read failed so it really is still only in swap.
- */
+ /* oops, the read failed so it really is still only in swap. */
uvmexp.swpgonly++;
}
@@ -1647,9 +1599,7 @@ uvm_swap_io(struct vm_page **pps, int startslot, int npages, int flags)
write = (flags & B_READ) == 0;
async = (flags & B_ASYNC) != 0;
- /*
- * convert starting drum slot to block number
- */
+ /* convert starting drum slot to block number */
startblk = btodb((u_int64_t)startslot << PAGE_SHIFT);
/*
@@ -1729,9 +1679,7 @@ uvm_swap_io(struct vm_page **pps, int startslot, int npages, int flags)
}
}
- /*
- * encrypt to swap
- */
+ /* encrypt to swap */
if (write && bounce) {
int i, opages;
caddr_t src, dst;
@@ -1852,31 +1800,23 @@ uvm_swap_io(struct vm_page **pps, int startslot, int npages, int flags)
splx(s);
}
- /*
- * for async ops we must set up the iodone handler.
- */
+ /* for async ops we must set up the iodone handler. */
if (async) {
bp->b_flags |= B_CALL | (curproc == uvm.pagedaemon_proc ?
B_PDAEMON : 0);
bp->b_iodone = uvm_aio_biodone;
}
- /*
- * now we start the I/O, and if async, return.
- */
+ /* now we start the I/O, and if async, return. */
VOP_STRATEGY(bp);
if (async)
return (VM_PAGER_PEND);
- /*
- * must be sync i/o. wait for it to finish
- */
+ /* must be sync i/o. wait for it to finish */
(void) biowait(bp);
result = (bp->b_flags & B_ERROR) ? VM_PAGER_ERROR : VM_PAGER_OK;
- /*
- * decrypt swap
- */
+ /* decrypt swap */
if (!write && !(bp->b_flags & B_ERROR)) {
int i;
caddr_t data = (caddr_t)kva;
@@ -1913,20 +1853,14 @@ uvm_swap_io(struct vm_page **pps, int startslot, int npages, int flags)
if (bounce)
uvm_pagermapout(bouncekva, npages);
}
- /*
- * kill the pager mapping
- */
+ /* kill the pager mapping */
uvm_pagermapout(kva, npages);
- /*
- * Not anymore needed, free after encryption/bouncing
- */
+ /* No longer needed, free after encryption/bouncing */
if (!write && bounce)
uvm_swap_freepages(tpps, npages);
- /*
- * now dispose of the buf
- */
+ /* now dispose of the buf */
s = splbio();
if (bp->b_vp)
brelvp(bp);
@@ -1936,9 +1870,7 @@ uvm_swap_io(struct vm_page **pps, int startslot, int npages, int flags)
pool_put(&bufpool, bp);
splx(s);
- /*
- * finally return.
- */
+ /* finally return. */
return (result);
}
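swstrategy() above converts a page number on the swap "drum" into a block number on the underlying swap device by subtracting the device's drum offset and rescaling from pages to disk blocks. A standalone sketch of that conversion; the PAGE_SHIFT and DEV_BSHIFT values and the helper name are assumptions for the example.

#include <stdint.h>

#define PAGE_SHIFT	12			/* 4 KB pages */
#define DEV_BSHIFT	9			/* 512-byte disk blocks */
#define btodb(bytes)	((bytes) >> DEV_BSHIFT)

/* Map a drum page number to a block number on its swap device. */
static int64_t
swap_pageno_to_blkno(int64_t pageno, int64_t drumoffset)
{
	pageno -= drumoffset;		/* page number within this swapdev */
	return btodb((uint64_t)pageno << PAGE_SHIFT);
}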
diff --git a/sys/uvm/uvm_unix.c b/sys/uvm/uvm_unix.c
index 1aa9fcebdb4..f885c8a5a47 100644
--- a/sys/uvm/uvm_unix.c
+++ b/sys/uvm/uvm_unix.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_unix.c,v 1.48 2014/03/26 05:23:42 guenther Exp $ */
+/* $OpenBSD: uvm_unix.c,v 1.49 2014/04/13 23:14:15 tedu Exp $ */
/* $NetBSD: uvm_unix.c,v 1.18 2000/09/13 15:00:25 thorpej Exp $ */
/*
@@ -86,9 +86,7 @@ sys_obreak(struct proc *p, void *v, register_t *retval)
if (new == old)
return (0);
- /*
- * grow or shrink?
- */
+ /* grow or shrink? */
if (new > old) {
error = uvm_map(&vm->vm_map, &old, new - old, NULL,
UVM_UNKNOWN_OFFSET, 0,
@@ -112,22 +110,17 @@ sys_obreak(struct proc *p, void *v, register_t *retval)
/*
* uvm_grow: enlarge the "stack segment" to include sp.
*/
-
void
uvm_grow(struct proc *p, vaddr_t sp)
{
struct vmspace *vm = p->p_vmspace;
int si;
- /*
- * For user defined stacks (from sendsig).
- */
+ /* For user defined stacks (from sendsig). */
if (sp < (vaddr_t)vm->vm_maxsaddr)
return;
- /*
- * For common case of already allocated (from trap).
- */
+ /* For common case of already allocated (from trap). */
#ifdef MACHINE_STACK_GROWS_UP
if (sp < USRSTACK + ptoa(vm->vm_ssize))
#else
@@ -135,9 +128,7 @@ uvm_grow(struct proc *p, vaddr_t sp)
#endif
return;
- /*
- * Really need to check vs limit and increment stack size if ok.
- */
+ /* Really need to check vs limit and increment stack size if ok. */
#ifdef MACHINE_STACK_GROWS_UP
si = atop(sp - USRSTACK) - vm->vm_ssize + 1;
#else
@@ -177,9 +168,7 @@ uvm_coredump(struct proc *p, struct vnode *vp, struct ucred *cred,
entry->start != p->p_p->ps_sigcode)
continue;
- /*
- * Don't dump mmaped devices.
- */
+ /* Don't dump mmaped devices. */
if (entry->object.uvm_obj != NULL &&
UVM_OBJ_IS_DEVICE(entry->object.uvm_obj))
continue;
@@ -214,9 +203,7 @@ uvm_coredump(struct proc *p, struct vnode *vp, struct ucred *cred,
} else
flag = CORE_DATA;
- /*
- * Set up a new core file segment.
- */
+ /* Set up a new core file segment. */
CORE_SETMAGIC(cseg, CORESEGMAGIC, CORE_GETMID(*chdr), flag);
cseg.c_addr = start;
cseg.c_size = end - start;
@@ -290,9 +277,7 @@ uvm_coredump_walkmap(struct proc *p, void *iocookie,
entry->start != p->p_p->ps_sigcode)
continue;
- /*
- * Don't dump mmaped devices.
- */
+ /* Don't dump mmaped devices. */
if (entry->object.uvm_obj != NULL &&
UVM_OBJ_IS_DEVICE(entry->object.uvm_obj))
continue;
diff --git a/sys/uvm/uvm_user.c b/sys/uvm/uvm_user.c
index 6875b554d00..c2ac52b676a 100644
--- a/sys/uvm/uvm_user.c
+++ b/sys/uvm/uvm_user.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_user.c,v 1.11 2007/04/27 17:01:54 art Exp $ */
+/* $OpenBSD: uvm_user.c,v 1.12 2014/04/13 23:14:15 tedu Exp $ */
/* $NetBSD: uvm_user.c,v 1.8 2000/06/27 17:29:37 mrg Exp $ */
/*
@@ -49,7 +49,6 @@
/*
* uvm_deallocate: deallocate memory (unmap)
*/
-
void
uvm_deallocate(struct vm_map *map, vaddr_t start, vsize_t size)
{
diff --git a/sys/uvm/uvm_vnode.c b/sys/uvm/uvm_vnode.c
index 78fa81fdee2..a67c01e5295 100644
--- a/sys/uvm/uvm_vnode.c
+++ b/sys/uvm/uvm_vnode.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_vnode.c,v 1.80 2014/04/08 18:48:41 beck Exp $ */
+/* $OpenBSD: uvm_vnode.c,v 1.81 2014/04/13 23:14:15 tedu Exp $ */
/* $NetBSD: uvm_vnode.c,v 1.36 2000/11/24 20:34:01 chs Exp $ */
/*
@@ -83,7 +83,6 @@ struct rwlock uvn_sync_lock; /* locks sync operation */
/*
* functions
*/
-
void uvn_cluster(struct uvm_object *, voff_t, voff_t *, voff_t *);
void uvn_detach(struct uvm_object *);
boolean_t uvn_flush(struct uvm_object *, voff_t, voff_t, int);
@@ -97,7 +96,6 @@ void uvn_reference(struct uvm_object *);
/*
* master pager structure
*/
-
struct uvm_pagerops uvm_vnodeops = {
uvn_init,
uvn_reference,
@@ -113,13 +111,11 @@ struct uvm_pagerops uvm_vnodeops = {
/*
* the ops!
*/
-
/*
* uvn_init
*
* init pager private data structures.
*/
-
void
uvn_init(void)
{
@@ -142,7 +138,6 @@ uvn_init(void)
* => note that uvm_object is first thing in vnode structure, so their
* pointers are equiv.
*/
-
struct uvm_object *
uvn_attach(void *arg, vm_prot_t accessprot)
{
@@ -155,17 +150,13 @@ uvn_attach(void *arg, vm_prot_t accessprot)
used_vnode_size = (u_quad_t)0; /* XXX gcc -Wuninitialized */
- /*
- * first get a lock on the uvn.
- */
+ /* first get a lock on the uvn. */
while (uvn->u_flags & UVM_VNODE_BLOCKED) {
uvn->u_flags |= UVM_VNODE_WANTED;
UVM_WAIT(uvn, FALSE, "uvn_attach", 0);
}
- /*
- * if we're mapping a BLK device, make sure it is a disk.
- */
+ /* if we're mapping a BLK device, make sure it is a disk. */
if (vp->v_type == VBLK && bdevsw[major(vp->v_rdev)].d_type != D_DISK) {
return(NULL);
}
@@ -243,9 +234,7 @@ uvn_attach(void *arg, vm_prot_t accessprot)
printf("used_vnode_size = %llu\n", (long long)used_vnode_size);
#endif
- /*
- * now set up the uvn.
- */
+ /* now set up the uvn. */
uvm_objinit(&uvn->u_obj, &uvm_vnodeops, 1);
oldflags = uvn->u_flags;
uvn->u_flags = UVM_VNODE_VALID|UVM_VNODE_CANPERSIST;
@@ -321,10 +310,7 @@ uvn_detach(struct uvm_object *uobj)
return;
}
- /*
- * get other pointers ...
- */
-
+ /* get other pointers ... */
uvn = (struct uvm_vnode *) uobj;
vp = (struct vnode *) uobj;
@@ -338,7 +324,6 @@ uvn_detach(struct uvm_object *uobj)
* we just dropped the last reference to the uvn. see if we can
* let it "stick around".
*/
-
if (uvn->u_flags & UVM_VNODE_CANPERSIST) {
/* won't block */
uvn_flush(uobj, 0, 0, PGO_DEACTIVATE|PGO_ALLPAGES);
@@ -346,10 +331,7 @@ uvn_detach(struct uvm_object *uobj)
return;
}
- /*
- * its a goner!
- */
-
+ /* it's a goner! */
uvn->u_flags |= UVM_VNODE_DYING;
/*
@@ -361,7 +343,6 @@ uvn_detach(struct uvm_object *uobj)
* pageout by the daemon. (there can't be any pending "get"'s
* because there are no references to the object).
*/
-
(void) uvn_flush(uobj, 0, 0, PGO_CLEANIT|PGO_FREE|PGO_ALLPAGES);
/*
@@ -372,7 +353,6 @@ uvn_detach(struct uvm_object *uobj)
* state, so that in the case the vnode gets terminated we know
* to leave it alone. Otherwise we'll kill the vnode when it's empty.
*/
-
uvn->u_flags |= UVM_VNODE_RELKILL;
/* wait on any outstanding io */
while (uobj->uo_npages && uvn->u_flags & UVM_VNODE_RELKILL) {
@@ -398,9 +378,7 @@ uvn_detach(struct uvm_object *uobj)
if (oldflags & UVM_VNODE_WANTED)
wakeup(uvn);
- /*
- * drop our reference to the vnode.
- */
+ /* drop our reference to the vnode. */
vrele(vp);
return;
@@ -431,16 +409,13 @@ uvn_detach(struct uvm_object *uobj)
* ops will fail (due to the backing vnode now being "dead"). this
* will prob. kill any process using the uvn due to pgo_get failing.
*/
-
void
uvm_vnp_terminate(struct vnode *vp)
{
struct uvm_vnode *uvn = &vp->v_uvm;
int oldflags;
- /*
- * check if it is valid
- */
+ /* check if it is valid */
if ((uvn->u_flags & UVM_VNODE_VALID) == 0) {
return;
}
@@ -451,7 +426,6 @@ uvm_vnp_terminate(struct vnode *vp)
* because it is valid, and uvn's that are in the ALOCK state haven't
* been marked valid yet.
*/
-
#ifdef DEBUG
/*
* debug check: are we yanking the vnode out from under our uvn?
@@ -470,7 +444,6 @@ uvm_vnp_terminate(struct vnode *vp)
* away. note that a uvn can only be in the RELKILL state if it
* has a zero reference count.
*/
-
if (uvn->u_flags & UVM_VNODE_RELKILL)
uvn->u_flags &= ~UVM_VNODE_RELKILL; /* cancel RELKILL */
@@ -494,7 +467,6 @@ uvm_vnp_terminate(struct vnode *vp)
* as we just did a flush we expect all the pages to be gone or in
* the process of going. sleep to wait for the rest to go [via iosync].
*/
-
while (uvn->u_obj.uo_npages) {
#ifdef DEBUG
struct vm_page *pp;
@@ -520,20 +492,15 @@ uvm_vnp_terminate(struct vnode *vp)
* terminating a uvn with active mappings we let it live ... future
* calls down to the vnode layer will fail.
*/
-
oldflags = uvn->u_flags;
if (uvn->u_obj.uo_refs) {
-
/*
* uvn must live on in its dead-vnode state until all references
* are gone. restore flags. clear CANPERSIST state.
*/
-
uvn->u_flags &= ~(UVM_VNODE_DYING|UVM_VNODE_VNISLOCKED|
UVM_VNODE_WANTED|UVM_VNODE_CANPERSIST);
-
} else {
-
/*
* free the uvn now. note that the vref reference is already
* gone [it is dropped when we enter the persist state].
@@ -587,7 +554,6 @@ uvm_vnp_terminate(struct vnode *vp)
* dont forget to look at "object" wanted flag in all cases.
*/
-
/*
* uvn_flush: flush pages out of a uvm object.
*
@@ -616,7 +582,6 @@ uvm_vnp_terminate(struct vnode *vp)
* off (i.e. we need to do an iosync). also note that once a
* page is PG_BUSY it must stay in its object until it is un-busyed.
*/
-
#define UVN_HASH_PENALTY 4 /* XXX: a guess */
boolean_t
@@ -629,10 +594,7 @@ uvn_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
boolean_t retval, need_iosync, needs_clean;
voff_t curoff;
- /*
- * get init vals and determine how we are going to traverse object
- */
-
+ /* get init vals and determine how we are going to traverse object */
need_iosync = FALSE;
retval = TRUE; /* return value */
if (flags & PGO_ALLPAGES) {
@@ -653,7 +615,6 @@ uvn_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
* bit is always up to date since there are no mappings).
* [borrowed PG_CLEANCHK idea from FreeBSD VM]
*/
-
if ((flags & PGO_CLEANIT) != 0) {
KASSERT(uobj->pgops->pgo_mk_pcluster != 0);
for (curoff = start ; curoff < stop; curoff += PAGE_SIZE) {
@@ -669,7 +630,6 @@ uvn_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
for (curoff = start; curoff < stop; curoff += PAGE_SIZE) {
if ((pp = uvm_pagelookup(uobj, curoff)) == NULL)
continue;
-
/*
* handle case where we do not need to clean page (either
* because we are not clean or because page is not dirty or
@@ -680,7 +640,6 @@ uvn_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
* queue it must stay put until it is !PG_BUSY (so as not to
* confuse pagedaemon).
*/
-
if ((flags & PGO_CLEANIT) == 0 || (pp->pg_flags & PG_BUSY) != 0) {
needs_clean = FALSE;
if ((pp->pg_flags & PG_BUSY) != 0 &&
@@ -704,9 +663,7 @@ uvn_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
needs_clean = ((pp->pg_flags & PG_CLEAN) == 0);
}
- /*
- * if we don't need a clean... deactivate/free pages then cont.
- */
+ /* if we don't need a clean, deactivate/free pages then cont. */
if (!needs_clean) {
if (flags & PGO_DEACTIVATE) {
if (pp->wire_count == 0) {
@@ -739,7 +696,6 @@ uvn_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
* let uvm_pager_put attempted a clustered page out.
* note: locked: page queues.
*/
-
atomic_setbits_int(&pp->pg_flags, PG_BUSY);
UVM_PAGE_OWN(pp, "uvn_flush");
pmap_page_protect(pp, VM_PROT_READ);
@@ -768,7 +724,6 @@ ReTry:
* map the pages into kernel memory (pager_map) due to lack
* of vm space. if this happens we drop back to sync I/O.
*/
-
if (result == VM_PAGER_AGAIN) {
/*
* it is unlikely, but page could have been released
@@ -795,7 +750,6 @@ ReTry:
* if success (OK, PEND) then uvm_pager_put returns the cluster
* to us in ppsp/npages.
*/
-
/*
* for pending async i/o if we are not deactivating
* we can move on to the next page. aiodoned deals with
@@ -808,10 +762,8 @@ ReTry:
* need to look at each page of the I/O operation, and do what
* we gotta do.
*/
-
for (lcv = 0 ; lcv < npages; lcv++) {
ptmp = ppsp[lcv];
-
/*
* verify the page didn't get moved
*/
@@ -824,7 +776,6 @@ ReTry:
* finished
* (in which case the page is no longer busy).
*/
-
if (result != VM_PAGER_PEND) {
if (ptmp->pg_flags & PG_WANTED)
wakeup(ptmp);
@@ -838,10 +789,7 @@ ReTry:
pmap_clear_modify(ptmp);
}
- /*
- * dispose of page
- */
-
+ /* dispose of page */
if (flags & PGO_DEACTIVATE) {
if (ptmp->wire_count == 0) {
pmap_page_protect(ptmp, VM_PROT_NONE);
@@ -868,14 +816,10 @@ ReTry:
} /* end of "pp" for loop */
- /*
- * done with pagequeues: unlock
- */
+ /* done with pagequeues: unlock */
uvm_unlock_pageq();
- /*
- * now wait for all I/O if required.
- */
+ /* now wait for all I/O if required. */
if (need_iosync) {
while (uvn->u_nio != 0) {
uvn->u_flags |= UVM_VNODE_IOSYNC;
@@ -926,7 +870,6 @@ uvn_cluster(struct uvm_object *uobj, voff_t offset, voff_t *loffset,
* => XXX: currently we use VOP_READ/VOP_WRITE which are only sync.
* [thus we never do async i/o! see iodone comment]
*/
-
int
uvn_put(struct uvm_object *uobj, struct vm_page **pps, int npages, int flags)
{
@@ -937,7 +880,6 @@ uvn_put(struct uvm_object *uobj, struct vm_page **pps, int npages, int flags)
return(retval);
}
-
/*
* uvn_get: get pages (synchronously) from backing store
*
@@ -947,7 +889,6 @@ uvn_put(struct uvm_object *uobj, struct vm_page **pps, int npages, int flags)
* => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
* => NOTE: caller must check for released pages!!
*/
-
int
uvn_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
int *npagesp, int centeridx, vm_prot_t access_type, int advice, int flags)
@@ -957,17 +898,12 @@ uvn_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
int lcv, result, gotpages;
boolean_t done;
- /*
- * step 1: handled the case where fault data structures are locked.
- */
-
+ /* step 1: handle the case where fault data structures are locked. */
if (flags & PGO_LOCKED) {
-
/*
* gotpages is the current number of pages we've gotten (which
* we pass back up to caller via *npagesp.
*/
-
gotpages = 0;
/*
@@ -975,7 +911,6 @@ uvn_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
* if the data structures are locked (i.e. the first time
* through).
*/
-
done = TRUE; /* be optimistic */
for (lcv = 0, current_offset = offset ; lcv < *npagesp ;
@@ -1006,7 +941,7 @@ uvn_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
pps[lcv] = ptmp;
gotpages++;
- } /* "for" lcv loop */
+ }
/*
* XXX: given the "advice", should we consider async read-ahead?
@@ -1040,7 +975,6 @@ uvn_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
* page at a time (otherwise we'd chunk). the VOP_READ() will do
* async-read-ahead for us at a lower level.
*/
-
for (lcv = 0, current_offset = offset;
lcv < *npagesp ; lcv++, current_offset += PAGE_SIZE) {
@@ -1065,15 +999,12 @@ uvn_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
* then it means that we allocated a new busy/fake/clean page
* ptmp in the object and we need to do I/O to fill in the data.
*/
-
while (pps[lcv] == NULL) { /* top of "pps" while loop */
-
/* look for a current page */
ptmp = uvm_pagelookup(uobj, current_offset);
/* nope? allocate one now (if we can) */
if (ptmp == NULL) {
-
ptmp = uvm_pagealloc(uobj, current_offset,
NULL, 0);
@@ -1115,7 +1046,6 @@ uvn_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
* will point to it. nothing more to do except go to the
* next page.
*/
-
if (pps[lcv])
continue; /* next lcv */
@@ -1123,7 +1053,6 @@ uvn_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
* we have a "fake/busy/clean" page that we just allocated. do
* I/O to fill it with valid data.
*/
-
result = uvn_io((struct uvm_vnode *) uobj, &ptmp, 1,
PGO_SYNCIO, UIO_READ);
@@ -1131,7 +1060,6 @@ uvn_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
* I/O done. because we used syncio the result can not be
* PEND or AGAIN.
*/
-
if (result != VM_PAGER_OK) {
if (ptmp->pg_flags & PG_WANTED)
wakeup(ptmp);
@@ -1161,7 +1089,7 @@ uvn_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
pmap_clear_modify(ptmp); /* ... and clean */
pps[lcv] = ptmp;
- } /* lcv loop */
+ }
return (VM_PAGER_OK);
}
@@ -1186,18 +1114,12 @@ uvn_io(struct uvm_vnode *uvn, vm_page_t *pps, int npages, int flags, int rw)
int waitf, result, mapinflags;
size_t got, wanted;
- /*
- * init values
- */
-
+ /* init values */
waitf = (flags & PGO_SYNCIO) ? M_WAITOK : M_NOWAIT;
vn = (struct vnode *) uvn;
file_offset = pps[0]->offset;
- /*
- * check for sync'ing I/O.
- */
-
+ /* check for sync'ing I/O. */
while (uvn->u_flags & UVM_VNODE_IOSYNC) {
if (waitf == M_NOWAIT) {
return(VM_PAGER_AGAIN);
@@ -1206,18 +1128,12 @@ uvn_io(struct uvm_vnode *uvn, vm_page_t *pps, int npages, int flags, int rw)
UVM_WAIT(&uvn->u_flags, FALSE, "uvn_iosync", 0);
}
- /*
- * check size
- */
-
+ /* check size */
if (file_offset >= uvn->u_size) {
return(VM_PAGER_BAD);
}
- /*
- * first try and map the pages in (without waiting)
- */
-
+ /* first try and map the pages in (without waiting) */
mapinflags = (rw == UIO_READ) ?
UVMPAGER_MAPIN_READ : UVMPAGER_MAPIN_WRITE;
@@ -1231,7 +1147,6 @@ uvn_io(struct uvm_vnode *uvn, vm_page_t *pps, int npages, int flags, int rw)
* and can unlock it. if we still don't have a kva, try again
* (this time with sleep ok).
*/
-
uvn->u_nio++; /* we have an I/O in progress! */
if (kva == 0)
kva = uvm_pagermapin(pps, npages,
@@ -1242,11 +1157,7 @@ uvn_io(struct uvm_vnode *uvn, vm_page_t *pps, int npages, int flags, int rw)
* get touched (so we can look at "offset" without having to lock
* the object). set up for I/O.
*/
-
- /*
- * fill out uio/iov
- */
-
+ /* fill out uio/iov */
iov.iov_base = (caddr_t) kva;
wanted = npages << PAGE_SHIFT;
if (file_offset + wanted > uvn->u_size)
@@ -1260,10 +1171,7 @@ uvn_io(struct uvm_vnode *uvn, vm_page_t *pps, int npages, int flags, int rw)
uio.uio_resid = wanted;
uio.uio_procp = curproc;
- /*
- * do the I/O! (XXX: curproc?)
- */
-
+ /* do the I/O! (XXX: curproc?) */
/*
* This process may already have this vnode locked, if we faulted in
* copyin() or copyout() on a region backed by this vnode
@@ -1291,13 +1199,11 @@ uvn_io(struct uvm_vnode *uvn, vm_page_t *pps, int npages, int flags, int rw)
}
/* NOTE: vnode now unlocked (unless vnislocked) */
-
/*
* result == unix style errno (0 == OK!)
*
* zero out rest of buffer (if needed)
*/
-
if (result == 0) {
got = wanted - uio.uio_resid;
@@ -1309,23 +1215,15 @@ uvn_io(struct uvm_vnode *uvn, vm_page_t *pps, int npages, int flags, int rw)
}
}
- /*
- * now remove pager mapping
- */
+ /* now remove pager mapping */
uvm_pagermapout(kva, npages);
- /*
- * now clean up the object (i.e. drop I/O count)
- */
-
+ /* now clean up the object (i.e. drop I/O count) */
uvn->u_nio--; /* I/O DONE! */
if ((uvn->u_flags & UVM_VNODE_IOSYNC) != 0 && uvn->u_nio == 0) {
wakeup(&uvn->u_nio);
}
- /*
- * done!
- */
if (result == 0)
return(VM_PAGER_OK);
else
@@ -1373,9 +1271,7 @@ uvm_vnp_uncache(struct vnode *vp)
{
struct uvm_vnode *uvn = &vp->v_uvm;
- /*
- * lock uvn part of the vnode and check to see if we need to do anything
- */
+ /* lock uvn part of the vnode and check if we need to do anything */
if ((uvn->u_flags & UVM_VNODE_VALID) == 0 ||
(uvn->u_flags & UVM_VNODE_BLOCKED) != 0) {
@@ -1386,7 +1282,6 @@ uvm_vnp_uncache(struct vnode *vp)
* we have a valid, non-blocked uvn. clear persist flag.
* if uvn is currently active we can return now.
*/
-
uvn->u_flags &= ~UVM_VNODE_CANPERSIST;
if (uvn->u_obj.uo_refs) {
return(FALSE);
@@ -1396,7 +1291,6 @@ uvm_vnp_uncache(struct vnode *vp)
* uvn is currently persisting! we have to gain a reference to
* it so that we can call uvn_detach to kill the uvn.
*/
-
vref(vp); /* seems ok, even with VOP_LOCK */
uvn->u_obj.uo_refs++; /* value is now 1 */
@@ -1423,10 +1317,6 @@ uvm_vnp_uncache(struct vnode *vp)
uvn_detach(&uvn->u_obj);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
- /*
- * and return...
- */
-
return(TRUE);
}
@@ -1453,9 +1343,7 @@ uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
struct uvm_vnode *uvn = &vp->v_uvm;
- /*
- * lock uvn and check for valid object, and if valid: do it!
- */
+ /* lock uvn and check for valid object, and if valid: do it! */
if (uvn->u_flags & UVM_VNODE_VALID) {
/*
@@ -1469,11 +1357,6 @@ uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
}
uvn->u_size = newsize;
}
-
- /*
- * done
- */
- return;
}
/*
@@ -1504,7 +1387,6 @@ uvm_vnp_sync(struct mount *mp)
*/
SIMPLEQ_INIT(&uvn_sync_q);
LIST_FOREACH(uvn, &uvn_wlist, u_wlist) {
-
vp = (struct vnode *)uvn;
if (mp && vp->v_mount != mp)
continue;
@@ -1520,7 +1402,6 @@ uvm_vnp_sync(struct mount *mp)
if ((uvn->u_flags & UVM_VNODE_BLOCKED) != 0)
continue;
-
/*
* gain reference. watch out for persisting uvns (need to
* regain vnode REF).