summaryrefslogtreecommitdiff
path: root/sys/uvm/uvm_pdaemon.c
diff options
context:
space:
mode:
author	Ted Unangst <tedu@cvs.openbsd.org>	2014-04-13 23:14:16 +0000
committer	Ted Unangst <tedu@cvs.openbsd.org>	2014-04-13 23:14:16 +0000
commit8c056ce52ed8926ad8c2f8c92d2f8fdfaaf5e2d9 (patch)
tree63ebc99133093232a5ed420c9b08e8c960fcd642 /sys/uvm/uvm_pdaemon.c
parentf7270b4ff1a063635837af701d72a8ec3b6c5b65 (diff)
compress code by turning four line comments into one line comments.
emphatic ok usual suspects, grudging ok miod
Diffstat (limited to 'sys/uvm/uvm_pdaemon.c')
-rw-r--r--	sys/uvm/uvm_pdaemon.c	| 90
1 files changed, 13 insertions, 77 deletions
diff --git a/sys/uvm/uvm_pdaemon.c b/sys/uvm/uvm_pdaemon.c
index 42fbc56018c..5fc88e8c8dd 100644
--- a/sys/uvm/uvm_pdaemon.c
+++ b/sys/uvm/uvm_pdaemon.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_pdaemon.c,v 1.66 2014/02/06 16:40:40 tedu Exp $ */
+/* $OpenBSD: uvm_pdaemon.c,v 1.67 2014/04/13 23:14:15 tedu Exp $ */
/* $NetBSD: uvm_pdaemon.c,v 1.23 2000/08/20 10:24:14 bjh21 Exp $ */
/*
@@ -112,10 +112,7 @@ uvm_wait(const char *wmsg)
{
int timo = 0;
- /*
- * check for page daemon going to sleep (waiting for itself)
- */
-
+ /* check for page daemon going to sleep (waiting for itself) */
if (curproc == uvm.pagedaemon_proc) {
printf("uvm_wait emergency bufbackoff\n");
if (bufbackoff(NULL, 4) == 0)
@@ -186,7 +183,6 @@ uvmpd_tune(void)
/*
* uvm_pageout: the main loop for the pagedaemon
*/
-
void
uvm_pageout(void *arg)
{
@@ -195,10 +191,7 @@ uvm_pageout(void *arg)
int work_done;
int npages = 0;
- /*
- * ensure correct priority and set paging parameters...
- */
-
+ /* ensure correct priority and set paging parameters... */
uvm.pagedaemon_proc = curproc;
(void) spl0();
uvm_lock_pageq();
@@ -206,10 +199,6 @@ uvm_pageout(void *arg)
uvmpd_tune();
uvm_unlock_pageq();
- /*
- * main loop
- */
-
for (;;) {
long size;
work_done = 0; /* No work done this iteration. */
@@ -230,10 +219,7 @@ uvm_pageout(void *arg)
uvm_unlock_fpageq();
- /*
- * now lock page queues and recompute inactive count
- */
-
+ /* now lock page queues and recompute inactive count */
uvm_lock_pageq();
if (npages != uvmexp.npages) { /* check for new pages? */
npages = uvmexp.npages;
@@ -245,9 +231,7 @@ uvm_pageout(void *arg)
uvmexp.inactarg = uvmexp.freetarg + 1;
}
- /*
- * Reclaim pages from the buffer cache if possible.
- */
+ /* Reclaim pages from the buffer cache if possible. */
size = 0;
if (pma != NULL)
size += pma->pm_size >> PAGE_SHIFT;
@@ -256,9 +240,7 @@ uvm_pageout(void *arg)
BUFPAGES_DEFICIT);
(void) bufbackoff(&constraint, size * 2);
- /*
- * Scan if needed to meet our targets.
- */
+ /* Scan if needed to meet our targets. */
if (pma != NULL ||
((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg) ||
((uvmexp.inactive + BUFPAGES_INACT) < uvmexp.inactarg)) {
@@ -289,10 +271,7 @@ uvm_pageout(void *arg)
}
uvm_unlock_fpageq();
- /*
- * scan done. unlock page queues (the only lock we are holding)
- */
-
+ /* scan done. unlock page queues (only lock we are holding) */
uvm_unlock_pageq();
}
/*NOTREACHED*/
@@ -302,7 +281,6 @@ uvm_pageout(void *arg)
/*
* uvm_aiodone_daemon: main loop for the aiodone daemon.
*/
-
void
uvm_aiodone_daemon(void *arg)
{
@@ -312,7 +290,6 @@ uvm_aiodone_daemon(void *arg)
uvm.aiodoned_proc = curproc;
for (;;) {
-
/*
* Check for done aio structures. If we've got structures to
* process, do so. Otherwise sleep while avoiding races.
@@ -325,10 +302,7 @@ uvm_aiodone_daemon(void *arg)
TAILQ_INIT(&uvm.aio_done);
mtx_leave(&uvm.aiodoned_lock);
- /*
- * process each i/o that's done.
- */
-
+ /* process each i/o that's done. */
free = uvmexp.free;
while (bp != NULL) {
if (bp->b_flags & B_PDAEMON) {
@@ -383,30 +357,25 @@ uvmpd_scan_inactive(struct pglist *pglst)
* swap cluster in "swpps" rather than in pps (allows us to mix
* clustering types in the event of a mixed inactive queue).
*/
-
/*
* swslot is non-zero if we are building a swap cluster. we want
* to stay in the loop while we have a page to scan or we have
* a swap-cluster to build.
*/
-
swslot = 0;
swnpages = swcpages = 0;
free = 0;
dirtyreacts = 0;
for (p = TAILQ_FIRST(pglst); p != NULL || swslot != 0; p = nextpg) {
-
/*
* note that p can be NULL iff we have traversed the whole
* list and need to do one final swap-backed clustered pageout.
*/
-
uobj = NULL;
anon = NULL;
if (p) {
-
/*
* update our copy of "free" and see if we've met
* our target
@@ -428,7 +397,6 @@ uvmpd_scan_inactive(struct pglist *pglst)
}
if (p) { /* if (we have a new page to consider) */
-
/*
* we are below target and have a new page to consider.
*/
@@ -466,7 +434,6 @@ uvmpd_scan_inactive(struct pglist *pglst)
* if the page is ownerless, claim it in the
* name of "anon"!
*/
-
if ((p->pg_flags & PQ_ANON) == 0) {
KASSERT(p->loan_count > 0);
p->loan_count--;
@@ -496,7 +463,6 @@ uvmpd_scan_inactive(struct pglist *pglst)
* the page is not busy. if the page is clean we
* can free it now and continue.
*/
-
if (p->pg_flags & PG_CLEAN) {
if (p->pg_flags & PQ_SWAPBACKED) {
/* this page now lives only in swap */
@@ -527,7 +493,6 @@ uvmpd_scan_inactive(struct pglist *pglst)
* this page is dirty, skip it if we'll have met our
* free target when all the current pageouts complete.
*/
-
if (free + uvmexp.paging > uvmexp.freetarg << 2) {
continue;
}
@@ -538,7 +503,6 @@ uvmpd_scan_inactive(struct pglist *pglst)
* reactivate it so that we eventually cycle
* all pages thru the inactive queue.
*/
-
KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
if ((p->pg_flags & PQ_SWAPBACKED) &&
uvmexp.swpgonly == uvmexp.swpages) {
@@ -552,7 +516,6 @@ uvmpd_scan_inactive(struct pglist *pglst)
* is full, free any swap allocated to the page
* so that other pages can be paged out.
*/
-
KASSERT(uvmexp.swpginuse <= uvmexp.swpages);
if ((p->pg_flags & PQ_SWAPBACKED) &&
uvmexp.swpginuse == uvmexp.swpages) {
@@ -587,13 +550,8 @@ uvmpd_scan_inactive(struct pglist *pglst)
* for swap-backed pages we need to (re)allocate
* swap space.
*/
-
if (swap_backed) {
-
- /*
- * free old swap slot (if any)
- */
-
+ /* free old swap slot (if any) */
if (anon) {
if (anon->an_swslot) {
uvm_swap_free(anon->an_swslot,
@@ -605,10 +563,7 @@ uvmpd_scan_inactive(struct pglist *pglst)
p->offset >> PAGE_SHIFT);
}
- /*
- * start new cluster (if necessary)
- */
-
+ /* start new cluster (if necessary) */
if (swslot == 0) {
swnpages = MAXBSIZE >> PAGE_SHIFT;
swslot = uvm_swap_alloc(&swnpages,
@@ -624,10 +579,7 @@ uvmpd_scan_inactive(struct pglist *pglst)
swcpages = 0; /* cluster is empty */
}
- /*
- * add block to cluster
- */
-
+ /* add block to cluster */
swpps[swcpages] = p;
if (anon)
anon->an_swslot = swslot + swcpages;
@@ -638,7 +590,6 @@ uvmpd_scan_inactive(struct pglist *pglst)
swcpages++;
}
} else {
-
/* if p == NULL we must be doing a last swap i/o */
swap_backed = TRUE;
}
@@ -652,7 +603,6 @@ uvmpd_scan_inactive(struct pglist *pglst)
*
* for object pages, we always do the pageout.
*/
-
if (swap_backed) {
if (p) { /* if we just added a page to cluster */
/* cluster not full yet? */
@@ -749,10 +699,7 @@ uvmpd_scan_inactive(struct pglist *pglst)
}
#endif
- /*
- * clean up "p" if we have one
- */
-
+ /* clean up "p" if we have one */
if (p) {
/*
* the I/O request to "p" is done and uvm_pager_put
@@ -838,19 +785,16 @@ uvmpd_scan_inactive(struct pglist *pglst)
nextpg = TAILQ_FIRST(pglst); /* reload! */
}
} else {
-
/*
* if p is null in this loop, make sure it stays null
* in the next loop.
*/
-
nextpg = NULL;
/*
* lock page queues here just so they're always locked
* at the end of the loop.
*/
-
uvm_lock_pageq();
}
}
@@ -903,7 +847,6 @@ uvmpd_scan(void)
* alternate starting queue between swap and object based on the
* low bit of uvmexp.pdrevs (which we bump by one each call).
*/
-
got_it = FALSE;
pages_freed = uvmexp.pdfreed; /* XXX - int */
if ((uvmexp.pdrevs & 1) != 0 && uvmexp.nswapdev != 0)
@@ -918,14 +861,12 @@ uvmpd_scan(void)
* we have done the scan to get free pages. now we work on meeting
* our inactive target.
*/
-
inactive_shortage = uvmexp.inactarg - uvmexp.inactive - BUFPAGES_INACT;
/*
* detect if we're not going to be able to page anything out
* until we free some swap resources from active pages.
*/
-
swap_shortage = 0;
if (uvmexp.free < uvmexp.freetarg &&
uvmexp.swpginuse == uvmexp.swpages &&
@@ -953,10 +894,7 @@ uvmpd_scan(void)
}
}
- /*
- * skip this page if it's busy.
- */
-
+ /* skip this page if it's busy. */
if ((p->pg_flags & PG_BUSY) != 0) {
continue;
}
@@ -965,7 +903,6 @@ uvmpd_scan(void)
* if there's a shortage of swap, free any swap allocated
* to this page so that other pages can be paged out.
*/
-
if (swap_shortage > 0) {
if ((p->pg_flags & PQ_ANON) && p->uanon->an_swslot) {
uvm_swap_free(p->uanon->an_swslot, 1);
@@ -989,7 +926,6 @@ uvmpd_scan(void)
* deactivate this page if there's a shortage of
* inactive pages.
*/
-
if (inactive_shortage > 0) {
pmap_page_protect(p, VM_PROT_NONE);
/* no need to check wire_count as pg is "active" */