author:    Bob Beck <beck@cvs.openbsd.org>    2012-12-02 19:42:37 +0000
committer: Bob Beck <beck@cvs.openbsd.org>    2012-12-02 19:42:37 +0000
commit:    af958412e16e8e024cc5350f5d7b62e52a9e15fb (patch)
tree:      e06851d8920eac95296a73127aae703994547281
parent:    c3c02a2d6fd5229881e56981e52ad6a1ff57ff7d (diff)
Fix the kva reserve: ensure the reserve is actually checked for, and fix the
case where buffers can be returned on the vinvalbuf path without waking
anyone waiting for kva.

An earlier version was looked at and ok'd by guenther@ in Coimbra; helpful
comments from kettenis@.
-rw-r--r--  sys/kern/vfs_bio.c      15
-rw-r--r--  sys/kern/vfs_biomem.c   23
-rw-r--r--  sys/sys/buf.h           11

3 files changed, 26 insertions, 23 deletions
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index e7caabc65cc..94fe0b88f3f 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: vfs_bio.c,v 1.140 2012/12/02 19:34:14 beck Exp $	*/
+/*	$OpenBSD: vfs_bio.c,v 1.141 2012/12/02 19:42:36 beck Exp $	*/
 /*	$NetBSD: vfs_bio.c,v 1.44 1996/06/11 11:15:36 pk Exp $	*/
 
 /*
@@ -101,19 +101,6 @@
 long bufbackpages;	/* number of pages we back off when asked to shrink */
 vsize_t bufkvm;
 
-/*
- * RESERVE_SLOTS of kva space, and the corresponding amount
- * of buffer pages are reserved for the cleaner and syncer's
- * exclusive use. Since we reserve kva slots to map the buffers
- * along with the buffer space, this ensures the cleaner and
- * syncer can always map and push out buffers if we get low
- * on buffer pages or kva space in which to map them.
- */
-#define RESERVE_SLOTS 4
-#define RESERVE_PAGES (RESERVE_SLOTS * MAXPHYS / PAGE_SIZE)
-#define BCACHE_MIN (RESERVE_PAGES * 2)
-#define UNCLEAN_PAGES (bcstats.numbufpages - bcstats.numcleanpages)
-
 struct proc *cleanerproc;
 int bd_req;			/* Sleep point for cleaner daemon. */
diff --git a/sys/kern/vfs_biomem.c b/sys/kern/vfs_biomem.c
index 13a8051a515..b8b160338a6 100644
--- a/sys/kern/vfs_biomem.c
+++ b/sys/kern/vfs_biomem.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: vfs_biomem.c,v 1.20 2012/11/18 16:56:41 beck Exp $	*/
+/*	$OpenBSD: vfs_biomem.c,v 1.21 2012/12/02 19:42:36 beck Exp $	*/
 /*
  * Copyright (c) 2007 Artur Grabowski <art@openbsd.org>
  *
@@ -30,8 +30,6 @@
 vaddr_t buf_kva_start, buf_kva_end;
 int buf_needva;
 TAILQ_HEAD(,buf) buf_valist;
-int buf_nkvmsleep;
-
 extern struct bcachestats bcstats;
 
 /*
@@ -135,10 +133,14 @@ buf_map(struct buf *bp)
 		/*
 		 * Find some buffer we can steal the space from.
 		 */
-		while ((vbp = TAILQ_FIRST(&buf_valist)) == NULL) {
+		vbp = TAILQ_FIRST(&buf_valist);
+		while ((curproc != syncerproc &&
+		    curproc != cleanerproc &&
+		    bcstats.kvaslots_avail <= RESERVE_SLOTS) ||
+		    vbp == NULL) {
 			buf_needva++;
-			buf_nkvmsleep++;
 			tsleep(&buf_needva, PRIBIO, "buf_needva", 0);
+			vbp = TAILQ_FIRST(&buf_valist);
 		}
 		va = buf_unmap(vbp);
 	}
@@ -177,8 +179,8 @@ buf_release(struct buf *bp)
 		TAILQ_INSERT_TAIL(&buf_valist, bp, b_valist);
 		bcstats.kvaslots_avail++;
 		if (buf_needva) {
-			buf_needva--;
-			wakeup_one(&buf_needva);
+			buf_needva = 0;
+			wakeup(&buf_needva);
 		}
 	}
 	CLR(bp->b_flags, B_BUSY|B_NOTMAPPED);
@@ -222,8 +224,13 @@ buf_dealloc_mem(struct buf *bp)
 	if (!(bp->b_flags & B_BUSY)) {	/* XXX - need better test */
 		TAILQ_REMOVE(&buf_valist, bp, b_valist);
 		bcstats.kvaslots_avail--;
-	} else
+	} else {
 		CLR(bp->b_flags, B_BUSY);
+		if (buf_needva) {
+			buf_needva = 0;
+			wakeup(&buf_needva);
+		}
+	}
 	SET(bp->b_flags, B_RELEASED);
 	TAILQ_INSERT_HEAD(&buf_valist, bp, b_valist);
 	bcstats.kvaslots_avail++;
diff --git a/sys/sys/buf.h b/sys/sys/buf.h
index eadae4541a8..0f036cd81b9 100644
--- a/sys/sys/buf.h
+++ b/sys/sys/buf.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: buf.h,v 1.81 2012/11/17 23:08:22 beck Exp $	*/
+/*	$OpenBSD: buf.h,v 1.82 2012/12/02 19:42:36 beck Exp $	*/
 /*	$NetBSD: buf.h,v 1.25 1997/04/09 21:12:17 mycroft Exp $	*/
 
 /*
@@ -281,6 +281,15 @@ struct cluster_info {
 
 #ifdef _KERNEL
 __BEGIN_DECLS
+/* Kva slots (of size MAXPHYS) reserved for syncer and cleaner. */
+#define RESERVE_SLOTS 4
+/* Buffer cache pages reserved for syncer and cleaner. */
+#define RESERVE_PAGES (RESERVE_SLOTS * MAXPHYS / PAGE_SIZE)
+/* Minimum size of the buffer cache, in pages. */
+#define BCACHE_MIN (RESERVE_PAGES * 2)
+#define UNCLEAN_PAGES (bcstats.numbufpages - bcstats.numcleanpages)
+
+extern struct proc *cleanerproc;
 extern long bufpages;		/* Max number of pages for buffers' data */
 extern struct pool bufpool;
 extern struct bufhead bufhead;
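For reference, here is a minimal userland sketch of the allocation pattern buf_map()/buf_release() move to in this diff: waiters re-check the full condition (including the RESERVE_SLOTS reserve) after every sleep, and the release path wakes all sleepers rather than a single one, so a waiter whose own condition is still false cannot swallow the only wakeup. This is not kernel code; pthreads stand in for tsleep(9)/wakeup(9), and all names (kva_alloc_slot, kva_free_slot, NSLOTS) are invented for the illustration.

```c
#include <pthread.h>
#include <stdio.h>

#define NSLOTS		8
#define RESERVE_SLOTS	4	/* held back for "syncer"/"cleaner" callers */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  slot_freed = PTHREAD_COND_INITIALIZER;
static int slots_avail = NSLOTS;

/* Take a slot; only privileged callers may dip into the reserve. */
static void
kva_alloc_slot(int privileged)
{
	pthread_mutex_lock(&lock);
	/* Re-evaluate the whole condition after every wakeup. */
	while ((!privileged && slots_avail <= RESERVE_SLOTS) ||
	    slots_avail == 0)
		pthread_cond_wait(&slot_freed, &lock);
	slots_avail--;
	pthread_mutex_unlock(&lock);
}

/*
 * Return a slot and wake all sleepers (analogue of wakeup(), not
 * wakeup_one()): a privileged waiter may be able to proceed even when
 * an unprivileged one still cannot.
 */
static void
kva_free_slot(void)
{
	pthread_mutex_lock(&lock);
	slots_avail++;
	pthread_cond_broadcast(&slot_freed);
	pthread_mutex_unlock(&lock);
}

int
main(void)
{
	kva_alloc_slot(0);	/* ordinary caller */
	kva_alloc_slot(1);	/* "cleaner": may use the reserve */
	kva_free_slot();
	kva_free_slot();
	printf("slots available: %d\n", slots_avail);
	return 0;
}
```

The broadcast is what makes the second hunk in buf_dealloc_mem() safe: when a buffer comes back on the vinvalbuf path, every sleeper gets to re-test its own condition instead of the single woken process possibly going right back to sleep.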