author	Pedro Martelletto <pedro@cvs.openbsd.org>	2005-10-08 16:36:24 +0000
committer	Pedro Martelletto <pedro@cvs.openbsd.org>	2005-10-08 16:36:24 +0000
commit	155e88756434b388c0ff6090ec26d78bb6617c86 (patch)
tree	8f972332240e25bbfc6e123cad64da46c74745c0	/sys/kern/vfs_bio.c
parent	061962915214b37c3513762eceb6899291741a64 (diff)
Revert revision 1.39. Under heavy load, it could cause severe corruption
in the buffer lists by removing a buffer from the hash twice. Problem
identified in discussion with Alexander Bluhm <Alexander_Bluhm@genua.de>.
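Why a double removal corrupts the hash chains: in vfs_bio.c, binshash() and
bremhash() are essentially the <sys/queue.h> LIST_INSERT_HEAD() and
LIST_REMOVE() macros, and LIST_REMOVE() on an element that was already
unlinked writes through that element's stale link fields. A minimal
user-space sketch of the hazard (the ids and chain contents are made up
for illustration; this is not the kernel code itself):

#include <stdio.h>
#include <sys/queue.h>

struct buf {
	LIST_ENTRY(buf) b_hash;		/* hash chain links, as in struct buf */
	int b_id;
};

LIST_HEAD(bufhash, buf);

int
main(void)
{
	struct bufhash bh = LIST_HEAD_INITIALIZER(bh);
	struct buf a = { .b_id = 1 }, b = { .b_id = 2 }, c = { .b_id = 3 };
	struct buf *bp;

	/* Build the chain head -> c -> b -> a. */
	LIST_INSERT_HEAD(&bh, &a, b_hash);
	LIST_INSERT_HEAD(&bh, &b, b_hash);
	LIST_INSERT_HEAD(&bh, &c, b_hash);

	LIST_REMOVE(&b, b_hash);	/* fine: head -> c -> a */
	LIST_REMOVE(&a, b_hash);	/* fine: head -> c */
	LIST_REMOVE(&b, b_hash);	/* double removal: b's stale le_prev
					 * still points at c's link field,
					 * so this re-links the removed 'a' */

	/* Prints ids 3 and 1: the removed 'a' is back on the chain. */
	LIST_FOREACH(bp, &bh, b_hash)
		printf("still hashed: %d\n", bp->b_id);
	return (0);
}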
Diffstat (limited to 'sys/kern/vfs_bio.c')
-rw-r--r--	sys/kern/vfs_bio.c	56
1 file changed, 16 insertions(+), 40 deletions(-)
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index fc05d3afd09..fa37e42b830 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vfs_bio.c,v 1.77 2005/06/27 22:08:39 pedro Exp $ */
+/* $OpenBSD: vfs_bio.c,v 1.78 2005/10/08 16:36:23 pedro Exp $ */
/* $NetBSD: vfs_bio.c,v 1.44 1996/06/11 11:15:36 pk Exp $ */
/*-
@@ -102,7 +102,7 @@ struct pool bufpool;
#define binstailfree(bp, dp) TAILQ_INSERT_TAIL(dp, bp, b_freelist)
static __inline struct buf *bio_doread(struct vnode *, daddr_t, int, int);
-int getnewbuf(int slpflag, int slptimeo, struct buf **);
+struct buf *getnewbuf(int slpflag, int slptimeo);
/*
* We keep a few counters to monitor the utilization of the buffer cache
@@ -598,7 +598,7 @@ struct buf *
getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
struct bufhashhdr *bh;
- struct buf *bp, *nbp = NULL;
+ struct buf *bp;
int s, err;
/*
@@ -638,22 +638,13 @@ start:
}
if (bp == NULL) {
- if (nbp == NULL && getnewbuf(slpflag, slptimeo, &nbp) != 0) {
+ if ((bp = getnewbuf(slpflag, slptimeo)) == NULL)
goto start;
- }
- bp = nbp;
binshash(bp, bh);
bp->b_blkno = bp->b_lblkno = blkno;
s = splbio();
bgetvp(vp, bp);
splx(s);
- } else if (nbp != NULL) {
- /*
- * Set B_AGE so that buffer appear at BQ_CLEAN head
- * and gets reused ASAP.
- */
- SET(nbp->b_flags, B_AGE);
- brelse(nbp);
}
allocbuf(bp, size);
@@ -668,7 +659,8 @@ geteblk(int size)
{
struct buf *bp;
- getnewbuf(0, 0, &bp);
+ while ((bp = getnewbuf(0, 0)) == 0)
+ ;
SET(bp->b_flags, B_INVAL);
binshash(bp, &invalhash);
allocbuf(bp, size);
@@ -707,7 +699,8 @@ allocbuf(struct buf *bp, int size)
int amt;
/* find a buffer */
- getnewbuf(0, 0, &nbp);
+ while ((nbp = getnewbuf(0, 0)) == NULL)
+ ;
SET(nbp->b_flags, B_INVAL);
binshash(nbp, &invalhash);
@@ -765,21 +758,13 @@ out:
/*
* Find a buffer which is available for use.
- *
- * We must notify getblk if we slept during the buffer allocation. When
- * that happens, we allocate a buffer anyway (unless tsleep is interrupted
- * or times out) and return !0.
*/
-int
-getnewbuf(int slpflag, int slptimeo, struct buf **bpp)
+struct buf *
+getnewbuf(int slpflag, int slptimeo)
{
struct buf *bp;
- int s, ret, error;
-
- *bpp = NULL;
- ret = 0;
+ int s;
-start:
s = splbio();
/*
* Wake up cleaner if we're getting low on buffers.
@@ -790,24 +775,16 @@ start:
if ((numcleanpages <= locleanpages) &&
curproc != syncerproc && curproc != cleanerproc) {
needbuffer++;
- error = tsleep(&needbuffer, slpflag|(PRIBIO+1), "getnewbuf",
- slptimeo);
+ tsleep(&needbuffer, slpflag|(PRIBIO+1), "getnewbuf", slptimeo);
splx(s);
- if (error)
- return (1);
- ret = 1;
- goto start;
+ return (0);
}
if ((bp = TAILQ_FIRST(&bufqueues[BQ_CLEAN])) == NULL) {
/* wait for a free buffer of any kind */
nobuffers = 1;
- error = tsleep(&nobuffers, slpflag|(PRIBIO-3),
- "getnewbuf", slptimeo);
+ tsleep(&nobuffers, slpflag|(PRIBIO-3), "getnewbuf", slptimeo);
splx(s);
- if (error)
- return (1);
- ret = 1;
- goto start;
+ return (0);
}
bremfree(bp);
@@ -844,8 +821,7 @@ start:
bp->b_validoff = bp->b_validend = 0;
bremhash(bp);
- *bpp = bp;
- return (ret);
+ return (bp);
}
/*
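Taken together, the restored getnewbuf() contract is: return a buffer that
has already been pulled off BQ_CLEAN and out of the hash, or return NULL
whenever the caller was put to sleep, since a sleep invalidates whatever
the caller had checked beforehand. getblk() reacts to NULL by re-running
its hash lookup (goto start); geteblk() and allocbuf() have nothing to
re-check and simply retry. A minimal user-space sketch of the two caller
patterns (the pool, counter and ids are stand-ins for illustration, not
the kernel code):

#include <stdio.h>
#include <stddef.h>

struct buf { int b_id; };

static struct buf pool[2] = { { 1 }, { 2 } };
static int navail;			/* 0 until the "cleaner" runs */

/*
 * Stand-in for the post-revert getnewbuf(): hand back a free buffer,
 * or return NULL after "sleeping" (modeled by the first call, which
 * lets the cleaner replenish the free list).
 */
static struct buf *
getnewbuf(int slpflag, int slptimeo)
{
	(void)slpflag;
	(void)slptimeo;
	if (navail == 0) {
		navail = 2;		/* tsleep(9) stand-in: buffers freed */
		return (NULL);		/* caller must re-check and retry */
	}
	return (&pool[--navail]);
}

int
main(void)
{
	struct buf *bp;

	/*
	 * getblk()-style caller: a NULL return sends it back to the
	 * top, since the block may have appeared while asleep.
	 */
start:
	if ((bp = getnewbuf(0, 0)) == NULL)
		goto start;		/* real code: re-run the hash lookup */
	printf("getblk-style: got buf %d\n", bp->b_id);

	/* geteblk()/allocbuf()-style caller: nothing to re-check. */
	while ((bp = getnewbuf(0, 0)) == NULL)
		;
	printf("geteblk-style: got buf %d\n", bp->b_id);
	return (0);
}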