author     Bob Beck <beck@cvs.openbsd.org>  2007-05-29 05:28:55 +0000
committer  Bob Beck <beck@cvs.openbsd.org>  2007-05-29 05:28:55 +0000
commit     19d1793a620285adccacc8348f6b3e9d88cc23be (patch)
tree       fead5765e712a758c1451f2a0eeedf7a57692eee /sys/kern
parent     96c361a947808e1870f50cf6f8681214195235e2 (diff)
Step one of some vnode improvements:
- change getnewvnode to actually allocate "desiredvnodes"
- add a vdrop to un-hold a vnode held with vhold, and change the name
  cache to make use of vhold/vdrop, while keeping track of which vnodes
  are referred to by which cache entries to correctly hold/drop vnodes
  when the cache uses them.

ok thib@, tedu@, art@
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/vfs_cache.c   130
-rw-r--r--  sys/kern/vfs_getcwd.c   16
-rw-r--r--  sys/kern/vfs_subr.c     93
3 files changed, 147 insertions, 92 deletions
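
Before the diff itself, a rough idea of what the vhold()/vdrop() pairing described in the commit message buys: a hold count keeps a vnode off the free list while something (here, the name cache) still refers to it, and dropping the last hold makes it reclaimable again. The following is a minimal userland sketch using <sys/queue.h>, with simplified stand-in fields (no v_usecount, no bioflags, no locking); it is illustrative only, not the kernel code.

#include <sys/queue.h>
#include <assert.h>
#include <stdio.h>

struct vnode {
	int v_holdcnt;			/* outstanding holds (vhold minus vdrop) */
	int v_onfreelist;		/* stand-in for the VBIOONFREELIST flag */
	TAILQ_ENTRY(vnode) v_freelist;
};

TAILQ_HEAD(freelst, vnode);
struct freelst vnode_free_list = TAILQ_HEAD_INITIALIZER(vnode_free_list);
struct freelst vnode_hold_list = TAILQ_HEAD_INITIALIZER(vnode_hold_list);

/* Declare interest in a vnode: the first hold moves it off the free list. */
void
vhold(struct vnode *vp)
{
	if (vp->v_onfreelist && vp->v_holdcnt == 0) {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
	}
	vp->v_holdcnt++;
}

/* Lose interest: dropping the last hold makes the vnode reclaimable again. */
void
vdrop(struct vnode *vp)
{
	assert(vp->v_holdcnt > 0);
	vp->v_holdcnt--;
	if (vp->v_onfreelist && vp->v_holdcnt == 0) {
		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	}
}

int
main(void)
{
	struct vnode vn = { .v_holdcnt = 0, .v_onfreelist = 1 };

	TAILQ_INSERT_TAIL(&vnode_free_list, &vn, v_freelist);
	vhold(&vn);	/* e.g. a name cache entry now refers to vn */
	vdrop(&vn);	/* last such entry deleted: back on the free list */
	printf("holdcnt %d, on free list: %s\n", vn.v_holdcnt,
	    TAILQ_FIRST(&vnode_free_list) == &vn ? "yes" : "no");
	return 0;
}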
diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c
index 1c3cc2e1953..15ca6b7e043 100644
--- a/sys/kern/vfs_cache.c
+++ b/sys/kern/vfs_cache.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vfs_cache.c,v 1.21 2007/04/19 09:25:33 pedro Exp $ */
+/* $OpenBSD: vfs_cache.c,v 1.22 2007/05/29 05:28:53 beck Exp $ */
/* $NetBSD: vfs_cache.c,v 1.13 1996/02/04 02:18:09 christos Exp $ */
/*
@@ -69,8 +69,10 @@
* Structures associated with name caching.
*/
LIST_HEAD(nchashhead, namecache) *nchashtbl;
+TAILQ_HEAD (, namecache) ncneq;
u_long nchash; /* size of hash table - 1 */
long numcache; /* number of cache entries allocated */
+long numcachehv; /* number of holding vnodes */
TAILQ_HEAD(, namecache) nclruhead; /* LRU chain */
struct nchstats nchstats; /* cache effectiveness statistics */
@@ -129,10 +131,16 @@ cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
ncpp = &nchashtbl[NCHASH(dvp, cnp)];
LIST_FOREACH(ncp, ncpp, nc_hash) {
if (ncp->nc_dvp == dvp &&
- ncp->nc_dvpid == dvp->v_id &&
ncp->nc_nlen == cnp->cn_namelen &&
- !memcmp(ncp->nc_name, cnp->cn_nameptr, (u_int)ncp->nc_nlen))
- break;
+ !memcmp(ncp->nc_name, cnp->cn_nameptr, (u_int)ncp->nc_nlen)) {
+#ifdef DIAGNOSTIC
+ if (ncp->nc_dvpid != dvp->v_id)
+ panic("ncp->nc_dvpid %x != dvp->v_id %x\n",
+ ncp->nc_dvpid, dvp->v_id);
+ else
+#endif
+ break;
+ }
}
if (ncp == NULL) {
nchstats.ncs_miss++;
@@ -159,6 +167,8 @@ cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
goto remove;
}
} else if (ncp->nc_vpid != ncp->nc_vp->v_id) {
+ panic("ncp->vpid %x != ncp->nc_vp->v_id %x\n", ncp->nc_vpid,
+ ncp->nc_vp->v_id);
nchstats.ncs_falsehits++;
goto remove;
}
@@ -201,6 +211,7 @@ cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
*/
if (error || vpid != vp->v_id) {
if (!error) {
+ panic("vpid %x != vp->vid %x\n", vpid, vp->v_id);
vput(vp);
nchstats.ncs_falsehits++;
} else
@@ -236,17 +247,7 @@ remove:
* the cache entry is invalid, or otherwise don't
* want cache entry to exist.
*/
- TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
- LIST_REMOVE(ncp, nc_hash);
- ncp->nc_hash.le_prev = NULL;
-
- if (ncp->nc_vhash.le_prev != NULL) {
- LIST_REMOVE(ncp, nc_vhash);
- ncp->nc_vhash.le_prev = NULL;
- }
-
- pool_put(&nch_pool, ncp);
- numcache--;
+ cache_delete(ncp);
return (-1);
}
@@ -280,12 +281,13 @@ cache_revlookup(struct vnode *vp, struct vnode **dvpp, char **bpp, char *bufp)
LIST_FOREACH(ncp, nvcpp, nc_vhash) {
if (ncp->nc_vp == vp &&
- ncp->nc_vpid == vp->v_id &&
(dvp = ncp->nc_dvp) != NULL &&
/* avoid pesky '.' entries.. */
dvp != vp && ncp->nc_dvpid == dvp->v_id) {
-
#ifdef DIAGNOSTIC
+ if (ncp->nc_vpid != vp->v_id)
+ panic("ncp->nc_vpid %x != vp->v_id %x\n",
+ ncp->nc_vpid, vp->v_id);
if (ncp->nc_nlen == 1 &&
ncp->nc_name[0] == '.')
panic("cache_revlookup: found entry for .");
@@ -338,6 +340,7 @@ cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
struct namecache *ncp;
struct nchashhead *ncpp;
struct ncvhashhead *nvcpp;
+ int hold = 0;
if (!doingcache || cnp->cn_namelen > NCHNAMLEN)
return;
@@ -345,22 +348,13 @@ cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
/*
* Free the cache slot at head of lru chain.
*/
- if (numcache < desiredvnodes) {
- ncp = pool_get(&nch_pool, PR_WAITOK);
- bzero((char *)ncp, sizeof *ncp);
- numcache++;
- } else if ((ncp = TAILQ_FIRST(&nclruhead)) != NULL) {
- TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
- if (ncp->nc_hash.le_prev != NULL) {
- LIST_REMOVE(ncp, nc_hash);
- ncp->nc_hash.le_prev = NULL;
- }
- if (ncp->nc_vhash.le_prev != NULL) {
- LIST_REMOVE(ncp, nc_vhash);
- ncp->nc_vhash.le_prev = NULL;
- }
- } else
- return;
+ while ((numcache >= desiredvnodes) &&
+ ((ncp = TAILQ_FIRST(&nclruhead)) != NULL)){
+ cache_delete(ncp);
+ }
+ ncp = pool_get(&nch_pool, PR_WAITOK);
+ bzero((char *)ncp, sizeof *ncp);
+ numcache++;
/* grab the vnode we just found */
ncp->nc_vp = vp;
if (vp)
@@ -374,6 +368,15 @@ cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
ncpp = &nchashtbl[NCHASH(dvp, cnp)];
LIST_INSERT_HEAD(ncpp, ncp, nc_hash);
+ /* record references to us in various vnodes... */
+ if (LIST_EMPTY(&dvp->v_cache_src)) {
+ hold = 1;
+ numcachehv++;
+ }
+ LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
+ if (vp)
+ TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
+
/*
* Create reverse-cache entries (used in getcwd) for
* directories.
@@ -389,6 +392,8 @@ cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
nvcpp = &ncvhashtbl[NCVHASH(vp)];
LIST_INSERT_HEAD(nvcpp, ncp, nc_vhash);
}
+ if (hold)
+ vhold(dvp);
}
/*
@@ -397,7 +402,6 @@ cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
void
nchinit(void)
{
-
TAILQ_INIT(&nclruhead);
nchashtbl = hashinit(desiredvnodes, M_CACHE, M_WAITOK, &nchash);
ncvhashtbl = hashinit(desiredvnodes/8, M_CACHE, M_WAITOK, &ncvhash);
@@ -405,6 +409,44 @@ nchinit(void)
&pool_allocator_nointr);
}
+void cache_delete (struct namecache *ncp)
+{
+ /*
+ * just make it go away...
+ */
+ struct vnode *vp = NULL;
+ /*
+ * XXX abuse of the queue macro internals is endemic to this
+ * and needs fixing - for now this is inherited from earlier evil.
+ */
+ if (ncp->nc_lru.tqe_prev != NULL) {
+ TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
+ ncp->nc_lru.tqe_prev = NULL;
+ }
+ if (ncp->nc_hash.le_prev != NULL) {
+ LIST_REMOVE(ncp, nc_hash);
+ ncp->nc_hash.le_prev = NULL;
+ }
+ if (ncp->nc_vhash.le_prev != NULL) {
+ LIST_REMOVE(ncp, nc_vhash);
+ ncp->nc_vhash.le_prev = NULL;
+ }
+ LIST_REMOVE(ncp, nc_src);
+ if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src)) {
+ vp = ncp->nc_dvp;
+ numcachehv--;
+ }
+ if (ncp->nc_vp)
+ TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
+ ncp->nc_vp = NULL;
+ ncp->nc_dvp = NULL;
+ pool_put(&nch_pool, ncp);
+ numcache--;
+ if (vp) {
+ vdrop(vp);
+ }
+}
+
/*
* Cache flush, a particular vnode; called when a vnode is renamed to
* hide entries that would now be invalid
@@ -415,6 +457,12 @@ cache_purge(struct vnode *vp)
struct namecache *ncp;
struct nchashhead *ncpp;
+ while (!LIST_EMPTY(&vp->v_cache_src)) {
+ cache_delete(LIST_FIRST(&vp->v_cache_src));
+ }
+ while (!TAILQ_EMPTY(&vp->v_cache_dst)) {
+ cache_delete(TAILQ_FIRST(&vp->v_cache_dst));
+ }
vp->v_id = ++nextvnodeid;
if (nextvnodeid != 0)
return;
@@ -442,18 +490,6 @@ cache_purgevfs(struct mount *mp)
if (ncp->nc_dvp == NULL || ncp->nc_dvp->v_mount != mp)
continue;
/* free the resources we had */
- ncp->nc_vp = NULL;
- ncp->nc_dvp = NULL;
- TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
- if (ncp->nc_hash.le_prev != NULL) {
- LIST_REMOVE(ncp, nc_hash);
- ncp->nc_hash.le_prev = NULL;
- }
- if (ncp->nc_vhash.le_prev != NULL) {
- LIST_REMOVE(ncp, nc_vhash);
- ncp->nc_vhash.le_prev = NULL;
- }
- pool_put(&nch_pool, ncp);
- numcache--;
+ cache_delete(ncp);
}
}
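
The vfs_cache.c side of the change boils down to two things: a single cache_delete() helper that unlinks an entry from every list it may sit on (LRU chain, hash chain, per-vnode source/destination lists) instead of three hand-rolled copies of that logic, and a cache_enter() that evicts from the LRU head while the cache is over its limit before allocating a fresh entry. A compressed userland model of that shape, with made-up names and without the per-vnode lists or hold accounting, might look like:

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct namecache {
	char nc_name[32];
	TAILQ_ENTRY(namecache) nc_lru;
	LIST_ENTRY(namecache) nc_hash;
};

TAILQ_HEAD(, namecache) nclruhead = TAILQ_HEAD_INITIALIZER(nclruhead);
LIST_HEAD(, namecache) nchashhead = LIST_HEAD_INITIALIZER(nchashhead);
long numcache;
long desiredvnodes = 2;		/* tiny limit so eviction is visible */

/* Unlink an entry from every list it is on, then free it. */
void
cache_delete(struct namecache *ncp)
{
	if (ncp->nc_lru.tqe_prev != NULL)
		TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
	if (ncp->nc_hash.le_prev != NULL)
		LIST_REMOVE(ncp, nc_hash);
	free(ncp);
	numcache--;
}

/* Evict from the LRU head until under the limit, then insert a new entry. */
void
cache_enter(const char *name)
{
	struct namecache *ncp;

	while (numcache >= desiredvnodes &&
	    (ncp = TAILQ_FIRST(&nclruhead)) != NULL)
		cache_delete(ncp);

	if ((ncp = calloc(1, sizeof(*ncp))) == NULL)
		return;
	strlcpy(ncp->nc_name, name, sizeof(ncp->nc_name));
	numcache++;
	TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
	LIST_INSERT_HEAD(&nchashhead, ncp, nc_hash);
}

int
main(void)
{
	cache_enter("a");
	cache_enter("b");
	cache_enter("c");	/* over the limit: "a" is evicted first */
	printf("entries cached: %ld\n", numcache);
	return 0;
}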
diff --git a/sys/kern/vfs_getcwd.c b/sys/kern/vfs_getcwd.c
index 687d38c28b5..d05b3ba609b 100644
--- a/sys/kern/vfs_getcwd.c
+++ b/sys/kern/vfs_getcwd.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vfs_getcwd.c,v 1.9 2006/05/17 12:52:12 pedro Exp $ */
+/* $OpenBSD: vfs_getcwd.c,v 1.10 2007/05/29 05:28:53 beck Exp $ */
/* $NetBSD: vfs_getcwd.c,v 1.3.2.3 1999/07/11 10:24:09 sommerfeld Exp $ */
/*
@@ -239,17 +239,17 @@ vfs_getcwd_getcache(struct vnode **lvpp, struct vnode **uvpp, char **bpp,
if (error)
*uvpp = NULL;
- /*
- * Verify that vget() succeeded, and check that vnode capability
- * didn't change while we were waiting for the lock.
- */
- if (error || (vpid != uvp->v_id)) {
+
+#ifdef DIAGNOSTIC
+ /* XXX should not happen with vhold/vdrop in cache layer now. */
+ if (vpid != uvp->v_id)
+ panic("vpid %d != uvp->v_id %d\n");
+#endif
+ if (error) {
/*
* Try to get our lock back. If that works, tell the caller to
* try things the hard way, otherwise give up.
*/
- if (!error)
- vput(uvp);
*uvpp = NULL;
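
The vfs_getcwd.c hunk turns the old recovery path ("the vnode was recycled while we slept, so its v_id changed; fall back to the hard way") into a DIAGNOSTIC-only check, on the reasoning that a vnode held by the cache can no longer be recycled underneath the caller. A toy userland illustration of that invariant, with invented names standing in for the kernel structures:

#include <stdio.h>
#include <stdlib.h>

#define DIAGNOSTIC 1

struct obj {
	unsigned id;		/* stand-in for v_id, bumped on recycle */
	int holdcnt;		/* stand-in for v_holdcnt */
};

/* A recycler must skip held objects, so their identity stays stable. */
static void
try_recycle(struct obj *o)
{
	if (o->holdcnt == 0)
		o->id++;
}

static void
use_after_sleep(struct obj *o, unsigned saved_id)
{
#ifdef DIAGNOSTIC
	if (o->id != saved_id) {
		fprintf(stderr, "id %u != saved id %u\n", o->id, saved_id);
		abort();	/* the kernel would panic() here */
	}
#endif
	/* ...proceed; no "try again the hard way" fallback is needed... */
}

int
main(void)
{
	struct obj o = { .id = 7, .holdcnt = 1 };
	unsigned saved = o.id;

	try_recycle(&o);	/* no effect: the object is held */
	use_after_sleep(&o, saved);
	puts("identity stable while held");
	return 0;
}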
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 1650576c0e0..0a5ce5265dd 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vfs_subr.c,v 1.149 2007/05/28 21:05:21 thib Exp $ */
+/* $OpenBSD: vfs_subr.c,v 1.150 2007/05/29 05:28:54 beck Exp $ */
/* $NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $ */
/*
@@ -339,47 +339,50 @@ getnewvnode(enum vtagtype tag, struct mount *mp, int (**vops)(void *),
{
struct proc *p = curproc;
struct freelst *listhd;
- static int toggle;
struct vnode *vp;
int s;
/*
- * We must choose whether to allocate a new vnode or recycle an
- * existing one. The criterion for allocating a new one is that
- * the total number of vnodes is less than the number desired or
- * there are no vnodes on either free list. Generally we only
+ * Allocate a new vnode if we have less than the desired
+ * number allocated, otherwise, recycle one. Generally we only
* want to recycle vnodes that have no buffers associated with
- * them, so we look first on the vnode_free_list. If it is empty,
- * we next consider vnodes with referencing buffers on the
- * vnode_hold_list. The toggle ensures that half the time we
- * will use a buffer from the vnode_hold_list, and half the time
- * we will allocate a new one unless the list has grown to twice
- * the desired size. We are reticent to recycle vnodes from the
- * vnode_hold_list because we will lose the identity of all its
- * referencing buffers.
+ * them, so we look first on the vnode_free_list. If it is
+ * empty, we next consider vnodes with referencing buffers on
+ * the vnode_hold_list. We are reticent to recycle vnodes from
+ * the vnode_hold_list because we will lose the identity of
+ * all its referencing buffers.
*/
- toggle ^= 1;
- if (numvnodes > 2 * desiredvnodes)
- toggle = 0;
-
+ simple_lock(&vnode_free_list_slock);
s = splbio();
- if ((numvnodes < desiredvnodes) ||
- ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) &&
- ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
+ if (numvnodes < desiredvnodes) {
splx(s);
vp = pool_get(&vnode_pool, PR_WAITOK);
bzero((char *)vp, sizeof *vp);
+ LIST_INIT(&vp->v_cache_src);
+ TAILQ_INIT(&vp->v_cache_dst);
numvnodes++;
} else {
- for (vp = TAILQ_FIRST(listhd); vp != NULLVP;
+ for (vp = TAILQ_FIRST(listhd = &vnode_free_list); vp != NULLVP;
vp = TAILQ_NEXT(vp, v_freelist)) {
if (VOP_ISLOCKED(vp) == 0)
break;
}
+ /*
+ * There is nothing on the free list, so we have to try to
+ * recycle one off the hold list
+ */
+ if (vp == NULL) {
+ for (vp = TAILQ_FIRST(listhd = &vnode_hold_list);
+ vp != NULLVP;
+ vp = TAILQ_NEXT(vp, v_freelist)) {
+ if ((VOP_ISLOCKED(vp) == 0) && (vp->v_holdcnt == 0))
+ break;
+ }
+ }
/*
- * Unless this is a bad time of the month, at most
- * the first NCPUS items on the free list are
- * locked, so this is close enough to being empty.
+ * We have made a pass through both the free and hold list
+ * and not encountered an unlocked entry. So this is close
+ * enough to being empty.
*/
if (vp == NULL) {
splx(s);
@@ -393,6 +396,11 @@ getnewvnode(enum vtagtype tag, struct mount *mp, int (**vops)(void *),
vprint("free vnode", vp);
panic("free vnode isn't");
}
+
+ if (vp->v_holdcnt) {
+ vprint("held vnode", vp);
+ panic("unheld vnode being held!");
+ }
#endif
TAILQ_REMOVE(listhd, vp, v_freelist);
@@ -758,7 +766,7 @@ vrele(struct vnode *vp)
void vhold(struct vnode *vp);
/*
- * Page or buffer structure gets a reference.
+ * declare interest in a vnode.
*/
void
vhold(struct vnode *vp)
@@ -775,6 +783,27 @@ vhold(struct vnode *vp)
vp->v_holdcnt++;
}
+void vdrop(struct vnode *vp);
+
+/*
+ * lose interest in a vnode
+ */
+void
+vdrop(struct vnode *vp)
+{
+ vp->v_holdcnt--;
+
+ /*
+ * If it is on the holdlist and the hold count drops to
+ * zero, move it to the free list.
+ */
+ if ((vp->v_bioflag & VBIOONFREELIST) &&
+ vp->v_holdcnt == 0 && vp->v_usecount == 0) {
+ TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
+ TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
+ }
+}
+
/*
* Remove any vnodes in the vnode table belonging to mount point mp.
*
@@ -1993,17 +2022,7 @@ brelvp(struct buf *bp)
if (vp->v_holdcnt == 0)
panic("brelvp: holdcnt");
#endif
- vp->v_holdcnt--;
-
- /*
- * If it is on the holdlist and the hold count drops to
- * zero, move it to the free list.
- */
- if ((vp->v_bioflag & VBIOONFREELIST) &&
- vp->v_holdcnt == 0 && vp->v_usecount == 0) {
- TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
- TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
- }
+ vdrop(vp);
}
/*
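
Finally, the getnewvnode() rewrite in vfs_subr.c drops the old toggle heuristic: allocate while under desiredvnodes, otherwise scan the free list for an unlocked vnode, and only then the hold list, and there only for vnodes whose hold count has returned to zero. A simplified userland sketch of that selection order (locking, use counts and the actual recycling work are omitted, and all names are stand-ins):

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct vnode {
	int v_locked;
	int v_holdcnt;
	TAILQ_ENTRY(vnode) v_freelist;
};

TAILQ_HEAD(freelst, vnode);
struct freelst vnode_free_list = TAILQ_HEAD_INITIALIZER(vnode_free_list);
struct freelst vnode_hold_list = TAILQ_HEAD_INITIALIZER(vnode_hold_list);
long numvnodes, desiredvnodes = 1;

struct vnode *
getnewvnode(void)
{
	struct freelst *listhd;
	struct vnode *vp;

	/* Under the limit: just allocate a fresh vnode. */
	if (numvnodes < desiredvnodes) {
		numvnodes++;
		return calloc(1, sizeof(struct vnode));
	}

	/* Prefer the free list: recycling these loses no buffer identity. */
	listhd = &vnode_free_list;
	TAILQ_FOREACH(vp, listhd, v_freelist)
		if (!vp->v_locked)
			break;

	/* Nothing there: fall back to the hold list, unheld entries only. */
	if (vp == NULL) {
		listhd = &vnode_hold_list;
		TAILQ_FOREACH(vp, listhd, v_freelist)
			if (!vp->v_locked && vp->v_holdcnt == 0)
				break;
	}

	if (vp == NULL)
		return NULL;	/* the kernel would report "vnode table full" */

	TAILQ_REMOVE(listhd, vp, v_freelist);
	return vp;
}

int
main(void)
{
	struct vnode *a = getnewvnode();	/* fresh allocation */
	struct vnode *b;

	a->v_holdcnt = 1;			/* the name cache still holds it */
	TAILQ_INSERT_TAIL(&vnode_hold_list, a, v_freelist);
	b = getnewvnode();			/* over the limit and a is held */
	printf("got %s\n", b == NULL ? "nothing (all held)" : "a recycled vnode");
	return 0;
}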