summaryrefslogtreecommitdiff
path: root/sys/nfs
diff options
context:
space:
mode:
authorBob Beck <beck@cvs.openbsd.org>2009-12-17 16:30:48 +0000
committerBob Beck <beck@cvs.openbsd.org>2009-12-17 16:30:48 +0000
commit8820ecd58e1bb5a04d8f7198f52debf2aae1b05a (patch)
tree2e5320df15336292480ab96faec2f1d55af3c0bf /sys/nfs
parent3296e7032a5722662e4890b3cc665052c149146c (diff)
This fixes a case where we could panic on a null deref with a bad vnode
in nfs_inactive, on a reboot. The core of the problem was in nfs_nget, when we lose the race to put a new nfsnode in the tree, we have previously allocated a vnode, which getnewvnode has done an insmntque into the nfs mp's mntlist. The problem being we then try again with a new vnode, abandoning this one on the mntlist, leaving junk there for us to die on when we unmount. This introduces VLARVAL - so we can indicate in a vnode that the higher level stuff hiding in v_data is incompletely set up. This flag is then used by nfs to deal with a halfway set up vnode and release it correctly. analysis and bogus fix by art@, correct fix by me after several failed attempts and much painful testing by krw@, good suggestions by tedu and miod. ok krw@ oga@ thib@ blambert@ art@
Diffstat (limited to 'sys/nfs')
-rw-r--r--sys/nfs/nfs_node.c38
1 files changed, 32 insertions, 6 deletions
diff --git a/sys/nfs/nfs_node.c b/sys/nfs/nfs_node.c
index 94e7fd6640d..44efb9ed504 100644
--- a/sys/nfs/nfs_node.c
+++ b/sys/nfs/nfs_node.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: nfs_node.c,v 1.50 2009/12/15 17:04:00 beck Exp $ */
+/* $OpenBSD: nfs_node.c,v 1.51 2009/12/17 16:30:47 beck Exp $ */
/* $NetBSD: nfs_node.c,v 1.16 1996/02/18 11:53:42 fvdl Exp $ */
/*
@@ -115,15 +115,17 @@ loop:
*/
rw_exit_write(&nfs_hashlock);
error = getnewvnode(VT_NFS, mnt, nfsv2_vnodeop_p, &nvp);
+ /* note that we don't have this vnode set up completely yet */
rw_enter_write(&nfs_hashlock);
if (error) {
*npp = NULL;
rw_exit_write(&nfs_hashlock);
return (error);
}
-
+ nvp->v_flag |= VLARVAL;
np = RB_FIND(nfs_nodetree, &nmp->nm_ntree, &find);
if (np != NULL) {
+ vgone(nvp);
rw_exit_write(&nfs_hashlock);
goto loop;
}
@@ -131,6 +133,8 @@ loop:
vp = nvp;
np = pool_get(&nfs_node_pool, PR_WAITOK | PR_ZERO);
vp->v_data = np;
+ /* we now have an nfsnode on this vnode */
+ vp->v_flag &= ~VLARVAL;
np->n_vnode = vp;
rw_init(&np->n_commitlock, "nfs_commitlk");
@@ -161,14 +165,25 @@ int
nfs_inactive(void *v)
{
struct vop_inactive_args *ap = v;
- struct nfsnode *np = VTONFS(ap->a_vp);
+ struct nfsnode *np;
struct sillyrename *sp;
#ifdef DIAGNOSTIC
if (prtactive && ap->a_vp->v_usecount != 0)
vprint("nfs_inactive: pushing active", ap->a_vp);
#endif
-
+ if (ap->a_vp->v_flag & VLARVAL)
+ /*
+ * vnode was incompletely set up, just return
+ * as we are throwing it away.
+ */
+ return(0);
+#ifdef DIAGNOSTIC
+ if (ap->a_vp->v_data == NULL)
+ panic("NULL v_data (no nfsnode set up?) in vnode %p\n",
+ ap->a_vp);
+#endif
+ np = VTONFS(ap->a_vp);
if (ap->a_vp->v_type != VDIR) {
sp = np->n_sillyrename;
np->n_sillyrename = NULL;
@@ -198,14 +213,25 @@ nfs_reclaim(void *v)
{
struct vop_reclaim_args *ap = v;
struct vnode *vp = ap->a_vp;
- struct nfsmount *nmp = VFSTONFS(vp->v_mount);
+ struct nfsmount *nmp;
struct nfsnode *np = VTONFS(vp);
#ifdef DIAGNOSTIC
if (prtactive && vp->v_usecount != 0)
vprint("nfs_reclaim: pushing active", vp);
#endif
-
+ if (ap->a_vp->v_flag & VLARVAL)
+ /*
+ * vnode was incompletely set up, just return
+ * as we are throwing it away.
+ */
+ return(0);
+#ifdef DIAGNOSTIC
+ if (ap->a_vp->v_data == NULL)
+ panic("NULL v_data (no nfsnode set up?) in vnode %p\n",
+ ap->a_vp);
+#endif
+ nmp = VFSTONFS(vp->v_mount);
rw_enter_write(&nfs_hashlock);
RB_REMOVE(nfs_nodetree, &nmp->nm_ntree, np);
rw_exit_write(&nfs_hashlock);