summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--  sys/miscfs/nullfs/null.h        |  23
-rw-r--r--  sys/miscfs/nullfs/null_subr.c   |  27
-rw-r--r--  sys/miscfs/nullfs/null_vfsops.c |   6
-rw-r--r--  sys/miscfs/nullfs/null_vnops.c  | 309
-rw-r--r--  sys/miscfs/union/union_vnops.c  |  23
5 files changed, 366 insertions(+), 22 deletions(-)
diff --git a/sys/miscfs/nullfs/null.h b/sys/miscfs/nullfs/null.h
index dc35a7edd1a..2ba6eef6d19 100644
--- a/sys/miscfs/nullfs/null.h
+++ b/sys/miscfs/nullfs/null.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: null.h,v 1.2 1996/02/27 07:58:01 niklas Exp $ */
-/* $NetBSD: null.h,v 1.5 1996/02/09 22:40:26 christos Exp $ */
+/* $OpenBSD: null.h,v 1.3 1996/05/22 12:04:35 deraadt Exp $ */
+/* $NetBSD: null.h,v 1.7 1996/05/17 20:53:11 gwr Exp $ */
/*
* Copyright (c) 1992, 1993
@@ -57,9 +57,26 @@ struct null_node {
LIST_ENTRY(null_node) null_hash; /* Hash list */
struct vnode *null_lowervp; /* VREFed once */
struct vnode *null_vnode; /* Back pointer */
+ unsigned int null_flags; /* locking, etc. */
+#ifdef DIAGNOSTIC
+ pid_t null_pid; /* who's locking it? */
+ caddr_t null_lockpc; /* their return addr */
+ caddr_t null_lockpc2; /* their return addr^2 */
+#endif
};
-extern int null_node_create __P((struct mount *mp, struct vnode *target, struct vnode **vpp));
+#if !defined(__GNUC__) || __GNUC__ < 2 || \
+ (__GNUC__ == 2 && __GNUC_MINOR__ < 5)
+#define RETURN_PC(frameno) (void *)0
+#else
+#define RETURN_PC(frameno) __builtin_return_address(frameno)
+#endif
+
+#define NULL_WANTED 0x01
+#define NULL_LOCKED 0x02
+#define NULL_LLOCK 0x04
+
+extern int null_node_create __P((struct mount *mp, struct vnode *target, struct vnode **vpp, int lockit));
#define MOUNTTONULLMOUNT(mp) ((struct null_mount *)((mp)->mnt_data))
#define VTONULL(vp) ((struct null_node *)(vp)->v_data)
diff --git a/sys/miscfs/nullfs/null_subr.c b/sys/miscfs/nullfs/null_subr.c
index e83fec300ac..ceb0b3e3c38 100644
--- a/sys/miscfs/nullfs/null_subr.c
+++ b/sys/miscfs/nullfs/null_subr.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: null_subr.c,v 1.2 1996/02/27 07:58:02 niklas Exp $ */
-/* $NetBSD: null_subr.c,v 1.5 1996/02/09 22:40:28 christos Exp $ */
+/* $OpenBSD: null_subr.c,v 1.3 1996/05/22 12:04:35 deraadt Exp $ */
+/* $NetBSD: null_subr.c,v 1.6 1996/05/10 22:50:52 jtk Exp $ */
/*
* Copyright (c) 1992, 1993
@@ -157,6 +157,11 @@ null_node_alloc(mp, lowervp, vpp)
vp->v_data = xp;
xp->null_vnode = vp;
xp->null_lowervp = lowervp;
+ xp->null_flags = 0;
+#ifdef DIAGNOSTIC
+ xp->null_pid = -1;
+ xp->null_lockpc = xp->null_lockpc2 = 0;
+#endif
/*
* Before we insert our new node onto the hash chains,
* check to see if someone else has beaten us to it.
@@ -232,12 +237,16 @@ loop:
* Try to find an existing null_node vnode refering
* to it, otherwise make a new null_node vnode which
* contains a reference to the lower vnode.
+ *
+ * >>> we assume that the lower node is already locked upon entry, so we mark
+ * the upper node as locked too (if caller requests it). <<<
*/
int
-null_node_create(mp, lowervp, newvpp)
+null_node_create(mp, lowervp, newvpp, takelock)
struct mount *mp;
struct vnode *lowervp;
struct vnode **newvpp;
+ int takelock;
{
struct vnode *aliasvp;
@@ -247,7 +256,7 @@ null_node_create(mp, lowervp, newvpp)
* to the alias vnode.
*/
#ifdef NULLFS_DIAGNOSTIC
- vprint("null_node_create: exists", NULLTOV(ap));
+ vprint("null_node_create: exists", aliasvp);
#endif
/* VREF(aliasvp); --- done in null_node_find */
} else {
@@ -277,19 +286,24 @@ null_node_create(mp, lowervp, newvpp)
if (lowervp->v_usecount < 1) {
/* Should never happen... */
vprint("null_node_create: alias", aliasvp);
- vprint("null_node_create: lower", lowervp);
panic("null_node_create: lower has 0 usecount.");
};
#endif
#ifdef NULLFS_DIAGNOSTIC
vprint("null_node_create: alias", aliasvp);
- vprint("null_node_create: lower", lowervp);
#endif
+ /* lower node was locked: mark it as locked and take
+ upper layer lock */
+ VTONULL(aliasvp)->null_flags |= NULL_LLOCK;
+ if (takelock)
+ VOP_LOCK(aliasvp);
*newvpp = aliasvp;
return (0);
}
+
+#ifdef notyet
#ifdef NULLFS_DIAGNOSTIC
struct vnode *
null_checkvp(vp, fil, lno)
@@ -339,3 +353,4 @@ null_checkvp(vp, fil, lno)
return a->null_lowervp;
}
#endif
+#endif
diff --git a/sys/miscfs/nullfs/null_vfsops.c b/sys/miscfs/nullfs/null_vfsops.c
index 9b36e636d15..e8a6b4d249a 100644
--- a/sys/miscfs/nullfs/null_vfsops.c
+++ b/sys/miscfs/nullfs/null_vfsops.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: null_vfsops.c,v 1.2 1996/02/27 07:58:03 niklas Exp $ */
-/* $NetBSD: null_vfsops.c,v 1.10 1996/02/09 22:40:31 christos Exp $ */
+/* $OpenBSD: null_vfsops.c,v 1.3 1996/05/22 12:04:36 deraadt Exp $ */
+/* $NetBSD: null_vfsops.c,v 1.11 1996/05/10 22:50:56 jtk Exp $ */
/*
* Copyright (c) 1992, 1993
@@ -134,7 +134,7 @@ nullfs_mount(mp, path, data, ndp, p)
* Save reference. Each mount also holds
* a reference on the root vnode.
*/
- error = null_node_create(mp, lowerrootvp, &vp);
+ error = null_node_create(mp, lowerrootvp, &vp, 1);
/*
* Unlock the node (either the lower or the alias)
*/
diff --git a/sys/miscfs/nullfs/null_vnops.c b/sys/miscfs/nullfs/null_vnops.c
index 570693022c2..05a67aecab1 100644
--- a/sys/miscfs/nullfs/null_vnops.c
+++ b/sys/miscfs/nullfs/null_vnops.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: null_vnops.c,v 1.3 1996/04/21 22:28:16 deraadt Exp $ */
-/* $NetBSD: null_vnops.c,v 1.6 1996/04/13 01:34:06 cgd Exp $ */
+/* $OpenBSD: null_vnops.c,v 1.4 1996/05/22 12:04:36 deraadt Exp $ */
+/* $NetBSD: null_vnops.c,v 1.7 1996/05/10 22:51:01 jtk Exp $ */
/*
* Copyright (c) 1992, 1993
@@ -189,6 +189,11 @@ int null_reclaim __P((void *));
int null_print __P((void *));
int null_strategy __P((void *));
int null_bwrite __P((void *));
+int null_lock __P((void *));
+int null_unlock __P((void *));
+int null_islocked __P((void *));
+int null_lookup __P((void *));
+
/*
* This is the 10-Apr-92 bypass routine.
* This version has been optimized for speed, throwing away some
@@ -292,8 +297,19 @@ null_bypass(v)
break; /* bail out at end of list */
if (old_vps[i] != NULLVP) {
*(vps_p[i]) = old_vps[i];
- if (reles & 1)
- vrele(*(vps_p[i]));
+ if (reles & 1) {
+ /* they really vput them, so we must drop
+ our locks (but mark underneath as
+ unlocked first).
+ Beware of vnode duplication--put it once,
+ and rele the rest. Check this
+ by looking at our upper flag. */
+ if (VTONULL(*(vps_p[i]))->null_flags & NULL_LOCKED) {
+ VTONULL(*(vps_p[i]))->null_flags &= ~NULL_LLOCK;
+ vput(*(vps_p[i]));
+ } else
+ vrele(*(vps_p[i]));
+ }
}
}
@@ -315,7 +331,17 @@ null_bypass(v)
goto out;
vppp = VOPARG_OFFSETTO(struct vnode***,
descp->vdesc_vpp_offset,ap);
- error = null_node_create(old_vps[0]->v_mount, **vppp, *vppp);
+ /*
+ * This assumes that **vppp is a locked vnode (it is always
+ * so as of this writing, NetBSD-current 1995/02/16)
+ */
+ /*
+ * (don't want to lock it if being called on behalf
+ * of lookup--it plays weird locking games depending
+ * on whether or not it's looking up ".", "..", etc.
+ */
+ error = null_node_create(old_vps[0]->v_mount, **vppp, *vppp,
+ descp == &vop_lookup_desc ? 0 : 1);
}
out:
@@ -397,8 +423,20 @@ null_print(v)
struct vnode *a_vp;
} */ *ap = v;
register struct vnode *vp = ap->a_vp;
- printf ("\ttag VT_NULLFS, vp=%p, lowervp=%p\n", vp,
- NULLVPTOLOWERVP(vp));
+ register struct null_node *nn = VTONULL(vp);
+
+ printf ("\ttag VT_NULLFS, vp=%p, lowervp=%p\n", vp, NULLVPTOLOWERVP(vp));
+#ifdef DIAGNOSTIC
+ printf("%s%s owner pid %d retpc %p retret %p\n",
+ (nn->null_flags & NULL_LOCKED) ? "(LOCKED) " : "",
+ (nn->null_flags & NULL_LLOCK) ? "(LLOCK) " : "",
+ nn->null_pid, nn->null_lockpc, nn->null_lockpc2);
+#else
+ printf("%s%s\n",
+ (nn->null_flags & NULL_LOCKED) ? "(LOCKED) " : "",
+ (nn->null_flags & NULL_LLOCK) ? "(LLOCK) " : "");
+#endif
+ vprint("nullfs lowervp", NULLVPTOLOWERVP(vp));
return (0);
}
@@ -457,6 +495,258 @@ null_bwrite(v)
}
/*
+ * We need a separate null lock routine, to avoid deadlocks at reclaim time.
+ * If a process holds the lower-vnode locked when it tries to reclaim
+ * the null upper-vnode, _and_ null_bypass is used as the locking operation,
+ * then a process can end up locking against itself.
+ * This has been observed when a null mount is set up to "tunnel" beneath a
+ * union mount (that setup is useful if you still wish to be able to access
+ * the non-union version of either the above or below union layer)
+ */
+int
+null_lock(v)
+ void *v;
+{
+ struct vop_lock_args *ap = v;
+ struct vnode *vp = ap->a_vp;
+ struct null_node *nn;
+
+#ifdef NULLFS_DIAGNOSTIC
+ vprint("null_lock_e", ap->a_vp);
+ printf("retpc=%lx, retretpc=%lx\n",
+ RETURN_PC(0),
+ RETURN_PC(1));
+#endif
+start:
+ while (vp->v_flag & VXLOCK) {
+ vp->v_flag |= VXWANT;
+ tsleep((caddr_t)vp, PINOD, "nulllock1", 0);
+ }
+
+ nn = VTONULL(vp);
+
+ if ((nn->null_flags & NULL_LLOCK) == 0 &&
+ (vp->v_usecount != 0)) {
+ /*
+ * only lock underlying node if we haven't locked it yet
+ * for null ops, and our refcount is nonzero. If usecount
+ * is zero, we are probably being reclaimed so we need to
+ * keep our hands off the lower node.
+ */
+ VOP_LOCK(nn->null_lowervp);
+ nn->null_flags |= NULL_LLOCK;
+ }
+
+ if (nn->null_flags & NULL_LOCKED) {
+#ifdef DIAGNOSTIC
+ if (curproc && nn->null_pid == curproc->p_pid &&
+ nn->null_pid > -1 && curproc->p_pid > -1) {
+ vprint("self-lock", vp);
+ panic("null: locking against myself");
+ }
+#endif
+ nn->null_flags |= NULL_WANTED;
+ tsleep((caddr_t)nn, PINOD, "nulllock2", 0);
+ goto start;
+ }
+
+#ifdef DIAGNOSTIC
+ if (curproc)
+ nn->null_pid = curproc->p_pid;
+ else
+ nn->null_pid = -1;
+ nn->null_lockpc = RETURN_PC(0);
+ nn->null_lockpc2 = RETURN_PC(1);
+#endif
+
+ nn->null_flags |= NULL_LOCKED;
+ return (0);
+}
+
+int
+null_unlock(v)
+ void *v;
+{
+ struct vop_lock_args *ap = v;
+ struct null_node *nn = VTONULL(ap->a_vp);
+
+#ifdef NULLFS_DIAGNOSTIC
+ vprint("null_unlock_e", ap->a_vp);
+#endif
+#ifdef DIAGNOSTIC
+ if ((nn->null_flags & NULL_LOCKED) == 0) {
+ vprint("null_unlock", ap->a_vp);
+ panic("null: unlocking unlocked node");
+ }
+ if (curproc && nn->null_pid != curproc->p_pid &&
+ curproc->p_pid > -1 && nn->null_pid > -1) {
+ vprint("null_unlock", ap->a_vp);
+ panic("null: unlocking other process's null node");
+ }
+#endif
+ nn->null_flags &= ~NULL_LOCKED;
+
+ if ((nn->null_flags & NULL_LLOCK) != 0)
+ VOP_UNLOCK(nn->null_lowervp);
+
+ nn->null_flags &= ~NULL_LLOCK;
+
+ if (nn->null_flags & NULL_WANTED) {
+ nn->null_flags &= ~NULL_WANTED;
+ wakeup((caddr_t)nn);
+ }
+#ifdef DIAGNOSTIC
+ nn->null_pid = 0;
+ nn->null_lockpc = nn->null_lockpc2 = 0;
+#endif
+ return (0);
+}
+
+int
+null_islocked(v)
+ void *v;
+{
+ struct vop_islocked_args *ap = v;
+ return ((VTONULL(ap->a_vp)->null_flags & NULL_LOCKED) ? 1 : 0);
+}
+
+int
+null_lookup(v)
+ void *v;
+{
+ register struct vop_lookup_args /* {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_dvp;
+ struct vnode **a_vpp;
+ struct componentname *a_cnp;
+ } */ *ap = v;
+ register int error;
+ register struct vnode *dvp;
+ int flags = ap->a_cnp->cn_flags;
+
+#ifdef NULLFS_DIAGNOSTIC
+ printf("null_lookup: dvp=%lx, name='%s'\n",
+ ap->a_dvp, ap->a_cnp->cn_nameptr);
+#endif
+ /*
+ * the starting dir (ap->a_dvp) comes in locked.
+ */
+
+ /* set LOCKPARENT to hold on to it until done below */
+ ap->a_cnp->cn_flags |= LOCKPARENT;
+ error = null_bypass(ap);
+ if (!(flags & LOCKPARENT))
+ ap->a_cnp->cn_flags &= ~LOCKPARENT;
+
+ if (error)
+ /*
+ * starting dir is still locked/has been relocked
+ * on error return.
+ */
+ return error;
+
+ if (ap->a_dvp != *ap->a_vpp) {
+ /*
+ * Lookup returns node locked; we mark both lower and
+ * upper nodes as locked by setting the lower lock
+ * flag (it came back locked), and then call lock to
+ * set upper lock flag & record pid, etc. see
+ * null_node_create()
+ */
+ VTONULL(*ap->a_vpp)->null_flags |= NULL_LLOCK;
+
+ dvp = ap->a_dvp;
+ if (flags & ISDOTDOT) {
+ /*
+ * If we're looking up `..' and this isn't the
+ * last component, then the starting directory
+ * ("parent") is _unlocked_ as a side-effect
+ * of lookups. This is to avoid deadlocks:
+ * lock order is always parent, child, so
+ * looking up `..' requires dropping the lock
+ * on the starting directory.
+ */
+ /* see ufs_lookup() for hairy ugly locking protocol
+ examples */
+ /*
+ * underlying starting dir comes back locked if flags &
+ * LOCKPARENT (which we artificially set above) and
+ * ISLASTCN.
+ */
+ if (flags & ISLASTCN) {
+ VTONULL(dvp)->null_flags |= NULL_LLOCK; /* no-op, right? */
+#ifdef NULLFS_DIAGNOSTIC
+ if (!VOP_ISLOCKED(VTONULL(dvp)->null_lowervp)) {
+ vprint("lowerdvp not locked after lookup\n", dvp);
+ panic("null_lookup not locked");
+ }
+#endif
+ } else {
+ VTONULL(dvp)->null_flags &= ~NULL_LLOCK;
+#ifdef NULLFS_DIAGNOSTIC
+ if (VOP_ISLOCKED(VTONULL(dvp)->null_lowervp)) {
+ vprint("lowerdvp locked after lookup?\n", dvp);
+ panic("null_lookup locked");
+ }
+#endif
+ }
+ /*
+ * locking order: drop lock on lower-in-tree
+ * element, then get lock on higher-in-tree
+ * element, then (if needed) re-fetch lower
+ * lock. No need for vget() since we hold a
+ * refcount to the starting directory
+ */
+ VOP_UNLOCK(dvp);
+ VOP_LOCK(*ap->a_vpp);
+ /*
+ * we should return our directory locked if
+ * (flags & LOCKPARENT) and (flags & ISLASTCN)
+ */
+ if ((flags & LOCKPARENT) && (flags & ISLASTCN))
+ VOP_LOCK(dvp);
+ } else {
+ /*
+ * Normal directory locking order: we hold the starting
+ * directory locked; now lock our layer of the target.
+ */
+ VOP_LOCK(*ap->a_vpp);
+ /*
+ * underlying starting dir comes back locked
+ * if lockparent (we set it) and no error
+ * (this leg) and ISLASTCN
+ */
+ if (flags & ISLASTCN) {
+ VTONULL(dvp)->null_flags |= NULL_LLOCK; /* no op, right? */
+#ifdef NULLFS_DIAGNOSTIC
+ if (!VOP_ISLOCKED(VTONULL(dvp)->null_lowervp)) {
+ vprint("lowerdvp not locked after lookup\n", dvp);
+ panic("null_lookup not locked");
+ }
+#endif
+ } else {
+ VTONULL(dvp)->null_flags &= ~NULL_LLOCK;
+#ifdef NULLFS_DIAGNOSTIC
+ if (VOP_ISLOCKED(VTONULL(dvp)->null_lowervp)) {
+ vprint("lowerdvp locked after lookup?\n", dvp);
+ panic("null_lookup locked");
+ }
+#endif
+ }
+ /*
+ * we should return our directory unlocked if
+ * our caller didn't want the parent locked,
+ * !(flags & LOCKPARENT), or we're not at the
+ * end yet, !(flags & ISLASTCN)
+ */
+ if (!(flags & LOCKPARENT) || !(flags & ISLASTCN))
+ VOP_UNLOCK(dvp);
+ }
+ }
+ return error;
+}
+
+/*
* Global vfs data structures
*/
int (**null_vnodeop_p) __P((void *));
@@ -468,6 +758,11 @@ struct vnodeopv_entry_desc null_vnodeop_entries[] = {
{ &vop_reclaim_desc, null_reclaim },
{ &vop_print_desc, null_print },
+ { &vop_lock_desc, null_lock },
+ { &vop_unlock_desc, null_unlock },
+ { &vop_islocked_desc, null_islocked },
+ { &vop_lookup_desc, null_lookup }, /* special locking frob */
+
{ &vop_strategy_desc, null_strategy },
{ &vop_bwrite_desc, null_bwrite },
diff --git a/sys/miscfs/union/union_vnops.c b/sys/miscfs/union/union_vnops.c
index d46d06f3f2e..5124de80be2 100644
--- a/sys/miscfs/union/union_vnops.c
+++ b/sys/miscfs/union/union_vnops.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: union_vnops.c,v 1.3 1996/02/27 08:09:03 niklas Exp $ */
-/* $NetBSD: union_vnops.c,v 1.28 1996/02/13 13:13:03 mycroft Exp $ */
+/* $OpenBSD: union_vnops.c,v 1.4 1996/05/22 12:04:37 deraadt Exp $ */
+/* $NetBSD: union_vnops.c,v 1.30 1996/05/13 07:13:23 mrg Exp $ */
/*
* Copyright (c) 1992, 1993, 1994 The Regents of the University of California.
@@ -347,7 +347,8 @@ union_lookup(v)
iswhiteout = 1;
} else if (lowerdvp != NULLVP) {
lerror = VOP_GETATTR(upperdvp, &va,
- cnp->cn_cred, cnp->cn_proc);
+ cnp->cn_cred,
+ cnp->cn_proc);
if (lerror == 0 && (va.va_flags & OPAQUE))
iswhiteout = 1;
}
@@ -1059,6 +1060,10 @@ union_seek(v)
return (VCALL(vp, VOFFSET(vop_seek), ap));
}
+/* a_dvp: directory in which to link
+ a_vp: new target of the link
+ a_cnp: name for the link
+ */
int
union_remove(v)
void *v;
@@ -1491,6 +1496,18 @@ union_readlink(v)
return (error);
}
+/*
+ * When operations want to vput() a union node yet retain a lock on
+ * the upper VP (say, to do some further operations like link(),
+ * mkdir(), ...), they set UN_KLOCK on the union node, then call
+ * vput() which calls VOP_UNLOCK() and comes here. union_unlock()
+ * unlocks the union node (leaving the upper VP alone), clears the
+ * KLOCK flag, and then returns to vput(). The caller then does whatever
+ * is left to do with the upper VP, and insures that it gets unlocked.
+ *
+ * If UN_KLOCK isn't set, then the upper VP is unlocked here.
+ */
+
int
union_abortop(v)
void *v;