/*	$OpenBSD: vfs_default.c,v 1.24 2004/05/14 04:00:33 tedu Exp $	*/

/*
 * Portions of this code are:
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/lock.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/event.h>

#include <miscfs/specfs/specdev.h>

extern struct simplelock spechash_slock;

int	filt_generic_readwrite(struct knote *kn, long hint);
void	filt_generic_detach(struct knote *kn);

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
int
vop_generic_revoke(v)
	void *v;
{
	struct vop_revoke_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp, *vq;
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if ((ap->a_flags & REVOKEALL) == 0)
		panic("vop_generic_revoke");
#endif

	vp = ap->a_vp;
	simple_lock(&vp->v_interlock);

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * wait until it is done and return.
		 */
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			simple_unlock(&vp->v_interlock);
			tsleep(vp, PINOD, "vop_generic_revokeall", 0);
			return(0);
		}
		/*
		 * Ensure that vp will not be vgone'd while we
		 * are eliminating its aliases.
		 */
		vp->v_flag |= VXLOCK;
		simple_unlock(&vp->v_interlock);
		while (vp->v_flag & VALIASED) {
			simple_lock(&spechash_slock);
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				simple_unlock(&spechash_slock);
				vgone(vq);
				break;
			}
			simple_unlock(&spechash_slock);
		}
		/*
		 * Remove the lock so that vgone below will
		 * really eliminate the vnode after which time
		 * vgone will awaken any sleepers.
		 */
		simple_lock(&vp->v_interlock);
		vp->v_flag &= ~VXLOCK;
	}

	vgonel(vp, p);

	return (0);
}
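/*
 * Illustrative sketch, not part of this file's original interface: a
 * filesystem with no private revoke handling would normally point the
 * revoke slot of its vnode operations table at the generic routine
 * above.  The "examplefs" identifiers below are hypothetical names used
 * only for this example.
 */
#ifdef notdef
struct vnodeopv_entry_desc examplefs_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_revoke_desc, vop_generic_revoke },	/* revoke */
	{ NULL, NULL }
};
#endif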
int
vop_generic_bwrite(v)
	void *v;
{
	struct vop_bwrite_args *ap = v;

	return (bwrite(ap->a_bp));
}

int
vop_generic_abortop(v)
	void *v;
{
	struct vop_abortop_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_dvp;
		struct componentname *a_cnp;
	} */ *ap = v;

	if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
		pool_put(&namei_pool, ap->a_cnp->cn_pnbuf);

	return (0);
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_generic_lock(v)
	void *v;
{
	struct vop_lock_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap = v;

#ifdef notyet
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	if (vp->v_vnlock == NULL) {
		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
			return (0);
		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
		    M_VNODE, M_WAITOK);
		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
	}
	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_generic_lock: bad operation %d",
		    flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
	return(lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
#else /* for now */
	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		simple_unlock(&ap->a_vp->v_interlock);
	return (0);
#endif
}

/*
 * Decrement the active use count.
 */
int
vop_generic_unlock(v)
	void *v;
{
	struct vop_unlock_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	if (vp->v_vnlock == NULL)
		return (0);
	return (lockmgr(vp->v_vnlock, LK_RELEASE, NULL, ap->a_p));
}

/*
 * Return whether or not the node is in use.
 */
int
vop_generic_islocked(v)
	void *v;
{
	struct vop_islocked_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	if (vp->v_vnlock == NULL)
		return (0);
	return (lockstatus(vp->v_vnlock));
}
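/*
 * Illustrative sketch, assuming a filesystem that performs no locking of
 * its own: such a filesystem would reference the stubs above from the
 * lock, unlock and islocked slots of its vnode operations table.  The
 * "examplefs" identifiers below are hypothetical.
 */
#ifdef notdef
struct vnodeopv_entry_desc examplefs_lock_entries[] = {
	{ &vop_lock_desc, vop_generic_lock },		/* lock */
	{ &vop_unlock_desc, vop_generic_unlock },	/* unlock */
	{ &vop_islocked_desc, vop_generic_islocked },	/* islocked */
	{ NULL, NULL }
};
#endif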
struct filterops generic_filtops =
	{ 1, NULL, filt_generic_detach, filt_generic_readwrite };

int
vop_generic_kqfilter(v)
	void *v;
{
	struct vop_kqfilter_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct knote *a_kn;
	} */ *ap = v;
	struct knote *kn = ap->a_kn;

	switch (kn->kn_filter) {
	case EVFILT_READ:
	case EVFILT_WRITE:
		kn->kn_fop = &generic_filtops;
		break;
	default:
		return (1);
	}

	return (0);
}

void
filt_generic_detach(struct knote *kn)
{
}

int
filt_generic_readwrite(struct knote *kn, long hint)
{
	/*
	 * filesystem is gone, so set the EOF flag and schedule
	 * the knote for deletion.
	 */
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	kn->kn_data = 0;

	return (1);
}

int lease_check(void *);

int
lease_check(void *v)
{
	return (0);
}

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname, td)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
	struct proc *td;
{
	return (EOPNOTSUPP);
}
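/*
 * Illustrative sketch only: vfs_stdextattrctl is intended to fill the
 * extended attribute control slot of a filesystem's vfsops table when
 * the filesystem provides no support of its own, so the operation fails
 * cleanly with EOPNOTSUPP.  The "examplefs" names and the vfs_extattrctl
 * member name used below are assumptions made for this example, not
 * definitions taken from this file.
 */
#ifdef notdef
struct vfsops examplefs_vfsops = {
	/* ... other filesystem operations ... */
	.vfs_extattrctl = vfs_stdextattrctl,	/* no native extattr support */
};
#endif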