Diffstat (limited to 'sys/fs/nullfs')
-rw-r--r--   sys/fs/nullfs/null.h          3
-rw-r--r--   sys/fs/nullfs/null_subr.c    20
-rw-r--r--   sys/fs/nullfs/null_vnops.c  153
3 files changed, 108 insertions, 68 deletions
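
The change below converts the nullfs node hash from a doubly-linked CK_LIST to a singly-linked CK_SLIST and wires it up for SMR (safe memory reclamation), so readers can walk the hash and take a reference without the hash lock or the vnode interlock. As a rough sketch of the reader-side pattern this enables (modeled on the null_hashget() hunks below; the helper name and the LK_EXCLUSIVE flag are illustrative, not part of the commit):

    /*
     * Sketch only.  The wrapped vnode must be referenced while still
     * inside the SMR section (vget_prep_smr()); sleeping is only legal
     * after vfs_smr_exit(), so the lock is taken via vget_finish().
     */
    static struct vnode *
    null_hash_lookup_sketch(struct vnode *lowervp)
    {
            struct null_node *a;
            struct vnode *vp;
            enum vgetstate vs;

            vfs_smr_enter();
            CK_SLIST_FOREACH(a, NULL_NHASH(lowervp), null_hash) {
                    if (a->null_lowervp != lowervp)
                            continue;
                    vp = NULLTOV(a);
                    vs = vget_prep_smr(vp);
                    vfs_smr_exit();
                    if (vs == VGET_NONE)
                            return (NULL);  /* raced with reclaim */
                    if (vget_finish(vp, LK_EXCLUSIVE, vs) != 0)
                            return (NULL);
                    return (vp);
            }
            vfs_smr_exit();
            return (NULL);
    }
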
diff --git a/sys/fs/nullfs/null.h b/sys/fs/nullfs/null.h
index dd6cb4f71f07..ad3f7779e108 100644
--- a/sys/fs/nullfs/null.h
+++ b/sys/fs/nullfs/null.h
@@ -53,7 +53,7 @@ struct null_mount {
  * A cache of vnode references
  */
 struct null_node {
-        CK_LIST_ENTRY(null_node) null_hash;     /* Hash list */
+        CK_SLIST_ENTRY(null_node) null_hash;    /* Hash list */
         struct vnode *null_lowervp;             /* VREFed once */
         struct vnode *null_vnode;               /* Back pointer */
         u_int null_flags;
@@ -64,6 +64,7 @@ struct null_node {
 
 #define MOUNTTONULLMOUNT(mp) ((struct null_mount *)((mp)->mnt_data))
 #define VTONULL(vp) ((struct null_node *)(vp)->v_data)
+#define VTONULL_SMR(vp) ((struct null_node *)vn_load_v_data_smr(vp))
 #define NULLTOV(xp) ((xp)->null_vnode)
 
 int nullfs_init(struct vfsconf *vfsp);
diff --git a/sys/fs/nullfs/null_subr.c b/sys/fs/nullfs/null_subr.c
index 146d3bbdaedd..d7f847d449d0 100644
--- a/sys/fs/nullfs/null_subr.c
+++ b/sys/fs/nullfs/null_subr.c
@@ -36,12 +36,12 @@
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/lock.h>
-#include <sys/rwlock.h>
 #include <sys/malloc.h>
 #include <sys/mount.h>
 #include <sys/proc.h>
-#include <sys/vnode.h>
+#include <sys/rwlock.h>
 #include <sys/smr.h>
+#include <sys/vnode.h>
 
 #include <fs/nullfs/null.h>
 
@@ -59,7 +59,7 @@ VFS_SMR_DECLARE;
 #define NULL_NHASH(vp) (&null_node_hashtbl[vfs_hash_index(vp) & null_hash_mask])
 
-static CK_LIST_HEAD(null_node_hashhead, null_node) *null_node_hashtbl;
+static CK_SLIST_HEAD(null_node_hashhead, null_node) *null_node_hashtbl;
 static struct rwlock null_hash_lock;
 static u_long null_hash_mask;
 
@@ -116,7 +116,7 @@ null_hashget_locked(struct mount *mp, struct vnode *lowervp)
          * reference count (but NOT the lower vnode's VREF counter).
          */
         hd = NULL_NHASH(lowervp);
-        CK_LIST_FOREACH(a, hd, null_hash) {
+        CK_SLIST_FOREACH(a, hd, null_hash) {
                 if (a->null_lowervp != lowervp)
                         continue;
                 /*
@@ -143,12 +143,12 @@ null_hashget(struct mount *mp, struct vnode *lowervp)
         struct vnode *vp;
         enum vgetstate vs;
 
-        ASSERT_VOP_LOCKED(lowervp, "null_hashget");
+        ASSERT_VOP_LOCKED(lowervp, __func__);
         rw_assert(&null_hash_lock, RA_UNLOCKED);
 
         vfs_smr_enter();
         hd = NULL_NHASH(lowervp);
-        CK_LIST_FOREACH(a, hd, null_hash) {
+        CK_SLIST_FOREACH(a, hd, null_hash) {
                 if (a->null_lowervp != lowervp)
                         continue;
                 /*
@@ -181,7 +181,7 @@ null_hashins(struct mount *mp, struct null_node *xp)
 
         hd = NULL_NHASH(xp->null_lowervp);
 #ifdef INVARIANTS
-        CK_LIST_FOREACH(oxp, hd, null_hash) {
+        CK_SLIST_FOREACH(oxp, hd, null_hash) {
                 if (oxp->null_lowervp == xp->null_lowervp &&
                     NULLTOV(oxp)->v_mount == mp) {
                         VNASSERT(0, NULLTOV(oxp),
@@ -189,7 +189,7 @@ null_hashins(struct mount *mp, struct null_node *xp)
                 }
         }
 #endif
-        CK_LIST_INSERT_HEAD(hd, xp, null_hash);
+        CK_SLIST_INSERT_HEAD(hd, xp, null_hash);
 }
 
 static void
@@ -305,9 +305,11 @@ null_nodeget(struct mount *mp, struct vnode *lowervp, struct vnode **vpp)
 void
 null_hashrem(struct null_node *xp)
 {
+        struct null_node_hashhead *hd;
 
+        hd = NULL_NHASH(xp->null_lowervp);
         rw_wlock(&null_hash_lock);
-        CK_LIST_REMOVE(xp, null_hash);
+        CK_SLIST_REMOVE(hd, xp, null_node, null_hash);
         rw_wunlock(&null_hash_lock);
 }
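
A note on the null_hashrem() hunk above: CK_LIST_REMOVE() can unlink an element through its back pointer, but a singly-linked CK_SLIST entry has none, so CK_SLIST_REMOVE() needs the list head and walks the chain to find the predecessor. That is why the function now recomputes the bucket with NULL_NHASH() first. A minimal writer-side sketch (same shape as the committed code; the closing comment states an assumption, since the reclaim path is not part of this excerpt):

    static void
    null_hashrem_sketch(struct null_node *xp)
    {
            struct null_node_hashhead *hd;

            hd = NULL_NHASH(xp->null_lowervp);      /* bucket needed to unlink */
            rw_wlock(&null_hash_lock);              /* writers still serialize */
            CK_SLIST_REMOVE(hd, xp, null_node, null_hash);
            rw_wunlock(&null_hash_lock);
            /*
             * Assumption: concurrent SMR readers may still be traversing
             * the chain, so the unlinked node's memory must not be reused
             * until a grace period has elapsed.
             */
    }
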
diff --git a/sys/fs/nullfs/null_vnops.c b/sys/fs/nullfs/null_vnops.c
index dd176b34e4eb..ec8a6b10b13f 100644
--- a/sys/fs/nullfs/null_vnops.c
+++ b/sys/fs/nullfs/null_vnops.c
@@ -174,6 +174,8 @@
 #include <sys/mount.h>
 #include <sys/mutex.h>
 #include <sys/namei.h>
+#include <sys/proc.h>
+#include <sys/smr.h>
 #include <sys/sysctl.h>
 #include <sys/vnode.h>
 #include <sys/stat.h>
@@ -185,6 +187,8 @@
 #include <vm/vm_object.h>
 #include <vm/vnode_pager.h>
 
+VFS_SMR_DECLARE;
+
 static int null_bug_bypass = 0;   /* for debugging: enables bypass printf'ing */
 SYSCTL_INT(_debug, OID_AUTO, nullfs_bug_bypass, CTLFLAG_RW,
         &null_bug_bypass, 0, "");
@@ -768,75 +772,108 @@ null_rmdir(struct vop_rmdir_args *ap)
 }
 
 /*
- * We need to process our own vnode lock and then clear the
- * interlock flag as it applies only to our vnode, not the
- * vnodes below us on the stack.
+ * We need to process our own vnode lock and then clear the interlock flag as
+ * it applies only to our vnode, not the vnodes below us on the stack.
+ *
+ * We have to hold the vnode here to solve a potential reclaim race. If we're
+ * forcibly vgone'd while we still have refs, a thread could be sleeping inside
+ * the lowervp's vop_lock routine. When we vgone we will drop our last ref to
+ * the lowervp, which would allow it to be reclaimed. The lowervp could then
+ * be recycled, in which case it is not legal to be sleeping in its VOP. We
+ * prevent it from being recycled by holding the vnode here.
  */
+static struct vnode *
+null_lock_prep_with_smr(struct vop_lock1_args *ap)
+{
+        struct null_node *nn;
+        struct vnode *lvp;
+
+        lvp = NULL;
+
+        vfs_smr_enter();
+
+        nn = VTONULL_SMR(ap->a_vp);
+        if (__predict_true(nn != NULL)) {
+                lvp = nn->null_lowervp;
+                if (lvp != NULL && !vhold_smr(lvp))
+                        lvp = NULL;
+        }
+
+        vfs_smr_exit();
+        return (lvp);
+}
+
+static struct vnode *
+null_lock_prep_with_interlock(struct vop_lock1_args *ap)
+{
+        struct null_node *nn;
+        struct vnode *lvp;
+
+        ASSERT_VI_LOCKED(ap->a_vp, __func__);
+
+        ap->a_flags &= ~LK_INTERLOCK;
+
+        lvp = NULL;
+
+        nn = VTONULL(ap->a_vp);
+        if (__predict_true(nn != NULL)) {
+                lvp = nn->null_lowervp;
+                if (lvp != NULL)
+                        vholdnz(lvp);
+        }
+        VI_UNLOCK(ap->a_vp);
+        return (lvp);
+}
+
 static int
 null_lock(struct vop_lock1_args *ap)
 {
-        struct vnode *vp = ap->a_vp;
-        int flags;
-        struct null_node *nn;
         struct vnode *lvp;
-        int error;
+        int error, flags;
 
-        if ((ap->a_flags & LK_INTERLOCK) == 0)
-                VI_LOCK(vp);
-        else
-                ap->a_flags &= ~LK_INTERLOCK;
-        flags = ap->a_flags;
-        nn = VTONULL(vp);
+        if (__predict_true((ap->a_flags & LK_INTERLOCK) == 0)) {
+                lvp = null_lock_prep_with_smr(ap);
+                if (__predict_false(lvp == NULL)) {
+                        VI_LOCK(ap->a_vp);
+                        lvp = null_lock_prep_with_interlock(ap);
+                }
+        } else {
+                lvp = null_lock_prep_with_interlock(ap);
+        }
+
+        ASSERT_VI_UNLOCKED(ap->a_vp, __func__);
+
+        if (__predict_false(lvp == NULL))
+                return (vop_stdlock(ap));
+
+        VNPASS(lvp->v_holdcnt > 0, lvp);
+        error = VOP_LOCK(lvp, ap->a_flags);
         /*
-         * If we're still active we must ask the lower layer to
-         * lock as ffs has special lock considerations in its
-         * vop lock.
+         * We might have slept to get the lock and someone might have
+         * clean our vnode already, switching vnode lock from one in
+         * lowervp to v_lock in our own vnode structure. Handle this
+         * case by reacquiring correct lock in requested mode.
          */
-        if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
-                /*
-                 * We have to hold the vnode here to solve a potential
-                 * reclaim race. If we're forcibly vgone'd while we
-                 * still have refs, a thread could be sleeping inside
-                 * the lowervp's vop_lock routine. When we vgone we will
-                 * drop our last ref to the lowervp, which would allow it
-                 * to be reclaimed. The lowervp could then be recycled,
-                 * in which case it is not legal to be sleeping in its VOP.
-                 * We prevent it from being recycled by holding the vnode
-                 * here.
-                 */
-                vholdnz(lvp);
-                VI_UNLOCK(vp);
-                error = VOP_LOCK(lvp, flags);
-
-                /*
-                 * We might have slept to get the lock and someone might have
-                 * clean our vnode already, switching vnode lock from one in
-                 * lowervp to v_lock in our own vnode structure. Handle this
-                 * case by reacquiring correct lock in requested mode.
-                 */
-                if (VTONULL(vp) == NULL && error == 0) {
-                        ap->a_flags &= ~LK_TYPE_MASK;
-                        switch (flags & LK_TYPE_MASK) {
-                        case LK_SHARED:
-                                ap->a_flags |= LK_SHARED;
-                                break;
-                        case LK_UPGRADE:
-                        case LK_EXCLUSIVE:
-                                ap->a_flags |= LK_EXCLUSIVE;
-                                break;
-                        default:
-                                panic("Unsupported lock request %d\n",
-                                    ap->a_flags);
-                        }
-                        VOP_UNLOCK(lvp);
-                        error = vop_stdlock(ap);
+        if (VTONULL(ap->a_vp) == NULL && error == 0) {
+                VOP_UNLOCK(lvp);
+
+                flags = ap->a_flags;
+                ap->a_flags &= ~LK_TYPE_MASK;
+                switch (flags & LK_TYPE_MASK) {
+                case LK_SHARED:
+                        ap->a_flags |= LK_SHARED;
+                        break;
+                case LK_UPGRADE:
+                case LK_EXCLUSIVE:
+                        ap->a_flags |= LK_EXCLUSIVE;
+                        break;
+                default:
+                        panic("Unsupported lock request %d\n",
+                            flags);
                 }
-                vdrop(lvp);
-        } else {
-                VI_UNLOCK(vp);
                 error = vop_stdlock(ap);
         }
-
+        vdrop(lvp);
         return (error);
 }
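
To summarize the null_lock() rework: in the common case (no LK_INTERLOCK requested) the lower vnode is now resolved entirely under SMR. vn_load_v_data_smr() (via the new VTONULL_SMR()) makes the v_data load safe against concurrent reclaim, and vhold_smr() pins the lower vnode only if it is not already being freed; any failure falls back to the interlock path, and a missing lower vnode degrades to vop_stdlock(). The re-check of VTONULL() after VOP_LOCK() is still needed because the nullfs vnode may be reclaimed while sleeping for the lock, which switches its lock from the shared lowervp lock back to its own v_lock. An annotated restatement of the fast path (the same calls as in the diff above; only the comments are added):

    static struct vnode *
    null_lock_prep_with_smr_annotated(struct vop_lock1_args *ap)
    {
            struct null_node *nn;
            struct vnode *lvp;

            lvp = NULL;
            vfs_smr_enter();                /* node memory stable from here */
            nn = VTONULL_SMR(ap->a_vp);     /* NULL if v_data already cleared */
            if (__predict_true(nn != NULL)) {
                    lvp = nn->null_lowervp;
                    if (lvp != NULL && !vhold_smr(lvp))
                            lvp = NULL;     /* lowervp being freed: slow path */
            }
            vfs_smr_exit();
            return (lvp);                   /* if not NULL: held, safe to sleep on */
    }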