Diffstat (limited to 'sys/fs')
-rw-r--r--	sys/fs/cuse/cuse.c           |   2
-rw-r--r--	sys/fs/devfs/devfs_dir.c     |   2
-rw-r--r--	sys/fs/fuse/fuse_device.c    |   2
-rw-r--r--	sys/fs/fuse/fuse_vnops.c     |   4
-rw-r--r--	sys/fs/nullfs/null.h         |  23
-rw-r--r--	sys/fs/nullfs/null_subr.c    |  98
-rw-r--r--	sys/fs/nullfs/null_vfsops.c  |  20
-rw-r--r--	sys/fs/nullfs/null_vnops.c   | 183
-rw-r--r--	sys/fs/p9fs/p9_transport.c   |   3
-rw-r--r--	sys/fs/udf/osta.c            |   4
-rw-r--r--	sys/fs/unionfs/union_subr.c  |   2
-rw-r--r--	sys/fs/unionfs/union_vnops.c |  12
12 files changed, 232 insertions(+), 123 deletions(-)
diff --git a/sys/fs/cuse/cuse.c b/sys/fs/cuse/cuse.c
index d63a7d4691cf..b2524324584a 100644
--- a/sys/fs/cuse/cuse.c
+++ b/sys/fs/cuse/cuse.c
@@ -195,12 +195,14 @@ static const struct filterops cuse_client_kqfilter_read_ops = {
 	.f_isfd = 1,
 	.f_detach = cuse_client_kqfilter_read_detach,
 	.f_event = cuse_client_kqfilter_read_event,
+	.f_copy = knote_triv_copy,
 };
 
 static const struct filterops cuse_client_kqfilter_write_ops = {
 	.f_isfd = 1,
 	.f_detach = cuse_client_kqfilter_write_detach,
 	.f_event = cuse_client_kqfilter_write_event,
+	.f_copy = knote_triv_copy,
 };
 
 static d_open_t cuse_client_open;
diff --git a/sys/fs/devfs/devfs_dir.c b/sys/fs/devfs/devfs_dir.c
index 3dc87538017d..aad87606e738 100644
--- a/sys/fs/devfs/devfs_dir.c
+++ b/sys/fs/devfs/devfs_dir.c
@@ -162,7 +162,7 @@ int
 devfs_pathpath(const char *p1, const char *p2)
 {
 
-	for (;;p1++, p2++) {
+	for (;; p1++, p2++) {
 		if (*p1 != *p2) {
 			if (*p1 == '/' && *p2 == '\0')
 				return (1);
diff --git a/sys/fs/fuse/fuse_device.c b/sys/fs/fuse/fuse_device.c
index 57b3559731f7..75bc0357571f 100644
--- a/sys/fs/fuse/fuse_device.c
+++ b/sys/fs/fuse/fuse_device.c
@@ -126,11 +126,13 @@ static const struct filterops fuse_device_rfiltops = {
 	.f_isfd = 1,
 	.f_detach = fuse_device_filt_detach,
 	.f_event = fuse_device_filt_read,
+	.f_copy = knote_triv_copy,
 };
 
 static const struct filterops fuse_device_wfiltops = {
 	.f_isfd = 1,
 	.f_event = fuse_device_filt_write,
+	.f_copy = knote_triv_copy,
 };
 
 /****************************
diff --git a/sys/fs/fuse/fuse_vnops.c b/sys/fs/fuse/fuse_vnops.c
index 5c28db29fc63..683ee2f7ad56 100644
--- a/sys/fs/fuse/fuse_vnops.c
+++ b/sys/fs/fuse/fuse_vnops.c
@@ -284,7 +284,7 @@ fuse_flush(struct vnode *vp, struct ucred *cred, pid_t pid, int fflag)
 	struct mount *mp = vnode_mount(vp);
 	int err;
 
-	if (fsess_not_impl(vnode_mount(vp), FUSE_FLUSH))
+	if (fsess_not_impl(mp, FUSE_FLUSH))
 		return 0;
 
 	err = fuse_filehandle_getrw(vp, fflag, &fufh, cred, pid);
@@ -292,7 +292,7 @@ fuse_flush(struct vnode *vp, struct ucred *cred, pid_t pid, int fflag)
 		return err;
 
 	if (fufh->fuse_open_flags & FOPEN_NOFLUSH &&
-	    (!fsess_opt_writeback(vnode_mount(vp))))
+	    (!fsess_opt_writeback(mp)))
 		return (0);
 
 	fdisp_init(&fdi, sizeof(*ffi));
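Note: the cuse and fuse device changes above share one theme: each kqueue filter gains an .f_copy callback set to knote_triv_copy, the stock callback for filters whose knotes carry no private state needing fixup when a knote is duplicated, presumably so these knotes can participate in knote copying. A minimal sketch of the resulting shape; the example_* names are hypothetical, only .f_copy = knote_triv_copy is taken from this diff:

	/* Hypothetical filter callbacks, with the shapes sys/event.h expects. */
	static void	example_filt_detach(struct knote *kn);
	static int	example_filt_read(struct knote *kn, long hint);

	static const struct filterops example_rfiltops = {
		.f_isfd = 1,			/* knote attaches to a file descriptor */
		.f_detach = example_filt_detach,	/* hypothetical detach hook */
		.f_event = example_filt_read,	/* hypothetical event predicate */
		.f_copy = knote_triv_copy,	/* trivial copy: no state to fix up */
	};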
diff --git a/sys/fs/nullfs/null.h b/sys/fs/nullfs/null.h
index 0a93878c859f..7bfdc20a3f67 100644
--- a/sys/fs/nullfs/null.h
+++ b/sys/fs/nullfs/null.h
@@ -35,7 +35,11 @@
 #ifndef FS_NULL_H
 #define FS_NULL_H
 
-#define	NULLM_CACHE	0x0001
+#include <sys/ck.h>
+#include <vm/uma.h>
+
+#define	NULLM_CACHE		0x0001
+#define	NULLM_NOUNPBYPASS	0x0002
 
 struct null_mount {
 	struct mount *nullm_vfs;
@@ -50,7 +54,7 @@ struct null_mount {
  * A cache of vnode references
  */
 struct null_node {
-	LIST_ENTRY(null_node)	null_hash;	/* Hash list */
+	CK_SLIST_ENTRY(null_node) null_hash;	/* Hash list */
 	struct vnode	*null_lowervp;	/* VREFed once */
 	struct vnode	*null_vnode;	/* Back pointer */
 	u_int		null_flags;
@@ -61,6 +65,7 @@ struct null_node {
 
 #define	MOUNTTONULLMOUNT(mp) ((struct null_mount *)((mp)->mnt_data))
 #define	VTONULL(vp) ((struct null_node *)(vp)->v_data)
+#define	VTONULL_SMR(vp) ((struct null_node *)vn_load_v_data_smr(vp))
 #define	NULLTOV(xp) ((xp)->null_vnode)
 
 int nullfs_init(struct vfsconf *vfsp);
@@ -78,10 +83,18 @@ struct vnode *null_checkvp(struct vnode *vp, char *fil, int lno);
 #endif
 
 extern struct vop_vector null_vnodeops;
+extern struct vop_vector null_vnodeops_no_unp_bypass;
 
-#ifdef MALLOC_DECLARE
-MALLOC_DECLARE(M_NULLFSNODE);
-#endif
+static inline bool
+null_is_nullfs_vnode(struct vnode *vp)
+{
+	const struct vop_vector *op;
+
+	op = vp->v_op;
+	return (op == &null_vnodeops || op == &null_vnodeops_no_unp_bypass);
+}
+
+extern uma_zone_t null_node_zone;
 
 #ifdef NULLFS_DEBUG
 #define NULLFSDEBUG(format, args...) printf(format ,## args)
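Note: with a second vop vector exported from null.h, identifying a nullfs vnode by a bare v_op comparison is no longer sufficient; that is what the new null_is_nullfs_vnode() helper is for. A hypothetical caller, mirroring the checks null_vfsops.c and null_vnops.c switch to below:

	/*
	 * Hypothetical example: testing whether the covered vnode belongs
	 * to nullfs.  A direct "v_op == &null_vnodeops" test would now
	 * miss vnodes created with the no-unp-bypass vector.
	 */
	static bool
	example_covered_by_nullfs(struct mount *mp)
	{
		return (null_is_nullfs_vnode(mp->mnt_vnodecovered));
	}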
diff --git a/sys/fs/nullfs/null_subr.c b/sys/fs/nullfs/null_subr.c
index 053614b6910d..a843ae44f121 100644
--- a/sys/fs/nullfs/null_subr.c
+++ b/sys/fs/nullfs/null_subr.c
@@ -36,14 +36,19 @@
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/lock.h>
-#include <sys/rwlock.h>
 #include <sys/malloc.h>
 #include <sys/mount.h>
 #include <sys/proc.h>
+#include <sys/rwlock.h>
+#include <sys/smr.h>
 #include <sys/vnode.h>
 
 #include <fs/nullfs/null.h>
 
+#include <vm/uma.h>
+
+VFS_SMR_DECLARE;
+
 /*
  * Null layer cache:
  * Each cache entry holds a reference to the lower vnode
@@ -54,12 +59,12 @@
 
 #define	NULL_NHASH(vp) (&null_node_hashtbl[vfs_hash_index(vp) & null_hash_mask])
 
-static LIST_HEAD(null_node_hashhead, null_node) *null_node_hashtbl;
+static CK_SLIST_HEAD(null_node_hashhead, null_node) *null_node_hashtbl;
 static struct rwlock null_hash_lock;
 static u_long null_hash_mask;
 
 static MALLOC_DEFINE(M_NULLFSHASH, "nullfs_hash", "NULLFS hash table");
-MALLOC_DEFINE(M_NULLFSNODE, "nullfs_node", "NULLFS vnode private part");
+uma_zone_t __read_mostly null_node_zone;
 
 static void null_hashins(struct mount *, struct null_node *);
 
@@ -73,6 +78,10 @@ nullfs_init(struct vfsconf *vfsp)
 	null_node_hashtbl = hashinit(desiredvnodes, M_NULLFSHASH,
 	    &null_hash_mask);
 	rw_init(&null_hash_lock, "nullhs");
+	null_node_zone = uma_zcreate("nullfs node", sizeof(struct null_node),
+	    NULL, NULL, NULL, NULL, 0, UMA_ZONE_ZINIT);
+	VFS_SMR_ZONE_SET(null_node_zone);
+
 	return (0);
 }
 
@@ -80,6 +89,7 @@ int
 nullfs_uninit(struct vfsconf *vfsp)
 {
 
+	uma_zdestroy(null_node_zone);
 	rw_destroy(&null_hash_lock);
 	hashdestroy(null_node_hashtbl, M_NULLFSHASH, null_hash_mask);
 	return (0);
@@ -96,7 +106,7 @@ null_hashget_locked(struct mount *mp, struct vnode *lowervp)
 	struct null_node *a;
 	struct vnode *vp;
 
-	ASSERT_VOP_LOCKED(lowervp, "null_hashget");
+	ASSERT_VOP_LOCKED(lowervp, __func__);
 	rw_assert(&null_hash_lock, RA_LOCKED);
 
 	/*
@@ -106,18 +116,21 @@ null_hashget_locked(struct mount *mp, struct vnode *lowervp)
 	 * reference count (but NOT the lower vnode's VREF counter).
 	 */
 	hd = NULL_NHASH(lowervp);
-	LIST_FOREACH(a, hd, null_hash) {
-		if (a->null_lowervp == lowervp && NULLTOV(a)->v_mount == mp) {
-			/*
-			 * Since we have the lower node locked the nullfs
-			 * node can not be in the process of recycling. If
-			 * it had been recycled before we grabed the lower
-			 * lock it would not have been found on the hash.
-			 */
-			vp = NULLTOV(a);
-			vref(vp);
-			return (vp);
-		}
+	CK_SLIST_FOREACH(a, hd, null_hash) {
+		if (a->null_lowervp != lowervp)
+			continue;
+		/*
+		 * Since we have the lower node locked the nullfs
+		 * node can not be in the process of recycling. If
+		 * it had been recycled before we grabed the lower
+		 * lock it would not have been found on the hash.
+		 */
+		vp = NULLTOV(a);
+		VNPASS(!VN_IS_DOOMED(vp), vp);
+		if (vp->v_mount != mp)
+			continue;
+		vref(vp);
+		return (vp);
 	}
 	return (NULL);
 }
@@ -126,17 +139,34 @@ struct vnode *
 null_hashget(struct mount *mp, struct vnode *lowervp)
 {
 	struct null_node_hashhead *hd;
+	struct null_node *a;
 	struct vnode *vp;
+	enum vgetstate vs;
 
-	hd = NULL_NHASH(lowervp);
-	if (LIST_EMPTY(hd))
-		return (NULL);
-
-	rw_rlock(&null_hash_lock);
-	vp = null_hashget_locked(mp, lowervp);
-	rw_runlock(&null_hash_lock);
+	ASSERT_VOP_LOCKED(lowervp, __func__);
+	rw_assert(&null_hash_lock, RA_UNLOCKED);
 
-	return (vp);
+	vfs_smr_enter();
+	hd = NULL_NHASH(lowervp);
+	CK_SLIST_FOREACH(a, hd, null_hash) {
+		if (a->null_lowervp != lowervp)
+			continue;
+		/*
+		 * See null_hashget_locked as to why the nullfs vnode can't be
+		 * doomed here.
+		 */
+		vp = NULLTOV(a);
+		VNPASS(!VN_IS_DOOMED(vp), vp);
+		if (vp->v_mount != mp)
+			continue;
+		vs = vget_prep_smr(vp);
+		vfs_smr_exit();
+		VNPASS(vs != VGET_NONE, vp);
+		vget_finish_ref(vp, vs);
+		return (vp);
+	}
+	vfs_smr_exit();
+	return (NULL);
 }
 
 static void
@@ -151,7 +181,7 @@ null_hashins(struct mount *mp, struct null_node *xp)
 
 	hd = NULL_NHASH(xp->null_lowervp);
 #ifdef INVARIANTS
-	LIST_FOREACH(oxp, hd, null_hash) {
+	CK_SLIST_FOREACH(oxp, hd, null_hash) {
 		if (oxp->null_lowervp == xp->null_lowervp &&
 		    NULLTOV(oxp)->v_mount == mp) {
 			VNASSERT(0, NULLTOV(oxp),
@@ -159,7 +189,7 @@ null_hashins(struct mount *mp, struct null_node *xp)
 		}
 	}
 #endif
-	LIST_INSERT_HEAD(hd, xp, null_hash);
+	CK_SLIST_INSERT_HEAD(hd, xp, null_hash);
 }
 
 static void
@@ -174,7 +204,7 @@ null_destroy_proto(struct vnode *vp, void *xp)
 	VI_UNLOCK(vp);
 	vgone(vp);
 	vput(vp);
-	free(xp, M_NULLFSNODE);
+	uma_zfree_smr(null_node_zone, xp);
 }
 
 /*
@@ -208,12 +238,14 @@ null_nodeget(struct mount *mp, struct vnode *lowervp, struct vnode **vpp)
 	 * Note that duplicate can only appear in hash if the lowervp is
 	 * locked LK_SHARED.
 	 */
-	xp = malloc(sizeof(struct null_node), M_NULLFSNODE, M_WAITOK);
+	xp = uma_zalloc_smr(null_node_zone, M_WAITOK);
 
-	error = getnewvnode("nullfs", mp, &null_vnodeops, &vp);
+	error = getnewvnode("nullfs", mp, (MOUNTTONULLMOUNT(mp)->nullm_flags &
+	    NULLM_NOUNPBYPASS) != 0 ? &null_vnodeops_no_unp_bypass :
+	    &null_vnodeops, &vp);
 	if (error) {
 		vput(lowervp);
-		free(xp, M_NULLFSNODE);
+		uma_zfree_smr(null_node_zone, xp);
 		return (error);
 	}
 
@@ -261,8 +293,8 @@ null_nodeget(struct mount *mp, struct vnode *lowervp, struct vnode **vpp)
 		return (error);
 	}
 
-	null_hashins(mp, xp);
 	vn_set_state(vp, VSTATE_CONSTRUCTED);
+	null_hashins(mp, xp);
 	rw_wunlock(&null_hash_lock);
 	*vpp = vp;
 
@@ -275,9 +307,11 @@ null_nodeget(struct mount *mp, struct vnode *lowervp, struct vnode **vpp)
 
 void
 null_hashrem(struct null_node *xp)
 {
+	struct null_node_hashhead *hd;
 
+	hd = NULL_NHASH(xp->null_lowervp);
 	rw_wlock(&null_hash_lock);
-	LIST_REMOVE(xp, null_hash);
+	CK_SLIST_REMOVE(hd, xp, null_node, null_hash);
 	rw_wunlock(&null_hash_lock);
 }
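Note: the null_hashget() rewrite above is an instance of the VFS SMR lookup pattern. Nodes now come from an SMR-enabled UMA zone (uma_zalloc_smr/uma_zfree_smr plus VFS_SMR_ZONE_SET), so a node observed inside an SMR read section cannot be reused until readers drain, and the CK_SLIST can be walked without taking the hash lock. This is also why null_nodeget() now publishes the node with null_hashins() only after vn_set_state(vp, VSTATE_CONSTRUCTED): lockless readers may see the vnode as soon as it is on the list. A generic sketch with hypothetical example_* names; nullfs can assert vs != VGET_NONE because the caller holds the lower vnode locked, but a generic consumer must handle that failure:

	/* Hypothetical node type; struct null_node plays this role above. */
	struct example_obj {
		CK_SLIST_ENTRY(example_obj) hash_link;
		void		*key;
		struct vnode	*vnode;
	};
	CK_SLIST_HEAD(example_head, example_obj);

	static struct vnode *
	example_lookup_smr(struct example_head *hd, void *key)
	{
		struct example_obj *obj;
		struct vnode *vp;
		enum vgetstate vs;

		vfs_smr_enter();
		CK_SLIST_FOREACH(obj, hd, hash_link) {
			if (obj->key != key)
				continue;
			vp = obj->vnode;
			vs = vget_prep_smr(vp);	/* secure a hold before leaving SMR */
			vfs_smr_exit();
			if (vs == VGET_NONE)
				return (NULL);	/* lost a race with reclamation */
			vget_finish_ref(vp, vs);
			return (vp);
		}
		vfs_smr_exit();
		return (NULL);
	}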
diff --git a/sys/fs/nullfs/null_vfsops.c b/sys/fs/nullfs/null_vfsops.c
index 4cddf24a5745..170a3dd51cd8 100644
--- a/sys/fs/nullfs/null_vfsops.c
+++ b/sys/fs/nullfs/null_vfsops.c
@@ -85,6 +85,10 @@ nullfs_mount(struct mount *mp)
 	char *target;
 	int error, len;
 	bool isvnunlocked;
+	static const char cache_opt_name[] = "cache";
+	static const char nocache_opt_name[] = "nocache";
+	static const char unixbypass_opt_name[] = "unixbypass";
+	static const char nounixbypass_opt_name[] = "nounixbypass";
 
 	NULLFSDEBUG("nullfs_mount(mp = %p)\n", (void *)mp);
 
@@ -116,7 +120,7 @@ nullfs_mount(struct mount *mp)
 	/*
 	 * Unlock lower node to avoid possible deadlock.
 	 */
-	if (mp->mnt_vnodecovered->v_op == &null_vnodeops &&
+	if (null_is_nullfs_vnode(mp->mnt_vnodecovered) &&
 	    VOP_ISLOCKED(mp->mnt_vnodecovered) == LK_EXCLUSIVE) {
 		VOP_UNLOCK(mp->mnt_vnodecovered);
 		isvnunlocked = true;
@@ -150,7 +154,7 @@ nullfs_mount(struct mount *mp)
 	/*
 	 * Check multi null mount to avoid `lock against myself' panic.
 	 */
-	if (mp->mnt_vnodecovered->v_op == &null_vnodeops) {
+	if (null_is_nullfs_vnode(mp->mnt_vnodecovered)) {
 		nn = VTONULL(mp->mnt_vnodecovered);
 		if (nn == NULL || lowerrootvp == nn->null_lowervp) {
 			NULLFSDEBUG("nullfs_mount: multi null mount?\n");
@@ -205,9 +209,10 @@ nullfs_mount(struct mount *mp)
 		MNT_IUNLOCK(mp);
 	}
 
-	if (vfs_getopt(mp->mnt_optnew, "cache", NULL, NULL) == 0) {
+	if (vfs_getopt(mp->mnt_optnew, cache_opt_name, NULL, NULL) == 0) {
 		xmp->nullm_flags |= NULLM_CACHE;
-	} else if (vfs_getopt(mp->mnt_optnew, "nocache", NULL, NULL) == 0) {
+	} else if (vfs_getopt(mp->mnt_optnew, nocache_opt_name, NULL,
+	    NULL) == 0) {
 		;
 	} else if (null_cache_vnodes &&
 	    (xmp->nullm_vfs->mnt_kern_flag & MNTK_NULL_NOCACHE) == 0) {
@@ -219,6 +224,13 @@ nullfs_mount(struct mount *mp)
 		    &xmp->notify_node);
 	}
 
+	if (vfs_getopt(mp->mnt_optnew, unixbypass_opt_name, NULL, NULL) == 0) {
+		;
+	} else if (vfs_getopt(mp->mnt_optnew, nounixbypass_opt_name, NULL,
+	    NULL) == 0) {
+		xmp->nullm_flags |= NULLM_NOUNPBYPASS;
+	}
+
 	if (lowerrootvp == mp->mnt_vnodecovered) {
 		vn_lock(lowerrootvp, LK_EXCLUSIVE | LK_RETRY | LK_CANRECURSE);
 		lowerrootvp->v_vflag |= VV_CROSSLOCK;
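Note: the option handling above uses vfs_getopt() purely as an existence test. The new "nounixbypass" option sets NULLM_NOUNPBYPASS, which null_nodeget() (in null_subr.c above) consults to pick the no-bypass vop vector. A hypothetical helper showing the idiom:

	/*
	 * vfs_getopt() returns 0 when the named option is present, and
	 * NULL buf/len arguments make it a pure existence check.
	 */
	static bool
	example_opt_set(struct mount *mp, const char *name)
	{
		return (vfs_getopt(mp->mnt_optnew, name, NULL, NULL) == 0);
	}

Presumably the new behavior is requested with something like "mount -t nullfs -o nounixbypass <lower> <mountpoint>"; the userland side is not part of this diff.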
diff --git a/sys/fs/nullfs/null_vnops.c b/sys/fs/nullfs/null_vnops.c
index e9d598014a2f..d4baabeb40ab 100644
--- a/sys/fs/nullfs/null_vnops.c
+++ b/sys/fs/nullfs/null_vnops.c
@@ -174,6 +174,8 @@
 #include <sys/mount.h>
 #include <sys/mutex.h>
 #include <sys/namei.h>
+#include <sys/proc.h>
+#include <sys/smr.h>
 #include <sys/sysctl.h>
 #include <sys/vnode.h>
 #include <sys/stat.h>
@@ -185,6 +187,8 @@
 #include <vm/vm_object.h>
 #include <vm/vnode_pager.h>
 
+VFS_SMR_DECLARE;
+
 static int null_bug_bypass = 0;   /* for debugging: enables bypass printf'ing */
 SYSCTL_INT(_debug, OID_AUTO, nullfs_bug_bypass, CTLFLAG_RW,
 	&null_bug_bypass, 0, "");
@@ -274,7 +278,7 @@ null_bypass(struct vop_generic_args *ap)
 		 * that aren't.  (We must always map first vp or vclean fails.)
 		 */
 		if (i != 0 && (*this_vp_p == NULL ||
-		    (*this_vp_p)->v_op != &null_vnodeops)) {
+		    !null_is_nullfs_vnode(*this_vp_p))) {
 			old_vps[i] = NULL;
 		} else {
 			old_vps[i] = *this_vp_p;
@@ -768,83 +772,111 @@ null_rmdir(struct vop_rmdir_args *ap)
 }
 
 /*
- * We need to process our own vnode lock and then clear the
- * interlock flag as it applies only to our vnode, not the
- * vnodes below us on the stack.
+ * We need to process our own vnode lock and then clear the interlock flag as
+ * it applies only to our vnode, not the vnodes below us on the stack.
+ *
+ * We have to hold the vnode here to solve a potential reclaim race. If we're
+ * forcibly vgone'd while we still have refs, a thread could be sleeping inside
+ * the lowervp's vop_lock routine. When we vgone we will drop our last ref to
+ * the lowervp, which would allow it to be reclaimed. The lowervp could then
+ * be recycled, in which case it is not legal to be sleeping in its VOP. We
+ * prevent it from being recycled by holding the vnode here.
 */
+static struct vnode *
+null_lock_prep_with_smr(struct vop_lock1_args *ap)
+{
+	struct null_node *nn;
+	struct vnode *lvp;
+
+	lvp = NULL;
+
+	vfs_smr_enter();
+
+	nn = VTONULL_SMR(ap->a_vp);
+	if (__predict_true(nn != NULL)) {
+		lvp = nn->null_lowervp;
+		if (lvp != NULL && !vhold_smr(lvp))
+			lvp = NULL;
+	}
+
+	vfs_smr_exit();
+	return (lvp);
+}
+
+static struct vnode *
+null_lock_prep_with_interlock(struct vop_lock1_args *ap)
+{
+	struct null_node *nn;
+	struct vnode *lvp;
+
+	ASSERT_VI_LOCKED(ap->a_vp, __func__);
+
+	ap->a_flags &= ~LK_INTERLOCK;
+
+	lvp = NULL;
+
+	nn = VTONULL(ap->a_vp);
+	if (__predict_true(nn != NULL)) {
+		lvp = nn->null_lowervp;
+		if (lvp != NULL)
+			vholdnz(lvp);
+	}
+	VI_UNLOCK(ap->a_vp);
+	return (lvp);
+}
+
 static int
 null_lock(struct vop_lock1_args *ap)
 {
-	struct vnode *vp = ap->a_vp;
-	int flags;
-	struct null_node *nn;
 	struct vnode *lvp;
-	int error;
+	int error, flags;
 
-	if ((ap->a_flags & LK_INTERLOCK) == 0)
-		VI_LOCK(vp);
-	else
-		ap->a_flags &= ~LK_INTERLOCK;
-	flags = ap->a_flags;
-	nn = VTONULL(vp);
+	if (__predict_true((ap->a_flags & LK_INTERLOCK) == 0)) {
+		lvp = null_lock_prep_with_smr(ap);
+		if (__predict_false(lvp == NULL)) {
+			VI_LOCK(ap->a_vp);
+			lvp = null_lock_prep_with_interlock(ap);
+		}
+	} else {
+		lvp = null_lock_prep_with_interlock(ap);
+	}
+
+	ASSERT_VI_UNLOCKED(ap->a_vp, __func__);
+
+	if (__predict_false(lvp == NULL))
+		return (vop_stdlock(ap));
+
+	VNPASS(lvp->v_holdcnt > 0, lvp);
+	error = VOP_LOCK(lvp, ap->a_flags);
 	/*
-	 * If we're still active we must ask the lower layer to
-	 * lock as ffs has special lock considerations in its
-	 * vop lock.
+	 * We might have slept to get the lock and someone might have
+	 * clean our vnode already, switching vnode lock from one in
+	 * lowervp to v_lock in our own vnode structure. Handle this
+	 * case by reacquiring correct lock in requested mode.
 	 */
-	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
-		/*
-		 * We have to hold the vnode here to solve a potential
-		 * reclaim race. If we're forcibly vgone'd while we
-		 * still have refs, a thread could be sleeping inside
-		 * the lowervp's vop_lock routine. When we vgone we will
-		 * drop our last ref to the lowervp, which would allow it
-		 * to be recycled. The lowervp could then be recycled,
-		 * in which case it is not legal to be sleeping in its VOP.
-		 * We prevent it from being recycled by holding the vnode
-		 * here.
-		 */
-		vholdnz(lvp);
-		VI_UNLOCK(vp);
-		error = VOP_LOCK(lvp, flags);
-
-		/*
-		 * We might have slept to get the lock and someone might have
-		 * clean our vnode already, switching vnode lock from one in
-		 * lowervp to v_lock in our own vnode structure. Handle this
-		 * case by reacquiring correct lock in requested mode.
-		 */
-		if (VTONULL(vp) == NULL && error == 0) {
-			ap->a_flags &= ~LK_TYPE_MASK;
-			switch (flags & LK_TYPE_MASK) {
-			case LK_SHARED:
-				ap->a_flags |= LK_SHARED;
-				break;
-			case LK_UPGRADE:
-			case LK_EXCLUSIVE:
-				ap->a_flags |= LK_EXCLUSIVE;
-				break;
-			default:
-				panic("Unsupported lock request %d\n",
-				    ap->a_flags);
-			}
-			VOP_UNLOCK(lvp);
-			error = vop_stdlock(ap);
+	if (VTONULL(ap->a_vp) == NULL && error == 0) {
+		VOP_UNLOCK(lvp);
+
+		flags = ap->a_flags;
+		ap->a_flags &= ~LK_TYPE_MASK;
+		switch (flags & LK_TYPE_MASK) {
+		case LK_SHARED:
+			ap->a_flags |= LK_SHARED;
+			break;
+		case LK_UPGRADE:
+		case LK_EXCLUSIVE:
+			ap->a_flags |= LK_EXCLUSIVE;
+			break;
+		default:
+			panic("Unsupported lock request %d\n",
+			    flags);
 		}
-		vdrop(lvp);
-	} else {
-		VI_UNLOCK(vp);
 		error = vop_stdlock(ap);
 	}
-
+	vdrop(lvp);
 	return (error);
 }
 
-/*
- * We need to process our own vnode unlock and then clear the
- * interlock flag as it applies only to our vnode, not the
- * vnodes below us on the stack.
- */
 static int
 null_unlock(struct vop_unlock_args *ap)
 {
@@ -853,11 +885,20 @@ null_unlock(struct vop_unlock_args *ap)
 	struct vnode *lvp;
 	int error;
 
+	/*
+	 * Contrary to null_lock, we don't need to hold the vnode around
+	 * unlock.
+	 *
+	 * We hold the lock, which means we can't be racing against vgone.
+	 *
+	 * At the same time VOP_UNLOCK promises to not touch anything after
+	 * it finishes unlock, just like we don't.
+	 *
+	 * vop_stdunlock for a doomed vnode matches doomed locking in null_lock.
+	 */
 	nn = VTONULL(vp);
 	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
-		vholdnz(lvp);
 		error = VOP_UNLOCK(lvp);
-		vdrop(lvp);
 	} else {
 		error = vop_stdunlock(ap);
 	}
@@ -961,7 +1002,7 @@ null_reclaim(struct vop_reclaim_args *ap)
 		vunref(lowervp);
 	else
 		vput(lowervp);
-	free(xp, M_NULLFSNODE);
+	uma_zfree_smr(null_node_zone, xp);
 
 	return (0);
 }
@@ -1215,3 +1256,11 @@ struct vop_vector null_vnodeops = {
 	.vop_copy_file_range = VOP_PANIC,
 };
 VFS_VOP_VECTOR_REGISTER(null_vnodeops);
+
+struct vop_vector null_vnodeops_no_unp_bypass = {
+	.vop_default = &null_vnodeops,
+	.vop_unp_bind = vop_stdunp_bind,
+	.vop_unp_connect = vop_stdunp_connect,
+	.vop_unp_detach = vop_stdunp_detach,
+};
+VFS_VOP_VECTOR_REGISTER(null_vnodeops_no_unp_bypass);
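Note: null_lock() now splits lower-vnode acquisition into two helpers. The commit keeps the long-standing rule spelled out in the comment above, that the lower vnode must be held across VOP_LOCK(), but acquires that hold without the vnode interlock when possible. The fast path must use vhold_smr(), which can fail if the lower vnode is already being freed; the interlocked slow path can use vholdnz() because the interlock keeps the nullfs node from being reclaimed underneath it. A restatement of the fast path with added commentary (logic as in the diff, the example_ name is hypothetical):

	static struct vnode *
	example_hold_lower(struct vnode *vp)
	{
		struct null_node *nn;
		struct vnode *lvp;

		lvp = NULL;
		vfs_smr_enter();
		nn = VTONULL_SMR(vp);		/* SMR-safe load of v_data */
		if (nn != NULL) {
			lvp = nn->null_lowervp;
			/*
			 * vhold_smr() may fail if lvp is being freed; the
			 * caller then retries with the interlock held,
			 * where vholdnz() is safe.
			 */
			if (lvp != NULL && !vhold_smr(lvp))
				lvp = NULL;
		}
		vfs_smr_exit();
		return (lvp);			/* NULL: fall back to slow path */
	}

Note also that null_vnodeops_no_unp_bypass inherits everything from null_vnodeops via .vop_default and overrides only the unix-socket ops, so "nounixbypass" mounts keep sockets on the nullfs vnode instead of bypassing to the lower one.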
diff --git a/sys/fs/p9fs/p9_transport.c b/sys/fs/p9fs/p9_transport.c
index c82d81fedcd7..25eee984265c 100644
--- a/sys/fs/p9fs/p9_transport.c
+++ b/sys/fs/p9fs/p9_transport.c
@@ -34,9 +34,8 @@
 TAILQ_HEAD(, p9_trans_module) transports;
 
 static void
-p9_transport_init(void)
+p9_transport_init(void *dummy __unused)
 {
-
 	TAILQ_INIT(&transports);
 }
 
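Note: the p9fs change adjusts p9_transport_init() to the handler shape the SYSINIT machinery invokes, which passes the registered argument to the function; an empty parameter list required a mismatched function-pointer cast. A hypothetical registration showing the shape (the subsystem and order constants are illustrative, not from this diff):

	static void
	example_init(void *dummy __unused)
	{
		/* one-time setup, e.g. TAILQ_INIT() of a module list */
	}
	SYSINIT(example_init, SI_SUB_KLD, SI_ORDER_ANY, example_init, NULL);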
diff --git a/sys/fs/udf/osta.c b/sys/fs/udf/osta.c
index f79b86993367..1a083d8c26b1 100644
--- a/sys/fs/udf/osta.c
+++ b/sys/fs/udf/osta.c
@@ -383,7 +383,7 @@ int UDFTransName(
 	int maxFilenameLen;
 
 	/* Translate extension, and store it in ext. */
 	for(index = 0; index<EXT_SIZE &&
-		extIndex + index +1 < udfLen; index++ ) {
+		extIndex + index +1 < udfLen; index++) {
 		current = udfName[extIndex + index + 1];
 		if (IsIllegal(current) || !UnicodeIsPrint(current)) {
@@ -432,7 +432,7 @@ int UDFTransName(
 	/* Place a translated extension at end, if found. */
 	if (hasExt) {
 		newName[newIndex++] = PERIOD;
-		for (index = 0;index < localExtIndex ;index++ ) {
+		for (index = 0; index < localExtIndex; index++) {
 			newName[newIndex++] = ext[index];
 		}
 	}
diff --git a/sys/fs/unionfs/union_subr.c b/sys/fs/unionfs/union_subr.c
index a14f9ca74305..b6d6db60ca3d 100644
--- a/sys/fs/unionfs/union_subr.c
+++ b/sys/fs/unionfs/union_subr.c
@@ -587,6 +587,7 @@ unionfs_find_node_status(struct unionfs_node *unp, struct thread *td)
 	struct unionfs_node_status *unsp;
 	pid_t pid;
 
+	MPASS(td != NULL);
 	pid = td->td_proc->p_pid;
 
 	ASSERT_VOP_ELOCKED(UNIONFSTOV(unp), __func__);
@@ -612,6 +613,7 @@ unionfs_get_node_status(struct unionfs_node *unp, struct thread *td,
 	struct unionfs_node_status *unsp;
 	pid_t pid;
 
+	MPASS(td != NULL);
 	pid = td->td_proc->p_pid;
 
 	KASSERT(NULL != unspp, ("%s: NULL status", __func__));
diff --git a/sys/fs/unionfs/union_vnops.c b/sys/fs/unionfs/union_vnops.c
index 627b2f6e9a1d..66fee97a07d5 100644
--- a/sys/fs/unionfs/union_vnops.c
+++ b/sys/fs/unionfs/union_vnops.c
@@ -814,7 +814,7 @@ unionfs_close(struct vop_close_args *ap)
 	unp = VTOUNIONFS(vp);
 	lvp = unp->un_lowervp;
 	uvp = unp->un_uppervp;
-	unsp = unionfs_find_node_status(unp, td);
+	unsp = (td != NULL) ? unionfs_find_node_status(unp, td) : NULL;
 
 	if (unsp == NULL ||
 	    (unsp->uns_lower_opencnt <= 0 && unsp->uns_upper_opencnt <= 0)) {
@@ -2208,7 +2208,6 @@ unionfs_lock_restart:
 	vholdnz(tvp);
 	VI_UNLOCK(vp);
 	error = VOP_LOCK(tvp, flags);
-	vdrop(tvp);
 	if (error == 0 && (lvp_locked || VTOUNIONFS(vp) == NULL)) {
 		/*
 		 * After dropping the interlock above, there exists a window
@@ -2234,6 +2233,7 @@ unionfs_lock_restart:
 		unp = VTOUNIONFS(vp);
 		if (unp == NULL || unp->un_uppervp != NULL) {
 			VOP_UNLOCK(tvp);
+			vdrop(tvp);
 			/*
 			 * If we previously held the lock, the upgrade may
 			 * have temporarily dropped the lock, in which case
@@ -2249,6 +2249,7 @@ unionfs_lock_restart:
 			goto unionfs_lock_restart;
 		}
 	}
+	vdrop(tvp);
 
 	return (error);
 }
@@ -2259,7 +2260,6 @@ unionfs_unlock(struct vop_unlock_args *ap)
 	struct vnode *vp;
 	struct vnode *tvp;
 	struct unionfs_node *unp;
-	int error;
 
 	KASSERT_UNIONFS_VNODE(ap->a_vp);
 
@@ -2271,11 +2271,7 @@ unionfs_unlock(struct vop_unlock_args *ap)
 	tvp = (unp->un_uppervp != NULL ?
 	    unp->un_uppervp : unp->un_lowervp);
 
-	vholdnz(tvp);
-	error = VOP_UNLOCK(tvp);
-	vdrop(tvp);
-
-	return (error);
+	return (VOP_UNLOCK(tvp));
 }
 
 static int
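Note: the unionfs_lock() change extends the hold on tvp across the whole locking sequence. Previously vdrop(tvp) was issued immediately after VOP_LOCK(), yet the retry path could still call VOP_UNLOCK(tvp) afterwards, touching a vnode it no longer held; the hold now lasts until the last use. unionfs_unlock() goes the other way and drops its vhold/vdrop pair entirely: the caller owns the lock, so the vnode cannot be reclaimed mid-unlock, the same reasoning nullfs' null_unlock() documents above. A condensed, hypothetical flow of the lock path after the change (the retry predicate stands in for unionfs' real conditions):

	/* Hypothetical predicate standing in for unionfs' retry checks. */
	static bool	example_must_retry(struct vnode *vp);

	static int
	example_lock_flow(struct vnode *vp, struct vnode *tvp, int flags)
	{
		int error;

		vholdnz(tvp);		/* pin tvp; VOP_LOCK() may sleep */
		VI_UNLOCK(vp);
		error = VOP_LOCK(tvp, flags);
		if (error == 0 && example_must_retry(vp)) {
			VOP_UNLOCK(tvp);	/* tvp is still held here */
		}
		vdrop(tvp);		/* only after the last use of tvp */
		return (error);
	}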