Diffstat (limited to 'sys/fs')
-rw-r--r--  sys/fs/nullfs/null.h        | 17
-rw-r--r--  sys/fs/nullfs/null_subr.c   | 24
-rw-r--r--  sys/fs/nullfs/null_vfsops.c | 20
-rw-r--r--  sys/fs/nullfs/null_vnops.c  | 17
4 files changed, 57 insertions(+), 21 deletions(-)
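This change adds a per-mount "nounixbypass" option to nullfs. Vnodes on such a mount get a second vop vector, null_vnodeops_no_unp_bypass, whose vop_unp_* slots use the standard in-kernel implementations instead of bypassing to the lower filesystem, so UNIX-domain sockets bind to the nullfs vnode itself rather than to the lower vnode. The change also converts the null node hash buckets from CK_LIST to CK_SLIST. Assuming mount_nullfs(8) forwards unrecognized -o options to nullfs_mount() via nmount(2), as it does for the existing "nocache" option, the new behavior would be requested like this (illustrative paths):

	mount -t nullfs -o nounixbypass /some/lower /some/upper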
diff --git a/sys/fs/nullfs/null.h b/sys/fs/nullfs/null.h
index aa7a689bec34..7bfdc20a3f67 100644
--- a/sys/fs/nullfs/null.h
+++ b/sys/fs/nullfs/null.h
@@ -35,11 +35,12 @@
#ifndef FS_NULL_H
#define FS_NULL_H
-#define NULLM_CACHE 0x0001
-
#include <sys/ck.h>
#include <vm/uma.h>
+#define NULLM_CACHE 0x0001
+#define NULLM_NOUNPBYPASS 0x0002
+
struct null_mount {
struct mount *nullm_vfs;
struct vnode *nullm_lowerrootvp; /* Ref to lower root vnode */
@@ -53,7 +54,7 @@ struct null_mount {
* A cache of vnode references
*/
struct null_node {
- CK_LIST_ENTRY(null_node) null_hash; /* Hash list */
+ CK_SLIST_ENTRY(null_node) null_hash; /* Hash list */
struct vnode *null_lowervp; /* VREFed once */
struct vnode *null_vnode; /* Back pointer */
u_int null_flags;
@@ -82,6 +83,16 @@ struct vnode *null_checkvp(struct vnode *vp, char *fil, int lno);
#endif
extern struct vop_vector null_vnodeops;
+extern struct vop_vector null_vnodeops_no_unp_bypass;
+
+static inline bool
+null_is_nullfs_vnode(struct vnode *vp)
+{
+ const struct vop_vector *op;
+
+ op = vp->v_op;
+ return (op == &null_vnodeops || op == &null_vnodeops_no_unp_bypass);
+}
extern uma_zone_t null_node_zone;
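A vnode stamped with either vector is a nullfs vnode, so call sites that compared v_op against &null_vnodeops directly would now miss vnodes from "nounixbypass" mounts; null_is_nullfs_vnode() centralizes the check. A sketch of the call-site pattern the null_vfsops.c hunks below adopt (vp and nn are illustrative locals):

	struct null_node *nn;

	/* Before: recognizes only vnodes using the default vector. */
	if (vp->v_op == &null_vnodeops)
		nn = VTONULL(vp);

	/* After: recognizes vnodes using either nullfs vector. */
	if (null_is_nullfs_vnode(vp))
		nn = VTONULL(vp);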
diff --git a/sys/fs/nullfs/null_subr.c b/sys/fs/nullfs/null_subr.c
index 146d3bbdaedd..a843ae44f121 100644
--- a/sys/fs/nullfs/null_subr.c
+++ b/sys/fs/nullfs/null_subr.c
@@ -36,12 +36,12 @@
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
-#include <sys/rwlock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
-#include <sys/vnode.h>
+#include <sys/rwlock.h>
#include <sys/smr.h>
+#include <sys/vnode.h>
#include <fs/nullfs/null.h>
@@ -59,7 +59,7 @@ VFS_SMR_DECLARE;
#define NULL_NHASH(vp) (&null_node_hashtbl[vfs_hash_index(vp) & null_hash_mask])
-static CK_LIST_HEAD(null_node_hashhead, null_node) *null_node_hashtbl;
+static CK_SLIST_HEAD(null_node_hashhead, null_node) *null_node_hashtbl;
static struct rwlock null_hash_lock;
static u_long null_hash_mask;
@@ -116,7 +116,7 @@ null_hashget_locked(struct mount *mp, struct vnode *lowervp)
* reference count (but NOT the lower vnode's VREF counter).
*/
hd = NULL_NHASH(lowervp);
- CK_LIST_FOREACH(a, hd, null_hash) {
+ CK_SLIST_FOREACH(a, hd, null_hash) {
if (a->null_lowervp != lowervp)
continue;
/*
@@ -143,12 +143,12 @@ null_hashget(struct mount *mp, struct vnode *lowervp)
struct vnode *vp;
enum vgetstate vs;
- ASSERT_VOP_LOCKED(lowervp, "null_hashget");
+ ASSERT_VOP_LOCKED(lowervp, __func__);
rw_assert(&null_hash_lock, RA_UNLOCKED);
vfs_smr_enter();
hd = NULL_NHASH(lowervp);
- CK_LIST_FOREACH(a, hd, null_hash) {
+ CK_SLIST_FOREACH(a, hd, null_hash) {
if (a->null_lowervp != lowervp)
continue;
/*
@@ -181,7 +181,7 @@ null_hashins(struct mount *mp, struct null_node *xp)
hd = NULL_NHASH(xp->null_lowervp);
#ifdef INVARIANTS
- CK_LIST_FOREACH(oxp, hd, null_hash) {
+ CK_SLIST_FOREACH(oxp, hd, null_hash) {
if (oxp->null_lowervp == xp->null_lowervp &&
NULLTOV(oxp)->v_mount == mp) {
VNASSERT(0, NULLTOV(oxp),
@@ -189,7 +189,7 @@ null_hashins(struct mount *mp, struct null_node *xp)
}
}
#endif
- CK_LIST_INSERT_HEAD(hd, xp, null_hash);
+ CK_SLIST_INSERT_HEAD(hd, xp, null_hash);
}
static void
@@ -240,7 +240,9 @@ null_nodeget(struct mount *mp, struct vnode *lowervp, struct vnode **vpp)
*/
xp = uma_zalloc_smr(null_node_zone, M_WAITOK);
- error = getnewvnode("nullfs", mp, &null_vnodeops, &vp);
+ error = getnewvnode("nullfs", mp, (MOUNTTONULLMOUNT(mp)->nullm_flags &
+ NULLM_NOUNPBYPASS) != 0 ? &null_vnodeops_no_unp_bypass :
+ &null_vnodeops, &vp);
if (error) {
vput(lowervp);
uma_zfree_smr(null_node_zone, xp);
@@ -305,9 +307,11 @@ null_nodeget(struct mount *mp, struct vnode *lowervp, struct vnode **vpp)
void
null_hashrem(struct null_node *xp)
{
+ struct null_node_hashhead *hd;
+ hd = NULL_NHASH(xp->null_lowervp);
rw_wlock(&null_hash_lock);
- CK_LIST_REMOVE(xp, null_hash);
+ CK_SLIST_REMOVE(hd, xp, null_node, null_hash);
rw_wunlock(&null_hash_lock);
}
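The list conversion explains the extra bookkeeping in null_hashrem(): a CK_SLIST entry is singly linked and stores no back pointer, so CK_SLIST_REMOVE() must walk from the list head to find the predecessor, whereas CK_LIST_REMOVE() needed only the element itself. A minimal sketch of the removal shape, assuming the <sys/ck.h> macros used above (struct names illustrative):

	struct node {
		CK_SLIST_ENTRY(node) link;	/* one forward pointer, no back pointer */
	};
	CK_SLIST_HEAD(nodehead, node);

	static void
	node_remove(struct nodehead *hd, struct node *np)
	{
		/* Singly linked: needs the head plus the entry type and field. */
		CK_SLIST_REMOVE(hd, np, node, link);
	}

The doubly linked CK_LIST_REMOVE(np, link) unlinks in O(1) but costs a back pointer per entry; presumably the trade is acceptable here because each hash bucket is short and already searched linearly under SMR.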
diff --git a/sys/fs/nullfs/null_vfsops.c b/sys/fs/nullfs/null_vfsops.c
index 4cddf24a5745..170a3dd51cd8 100644
--- a/sys/fs/nullfs/null_vfsops.c
+++ b/sys/fs/nullfs/null_vfsops.c
@@ -85,6 +85,10 @@ nullfs_mount(struct mount *mp)
char *target;
int error, len;
bool isvnunlocked;
+ static const char cache_opt_name[] = "cache";
+ static const char nocache_opt_name[] = "nocache";
+ static const char unixbypass_opt_name[] = "unixbypass";
+ static const char nounixbypass_opt_name[] = "nounixbypass";
NULLFSDEBUG("nullfs_mount(mp = %p)\n", (void *)mp);
@@ -116,7 +120,7 @@ nullfs_mount(struct mount *mp)
/*
* Unlock lower node to avoid possible deadlock.
*/
- if (mp->mnt_vnodecovered->v_op == &null_vnodeops &&
+ if (null_is_nullfs_vnode(mp->mnt_vnodecovered) &&
VOP_ISLOCKED(mp->mnt_vnodecovered) == LK_EXCLUSIVE) {
VOP_UNLOCK(mp->mnt_vnodecovered);
isvnunlocked = true;
@@ -150,7 +154,7 @@ nullfs_mount(struct mount *mp)
/*
* Check multi null mount to avoid `lock against myself' panic.
*/
- if (mp->mnt_vnodecovered->v_op == &null_vnodeops) {
+ if (null_is_nullfs_vnode(mp->mnt_vnodecovered)) {
nn = VTONULL(mp->mnt_vnodecovered);
if (nn == NULL || lowerrootvp == nn->null_lowervp) {
NULLFSDEBUG("nullfs_mount: multi null mount?\n");
@@ -205,9 +209,10 @@ nullfs_mount(struct mount *mp)
MNT_IUNLOCK(mp);
}
- if (vfs_getopt(mp->mnt_optnew, "cache", NULL, NULL) == 0) {
+ if (vfs_getopt(mp->mnt_optnew, cache_opt_name, NULL, NULL) == 0) {
xmp->nullm_flags |= NULLM_CACHE;
- } else if (vfs_getopt(mp->mnt_optnew, "nocache", NULL, NULL) == 0) {
+ } else if (vfs_getopt(mp->mnt_optnew, nocache_opt_name, NULL,
+ NULL) == 0) {
;
} else if (null_cache_vnodes &&
(xmp->nullm_vfs->mnt_kern_flag & MNTK_NULL_NOCACHE) == 0) {
@@ -219,6 +224,13 @@ nullfs_mount(struct mount *mp)
&xmp->notify_node);
}
+ if (vfs_getopt(mp->mnt_optnew, unixbypass_opt_name, NULL, NULL) == 0) {
+ ;
+ } else if (vfs_getopt(mp->mnt_optnew, nounixbypass_opt_name, NULL,
+ NULL) == 0) {
+ xmp->nullm_flags |= NULLM_NOUNPBYPASS;
+ }
+
if (lowerrootvp == mp->mnt_vnodecovered) {
vn_lock(lowerrootvp, LK_EXCLUSIVE | LK_RETRY | LK_CANRECURSE);
lowerrootvp->v_vflag |= VV_CROSSLOCK;
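vfs_getopt() returns 0 when the named option exists in the list, and both the value and length pointers may be NULL when only presence matters, so the blocks above are pure presence tests: "unixbypass" (the default) is accepted and ignored, while "nounixbypass" sets NULLM_NOUNPBYPASS. A condensed sketch of the same pattern, using the names from the hunk above:

	if (vfs_getopt(mp->mnt_optnew, "nounixbypass", NULL, NULL) == 0)
		xmp->nullm_flags |= NULLM_NOUNPBYPASS;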
diff --git a/sys/fs/nullfs/null_vnops.c b/sys/fs/nullfs/null_vnops.c
index 375b6aa27531..d4baabeb40ab 100644
--- a/sys/fs/nullfs/null_vnops.c
+++ b/sys/fs/nullfs/null_vnops.c
@@ -278,7 +278,7 @@ null_bypass(struct vop_generic_args *ap)
* that aren't. (We must always map first vp or vclean fails.)
*/
if (i != 0 && (*this_vp_p == NULL ||
- (*this_vp_p)->v_op != &null_vnodeops)) {
+ !null_is_nullfs_vnode(*this_vp_p))) {
old_vps[i] = NULL;
} else {
old_vps[i] = *this_vp_p;
@@ -788,10 +788,10 @@ null_lock_prep_with_smr(struct vop_lock1_args *ap)
struct null_node *nn;
struct vnode *lvp;
- vfs_smr_enter();
-
lvp = NULL;
+ vfs_smr_enter();
+
nn = VTONULL_SMR(ap->a_vp);
if (__predict_true(nn != NULL)) {
lvp = nn->null_lowervp;
@@ -855,6 +855,8 @@ null_lock(struct vop_lock1_args *ap)
* case by reacquiring correct lock in requested mode.
*/
if (VTONULL(ap->a_vp) == NULL && error == 0) {
+ VOP_UNLOCK(lvp);
+
flags = ap->a_flags;
ap->a_flags &= ~LK_TYPE_MASK;
switch (flags & LK_TYPE_MASK) {
@@ -869,7 +871,6 @@ null_lock(struct vop_lock1_args *ap)
panic("Unsupported lock request %d\n",
flags);
}
- VOP_UNLOCK(lvp);
error = vop_stdlock(ap);
}
vdrop(lvp);
@@ -1255,3 +1256,11 @@ struct vop_vector null_vnodeops = {
.vop_copy_file_range = VOP_PANIC,
};
VFS_VOP_VECTOR_REGISTER(null_vnodeops);
+
+struct vop_vector null_vnodeops_no_unp_bypass = {
+ .vop_default = &null_vnodeops,
+ .vop_unp_bind = vop_stdunp_bind,
+ .vop_unp_connect = vop_stdunp_connect,
+ .vop_unp_detach = vop_stdunp_detach,
+};
+VFS_VOP_VECTOR_REGISTER(null_vnodeops_no_unp_bypass);
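In the new vector only the three vop_unp_* slots are populated; every other operation resolves through .vop_default into null_vnodeops, where it reaches null_bypass (or one of the explicit null_* handlers) and is forwarded to the lower vnode as usual. A conceptual sketch of the resulting dispatch for a vnode on a "nounixbypass" mount (not the literal vop resolution code; argument names illustrative):

	/*
	 * vop_unp_bind slot is set: the standard implementation runs,
	 * so the socket binds to the nullfs vnode itself.
	 */
	VOP_UNP_BIND(vp, unp);

	/*
	 * vop_read slot is NULL: the call falls through .vop_default to
	 * null_vnodeops and is bypassed to the lower filesystem.
	 */
	VOP_READ(vp, uio, ioflag, cred);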