Diffstat (limited to 'sys/fs/nullfs')
 -rw-r--r--  sys/fs/nullfs/null.h         15
 -rw-r--r--  sys/fs/nullfs/null_subr.c     4
 -rw-r--r--  sys/fs/nullfs/null_vfsops.c  20
 -rw-r--r--  sys/fs/nullfs/null_vnops.c   10
 4 files changed, 41 insertions(+), 8 deletions(-)
diff --git a/sys/fs/nullfs/null.h b/sys/fs/nullfs/null.h
index ad3f7779e108..7bfdc20a3f67 100644
--- a/sys/fs/nullfs/null.h
+++ b/sys/fs/nullfs/null.h
@@ -35,11 +35,12 @@
#ifndef FS_NULL_H
#define FS_NULL_H
-#define NULLM_CACHE 0x0001
-
#include <sys/ck.h>
#include <vm/uma.h>
+#define NULLM_CACHE 0x0001
+#define NULLM_NOUNPBYPASS 0x0002
+
struct null_mount {
struct mount *nullm_vfs;
struct vnode *nullm_lowerrootvp; /* Ref to lower root vnode */
@@ -82,6 +83,16 @@ struct vnode *null_checkvp(struct vnode *vp, char *fil, int lno);
#endif
extern struct vop_vector null_vnodeops;
+extern struct vop_vector null_vnodeops_no_unp_bypass;
+
+static inline bool
+null_is_nullfs_vnode(struct vnode *vp)
+{
+ const struct vop_vector *op;
+
+ op = vp->v_op;
+ return (op == &null_vnodeops || op == &null_vnodeops_no_unp_bypass);
+}
extern uma_zone_t null_node_zone;
diff --git a/sys/fs/nullfs/null_subr.c b/sys/fs/nullfs/null_subr.c
index d7f847d449d0..a843ae44f121 100644
--- a/sys/fs/nullfs/null_subr.c
+++ b/sys/fs/nullfs/null_subr.c
@@ -240,7 +240,9 @@ null_nodeget(struct mount *mp, struct vnode *lowervp, struct vnode **vpp)
*/
xp = uma_zalloc_smr(null_node_zone, M_WAITOK);
- error = getnewvnode("nullfs", mp, &null_vnodeops, &vp);
+ error = getnewvnode("nullfs", mp, (MOUNTTONULLMOUNT(mp)->nullm_flags &
+ NULLM_NOUNPBYPASS) != 0 ? &null_vnodeops_no_unp_bypass :
+ &null_vnodeops, &vp);
if (error) {
vput(lowervp);
uma_zfree_smr(null_node_zone, xp);
diff --git a/sys/fs/nullfs/null_vfsops.c b/sys/fs/nullfs/null_vfsops.c
index 4cddf24a5745..170a3dd51cd8 100644
--- a/sys/fs/nullfs/null_vfsops.c
+++ b/sys/fs/nullfs/null_vfsops.c
@@ -85,6 +85,10 @@ nullfs_mount(struct mount *mp)
char *target;
int error, len;
bool isvnunlocked;
+ static const char cache_opt_name[] = "cache";
+ static const char nocache_opt_name[] = "nocache";
+ static const char unixbypass_opt_name[] = "unixbypass";
+ static const char nounixbypass_opt_name[] = "nounixbypass";
NULLFSDEBUG("nullfs_mount(mp = %p)\n", (void *)mp);
@@ -116,7 +120,7 @@ nullfs_mount(struct mount *mp)
/*
* Unlock lower node to avoid possible deadlock.
*/
- if (mp->mnt_vnodecovered->v_op == &null_vnodeops &&
+ if (null_is_nullfs_vnode(mp->mnt_vnodecovered) &&
VOP_ISLOCKED(mp->mnt_vnodecovered) == LK_EXCLUSIVE) {
VOP_UNLOCK(mp->mnt_vnodecovered);
isvnunlocked = true;
@@ -150,7 +154,7 @@ nullfs_mount(struct mount *mp)
/*
* Check multi null mount to avoid `lock against myself' panic.
*/
- if (mp->mnt_vnodecovered->v_op == &null_vnodeops) {
+ if (null_is_nullfs_vnode(mp->mnt_vnodecovered)) {
nn = VTONULL(mp->mnt_vnodecovered);
if (nn == NULL || lowerrootvp == nn->null_lowervp) {
NULLFSDEBUG("nullfs_mount: multi null mount?\n");
@@ -205,9 +209,10 @@ nullfs_mount(struct mount *mp)
MNT_IUNLOCK(mp);
}
- if (vfs_getopt(mp->mnt_optnew, "cache", NULL, NULL) == 0) {
+ if (vfs_getopt(mp->mnt_optnew, cache_opt_name, NULL, NULL) == 0) {
xmp->nullm_flags |= NULLM_CACHE;
- } else if (vfs_getopt(mp->mnt_optnew, "nocache", NULL, NULL) == 0) {
+ } else if (vfs_getopt(mp->mnt_optnew, nocache_opt_name, NULL,
+ NULL) == 0) {
;
} else if (null_cache_vnodes &&
(xmp->nullm_vfs->mnt_kern_flag & MNTK_NULL_NOCACHE) == 0) {
@@ -219,6 +224,13 @@ nullfs_mount(struct mount *mp)
&xmp->notify_node);
}
+ if (vfs_getopt(mp->mnt_optnew, unixbypass_opt_name, NULL, NULL) == 0) {
+ ;
+ } else if (vfs_getopt(mp->mnt_optnew, nounixbypass_opt_name, NULL,
+ NULL) == 0) {
+ xmp->nullm_flags |= NULLM_NOUNPBYPASS;
+ }
+
if (lowerrootvp == mp->mnt_vnodecovered) {
vn_lock(lowerrootvp, LK_EXCLUSIVE | LK_RETRY | LK_CANRECURSE);
lowerrootvp->v_vflag |= VV_CROSSLOCK;
diff --git a/sys/fs/nullfs/null_vnops.c b/sys/fs/nullfs/null_vnops.c
index ec8a6b10b13f..d4baabeb40ab 100644
--- a/sys/fs/nullfs/null_vnops.c
+++ b/sys/fs/nullfs/null_vnops.c
@@ -278,7 +278,7 @@ null_bypass(struct vop_generic_args *ap)
* that aren't. (We must always map first vp or vclean fails.)
*/
if (i != 0 && (*this_vp_p == NULL ||
- (*this_vp_p)->v_op != &null_vnodeops)) {
+ !null_is_nullfs_vnode(*this_vp_p))) {
old_vps[i] = NULL;
} else {
old_vps[i] = *this_vp_p;
@@ -1256,3 +1256,11 @@ struct vop_vector null_vnodeops = {
.vop_copy_file_range = VOP_PANIC,
};
VFS_VOP_VECTOR_REGISTER(null_vnodeops);
+
+struct vop_vector null_vnodeops_no_unp_bypass = {
+ .vop_default = &null_vnodeops,
+ .vop_unp_bind = vop_stdunp_bind,
+ .vop_unp_connect = vop_stdunp_connect,
+ .vop_unp_detach = vop_stdunp_detach,
+};
+VFS_VOP_VECTOR_REGISTER(null_vnodeops_no_unp_bypass);
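
For illustration only, a minimal userland sketch of what the new "nounixbypass" knob affects; the mount point /mnt/null, the socket path, and a "mount -t nullfs -o nounixbypass /lower /mnt/null" invocation are assumed examples, not part of this change. Binding an AF_UNIX socket on a nullfs path reaches VOP_UNP_BIND; the stock null_vnodeops vector lets that fall through to its bypass toward the lower vnode, while the new null_vnodeops_no_unp_bypass vector handles it with vop_stdunp_bind() on the nullfs vnode itself.

/*
 * Sketch: bind an AF_UNIX socket on a path under a nullfs mount.
 * The path is a placeholder; run it against mounts created with and
 * without the assumed "-o nounixbypass" option to compare behaviour.
 */
#include <sys/socket.h>
#include <sys/un.h>

#include <err.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct sockaddr_un sun;
	int s;

	s = socket(AF_UNIX, SOCK_STREAM, 0);
	if (s == -1)
		err(1, "socket");

	memset(&sun, 0, sizeof(sun));
	sun.sun_family = AF_UNIX;
	strlcpy(sun.sun_path, "/mnt/null/test.sock", sizeof(sun.sun_path));

	/* VOP_UNP_BIND is issued on the nullfs vnode here. */
	if (bind(s, (struct sockaddr *)&sun, sizeof(sun)) == -1)
		err(1, "bind");
	if (listen(s, 1) == -1)
		err(1, "listen");

	close(s);
	return (0);
}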