summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--  sys/conf/options          2
-rw-r--r--  sys/kern/vfs_cache.c      8
-rw-r--r--  sys/kern/vfs_extattr.c    2
-rw-r--r--  sys/kern/vfs_syscalls.c   2
-rw-r--r--  sys/kern/vfs_vnops.c     10
5 files changed, 12 insertions, 12 deletions
diff --git a/sys/conf/options b/sys/conf/options
index dc617f34d824..29984680593b 100644
--- a/sys/conf/options
+++ b/sys/conf/options
@@ -389,7 +389,7 @@ BLKDEV_IOSIZE opt_global.h
DEBUG opt_global.h
DEBUG_LOCKS opt_global.h
DEBUG_VFS_LOCKS opt_global.h
-LOOKUP_EXCLUSIVE opt_global.h
+LOOKUP_SHARED opt_global.h
DIAGNOSTIC opt_global.h
ENABLE_VFS_IOOPT opt_global.h
INVARIANT_SUPPORT opt_global.h
diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c
index 49bd83f0b080..6fd537f90b21 100644
--- a/sys/kern/vfs_cache.c
+++ b/sys/kern/vfs_cache.c
@@ -566,7 +566,7 @@ vfs_cache_lookup(ap)
error = cache_lookup(dvp, vpp, cnp);
-#ifndef LOOKUP_EXCLUSIVE
+#ifdef LOOKUP_SHARED
if (!error) {
/* We do this because the rest of the system now expects to get
* a shared lock, which is later upgraded if LOCKSHARED is not
@@ -608,7 +608,7 @@ vfs_cache_lookup(ap)
} else if (flags & ISDOTDOT) {
VOP_UNLOCK(dvp, 0, td);
cnp->cn_flags |= PDIRUNLOCK;
-#ifndef LOOKUP_EXCLUSIVE
+#ifdef LOOKUP_SHARED
if ((flags & ISLASTCN) && (flags & LOCKSHARED))
error = vget(vp, LK_SHARED, td);
else
@@ -622,7 +622,7 @@ vfs_cache_lookup(ap)
cnp->cn_flags &= ~PDIRUNLOCK;
}
} else {
-#ifndef LOOKUP_EXCLUSIVE
+#ifdef LOOKUP_SHARED
if ((flags & ISLASTCN) && (flags & LOCKSHARED))
error = vget(vp, LK_SHARED, td);
else
@@ -654,7 +654,7 @@ vfs_cache_lookup(ap)
return (error);
cnp->cn_flags &= ~PDIRUNLOCK;
}
-#ifndef LOOKUP_EXCLUSIVE
+#ifdef LOOKUP_SHARED
error = VOP_CACHEDLOOKUP(dvp, vpp, cnp);
if (!error) {
diff --git a/sys/kern/vfs_extattr.c b/sys/kern/vfs_extattr.c
index f256e9e44844..44dec6d4d845 100644
--- a/sys/kern/vfs_extattr.c
+++ b/sys/kern/vfs_extattr.c
@@ -2585,7 +2585,7 @@ stat(td, uap)
int error;
struct nameidata nd;
-#ifndef LOOKUP_EXCLUSIVE
+#ifdef LOOKUP_SHARED
NDINIT(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF | NOOBJ,
UIO_USERSPACE, SCARG(uap, path), td);
#else
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index f256e9e44844..44dec6d4d845 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -2585,7 +2585,7 @@ stat(td, uap)
int error;
struct nameidata nd;
-#ifndef LOOKUP_EXCLUSIVE
+#ifdef LOOKUP_SHARED
NDINIT(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF | NOOBJ,
UIO_USERSPACE, SCARG(uap, path), td);
#else
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
index d2d735d480b6..64e26be32ff3 100644
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -106,7 +106,7 @@ vn_open_cred(ndp, flagp, cmode, cred)
struct vattr vat;
struct vattr *vap = &vat;
int mode, fmode, error;
-#ifndef LOOKUP_EXCLUSIVE
+#ifdef LOOKUP_SHARED
int exclusive; /* The current intended lock state */
exclusive = 0;
@@ -149,7 +149,7 @@ restart:
ASSERT_VOP_LOCKED(ndp->ni_vp, "create");
fmode &= ~O_TRUNC;
vp = ndp->ni_vp;
-#ifndef LOOKUP_EXCLUSIVE
+#ifdef LOOKUP_SHARED
exclusive = 1;
#endif
} else {
@@ -167,7 +167,7 @@ restart:
}
} else {
ndp->ni_cnd.cn_nameiop = LOOKUP;
-#ifndef LOOKUP_EXCLUSIVE
+#ifdef LOOKUP_SHARED
ndp->ni_cnd.cn_flags =
((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) |
LOCKSHARED | LOCKLEAF;
@@ -213,7 +213,7 @@ restart:
* Make sure that a VM object is created for VMIO support.
*/
if (vn_canvmio(vp) == TRUE) {
-#ifndef LOOKUP_EXCLUSIVE
+#ifdef LOOKUP_SHARED
int flock;
if (!exclusive && VOP_GETVOBJECT(vp, NULL) != 0)
@@ -236,7 +236,7 @@ restart:
*flagp = fmode;
return (error);
}
-#ifndef LOOKUP_EXCLUSIVE
+#ifdef LOOKUP_SHARED
flock = VOP_ISLOCKED(vp, td);
if (!exclusive && flock == LK_EXCLUSIVE)
VOP_LOCK(vp, LK_DOWNGRADE, td);