| author | Jeff Roberson <jeff@FreeBSD.org> | 2002-08-21 06:19:29 +0000 |
|---|---|---|
| committer | Jeff Roberson <jeff@FreeBSD.org> | 2002-08-21 06:19:29 +0000 |
| commit | 71ea4ba57c8473b817189625b25f02e6d3493a08 (patch) | |
| tree | 9f225f3ee185d5bca6d6ef1adac486148979854b | |
| parent | f0ac20f7d2bfc439be9af110470c4784becebe34 (diff) | |
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | sys/kern/vfs_subr.c | 65 |
| -rw-r--r-- | sys/kern/vnode_if.src | 8 |
| -rw-r--r-- | sys/sys/vnode.h | 24 |
| -rw-r--r-- | sys/tools/vnode_if.awk | 1 |

4 files changed, 90 insertions, 8 deletions
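In short, the diff below compiles in two vnode-interlock assertions, ASSERT_VI_LOCKED() and ASSERT_VI_UNLOCKED(), gates them behind a new vfs_badlock_mutex flag (off by default), replaces the open-coded mtx_assert(VI_MTX(vp), MA_OWNED) calls in vfs_subr.c with the new macro, and adds vop_lock/vop_unlock pre/post debug hooks to the vnode_if machinery. The following is a minimal userland sketch of the flag-gated assertion-macro pattern; the types and names here (fake_vnode, check_interlock, needs_interlock) are hypothetical stand-ins, not the kernel interfaces:

```c
/*
 * Userland sketch of the flag-gated interlock assertions added by the
 * diff below.  The real macros call mtx_assert(VI_MTX(vp), ...) on the
 * vnode interlock; here a plain flag stands in for "held by curthread".
 */
#include <assert.h>
#include <stdio.h>

struct fake_vnode {
	int vi_owned;		/* stand-in for "interlock held" */
};

/* Mirrors vfs_badlock_mutex: checks are compiled in but default off. */
static int check_interlock = 0;

#define ASSERT_VI_LOCKED(vp)						\
do {									\
	struct fake_vnode *_vp = (vp);					\
									\
	if (check_interlock)						\
		assert(_vp->vi_owned);					\
} while (0)

#define ASSERT_VI_UNLOCKED(vp)						\
do {									\
	struct fake_vnode *_vp = (vp);					\
									\
	if (check_interlock)						\
		assert(!_vp->vi_owned);					\
} while (0)

/* A routine that, like vclean() or vfree(), requires the interlock on entry. */
static void
needs_interlock(struct fake_vnode *vp)
{
	ASSERT_VI_LOCKED(vp);
	/* ... work that relies on the interlock being held ... */
}

int
main(void)
{
	struct fake_vnode vn = { 0 };

	check_interlock = 1;		/* enable checks, like setting vfs_badlock_mutex */
	ASSERT_VI_UNLOCKED(&vn);	/* ok: not held */
	vn.vi_owned = 1;		/* "VI_LOCK(vp)" */
	needs_interlock(&vn);		/* ok: held, assertion passes */
	vn.vi_owned = 0;		/* "VI_UNLOCK(vp)" */
	printf("interlock assertions passed\n");
	return (0);
}
```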
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 696c2942cde1..75b819b61b59 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -234,14 +234,24 @@ int (*softdep_process_worklist_hook)(struct mount *);
 
 #ifdef DEBUG_VFS_LOCKS
 /* Print lock violations */
 int vfs_badlock_print = 1;
+
 /* Panic on violation */
 int vfs_badlock_panic = 1;
+/* Check for interlock across VOPs */
+int vfs_badlock_mutex = 0;
+
 void
 vop_rename_pre(void *ap)
 {
        struct vop_rename_args *a = ap;
 
+       if (a->a_tvp)
+               ASSERT_VI_UNLOCKED(a->a_tvp);
+       ASSERT_VI_UNLOCKED(a->a_tdvp);
+       ASSERT_VI_UNLOCKED(a->a_fvp);
+       ASSERT_VI_UNLOCKED(a->a_fdvp);
+
        /* Check the source (from) */
        if (a->a_tdvp != a->a_fdvp)
                ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked.\n");
@@ -285,6 +295,7 @@ vop_lookup_pre(void *ap)
 
        dvp = a->a_dvp;
 
+       ASSERT_VI_UNLOCKED(dvp);
        ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP");
 }
 
@@ -303,6 +314,7 @@ vop_lookup_post(void *ap, int rc)
 
        flags = cnp->cn_flags;
 
+       ASSERT_VI_UNLOCKED(dvp);
        /*
         * If this is the last path component for this lookup and LOCPARENT
         * is set, OR if there is an error the directory has to be locked.
@@ -317,8 +329,49 @@ vop_lookup_post(void *ap, int rc)
        if (flags & PDIRUNLOCK)
                ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (PDIRUNLOCK)");
 
-       if (rc == 0)
+       if (rc == 0) {
+               ASSERT_VI_UNLOCKED(vp);
                ASSERT_VOP_LOCKED(vp, "VOP_LOOKUP (vpp)");
+       }
+}
+
+void
+vop_unlock_pre(void *ap)
+{
+       struct vop_unlock_args *a = ap;
+
+       if ((a->a_flags & LK_INTERLOCK) == 0)
+               ASSERT_VI_UNLOCKED(a->a_vp);
+       else
+               ASSERT_VI_LOCKED(a->a_vp);
+}
+
+void
+vop_unlock_post(void *ap, int rc)
+{
+       struct vop_unlock_args *a = ap;
+
+       ASSERT_VI_UNLOCKED(a->a_vp);
+}
+
+void
+vop_lock_pre(void *ap)
+{
+       struct vop_lock_args *a = ap;
+
+       if ((a->a_flags & LK_INTERLOCK) == 0)
+               ASSERT_VI_UNLOCKED(a->a_vp);
+       else
+               ASSERT_VI_LOCKED(a->a_vp);
+}
+
+void
+vop_lock_post(void *ap, int rc)
+{
+       struct vop_lock_args *a = ap;
+
+       ASSERT_VI_UNLOCKED(a->a_vp);
+       ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK");
 }
 
 #endif /* DEBUG_VFS_LOCKS */
@@ -1478,7 +1531,7 @@ vn_syncer_add_to_worklist(struct vnode *vp, int delay)
        int s, slot;
 
        s = splbio();
-       mtx_assert(VI_MTX(vp), MA_OWNED);
+       ASSERT_VI_LOCKED(vp);
 
        if (vp->v_iflag & VI_ONWORKLST)
                LIST_REMOVE(vp, v_synclist);
@@ -2242,7 +2295,7 @@ vclean(vp, flags, td)
 {
        int active;
 
-       mtx_assert(VI_MTX(vp), MA_OWNED);
+       ASSERT_VI_LOCKED(vp);
        /*
         * Check to see if the vnode is in use. If so we have to reference it
         * before we clean it out so that its count cannot fall to zero and
@@ -2440,7 +2493,7 @@ vgonel(vp, td)
         * If a vgone (or vclean) is already in progress,
         * wait until it is done and return.
         */
-       mtx_assert(VI_MTX(vp), MA_OWNED);
+       ASSERT_VI_LOCKED(vp);
        if (vp->v_iflag & VI_XLOCK) {
                vp->v_iflag |= VI_XWANT;
                VI_UNLOCK(vp);
@@ -2980,7 +3033,7 @@ vfree(vp)
 {
        int s;
 
-       mtx_assert(VI_MTX(vp), MA_OWNED);
+       ASSERT_VI_LOCKED(vp);
        s = splbio();
        mtx_lock(&vnode_free_list_mtx);
        KASSERT((vp->v_iflag & VI_FREE) == 0, ("vnode already free"));
@@ -3006,7 +3059,7 @@ vbusy(vp)
        int s;
 
        s = splbio();
-       mtx_assert(VI_MTX(vp), MA_OWNED);
+       ASSERT_VI_LOCKED(vp);
        mtx_lock(&vnode_free_list_mtx);
        KASSERT((vp->v_iflag & VI_FREE) != 0, ("vnode not free"));
        TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
diff --git a/sys/kern/vnode_if.src b/sys/kern/vnode_if.src
index ce7c277c7661..8971627c207c 100644
--- a/sys/kern/vnode_if.src
+++ b/sys/kern/vnode_if.src
@@ -356,7 +356,9 @@ vop_reclaim {
 };
 
 #
-#% lock         vp      ? ? ?
+#lock           vp      ? ? ?
+#! lock pre     vop_lock_pre
+#! lock post    vop_lock_post
 #
 vop_lock {
        IN struct vnode *vp;
@@ -365,7 +367,9 @@ vop_lock {
 };
 
 #
-#% unlock       vp      L ? L
+#unlock         vp      L ? L
+#! unlock pre   vop_unlock_pre
+#! unlock post  vop_unlock_post
 #
 vop_unlock {
        IN struct vnode *vp;
diff --git a/sys/sys/vnode.h b/sys/sys/vnode.h
index d977be3d0910..261d6eb30e93 100644
--- a/sys/sys/vnode.h
+++ b/sys/sys/vnode.h
@@ -510,6 +510,24 @@ struct vop_generic_args {
 
 extern int vfs_badlock_panic;
 extern int vfs_badlock_print;
+extern int vfs_badlock_mutex;
+
+#define ASSERT_VI_UNLOCKED(vp) \
+do { \
+       struct vnode *_vp = (vp); \
+       \
+       if (vfs_badlock_mutex) \
+               mtx_assert(VI_MTX(_vp), MA_NOTOWNED); \
+} while (0) \
+
+#define ASSERT_VI_LOCKED(vp) \
+do { \
+       struct vnode *_vp = (vp); \
+       \
+       if (vfs_badlock_mutex) \
+               mtx_assert(VI_MTX(_vp), MA_OWNED); \
+} while (0) \
+
 
 /*
  * This only exists to supress warnings from unlocked specfs accesses. It is
@@ -594,11 +612,17 @@ void vop_rename_pre(void *a);
 void vop_strategy_pre(void *a);
 void vop_lookup_pre(void *a);
 void vop_lookup_post(void *a, int rc);
+void vop_lock_pre(void *a);
+void vop_lock_post(void *a, int rc);
+void vop_unlock_pre(void *a);
+void vop_unlock_post(void *a, int rc);
 
 #else
 
 #define ASSERT_VOP_LOCKED(vp, str)
 #define ASSERT_VOP_UNLOCKED(vp, str)
 
+#define ASSERT_VI_UNLOCKED(vp)
+#define ASSERT_VI_LOCKED(vp)
 
 #endif
diff --git a/sys/tools/vnode_if.awk b/sys/tools/vnode_if.awk
index 93a737639ad7..d40bd4106785 100644
--- a/sys/tools/vnode_if.awk
+++ b/sys/tools/vnode_if.awk
@@ -66,6 +66,7 @@ function printh(s) {print s > hfile;}
 function add_debug_code(name, arg, pos)
 {
        if (lockdata[name, arg, pos]) {
+               printh("\tASSERT_VI_UNLOCKED("arg");");
                # Add assertions for locking
                if (lockdata[name, arg, pos] == "L")
                        printh("\tASSERT_VOP_LOCKED("arg", \""uname"\");");
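A note on the convention the new vop_lock_pre()/vop_unlock_pre() hooks check: the vnode interlock may only be passed across VOP_LOCK()/VOP_UNLOCK() by setting LK_INTERLOCK, in which case it must be held on entry and is released by the operation; in every other case it must not be held around a VOP at all, and it is never held on return. The compilable sketch below mirrors those pre/post conditions with hypothetical userland stand-ins (fake_vnode, fake_vop_lock); it illustrates the contract only and is not the kernel implementation:

```c
/*
 * Stand-in for the LK_INTERLOCK handshake checked by vop_lock_pre() and
 * vop_lock_post() in the diff above.  Types and names are hypothetical
 * userland substitutes, not FreeBSD kernel interfaces.
 */
#include <assert.h>

#define FAKE_LK_INTERLOCK	0x0001

struct fake_vnode {
	int vi_owned;		/* is the "interlock" held? */
	int v_locked;		/* is the "vnode lock" held? */
};

static void
fake_vop_lock(struct fake_vnode *vp, int flags)
{
	/* vop_lock_pre(): interlock held iff LK_INTERLOCK was passed. */
	if (flags & FAKE_LK_INTERLOCK)
		assert(vp->vi_owned);
	else
		assert(!vp->vi_owned);

	/* The operation consumes the interlock (if passed) and takes the vnode lock. */
	vp->vi_owned = 0;
	vp->v_locked = 1;

	/* vop_lock_post(): interlock released, vnode lock held. */
	assert(!vp->vi_owned);
	assert(vp->v_locked);
}

int
main(void)
{
	struct fake_vnode vn = { 0, 0 };

	/* Caller holds the interlock and hands it to the lock routine. */
	vn.vi_owned = 1;
	fake_vop_lock(&vn, FAKE_LK_INTERLOCK);

	/* Or locks without involving the interlock at all. */
	vn.v_locked = 0;
	fake_vop_lock(&vn, 0);
	return (0);
}
```

As in the diff, the real checks are compiled in only under DEBUG_VFS_LOCKS and stay inert until vfs_badlock_mutex is set to a non-zero value.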
