author	Mateusz Guzik <mjg@FreeBSD.org>	2020-10-24 13:30:37 +0000
committer	Mateusz Guzik <mjg@FreeBSD.org>	2020-10-24 13:30:37 +0000
commit	7cc171861341b4f3414f83db7e6772dae824571c (patch)
tree	1a407214d3838f8baa026310defb2209eb1b72d6
parent	ab79c9061c9666d6b6ce5b1b7d271a88b856fe27 (diff)
-rw-r--r--	sys/kern/vfs_subr.c	35
1 files changed, 29 insertions, 6 deletions
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index bb338933b371..944b2b806695 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -109,6 +109,7 @@ static void syncer_shutdown(void *arg, int howto);
 static int	vtryrecycle(struct vnode *vp);
 static void	v_init_counters(struct vnode *);
 static void	vgonel(struct vnode *);
+static bool	vhold_recycle(struct vnode *);
 static void	vfs_knllock(void *arg);
 static void	vfs_knlunlock(void *arg);
 static void	vfs_knl_assert_locked(void *arg);
@@ -1126,7 +1127,8 @@ restart:
 			goto next_iter;
 		}
 
-		vhold(vp);
+		if (!vhold_recycle(vp))
+			goto next_iter;
 		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
 		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
 		mtx_unlock(&vnode_list_mtx);
@@ -1231,7 +1233,8 @@ restart:
 		if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
 			continue;
 		}
-		vhold(vp);
+		if (!vhold_recycle(vp))
+			continue;
 		count--;
 		mtx_unlock(&vnode_list_mtx);
 		vtryrecycle(vp);
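
Both scan hunks above replace an unconditional vhold() with a hold that can fail. The failure mode being guarded against: with SMR-based freeing, a vnode stays linked on vnode_list with VHOLD_NO_SMR set while its memory is already headed back to UMA, so the recycle scan can stumble over it. A hypothetical interleaving (the freeing-side steps are assumptions based on the surrounding code, not patch text):

/*
 * Illustrative interleaving the new check guards against:
 *
 *   recycle scan (vnode_list_mtx held)      last reference holder
 *   ----------------------------------      ---------------------
 *   vp = TAILQ_NEXT(mvp, v_vnodelist);
 *                                           vdrop(vp)
 *                                             holdcnt: 1 -> 0
 *                                             holdcnt: 0 -> VHOLD_NO_SMR
 *                                             uma_zfree_smr(vnode_zone, vp)
 *   vhold_recycle(vp) -> false
 *     (skip vp; a plain vhold() here would
 *      have bumped the count of a vnode
 *      that is already being freed)
 */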
@@ -3248,13 +3251,11 @@ vholdnz(struct vnode *vp)
  * However, while this is more performant, it hinders debugging by eliminating
  * the previously mentioned invariant.
  */
-bool
-vhold_smr(struct vnode *vp)
+static bool __always_inline
+_vhold_cond(struct vnode *vp)
 {
 	int count;
 
-	VFS_SMR_ASSERT_ENTERED();
-
 	count = atomic_load_int(&vp->v_holdcnt);
 	for (;;) {
 		if (count & VHOLD_NO_SMR) {
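
The hunk elides the rest of the retry loop: on seeing VHOLD_NO_SMR the function returns false, otherwise it fcmpset-increments the count until it wins the race against a concurrent free. Below is a minimal, compilable userspace model of that protocol using C11 atomics; the flag value and the assertion-free body are simplifications for illustration, not the kernel's verbatim code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Flag value is assumed for the model; the kernel defines it in sys/vnode.h. */
#define	VHOLD_NO_SMR	(1 << 29)

struct vnode {
	atomic_int v_holdcnt;
};

/* Model of _vhold_cond(): acquire a hold unless the vnode is being freed. */
static bool
vhold_cond(struct vnode *vp)
{
	int count;

	count = atomic_load(&vp->v_holdcnt);
	for (;;) {
		if (count & VHOLD_NO_SMR)
			return (false);	/* lost the race, vnode is gone */
		/* On failure 'count' is refreshed and the loop retries. */
		if (atomic_compare_exchange_weak(&vp->v_holdcnt, &count,
		    count + 1))
			return (true);
	}
}

int
main(void)
{
	struct vnode vn = { .v_holdcnt = 0 };

	printf("live vnode:  %d\n", vhold_cond(&vn));	/* prints 1 */
	atomic_store(&vn.v_holdcnt, VHOLD_NO_SMR);
	printf("freed vnode: %d\n", vhold_cond(&vn));	/* prints 0 */
	return (0);
}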
@@ -3272,6 +3273,28 @@ vhold_smr(struct vnode *vp)
 	}
 }
 
+bool
+vhold_smr(struct vnode *vp)
+{
+
+	VFS_SMR_ASSERT_ENTERED();
+	return (_vhold_cond(vp));
+}
+
+/*
+ * Special case for vnode recycling.
+ *
+ * Vnodes are present on the global list until UMA takes them out.
+ * Attempts to recycle only need the relevant lock and have no use for SMR.
+ */
+static bool
+vhold_recycle(struct vnode *vp)
+{
+
+	mtx_assert(&vnode_list_mtx, MA_OWNED);
+	return (_vhold_cond(vp));
+}
+
 static void __noinline
 vdbatch_process(struct vdbatch *vd)
 {
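
Taken together, the patch leaves two entry points over the same guts, selected by what already protects the vnode's memory. A hedged usage sketch (illustrative kernel-style fragments, not taken from the tree):

/* Lockless lookup path: the SMR read section keeps vp's memory valid. */
vfs_smr_enter();
if (!vhold_smr(vp)) {
	vfs_smr_exit();
	return (EAGAIN);	/* freed concurrently; caller retries */
}
vfs_smr_exit();

/* Recycling path: vnode_list_mtx keeps vp's memory valid, no SMR needed. */
mtx_assert(&vnode_list_mtx, MA_OWNED);
if (!vhold_recycle(vp))
	goto next_iter;		/* already being freed; skip this vnode */

Moving the assertions out into thin wrappers keeps the inlined fast path identical for both callers while still checking the relevant invariant (SMR section entered, or vnode_list_mtx owned) at each entry point.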