summaryrefslogtreecommitdiff
path: root/sys/kern
diff options
context:
space:
mode:
authorMateusz Guzik <mjg@FreeBSD.org>2022-03-07 10:33:59 +0000
committerMateusz Guzik <mjg@FreeBSD.org>2022-03-25 14:03:46 +0000
commitb2516d2f7dcf15ec090373257417a77bf77c7009 (patch)
treed3375238cf8adf8fc682ec2565ed106512df69dc /sys/kern
parentbbf3b7bdf87669026ade4ad0fb6fefde331e68c9 (diff)
Diffstat (limited to 'sys/kern')
-rw-r--r--sys/kern/vfs_subr.c55
1 file changed, 45 insertions, 10 deletions
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index a7a432d31a55..5c5faf455f2b 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -113,6 +113,8 @@ static void vn_seqc_init(struct vnode *);
static void vn_seqc_write_end_free(struct vnode *vp);
static void vgonel(struct vnode *);
static bool vhold_recycle_free(struct vnode *);
+static void vdropl_recycle(struct vnode *vp);
+static void vdrop_recycle(struct vnode *vp);
static void vfs_knllock(void *arg);
static void vfs_knlunlock(void *arg);
static void vfs_knl_assert_lock(void *arg, int what);
@@ -1203,11 +1205,11 @@ restart:
mtx_unlock(&vnode_list_mtx);
if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
- vdrop(vp);
+ vdrop_recycle(vp);
goto next_iter_unlocked;
}
if (VOP_LOCK(vp, LK_EXCLUSIVE|LK_NOWAIT) != 0) {
- vdrop(vp);
+ vdrop_recycle(vp);
vn_finished_write(mp);
goto next_iter_unlocked;
}
@@ -1218,14 +1220,14 @@ restart:
(vp->v_object != NULL && vp->v_object->handle == vp &&
vp->v_object->resident_page_count > trigger)) {
VOP_UNLOCK(vp);
- vdropl(vp);
+ vdropl_recycle(vp);
vn_finished_write(mp);
goto next_iter_unlocked;
}
counter_u64_add(recycles_count, 1);
vgonel(vp);
VOP_UNLOCK(vp);
- vdropl(vp);
+ vdropl_recycle(vp);
vn_finished_write(mp);
done++;
next_iter_unlocked:
@@ -1636,7 +1638,7 @@ vtryrecycle(struct vnode *vp)
CTR2(KTR_VFS,
"%s: impossible to recycle, vp %p lock is already held",
__func__, vp);
- vdrop(vp);
+ vdrop_recycle(vp);
return (EWOULDBLOCK);
}
/*
@@ -1647,7 +1649,7 @@ vtryrecycle(struct vnode *vp)
CTR2(KTR_VFS,
"%s: impossible to recycle, cannot start the write for %p",
__func__, vp);
- vdrop(vp);
+ vdrop_recycle(vp);
return (EBUSY);
}
/*
@@ -1659,7 +1661,7 @@ vtryrecycle(struct vnode *vp)
VI_LOCK(vp);
if (vp->v_usecount) {
VOP_UNLOCK(vp);
- vdropl(vp);
+ vdropl_recycle(vp);
vn_finished_write(vnmp);
CTR2(KTR_VFS,
"%s: impossible to recycle, %p is already referenced",
@@ -1671,7 +1673,7 @@ vtryrecycle(struct vnode *vp)
vgonel(vp);
}
VOP_UNLOCK(vp);
- vdropl(vp);
+ vdropl_recycle(vp);
vn_finished_write(vnmp);
return (0);
}
@@ -3624,8 +3626,8 @@ vdrop(struct vnode *vp)
vdropl(vp);
}
-void
-vdropl(struct vnode *vp)
+static void __always_inline
+vdropl_impl(struct vnode *vp, bool enqueue)
{
ASSERT_VI_LOCKED(vp, __func__);
@@ -3653,6 +3655,39 @@ vdropl(struct vnode *vp)
vdbatch_enqueue(vp);
}
+void
+vdropl(struct vnode *vp)
+{
+
+ vdropl_impl(vp, true);
+}
+
+/*
+ * vdrop a vnode when recycling
+ *
+ * This is a special case routine only to be used when recycling; it differs
+ * from regular vdrop by not requeueing the vnode on the LRU.
+ *
+ * Consider a case where vtryrecycle continuously fails with all vnodes (due to
+ * e.g., frozen writes on the filesystem), filling the batch and causing it to
+ * be requeued. Then vnlru will end up revisiting the same vnodes. This is a
+ * loop which can last for as long as writes are frozen.
+ */
+static void
+vdropl_recycle(struct vnode *vp)
+{
+
+ vdropl_impl(vp, false);
+}
+
+static void
+vdrop_recycle(struct vnode *vp)
+{
+
+ VI_LOCK(vp);
+ vdropl_recycle(vp);
+}
+
/*
* Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT
* flags. DOINGINACT prevents us from recursing in calls to vinactive.