summary refs log tree commit diff
path: root/sys/vm
diff options
context:
space:
mode:
author	Ryan Libby <rlibby@FreeBSD.org>	2024-05-24 15:52:58 +0000
committer	Mark Johnston <markj@FreeBSD.org>	2025-07-10 21:00:34 +0000
commit	103763fab4f7349df53f7367816f1f4ca2881005 (patch)
tree	5fb0d74e8cc601058a8d3a6b7bf531f76aff715a /sys/vm
parent	791ac36668d518b3fff35c21176a31cab66c4e9b (diff)
Diffstat (limited to 'sys/vm')
-rw-r--r--sys/vm/vm_pageout.c16
-rw-r--r--sys/vm/vm_pagequeue.h6
2 files changed, 21 insertions, 1 deletion
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index c2c5281b87af..83f655eb852e 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -1468,7 +1468,21 @@ vm_pageout_scan_inactive(struct vm_domain *vmd, int page_shortage)
pq = &vmd->vmd_pagequeues[PQ_INACTIVE];
vm_pagequeue_lock(pq);
vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
- while (page_shortage > 0 && (m = vm_pageout_next(&ss, true)) != NULL) {
+ while (page_shortage > 0) {
+ /*
+ * If we need to refill the scan batch queue, release any
+ * optimistically held object lock. This gives someone else a
+ * chance to grab the lock, and also avoids holding it while we
+ * do unrelated work.
+ */
+ if (object != NULL && vm_batchqueue_empty(&ss.bq)) {
+ VM_OBJECT_WUNLOCK(object);
+ object = NULL;
+ }
+
+ m = vm_pageout_next(&ss, true);
+ if (m == NULL)
+ break;
KASSERT((m->flags & PG_MARKER) == 0,
("marker page %p was dequeued", m));
diff --git a/sys/vm/vm_pagequeue.h b/sys/vm/vm_pagequeue.h
index 70122fef9fff..43cb67a252b5 100644
--- a/sys/vm/vm_pagequeue.h
+++ b/sys/vm/vm_pagequeue.h
@@ -357,6 +357,12 @@ vm_batchqueue_init(struct vm_batchqueue *bq)
bq->bq_cnt = 0;
}
+static inline bool
+vm_batchqueue_empty(const struct vm_batchqueue *bq)
+{
+ return (bq->bq_cnt == 0);
+}
+
static inline int
vm_batchqueue_insert(struct vm_batchqueue *bq, vm_page_t m)
{