summaryrefslogtreecommitdiff
path: root/sys/vm/vm_pageout.c
diff options
context:
space:
mode:
Diffstat (limited to 'sys/vm/vm_pageout.c')
-rw-r--r--sys/vm/vm_pageout.c763
1 files changed, 358 insertions, 405 deletions
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 229a4090922c..c6817768c2bb 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_pageout.c,v 1.13 1994/02/10 08:08:37 davidg Exp $
+ * $Id: vm_pageout.c,v 1.24 1994/06/17 13:29:15 davidg Exp $
*/
/*
@@ -85,22 +85,35 @@
extern vm_map_t kmem_map;
int vm_pages_needed; /* Event on which pageout daemon sleeps */
+int vm_pagescanner; /* Event on which pagescanner sleeps */
int vm_pageout_free_min = 0; /* Stop pageout to wait for pagers at this free level */
int vm_pageout_pages_needed = 0; /* flag saying that the pageout daemon needs pages */
int vm_page_pagesfreed;
+extern int vm_page_count;
extern int npendingio;
extern int hz;
int vm_pageout_proc_limit;
extern int nswiodone;
+extern int swap_pager_full;
+extern int swap_pager_ready();
#define MAXREF 32767
-#define DEACT_MAX (DEACT_START * 4)
-#define MINSCAN 512 /* minimum number of pages to scan in active queue */
- /* set the "clock" hands to be (MINSCAN * 4096) Bytes */
-static int minscan;
-void vm_pageout_deact_bump(vm_page_t m) ;
+
+#define MAXSCAN 512 /* maximum number of pages to scan in active queue */
+ /* set the "clock" hands to be (MAXSCAN * 4096) Bytes */
+#define ACT_DECLINE 1
+#define ACT_ADVANCE 3
+#define ACT_MAX 300
+
+#define LOWATER ((2048*1024)/NBPG)
+
+#define VM_PAGEOUT_PAGE_COUNT 8
+int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
+static vm_offset_t vm_space_needed;
+int vm_pageout_req_do_stats;
+int vm_pageout_do_stats;
/*
@@ -108,9 +121,9 @@ void vm_pageout_deact_bump(vm_page_t m) ;
* cleans a vm_page
*/
int
-vm_pageout_clean(m, wait)
+vm_pageout_clean(m, sync)
register vm_page_t m;
- int wait;
+ int sync;
{
/*
* Clean the page and remove it from the
@@ -130,7 +143,12 @@ vm_pageout_clean(m, wait)
register vm_object_t object;
register vm_pager_t pager;
- int pageout_status;
+ int pageout_status[VM_PAGEOUT_PAGE_COUNT];
+ vm_page_t ms[VM_PAGEOUT_PAGE_COUNT];
+ int pageout_count;
+ int anyok=0;
+ int i;
+ vm_offset_t offset = m->offset;
object = m->object;
if (!object) {
@@ -153,37 +171,60 @@ vm_pageout_clean(m, wait)
vm_page_free_count < vm_pageout_free_min)
return 0;
-collapseagain:
if (!object->pager &&
object->shadow &&
object->shadow->paging_in_progress)
return 0;
- if (object->shadow) {
- vm_offset_t offset = m->offset;
- vm_object_collapse(object);
- if (!vm_page_lookup(object, offset))
+ if( !sync) {
+ if (object->shadow) {
+ vm_object_collapse(object);
+ if (!vm_page_lookup(object, offset))
+ return 0;
+ }
+
+ if ((m->flags & PG_BUSY) || (m->hold_count != 0)) {
return 0;
+ }
}
-waitagain:
- if (!wait && (m->flags & PG_BUSY)) {
- return 0;
- } else if (m->flags & PG_BUSY) {
- int s = splhigh();
- m->flags |= PG_WANTED;
- tsleep((caddr_t)m, PVM, "clnslp", 0);
- splx(s);
- goto waitagain;
- }
+ pageout_count = 1;
+ ms[0] = m;
- m->flags |= PG_BUSY;
+ if( pager = object->pager) {
+ for(i=1;i<vm_pageout_page_count;i++) {
+ if( ms[i] = vm_page_lookup( object, offset+i*NBPG)) {
+ if( ((ms[i]->flags & (PG_CLEAN|PG_INACTIVE|PG_BUSY)) == PG_INACTIVE)
+ && (ms[i]->wire_count == 0)
+ && (ms[i]->hold_count == 0))
+ pageout_count++;
+ else
+ break;
+ } else
+ break;
+ }
+ for(i=0;i<pageout_count;i++) {
+ ms[i]->flags |= PG_BUSY;
+ pmap_page_protect(VM_PAGE_TO_PHYS(ms[i]), VM_PROT_READ);
+ }
+ object->paging_in_progress += pageout_count;
+ vm_stat.pageouts += pageout_count;
+ } else {
+
+ m->flags |= PG_BUSY;
- pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_READ);
+ pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_READ);
- vm_stat.pageouts++;
+ vm_stat.pageouts++;
- object->paging_in_progress++;
+ object->paging_in_progress++;
+
+ pager = vm_pager_allocate(PG_DFLT, (caddr_t)0,
+ object->size, VM_PROT_ALL, 0);
+ if (pager != NULL) {
+ vm_object_setpager(object, pager, 0, FALSE);
+ }
+ }
/*
* If there is no pager for the page,
@@ -194,160 +235,83 @@ waitagain:
* later.
*/
- if ((pager = object->pager) == NULL) {
- pager = vm_pager_allocate(PG_DFLT, (caddr_t)0,
- object->size, VM_PROT_ALL, 0);
- if (pager != NULL) {
- vm_object_setpager(object, pager, 0, FALSE);
- }
- }
if ((pager && pager->pg_type == PG_SWAP) ||
vm_page_free_count >= vm_pageout_free_min) {
- pageout_status = pager ?
- vm_pager_put(pager, m, (((object == kernel_object) || wait) ? TRUE: FALSE)) :
- VM_PAGER_FAIL;
- } else
- pageout_status = VM_PAGER_FAIL;
-
- switch (pageout_status) {
- case VM_PAGER_OK:
- m->flags &= ~PG_LAUNDRY;
- break;
- case VM_PAGER_PEND:
- m->flags &= ~PG_LAUNDRY;
- break;
- case VM_PAGER_BAD:
- /*
- * Page outside of range of object.
- * Right now we essentially lose the
- * changes by pretending it worked.
- */
- m->flags &= ~PG_LAUNDRY;
- m->flags |= PG_CLEAN;
- pmap_clear_modify(VM_PAGE_TO_PHYS(m));
- break;
- case VM_PAGER_FAIL:
- /*
- * If page couldn't be paged out, then
- * reactivate the page so it doesn't
- * clog the inactive list. (We will
- * try paging out it again later).
- */
- if ((m->flags & PG_ACTIVE) == 0)
- vm_page_activate(m);
- break;
- case VM_PAGER_TRYAGAIN:
- break;
- }
-
-
- /*
- * If the operation is still going, leave
- * the page busy to block all other accesses.
- * Also, leave the paging in progress
- * indicator set so that we don't attempt an
- * object collapse.
- */
- if (pageout_status != VM_PAGER_PEND) {
- if ((m->flags & PG_ACTIVE) == 0 &&
- pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
- vm_page_activate(m);
+ if( pageout_count == 1) {
+ pageout_status[0] = pager ?
+ vm_pager_put(pager, m,
+ ((sync || (object == kernel_object)) ? TRUE: FALSE)) :
+ VM_PAGER_FAIL;
+ } else {
+ if( !pager) {
+ for(i=0;i<pageout_count;i++)
+ pageout_status[i] = VM_PAGER_FAIL;
+ } else {
+ vm_pager_putmulti(pager, ms, pageout_count,
+ ((sync || (object == kernel_object)) ? TRUE : FALSE),
+ pageout_status);
+ }
}
- PAGE_WAKEUP(m);
- if (--object->paging_in_progress == 0)
- wakeup((caddr_t) object);
+
+ } else {
+ for(i=0;i<pageout_count;i++)
+ pageout_status[i] = VM_PAGER_FAIL;
}
- return (pageout_status == VM_PAGER_PEND ||
- pageout_status == VM_PAGER_OK) ? 1 : 0;
-}
-int
-vm_fault_object_deactivate_pages(map, object, dummy)
- vm_map_t map;
- vm_object_t object;
- int dummy;
-{
- register vm_page_t p, next;
- int rcount;
- int s;
- int dcount;
- int count;
-
- dcount = 0;
- /*
- * deactivate the pages in the objects shadow
- */
+ for(i=0;i<pageout_count;i++) {
+ switch (pageout_status[i]) {
+ case VM_PAGER_OK:
+ ms[i]->flags &= ~PG_LAUNDRY;
+ ++anyok;
+ break;
+ case VM_PAGER_PEND:
+ ms[i]->flags &= ~PG_LAUNDRY;
+ ++anyok;
+ break;
+ case VM_PAGER_BAD:
+ /*
+ * Page outside of range of object.
+ * Right now we essentially lose the
+ * changes by pretending it worked.
+ */
+ ms[i]->flags &= ~PG_LAUNDRY;
+ ms[i]->flags |= PG_CLEAN;
+ pmap_clear_modify(VM_PAGE_TO_PHYS(ms[i]));
+ break;
+ case VM_PAGER_FAIL:
+ /*
+ * If page couldn't be paged out, then
+ * reactivate the page so it doesn't
+ * clog the inactive list. (We will
+ * try paging it out again later).
+ */
+ if (ms[i]->flags & PG_INACTIVE)
+ vm_page_activate(ms[i]);
+ break;
+ case VM_PAGER_TRYAGAIN:
+ break;
+ }
- if (object->shadow)
- dcount += vm_fault_object_deactivate_pages(map, object->shadow, 0);
- /*
- * scan the objects memory queue and remove 20% of the active pages
- */
- rcount = object->resident_page_count;
- count = rcount;
- if (count == 0)
- return dcount;
-#define MINOBJWRITE 10
-#define OBJDIVISOR 5
- if (count > MINOBJWRITE) {
- count = MINOBJWRITE + ((count - MINOBJWRITE) / OBJDIVISOR);
- }
- p = (vm_page_t) queue_first(&object->memq);
- while ((rcount-- > 0) && !queue_end(&object->memq, (queue_entry_t) p) ) {
- next = (vm_page_t) queue_next(&p->listq);
- vm_page_lock_queues();
/*
- * if a page is active, not wired and is in the processes pmap,
- * then deactivate the page.
+ * If the operation is still going, leave
+ * the page busy to block all other accesses.
+ * Also, leave the paging in progress
+ * indicator set so that we don't attempt an
+ * object collapse.
*/
- if ((p->flags & (PG_ACTIVE|PG_BUSY)) == PG_ACTIVE &&
- p->wire_count == 0 &&
- pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
- if (!pmap_is_referenced(VM_PAGE_TO_PHYS(p))) {
- vm_page_deactivate(p);
- if ((p->flags & PG_CLEAN) == 0) {
- vm_pageout_clean(p, 0);
- }
- ++dcount;
- if (--count <= 0) {
- vm_page_unlock_queues();
- s = splbio();
- while (object->paging_in_progress) {
- tsleep((caddr_t) object,PVM,"vmfobw",0);
- }
- splx(s);
- return dcount;
- }
- } else {
- vm_pageout_deact_bump(p);
- pmap_clear_reference(VM_PAGE_TO_PHYS(p));
- queue_remove(&object->memq, p, vm_page_t, listq);
- queue_enter(&object->memq, p, vm_page_t, listq);
- queue_remove(&vm_page_queue_active, p, vm_page_t, pageq);
- queue_enter(&vm_page_queue_active, p, vm_page_t, pageq);
+ if (pageout_status[i] != VM_PAGER_PEND) {
+ PAGE_WAKEUP(ms[i]);
+ if (--object->paging_in_progress == 0)
+ wakeup((caddr_t) object);
+ if (pmap_is_referenced(VM_PAGE_TO_PHYS(ms[i]))) {
+ pmap_clear_reference(VM_PAGE_TO_PHYS(ms[i]));
+ if( ms[i]->flags & PG_INACTIVE)
+ vm_page_activate(ms[i]);
}
- /*
- * if a page is inactive and has been modified, clean it now
- */
- } else if ((p->flags & (PG_INACTIVE|PG_BUSY)) == PG_INACTIVE) {
- if ((p->flags & PG_CLEAN) &&
- pmap_is_modified(VM_PAGE_TO_PHYS(p)))
- p->flags &= ~PG_CLEAN;
-
- if ((p->flags & PG_CLEAN) == 0)
- vm_pageout_clean(p, 0);
}
-
- vm_page_unlock_queues();
- p = next;
- }
- s = splbio();
- while (object->paging_in_progress) {
- tsleep((caddr_t)object,PVM,"vmfobw",0);
}
- splx(s);
- return dcount;
+ return anyok;
}
/*
@@ -376,7 +340,11 @@ vm_pageout_object_deactivate_pages(map, object, count)
count = 1;
if (object->shadow) {
- dcount += vm_pageout_object_deactivate_pages(map, object->shadow, count);
+ int scount = count;
+ if( object->shadow->ref_count > 1)
+ scount /= object->shadow->ref_count;
+ if( scount)
+ dcount += vm_pageout_object_deactivate_pages(map, object->shadow, scount);
}
if (object->paging_in_progress)
@@ -396,15 +364,28 @@ vm_pageout_object_deactivate_pages(map, object, count)
*/
if ((p->flags & (PG_ACTIVE|PG_BUSY)) == PG_ACTIVE &&
p->wire_count == 0 &&
+ p->hold_count == 0 &&
pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
if (!pmap_is_referenced(VM_PAGE_TO_PHYS(p))) {
- if (object->ref_count <= 1)
+ p->act_count -= min(p->act_count, ACT_DECLINE);
+ /*
+ * if the page act_count is zero -- then we deactivate
+ */
+ if (!p->act_count) {
vm_page_deactivate(p);
- else
- vm_page_pageout_deactivate(p);
- if (((p->flags & PG_INACTIVE)) &&
- (p->flags & PG_CLEAN) == 0)
- vm_pageout_clean(p, 0);
+ pmap_page_protect(VM_PAGE_TO_PHYS(p),
+ VM_PROT_NONE);
+ /*
+ * else if on the next go-around we will deactivate the page
+ * we need to place the page on the end of the queue to age
+ * the other pages in memory.
+ */
+ } else {
+ queue_remove(&vm_page_queue_active, p, vm_page_t, pageq);
+ queue_enter(&vm_page_queue_active, p, vm_page_t, pageq);
+ queue_remove(&object->memq, p, vm_page_t, listq);
+ queue_enter(&object->memq, p, vm_page_t, listq);
+ }
/*
* see if we are done yet
*/
@@ -419,23 +400,18 @@ vm_pageout_object_deactivate_pages(map, object, count)
}
} else {
- vm_pageout_deact_bump(p);
+ /*
+ * Move the page to the bottom of the queue.
+ */
pmap_clear_reference(VM_PAGE_TO_PHYS(p));
+ if (p->act_count < ACT_MAX)
+ p->act_count += ACT_ADVANCE;
+
queue_remove(&object->memq, p, vm_page_t, listq);
queue_enter(&object->memq, p, vm_page_t, listq);
queue_remove(&vm_page_queue_active, p, vm_page_t, pageq);
queue_enter(&vm_page_queue_active, p, vm_page_t, pageq);
}
- /*
- * if a page is inactive and has been modified, clean it now
- */
- } else if ((p->flags & (PG_INACTIVE|PG_BUSY)) == PG_INACTIVE) {
- if ((p->flags & PG_CLEAN) &&
- pmap_is_modified(VM_PAGE_TO_PHYS(p)))
- p->flags &= ~PG_CLEAN;
-
- if ((p->flags & PG_CLEAN) == 0)
- vm_pageout_clean(p, 0);
}
vm_page_unlock_queues();
@@ -488,50 +464,28 @@ vm_pageout_map_deactivate_pages(map, entry, count, freeer)
return;
}
-void
-vm_fault_free_pages(p)
- struct proc *p;
-{
- int overage = 1;
- vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
- (vm_map_entry_t) 0, &overage, vm_fault_object_deactivate_pages);
-}
-
/*
* vm_pageout_scan does the dirty work for the pageout daemon.
*/
-void
+int
vm_pageout_scan()
{
vm_page_t m;
int page_shortage, maxscan, maxlaunder;
- int pages_freed, free, nproc, nbusy;
+ int pages_freed, free, nproc;
+ int desired_free;
vm_page_t next;
struct proc *p;
vm_object_t object;
int s;
+ int force_wakeup = 0;
+morefree:
/*
- * deactivate objects with ref_counts == 0
- */
- object = (vm_object_t) queue_first(&vm_object_list);
- while (!queue_end(&vm_object_list, (queue_entry_t) object)) {
- if (object->ref_count == 0)
- vm_object_deactivate_pages(object);
- object = (vm_object_t) queue_next(&object->object_list);
- }
-
-rerun:
-#if 1
- /*
- * next scan the processes for exceeding their rlimits or if process
+ * scan the processes for exceeding their rlimits or if process
* is swapped out -- deactivate pages
*/
-rescanproc1a:
- for (p = allproc; p != NULL; p = p->p_nxt)
- p->p_flag &= ~SPAGEDAEMON;
-
rescanproc1:
for (p = allproc; p != NULL; p = p->p_nxt) {
vm_offset_t size;
@@ -572,22 +526,17 @@ rescanproc1:
overage = (size - limit) / NBPG;
vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
(vm_map_entry_t) 0, &overage, vm_pageout_object_deactivate_pages);
- p->p_flag |= SPAGEDAEMON;
- goto rescanproc1;
}
- p->p_flag |= SPAGEDAEMON;
+
}
-#if 0
if (((vm_page_free_count + vm_page_inactive_count) >=
(vm_page_inactive_target + vm_page_free_target)) &&
(vm_page_free_count >= vm_page_free_target))
- return;
-#endif
-
-#endif
+ return force_wakeup;
pages_freed = 0;
+ desired_free = vm_page_free_target;
/*
* Start scanning the inactive queue for pages we can free.
@@ -597,26 +546,37 @@ rescanproc1:
*/
maxlaunder = (vm_page_free_target - vm_page_free_count);
-rescan:
- m = (vm_page_t) queue_first(&vm_page_queue_inactive);
maxscan = vm_page_inactive_count;
+rescan1:
+ m = (vm_page_t) queue_first(&vm_page_queue_inactive);
while (maxscan-- > 0) {
vm_page_t next;
-
if (queue_end(&vm_page_queue_inactive, (queue_entry_t) m)
- || (vm_page_free_count >= vm_page_free_target)) {
+ || (vm_page_free_count >= desired_free)) {
break;
}
next = (vm_page_t) queue_next(&m->pageq);
+ if( (m->flags & PG_INACTIVE) == 0) {
+ printf("vm_pageout_scan: page not inactive?");
+ continue;
+ }
+
+ /*
+ * activate held pages
+ */
+ if (m->hold_count != 0) {
+ vm_page_activate(m);
+ m = next;
+ continue;
+ }
+
/*
* dont mess with busy pages
*/
if (m->flags & PG_BUSY) {
- queue_remove(&vm_page_queue_inactive, m, vm_page_t, pageq);
- queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq);
m = next;
continue;
}
@@ -628,34 +588,25 @@ rescan:
* vm system.
*/
if (m->flags & PG_CLEAN) {
- if ((vm_page_free_count > vm_pageout_free_min)
+ if ((vm_page_free_count > vm_pageout_free_min) /* XXX */
&& pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
vm_page_activate(m);
- ++vm_stat.reactivations;
- m = next;
- continue;
- }
- else {
+ } else if (!m->act_count) {
pmap_page_protect(VM_PAGE_TO_PHYS(m),
VM_PROT_NONE);
vm_page_free(m);
++pages_freed;
- m = next;
- continue;
+ } else {
+ m->act_count -= min(m->act_count, ACT_DECLINE);
}
} else if ((m->flags & PG_LAUNDRY) && maxlaunder > 0) {
- /*
- * if a page has been used even if it is in the laundry,
- * activate it.
- */
-
+ int written;
if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
+ pmap_clear_reference(VM_PAGE_TO_PHYS(m));
vm_page_activate(m);
- m->flags &= ~PG_LAUNDRY;
m = next;
continue;
}
-
/*
* If a page is dirty, then it is either
* being washed (but not yet cleaned)
@@ -664,17 +615,18 @@ rescan:
* cleaning operation.
*/
- if (vm_pageout_clean(m,0)) {
- --maxlaunder;
- /*
- * if the next page has been re-activated, start scanning again
- */
- if ((next->flags & PG_INACTIVE) == 0)
- goto rescan;
+ if (written = vm_pageout_clean(m,0)) {
+ maxlaunder -= written;
}
- } else if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
+ /*
+ * if the next page has been re-activated, start scanning again
+ */
+ if (!next || (next->flags & PG_INACTIVE) == 0)
+ goto rescan1;
+ } else if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
+ pmap_clear_reference(VM_PAGE_TO_PHYS(m));
vm_page_activate(m);
- }
+ }
m = next;
}
@@ -682,42 +634,11 @@ rescan:
* now check malloc area or swap processes out if we are in low
* memory conditions
*/
- free = vm_page_free_count;
- if (free <= vm_page_free_min) {
- /*
- * Be sure the pmap system is updated so
- * we can scan the inactive queue.
- */
- pmap_update();
-
+ if (vm_page_free_count < vm_page_free_min) {
/*
* swap out inactive processes
*/
swapout_threads();
-
-#if 0
- /*
- * see if malloc has anything for us
- */
- if (free <= vm_page_free_reserved)
- malloc_gc();
-#endif
- }
-
-skipfree:
- /*
- * If we did not free any pages, but we need to do so, we grow the
- * inactive target. But as we successfully free pages, then we
- * shrink the inactive target.
- */
- if (pages_freed == 0 && vm_page_free_count < vm_page_free_min) {
- vm_page_inactive_target += (vm_page_free_min - vm_page_free_count);
- if (vm_page_inactive_target > vm_page_free_target*5)
- vm_page_inactive_target = vm_page_free_target*5;
- } else if (pages_freed > 0) {
- vm_page_inactive_target -= vm_page_free_min/2;
- if (vm_page_inactive_target < vm_page_free_target*2)
- vm_page_inactive_target = vm_page_free_target*2;
}
/*
@@ -726,35 +647,27 @@ skipfree:
* to inactive.
*/
-restart_inactivate_all:
-
- page_shortage = vm_page_inactive_target - vm_page_inactive_count;
- page_shortage -= vm_page_free_count;
+ page_shortage = vm_page_inactive_target -
+ (vm_page_free_count + vm_page_inactive_count);
if (page_shortage <= 0) {
- if (pages_freed == 0 &&
- ((vm_page_free_count + vm_page_inactive_count) <
+ if (pages_freed == 0) {
+ if( vm_page_free_count < vm_page_free_min) {
+ page_shortage = vm_page_free_min - vm_page_free_count;
+ } else if(((vm_page_free_count + vm_page_inactive_count) <
(vm_page_free_min + vm_page_inactive_target))) {
- page_shortage = 1;
- } else {
- page_shortage = 0;
+ page_shortage = 1;
+ } else {
+ page_shortage = 0;
+ }
}
+
}
- maxscan = vm_page_active_count;
-
- /*
- * deactivate pages that are active, but have not been used
- * for a while.
- */
-restart_inactivate:
m = (vm_page_t) queue_first(&vm_page_queue_active);
- while (maxscan-- > 0) {
+ maxscan = vm_page_active_count;
+ while (maxscan-- && (page_shortage > 0)) {
- if (page_shortage <= 0 &&
- maxscan < (vm_page_active_count - minscan) )
- break;
-
if (queue_end(&vm_page_queue_active, (queue_entry_t) m)) {
break;
}
@@ -762,109 +675,156 @@ restart_inactivate:
next = (vm_page_t) queue_next(&m->pageq);
/*
- * dont mess with pages that are busy
+ * Don't deactivate pages that are busy.
*/
- if (m->flags & PG_BUSY) {
+ if ((m->flags & PG_BUSY) || (m->hold_count != 0)) {
m = next;
continue;
}
- /*
- * Move some more pages from active to inactive.
- */
+ if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
+ pmap_clear_reference(VM_PAGE_TO_PHYS(m));
+ if (m->act_count < ACT_MAX)
+ m->act_count += ACT_ADVANCE;
+ queue_remove(&vm_page_queue_active, m, vm_page_t, pageq);
+ queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
+ queue_remove(&m->object->memq, m, vm_page_t, listq);
+ queue_enter(&m->object->memq, m, vm_page_t, listq);
+ } else {
+ m->act_count -= min(m->act_count, ACT_DECLINE);
- /*
- * see if there are any pages that are able to be deactivated
- */
- /*
- * the referenced bit is the one that say that the page
- * has been used.
- */
- if (!pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
/*
- * if the page has not been referenced, call the
- * vm_page_pageout_deactivate routine. It might
- * not deactivate the page every time. There is
- * a policy associated with it.
+ * if the page act_count is zero -- then we deactivate
*/
- if (page_shortage > 0) {
- if (vm_page_pageout_deactivate(m)) {
- /*
- * if the page was really deactivated, then
- * decrement the page_shortage
- */
- if ((m->flags & PG_ACTIVE) == 0) {
- --page_shortage;
- }
- }
- }
- } else {
+ if (!m->act_count) {
+ vm_page_deactivate(m);
+ --page_shortage;
/*
- * if the page was recently referenced, set our
- * deactivate count and clear reference for a future
- * check for deactivation.
+ * else if on the next go-around we will deactivate the page
+ * we need to place the page on the end of the queue to age
+ * the other pages in memory.
*/
- vm_pageout_deact_bump(m);
- if (page_shortage > 0 || m->deact >= (DEACT_MAX/2))
- pmap_clear_reference(VM_PAGE_TO_PHYS(m));
- queue_remove(&m->object->memq, m, vm_page_t, listq);
- queue_enter(&m->object->memq, m, vm_page_t, listq);
- queue_remove(&vm_page_queue_active, m, vm_page_t, pageq);
- queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
+ } else {
+ queue_remove(&vm_page_queue_active, m, vm_page_t, pageq);
+ queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
+ queue_remove(&m->object->memq, m, vm_page_t, listq);
+ queue_enter(&m->object->memq, m, vm_page_t, listq);
+ }
}
+
m = next;
}
- vm_page_pagesfreed += pages_freed;
-}
+ /*
+ * if we have not freed any pages and we are desperate for memory
+ * then we keep trying until we get some (any) memory.
+ */
-/*
- * this code maintains a dynamic reference count per page
- */
-void
-vm_pageout_deact_bump(vm_page_t m) {
- if( m->deact >= DEACT_START) {
- m->deact += 1;
- if( m->deact > DEACT_MAX)
- m->deact = DEACT_MAX;
- } else {
- m->deact += DEACT_START;
+ if( !force_wakeup && (swap_pager_full || !force_wakeup ||
+ (pages_freed == 0 && (vm_page_free_count < vm_page_free_min)))){
+ vm_pager_sync();
+ force_wakeup = 1;
+ goto morefree;
}
+ vm_page_pagesfreed += pages_freed;
+ return force_wakeup;
}
-/*
- * optionally do a deactivate if the deactivate has been done
- * enough to justify it.
- */
-int
-vm_page_pageout_deactivate(m)
- vm_page_t m;
+void
+vm_pagescan()
{
+ int maxscan, pages_scanned, pages_referenced, nextscan, scantick = hz/20;
+ int m_ref, next_ref;
+ vm_page_t m, next;
+
+ (void) splnone();
+
+ nextscan = scantick;
- switch (m->deact) {
-case DEACT_FREE:
- vm_page_deactivate(m);
- return 1;
-case DEACT_CLEAN:
- break;
-case DEACT_DELAY:
- vm_page_makefault(m);
-case DEACT_START:
- break;
+scanloop:
+
+ pages_scanned = 0;
+ pages_referenced = 0;
+ maxscan = min(vm_page_active_count, MAXSCAN);
+
+ /*
+ * Gather statistics on page usage.
+ */
+ m = (vm_page_t) queue_first(&vm_page_queue_active);
+ while (maxscan-- > 0) {
+
+ if (queue_end(&vm_page_queue_active, (queue_entry_t) m)) {
+ break;
+ }
+
+ ++pages_scanned;
+
+ next = (vm_page_t) queue_next(&m->pageq);
+
+ /*
+ * Dont mess with pages that are busy.
+ */
+ if ((m->flags & PG_BUSY) || (m->hold_count != 0)) {
+ m = next;
+ continue;
+ }
+
+ /*
+ * Advance pages that have been referenced, decline pages that
+ * have not.
+ */
+ if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
+ pmap_clear_reference(VM_PAGE_TO_PHYS(m));
+ pages_referenced++;
+ if (m->act_count < ACT_MAX)
+ m->act_count += ACT_ADVANCE;
+ queue_remove(&vm_page_queue_active, m, vm_page_t, pageq);
+ queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
+ queue_remove(&m->object->memq, m, vm_page_t, listq);
+ queue_enter(&m->object->memq, m, vm_page_t, listq);
+ } else {
+ m->act_count -= min(m->act_count, ACT_DECLINE);
+ /*
+ * if the page act_count is zero, and we are low on mem -- then we deactivate
+ */
+ if (!m->act_count &&
+ (vm_page_free_count+vm_page_inactive_count < vm_page_free_target+vm_page_inactive_target )) {
+ vm_page_deactivate(m);
+ /*
+ * else if on the next go-around we will deactivate the page
+ * we need to place the page on the end of the queue to age
+ * the other pages in memory.
+ */
+ } else {
+ queue_remove(&vm_page_queue_active, m, vm_page_t, pageq);
+ queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
+ queue_remove(&m->object->memq, m, vm_page_t, listq);
+ queue_enter(&m->object->memq, m, vm_page_t, listq);
+ }
+ }
+ m = next;
}
- --m->deact;
- return 0;
+
+ if (pages_referenced) {
+ nextscan = (pages_scanned / pages_referenced) * scantick;
+ nextscan = max(nextscan, scantick);
+ nextscan = min(nextscan, hz);
+ } else
+ nextscan = hz;
+ tsleep((caddr_t) &vm_pagescanner, PVM, "scanw", nextscan);
+
+ goto scanloop;
}
/*
* vm_pageout is the high level pageout daemon.
*/
-
void
vm_pageout()
{
extern npendingio, swiopend;
extern int vm_page_count;
+ static nowakeup;
(void) spl0();
/*
@@ -872,49 +832,42 @@ vm_pageout()
*/
vmretry:
- vm_page_free_min = npendingio/3;
-#ifdef VSMALL
- vm_page_free_min = 8;
-#endif
+ vm_page_free_min = 12;
vm_page_free_reserved = 8;
if (vm_page_free_min < 8)
vm_page_free_min = 8;
if (vm_page_free_min > 32)
vm_page_free_min = 32;
- vm_pageout_free_min = 3;
+ vm_pageout_free_min = 4;
vm_page_free_target = 2*vm_page_free_min + vm_page_free_reserved;
- vm_page_inactive_target = 3*vm_page_free_min + vm_page_free_reserved;
+ vm_page_inactive_target = vm_page_free_count / 12;
vm_page_free_min += vm_page_free_reserved;
- minscan = MINSCAN;
- if (minscan > vm_page_count/3)
- minscan = vm_page_count/3;
+
+ (void) swap_pager_alloc(0, 0, 0, 0);
/*
* The pageout daemon is never done, so loop
* forever.
*/
-
-
while (TRUE) {
+ int force_wakeup;
- splhigh();
- if (vm_page_free_count > vm_page_free_min) {
- wakeup((caddr_t) &vm_page_free_count);
- tsleep((caddr_t) &vm_pages_needed, PVM, "psleep", 0);
- } else {
- if (nswiodone) {
- spl0();
- goto dosync;
- }
- tsleep((caddr_t) &vm_pages_needed, PVM, "pslp1", 5);
- }
- spl0();
-
+ tsleep((caddr_t) &vm_pages_needed, PVM, "psleep", 0);
+
vm_pager_sync();
- vm_pageout_scan();
- dosync:
+ /*
+ * The force wakeup hack added to eliminate delays and potential
+ * deadlock. It was possible for the page daemon to indefinitely
+ * postpone waking up a process that it might be waiting for memory
+ * on. The putmulti stuff seems to have aggravated the situation.
+ */
+ force_wakeup = vm_pageout_scan();
vm_pager_sync();
+ if( force_wakeup)
+ wakeup( (caddr_t) &vm_page_free_count);
+ vm_pageout_do_stats = 0;
cnt.v_scan++;
wakeup((caddr_t) kmem_map);
}
}
+