Diffstat (limited to 'sys/vm/vm_page.c')
-rw-r--r--  sys/vm/vm_page.c  |  203
1 file changed, 70 insertions, 133 deletions
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index c953559da668..46f192389e6f 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
- * $Id: vm_page.c,v 1.115 1999/01/08 17:31:27 eivind Exp $
+ * $Id: vm_page.c,v 1.106 1998/08/24 08:39:38 dfr Exp $
*/
/*
@@ -352,7 +352,6 @@ vm_page_startup(starta, enda, vaddr)
* Distributes the object/offset key pair among hash buckets.
*
* NOTE: This macro depends on vm_page_bucket_count being a power of 2.
- * This routine may not block.
*/
static __inline int
vm_page_hash(object, pindex)
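The power-of-2 note above exists because the hash index is reduced with a bit mask rather than a modulo. A minimal stand-alone sketch of that technique (the bucket count and the ">> 5" mixing step are illustrative assumptions, not the kernel's actual macro):

#include <stdio.h>

#define BUCKET_COUNT 256                        /* must be a power of 2 */
#define BUCKET_MASK  (BUCKET_COUNT - 1)

/* Reduce an object pointer / page index pair to a bucket index.  With a
 * power-of-2 bucket count, '& BUCKET_MASK' gives the same result as
 * '% BUCKET_COUNT' but avoids a division. */
static int
page_hash(const void *object, unsigned long pindex)
{
        return (int)((((unsigned long)object >> 5) + pindex) & BUCKET_MASK);
}

int
main(void)
{
        int obj;

        printf("bucket %d\n", page_hash(&obj, 42));
        return (0);
}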
@@ -365,15 +364,10 @@ vm_page_hash(object, pindex)
/*
* vm_page_insert: [ internal use only ]
*
- * Inserts the given mem entry into the object and object list.
- *
- * The pagetables are not updated but will presumably fault the page
- * in if necessary, or if a kernel page the caller will at some point
- * enter the page into the kernel's pmap. We are not allowed to block
- * here so we *can't* do this anyway.
+ * Inserts the given mem entry into the object/object-page
+ * table and object list.
*
* The object and page must be locked, and must be splhigh.
- * This routine may not block.
*/
void
@@ -384,8 +378,10 @@ vm_page_insert(m, object, pindex)
{
register struct pglist *bucket;
- if (m->object != NULL)
+#if !defined(MAX_PERF)
+ if (m->flags & PG_TABLED)
panic("vm_page_insert: already inserted");
+#endif
/*
* Record the object/offset pair in this page
@@ -407,6 +403,7 @@ vm_page_insert(m, object, pindex)
*/
TAILQ_INSERT_TAIL(&object->memq, m, listq);
+ vm_page_flag_set(m, PG_TABLED);
m->object->page_hint = m;
m->object->generation++;
@@ -431,9 +428,6 @@ vm_page_insert(m, object, pindex)
* table and the object page list.
*
* The object and page must be locked, and at splhigh.
- * This routine may not block.
- *
- * I do not think the underlying pmap entry (if any) is removed here.
*/
void
@@ -443,7 +437,7 @@ vm_page_remove(m)
register struct pglist *bucket;
vm_object_t object;
- if (m->object == NULL)
+ if (!(m->flags & PG_TABLED))
return;
#if !defined(MAX_PERF)
@@ -488,8 +482,9 @@ vm_page_remove(m)
object->resident_page_count--;
object->generation++;
-
m->object = NULL;
+
+ vm_page_flag_clear(m, PG_TABLED);
}
/*
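Taken together, the hunks above switch the "is this page entered into an object?" test from m->object != NULL to an explicit PG_TABLED page flag, set in vm_page_insert() and cleared in vm_page_remove(). A stand-alone sketch of that flag-based membership tracking (the flag value and the struct are illustrative, not the kernel's definitions):

#include <assert.h>
#include <stddef.h>

#define PG_TABLED 0x0020                /* illustrative bit value only */

struct page {
        int   flags;
        void *object;
};

/* Mark the page as entered into an object; inserting twice is a bug. */
static void
page_insert(struct page *m, void *object)
{
        assert((m->flags & PG_TABLED) == 0);    /* "already inserted" */
        m->object = object;
        m->flags |= PG_TABLED;
}

/* Undo the insertion; a page that was never entered is silently ignored. */
static void
page_remove(struct page *m)
{
        if ((m->flags & PG_TABLED) == 0)
                return;
        m->object = NULL;
        m->flags &= ~PG_TABLED;
}

int
main(void)
{
        struct page m = { 0, NULL };
        int object;

        page_insert(&m, &object);
        page_remove(&m);
        return (0);
}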
@@ -499,7 +494,6 @@ vm_page_remove(m)
* pair specified; if none is found, NULL is returned.
*
* The object must be locked. No side effects.
- * This routine may not block.
*/
vm_page_t
@@ -510,6 +504,7 @@ vm_page_lookup(object, pindex)
register vm_page_t m;
register struct pglist *bucket;
int generation;
+ int s;
/*
* Search the hash table for this object/offset pair
@@ -542,11 +537,7 @@ retry:
* current object to the specified target object/offset.
*
* The object must be locked.
- * This routine may not block.
- *
- * Note: this routine will raise itself to splvm(), the caller need not.
*/
-
void
vm_page_rename(m, new_object, new_pindex)
register vm_page_t m;
@@ -562,14 +553,8 @@ vm_page_rename(m, new_object, new_pindex)
}
/*
- * vm_page_unqueue_nowakeup:
- *
- * vm_page_unqueue() without any wakeup
- *
- * This routine must be called at splhigh().
- * This routine may not block.
+ * vm_page_unqueue without any wakeup
*/
-
void
vm_page_unqueue_nowakeup(m)
vm_page_t m;
@@ -590,14 +575,8 @@ vm_page_unqueue_nowakeup(m)
}
/*
- * vm_page_unqueue:
- *
- * Remove a page from its queue.
- *
- * This routine must be called at splhigh().
- * This routine may not block.
+ * vm_page_unqueue must be called at splhigh();
*/
-
void
vm_page_unqueue(m)
vm_page_t m;
@@ -621,12 +600,7 @@ vm_page_unqueue(m)
}
/*
- * vm_page_list_find:
- *
- * Find a page on the specified queue with color optimization.
- *
- * This routine must be called at splvm().
- * This routine may not block.
+ * Find a page on the specified queue with color optimization.
*/
vm_page_t
vm_page_list_find(basequeue, index)
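The "color optimization" mentioned here (and in the next two hunks) refers to keeping free and cached pages on several sub-queues indexed by cache color and starting the search at the color the caller prefers. A simplified stand-alone sketch of the idea (the queue layout and constants are assumptions, not the kernel's vm_page_queues[] layout):

#include <stddef.h>
#include <stdio.h>

#define PQ_L2_SIZE 16                   /* number of colors; a power of 2 (illustrative) */
#define PQ_L2_MASK (PQ_L2_SIZE - 1)

struct page { struct page *next; };

/* One free sub-queue per cache color (simplified stand-in for the real queues). */
static struct page *freeq[PQ_L2_SIZE];

/* Prefer the sub-queue matching the requested color, then fall back to the
 * other colors, wrapping around with a mask instead of '%'. */
static struct page *
colored_alloc(int color)
{
        int i;

        for (i = 0; i < PQ_L2_SIZE; i++) {
                int q = (color + i) & PQ_L2_MASK;

                if (freeq[q] != NULL) {
                        struct page *m = freeq[q];

                        freeq[q] = m->next;
                        return (m);
                }
        }
        return (NULL);
}

int
main(void)
{
        struct page p = { NULL };

        freeq[3] = &p;
        printf("%s\n", colored_alloc(3) == &p ? "hit preferred color" : "miss");
        return (0);
}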
@@ -680,12 +654,7 @@ vm_page_list_find(basequeue, index)
}
/*
- * vm_page_select:
- *
- * Find a page on the specified queue with color optimization.
- *
- * This routine must be called at splvm().
- * This routine may not block.
+ * Find a page on the specified queue with color optimization.
*/
vm_page_t
vm_page_select(object, pindex, basequeue)
@@ -706,14 +675,9 @@ vm_page_select(object, pindex, basequeue)
}
/*
- * vm_page_select_cache:
- *
- * Find a page on the cache queue with color optimization. As pages
- * might be found, but not applicable, they are deactivated. This
- * keeps us from using potentially busy cached pages.
- *
- * This routine must be called at splvm().
- * This routine may not block.
+ * Find a page on the cache queue with color optimization. As pages
+ * might be found, but not applicable, they are deactivated. This
+ * keeps us from using potentially busy cached pages.
*/
vm_page_t
vm_page_select_cache(object, pindex)
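The behaviour described in this comment, written out as a loop (a hedged reconstruction from the comment text, not the function body; 'color' stands in for whatever index the object/pindex pair maps to):

static vm_page_t
select_cache_sketch(int color)
{
        vm_page_t m;

        while ((m = vm_page_list_find(PQ_CACHE, color)) != NULL) {
                if ((m->flags & PG_BUSY) || m->busy ||
                    m->hold_count || m->wire_count) {
                        vm_page_deactivate(m);  /* busy cached page: push it aside */
                        continue;
                }
                break;                          /* m is safe to reuse */
        }
        return (m);
}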
@@ -741,14 +705,8 @@ vm_page_select_cache(object, pindex)
}
/*
- * vm_page_select_free:
- *
- * Find a free or zero page, with specified preference.
- *
- * This routine must be called at splvm().
- * This routine may not block.
+ * Find a free or zero page, with specified preference.
*/
-
static vm_page_t
vm_page_select_free(object, pindex, prefqueue)
vm_object_t object;
@@ -852,11 +810,6 @@ vm_page_select_free(object, pindex, prefqueue)
* VM_ALLOC_ZERO zero page
*
* Object must be locked.
- * This routine may not block.
- *
- * Additional special handling is required when called from an
- * interrupt (VM_ALLOC_INTERRUPT). We are not allowed to mess with
- * the page cache in this case.
*/
vm_page_t
vm_page_alloc(object, pindex, page_req)
@@ -870,8 +823,11 @@ vm_page_alloc(object, pindex, page_req)
int queue, qtype;
int s;
- KASSERT(!vm_page_lookup(object, pindex),
- ("vm_page_alloc: page already allocated"));
+#ifdef DIAGNOSTIC
+ m = vm_page_lookup(object, pindex);
+ if (m)
+ panic("vm_page_alloc: page already allocated");
+#endif
if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
page_req = VM_ALLOC_SYSTEM;
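This hunk, and the four that follow, replace KASSERT(), which only compiles into a check when the kernel is built with INVARIANTS, with the older explicit #if defined(DIAGNOSTIC) / panic() idiom. Roughly, KASSERT is defined along these lines (a sketch of the shape, not the verbatim sys/systm.h text):

#ifdef INVARIANTS
#define KASSERT(exp, msg) do {          \
        if (!(exp))                     \
                panic msg;              \
} while (0)
#else
#define KASSERT(exp, msg) do {} while (0)
#endif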
@@ -884,7 +840,10 @@ vm_page_alloc(object, pindex, page_req)
case VM_ALLOC_NORMAL:
if (cnt.v_free_count >= cnt.v_free_reserved) {
m = vm_page_select_free(object, pindex, PQ_FREE);
- KASSERT(m != NULL, ("vm_page_alloc(NORMAL): missing page on free queue\n"));
+#if defined(DIAGNOSTIC)
+ if (m == NULL)
+ panic("vm_page_alloc(NORMAL): missing page on free queue\n");
+#endif
} else {
m = vm_page_select_cache(object, pindex);
if (m == NULL) {
@@ -903,7 +862,10 @@ vm_page_alloc(object, pindex, page_req)
case VM_ALLOC_ZERO:
if (cnt.v_free_count >= cnt.v_free_reserved) {
m = vm_page_select_free(object, pindex, PQ_ZERO);
- KASSERT(m != NULL, ("vm_page_alloc(ZERO): missing page on free queue\n"));
+#if defined(DIAGNOSTIC)
+ if (m == NULL)
+ panic("vm_page_alloc(ZERO): missing page on free queue\n");
+#endif
} else {
m = vm_page_select_cache(object, pindex);
if (m == NULL) {
@@ -924,7 +886,10 @@ vm_page_alloc(object, pindex, page_req)
((cnt.v_cache_count == 0) &&
(cnt.v_free_count >= cnt.v_interrupt_free_min))) {
m = vm_page_select_free(object, pindex, PQ_FREE);
- KASSERT(m != NULL, ("vm_page_alloc(SYSTEM): missing page on free queue\n"));
+#if defined(DIAGNOSTIC)
+ if (m == NULL)
+ panic("vm_page_alloc(SYSTEM): missing page on free queue\n");
+#endif
} else {
m = vm_page_select_cache(object, pindex);
if (m == NULL) {
@@ -943,7 +908,10 @@ vm_page_alloc(object, pindex, page_req)
case VM_ALLOC_INTERRUPT:
if (cnt.v_free_count > 0) {
m = vm_page_select_free(object, pindex, PQ_FREE);
- KASSERT(m != NULL, ("vm_page_alloc(INTERRUPT): missing page on free queue\n"));
+#if defined(DIAGNOSTIC)
+ if (m == NULL)
+ panic("vm_page_alloc(INTERRUPT): missing page on free queue\n");
+#endif
} else {
splx(s);
vm_pageout_deficit++;
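For reference, the four allocation classes touched above apply different free-page thresholds. A rough summary drawn from the conditions visible in these hunks (an illustrative comment, not code from the file):

/*
 * VM_ALLOC_NORMAL     needs v_free_count >= v_free_reserved, otherwise it
 *                     falls back to reclaiming a page from the cache queue.
 * VM_ALLOC_ZERO       same threshold, but prefers the pre-zeroed PQ_ZERO queue.
 * VM_ALLOC_SYSTEM     may dig below v_free_reserved when the cache is empty,
 *                     as long as v_free_count >= v_interrupt_free_min.
 * VM_ALLOC_INTERRUPT  takes any free page (v_free_count > 0) and never falls
 *                     back to the cache queue.
 */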
@@ -986,13 +954,7 @@ vm_page_alloc(object, pindex, page_req)
m->dirty = 0;
m->queue = PQ_NONE;
- /*
- * vm_page_insert() is safe prior to the splx(). Note also that
- * inserting a page here does not insert it into the pmap (which
- * could cause us to block allocating memory). We cannot block
- * anywhere.
- */
-
+ /* XXX before splx until vm_page_insert is safe */
vm_page_insert(m, object, pindex);
/*
@@ -1022,12 +984,6 @@ vm_page_alloc(object, pindex, page_req)
return (m);
}
-/*
- * vm_wait: (also see VM_WAIT macro)
- *
- * Block until free pages are available for allocation
- */
-
void
vm_wait()
{
@@ -1047,14 +1003,9 @@ vm_wait()
splx(s);
}
-/*
- * vm_page_sleep:
- *
- * Block until page is no longer busy.
- */
-
int
vm_page_sleep(vm_page_t m, char *msg, char *busy) {
+ vm_object_t object = m->object;
int slept = 0;
if ((busy && *busy) || (m->flags & PG_BUSY)) {
int s;
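vm_page_sleep() returns nonzero if it actually slept, so callers typically restart whatever lookup they were doing. A hypothetical caller pattern (the wait message and label are illustrative):

        if (vm_page_sleep(m, "pgwait", &m->busy))
                goto retry;             /* page state may have changed while asleep */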
@@ -1075,13 +1026,14 @@ vm_page_sleep(vm_page_t m, char *msg, char *busy) {
* Put the specified page on the active list (if appropriate).
*
* The page queues must be locked.
- * This routine may not block.
*/
void
vm_page_activate(m)
register vm_page_t m;
{
int s;
+ vm_page_t np;
+ vm_object_t object;
s = splvm();
if (m->queue != PQ_ACTIVE) {
@@ -1107,9 +1059,7 @@ vm_page_activate(m)
}
/*
- * helper routine for vm_page_free and vm_page_free_zero.
- *
- * This routine may not block.
+ * helper routine for vm_page_free and vm_page_free_zero
*/
static int
vm_page_freechk_and_unqueue(m)
@@ -1149,7 +1099,6 @@ vm_page_freechk_and_unqueue(m)
m->wire_count, m->pindex);
}
#endif
- printf("vm_page_free: freeing wired page\n");
m->wire_count = 0;
if (m->object)
m->object->wire_count--;
@@ -1176,9 +1125,7 @@ vm_page_freechk_and_unqueue(m)
}
/*
- * helper routine for vm_page_free and vm_page_free_zero.
- *
- * This routine may not block.
+ * helper routine for vm_page_free and vm_page_free_zero
*/
static __inline void
vm_page_free_wakeup()
@@ -1211,7 +1158,6 @@ vm_page_free_wakeup()
* disassociating it with any VM object.
*
* Object and page must be locked prior to entry.
- * This routine may not block.
*/
void
vm_page_free(m)
@@ -1284,7 +1230,6 @@ vm_page_free_zero(m)
* as necessary.
*
* The page queues must be locked.
- * This routine may not block.
*/
void
vm_page_wire(m)
@@ -1292,16 +1237,16 @@ vm_page_wire(m)
{
int s;
- s = splvm();
if (m->wire_count == 0) {
+ s = splvm();
vm_page_unqueue(m);
+ splx(s);
cnt.v_wire_count++;
if (m->object)
m->object->wire_count++;
}
- m->wire_count++;
- splx(s);
(*vm_page_queues[PQ_NONE].lcnt)++;
+ m->wire_count++;
vm_page_flag_set(m, PG_MAPPED);
}
@@ -1312,12 +1257,10 @@ vm_page_wire(m)
* enabling it to be paged again.
*
* The page queues must be locked.
- * This routine may not block.
*/
void
-vm_page_unwire(m, activate)
+vm_page_unwire(m)
register vm_page_t m;
- int activate;
{
int s;
@@ -1329,17 +1272,10 @@ vm_page_unwire(m, activate)
if (m->object)
m->object->wire_count--;
cnt.v_wire_count--;
- if (activate) {
- TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
- m->queue = PQ_ACTIVE;
- (*vm_page_queues[PQ_ACTIVE].lcnt)++;
- cnt.v_active_count++;
- } else {
- TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
- m->queue = PQ_INACTIVE;
- (*vm_page_queues[PQ_INACTIVE].lcnt)++;
- cnt.v_inactive_count++;
- }
+ TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
+ m->queue = PQ_ACTIVE;
+ (*vm_page_queues[PQ_ACTIVE].lcnt)++;
+ cnt.v_active_count++;
}
} else {
#if !defined(MAX_PERF)
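This hunk drops the second parameter of vm_page_unwire(); an unwired page now always goes to the active queue. A sketch of what call sites look like before and after (the follow-up vm_page_deactivate() call is an assumption about how a caller would still get the old "inactive" behaviour):

        /* Old interface (being removed here): the caller picked the queue. */
        vm_page_unwire(m, 0);           /* put the page on the inactive queue */
        vm_page_unwire(m, 1);           /* put the page on the active queue */

        /* Reverted interface: always the active queue; a caller that still
         * wants the page inactive would presumably follow up explicitly. */
        vm_page_unwire(m);
        vm_page_deactivate(m);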
@@ -1351,9 +1287,13 @@ vm_page_unwire(m, activate)
/*
- * Move the specified page to the inactive queue.
+ * vm_page_deactivate:
+ *
+ * Returns the given page to the inactive list,
+ * indicating that no physical maps have access
+ * to this page. [Used by the physical mapping system.]
*
- * This routine may not block.
+ * The page queues must be locked.
*/
void
vm_page_deactivate(m)
@@ -1362,7 +1302,11 @@ vm_page_deactivate(m)
int s;
/*
- * Ignore if already inactive.
+ * Only move active pages -- ignore locked or already inactive ones.
+ *
+ * XXX: sometimes we get pages which aren't wired down or on any queue -
+ * we need to put them on the inactive queue also, otherwise we lose
+ * track of them. Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
*/
if (m->queue == PQ_INACTIVE)
return;
@@ -1383,8 +1327,7 @@ vm_page_deactivate(m)
/*
* vm_page_cache
*
- * Put the specified page onto the page cache queue (if appropriate).
- * This routine may not block.
+ * Put the specified page onto the page cache queue (if appropriate).
*/
void
vm_page_cache(m)
@@ -1422,8 +1365,6 @@ vm_page_cache(m)
* Grab a page, waiting until we are waken up due to the page
* changing state. We keep on waiting, if the page continues
* to be in the object. If the page doesn't exist, allocate it.
- *
- * This routine may block.
*/
vm_page_t
vm_page_grab(object, pindex, allocflags)
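A hypothetical caller, under the assumption that the usual VM_ALLOC_RETRY flag is what requests the "keep waiting" behaviour described above:

        /* Get the resident page at pindex, sleeping while it is busy and
         * allocating it if it does not exist yet. */
        m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);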
@@ -1471,7 +1412,7 @@ retrylookup:
/*
* mapping function for valid bits or for dirty bits in
- * a page. May not block.
+ * a page
*/
__inline int
vm_page_bits(int base, int size)
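The valid and dirty state of a page is a small bitmap with one bit per DEV_BSIZE (512-byte) chunk, and vm_page_bits() converts a byte range into the corresponding mask. A stand-alone illustration of that computation (this shows the idea only; the kernel's implementation special-cases a full page and uses its own helpers):

#include <stdio.h>

#define PAGE_SIZE 4096
#define DEV_BSIZE 512           /* one valid/dirty bit per 512-byte chunk */

/* Return a mask with one bit set for every DEV_BSIZE chunk touched by the
 * byte range [base, base + size) within a page. */
static int
page_bits(int base, int size)
{
        int first = base / DEV_BSIZE;
        int last  = (base + size - 1) / DEV_BSIZE;
        int nbits = last - first + 1;

        return (((1 << nbits) - 1) << first);
}

int
main(void)
{
        printf("0x%02x\n", page_bits(0, 1024));         /* chunks 0-1 -> 0x03 */
        printf("0x%02x\n", page_bits(600, 1400));       /* chunks 1-3 -> 0x0e */
        return (0);
}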
@@ -1493,7 +1434,7 @@ vm_page_bits(int base, int size)
}
/*
- * set a page valid and clean. May not block.
+ * set a page valid and clean
*/
void
vm_page_set_validclean(m, base, size)
@@ -1509,7 +1450,7 @@ vm_page_set_validclean(m, base, size)
}
/*
- * set a page (partially) invalid. May not block.
+ * set a page (partially) invalid
*/
void
vm_page_set_invalid(m, base, size)
@@ -1526,7 +1467,7 @@ vm_page_set_invalid(m, base, size)
}
/*
- * is (partial) page valid? May not block.
+ * is (partial) page valid?
*/
int
vm_page_is_valid(m, base, size)
@@ -1542,10 +1483,6 @@ vm_page_is_valid(m, base, size)
return 0;
}
-/*
- * update dirty bits from pmap/mmu. May not block.
- */
-
void
vm_page_test_dirty(m)
vm_page_t m;
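The comment removed above described vm_page_test_dirty() as pulling dirty state from the pmap/MMU layer. A hedged sketch of that operation (the pmap call and field handling are my reading of the era's API, not taken from this diff):

void
vm_page_test_dirty(m)
        vm_page_t m;
{
        /* If the MMU saw a write that the MI layer has not recorded yet,
         * mark the whole page dirty. */
        if ((m->dirty != VM_PAGE_BITS_ALL) &&
            pmap_is_modified(VM_PAGE_TO_PHYS(m)))
                m->dirty = VM_PAGE_BITS_ALL;
}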