Diffstat (limited to 'sys/vm/vm_fault.c')
-rw-r--r--  sys/vm/vm_fault.c  |  468
1 file changed, 346 insertions(+), 122 deletions(-)
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 2151a5fa50e8..c7254bce4c51 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -1,6 +1,10 @@
/*
* Copyright (c) 1991 Regents of the University of California.
* All rights reserved.
+ * Copyright (c) John S. Dyson
+ * All rights reserved.
+ * Copyright (c) David Greenman
+ * All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* The Mach Operating System project at Carnegie-Mellon University.
@@ -33,11 +37,9 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * from: @(#)vm_fault.c 7.6 (Berkeley) 5/7/91
- * $Id: vm_fault.c,v 1.5 1993/10/16 16:20:24 rgrimes Exp $
- */
-
-/*
+ * @(#)vm_fault.c 7.6 (Berkeley) 5/7/91
+ *
+ *
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
* All rights reserved.
*
@@ -63,6 +65,9 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*/
+/*
+ * $Id: vm_fault.c,v 1.14.2.1 1994/03/24 07:20:29 rgrimes Exp $
+ */
/*
* Page fault handling module.
@@ -73,7 +78,18 @@
#include "vm.h"
#include "vm_page.h"
#include "vm_pageout.h"
+#include "proc.h"
+#include "resource.h"
+#include "resourcevar.h"
+#define VM_FAULT_READ_AHEAD 3
+#define VM_FAULT_READ_AHEAD_MIN 1
+#define VM_FAULT_READ_BEHIND 2
+#define VM_FAULT_READ (VM_FAULT_READ_AHEAD+VM_FAULT_READ_BEHIND+1)
+extern int swap_pager_full;
+extern int vm_pageout_proc_limit;
+
+vm_statistics_data_t vm_stat;
/*
* vm_fault:
*
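
Taken together, the constants added above bound one fault to a window of
VM_FAULT_READ = 2 + 3 + 1 = 6 pages: two behind the faulting page, the
page itself, and three ahead. A minimal standalone sketch of that
arithmetic, assuming 4K pages (NBPG == 4096) and a made-up fault offset:

#include <stdio.h>

#define NBPG                 4096	/* assumed page size */
#define VM_FAULT_READ_AHEAD  3
#define VM_FAULT_READ_BEHIND 2
#define VM_FAULT_READ (VM_FAULT_READ_AHEAD+VM_FAULT_READ_BEHIND+1)

int
main(void)
{
	unsigned long offset = 10 * NBPG;	/* hypothetical faulting page */
	unsigned long start  = offset - VM_FAULT_READ_BEHIND * NBPG;
	unsigned long end    = offset + (VM_FAULT_READ_AHEAD + 1) * NBPG;

	/* prints a 6-page window: [0x8000, 0xe000) */
	printf("window [%#lx, %#lx), %d pages\n", start, end, VM_FAULT_READ);
	return 0;
}
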
@@ -92,6 +108,7 @@
* The map in question must be referenced, and remains so.
* Caller may hold no locks.
*/
+int
vm_fault(map, vaddr, fault_type, change_wiring)
vm_map_t map;
vm_offset_t vaddr;
@@ -113,6 +130,9 @@ vm_fault(map, vaddr, fault_type, change_wiring)
boolean_t page_exists;
vm_page_t old_m;
vm_object_t next_object;
+ vm_page_t marray[VM_FAULT_READ];
+ int reqpage;
+ int spl;
vm_stat.faults++; /* needs lock XXX */
/*
@@ -141,11 +161,15 @@ vm_fault(map, vaddr, fault_type, change_wiring)
#define UNLOCK_THINGS { \
object->paging_in_progress--; \
+ if (object->paging_in_progress == 0) \
+ wakeup((caddr_t)object); \
vm_object_unlock(object); \
if (object != first_object) { \
vm_object_lock(first_object); \
FREE_PAGE(first_m); \
first_object->paging_in_progress--; \
+ if (first_object->paging_in_progress == 0) \
+ wakeup((caddr_t)first_object); \
vm_object_unlock(first_object); \
} \
UNLOCK_MAP; \
@@ -156,6 +180,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
vm_object_deallocate(first_object); \
}
+
RetryFault: ;
/*
@@ -164,8 +189,8 @@ vm_fault(map, vaddr, fault_type, change_wiring)
*/
if ((result = vm_map_lookup(&map, vaddr, fault_type, &entry,
- &first_object, &first_offset,
- &prot, &wired, &su)) != KERN_SUCCESS) {
+ &first_object, &first_offset,
+ &prot, &wired, &su)) != KERN_SUCCESS) {
return(result);
}
lookup_still_valid = TRUE;
@@ -240,98 +265,76 @@ vm_fault(map, vaddr, fault_type, change_wiring)
* If the page is being brought in,
* wait for it and then retry.
*/
- if (m->busy) {
-#ifdef DOTHREADS
- int wait_result;
-
- PAGE_ASSERT_WAIT(m, !change_wiring);
- UNLOCK_THINGS;
- thread_block();
- wait_result = current_thread()->wait_result;
- vm_object_deallocate(first_object);
- if (wait_result != THREAD_AWAKENED)
- return(KERN_SUCCESS);
- goto RetryFault;
-#else
- PAGE_ASSERT_WAIT(m, !change_wiring);
+ if (m->flags & PG_BUSY) {
UNLOCK_THINGS;
-thread_wakeup(&vm_pages_needed); /* XXX! */
- thread_block();
+ if (m->flags & PG_BUSY) {
+ m->flags |= PG_WANTED;
+ tsleep((caddr_t)m,PSWP,"vmpfw",0);
+ }
vm_object_deallocate(first_object);
goto RetryFault;
-#endif
}
- if (m->absent)
+ if (m->flags & PG_ABSENT)
panic("vm_fault: absent");
/*
- * If the desired access to this page has
- * been locked out, request that it be unlocked.
- */
-
- if (fault_type & m->page_lock) {
-#ifdef DOTHREADS
- int wait_result;
-
- if ((fault_type & m->unlock_request) != fault_type)
- panic("vm_fault: pager_data_unlock");
-
- PAGE_ASSERT_WAIT(m, !change_wiring);
- UNLOCK_THINGS;
- thread_block();
- wait_result = current_thread()->wait_result;
- vm_object_deallocate(first_object);
- if (wait_result != THREAD_AWAKENED)
- return(KERN_SUCCESS);
- goto RetryFault;
-#else
- if ((fault_type & m->unlock_request) != fault_type)
- panic("vm_fault: pager_data_unlock");
-
- PAGE_ASSERT_WAIT(m, !change_wiring);
- UNLOCK_THINGS;
-thread_wakeup(&vm_pages_needed); /* XXX */
- thread_block();
- vm_object_deallocate(first_object);
- goto RetryFault;
-#endif
- }
-
- /*
* Remove the page from the pageout daemon's
* reach while we play with it.
*/
vm_page_lock_queues();
- if (m->inactive) {
+ spl = vm_disable_intr();
+ if (m->flags & PG_INACTIVE) {
queue_remove(&vm_page_queue_inactive, m,
vm_page_t, pageq);
- m->inactive = FALSE;
+ m->flags &= ~PG_INACTIVE;
vm_page_inactive_count--;
vm_stat.reactivations++;
}
- if (m->active) {
+ if (m->flags & PG_ACTIVE) {
queue_remove(&vm_page_queue_active, m,
vm_page_t, pageq);
- m->active = FALSE;
+ m->flags &= ~PG_ACTIVE;
vm_page_active_count--;
}
+ vm_set_intr(spl);
vm_page_unlock_queues();
/*
* Mark page busy for other threads.
*/
- m->busy = TRUE;
- m->absent = FALSE;
+ m->flags |= PG_BUSY;
+ m->flags &= ~PG_ABSENT;
break;
}
if (((object->pager != NULL) &&
- (!change_wiring || wired))
+ (!change_wiring || wired))
|| (object == first_object)) {
+#if 0
+ if (curproc && (vaddr < VM_MAXUSER_ADDRESS) &&
+ (curproc->p_rlimit[RLIMIT_RSS].rlim_max <
+ curproc->p_vmspace->vm_pmap.pm_stats.resident_count * NBPG)) {
+ UNLOCK_AND_DEALLOCATE;
+ vm_fault_free_pages(curproc);
+ goto RetryFault;
+ }
+#endif
+
+ if (swap_pager_full && !object->shadow && (!object->pager ||
+ (object->pager && object->pager->pg_type == PG_SWAP &&
+ !vm_pager_has_page(object->pager, offset+object->paging_offset)))) {
+ if (vaddr < VM_MAXUSER_ADDRESS && curproc && curproc->p_pid >= 48) /* XXX */ {
+ UNLOCK_AND_DEALLOCATE;
+ printf("Process %d killed by vm_fault -- out of swap\n", curproc->p_pid);
+ psignal(curproc, SIGKILL);
+ return KERN_RESOURCE_SHORTAGE;
+ }
+ }
+
/*
* Allocate a new page for this object/offset
* pair.
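
The hunk above replaces the old thread_block() page wait with a
PG_WANTED handshake: a faulter that finds the page PG_BUSY marks it
wanted and tsleep()s on the page's address, and the thread that finishes
the paging I/O is expected to wakeup() that channel. A userland analogue
of the handshake, a sketch only, with tsleep()/wakeup() modeled by a
pthread condition variable and every name here a stand-in:

#include <pthread.h>

#define PG_BUSY   0x01
#define PG_WANTED 0x02

struct fake_page {
	int flags;
	pthread_mutex_t lock;
	pthread_cond_t cv;
};

/* Faulting path: wait until the paging I/O holder releases the page. */
static void
wait_for_page(struct fake_page *m)
{
	pthread_mutex_lock(&m->lock);
	while (m->flags & PG_BUSY) {
		m->flags |= PG_WANTED;	/* ask to be woken, as before tsleep() */
		pthread_cond_wait(&m->cv, &m->lock);
	}
	pthread_mutex_unlock(&m->lock);
}

/* I/O completion path: the moral equivalent of PAGE_WAKEUP(). */
static void
page_wakeup(struct fake_page *m)
{
	pthread_mutex_lock(&m->lock);
	m->flags &= ~PG_BUSY;
	if (m->flags & PG_WANTED) {
		m->flags &= ~PG_WANTED;
		pthread_cond_broadcast(&m->cv);	/* like wakeup((caddr_t)m) */
	}
	pthread_mutex_unlock(&m->lock);
}

int
main(void)
{
	struct fake_page pg = { PG_BUSY, PTHREAD_MUTEX_INITIALIZER,
	    PTHREAD_COND_INITIALIZER };

	page_wakeup(&pg);	/* the "I/O" completes, clearing PG_BUSY */
	wait_for_page(&pg);	/* so this returns without sleeping */
	return 0;
}
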
@@ -346,15 +349,29 @@ thread_wakeup(&vm_pages_needed); /* XXX */
}
}
- if ((object->pager != NULL) &&
- (!change_wiring || wired)) {
+ if ((object->pager != NULL) && (!change_wiring || wired)) {
int rv;
+ int faultcount;
+ int reqpage;
/*
* Now that we have a busy page, we can
* release the object lock.
*/
vm_object_unlock(object);
+ /*
+ * Now we find out whether any other pages should be
+ * paged in at this time. This routine checks whether
+ * the pages surrounding this fault reside in the same
+ * object as the faulting page; if they do, they too are
+ * faulted into that object. The returned array "marray"
+ * (at most VM_FAULT_READ entries) holds the vm_page_t
+ * structs, one of which is the vm_page_t passed to the
+ * routine; the reqpage return value is that page's index
+ * within marray.
+ */
+ faultcount = vm_fault_additional_pages(first_object, first_offset, m,
+ VM_FAULT_READ_BEHIND, VM_FAULT_READ_AHEAD, marray, &reqpage);
/*
* Call the pager to retrieve the data, if any,
@@ -362,7 +379,13 @@ thread_wakeup(&vm_pages_needed); /* XXX */
*/
UNLOCK_MAP;
- rv = vm_pager_get(object->pager, m, TRUE);
+ if (faultcount != 1) {
+ rv = faultcount ?
+ vm_pager_getmulti(object->pager, marray, faultcount, reqpage, TRUE):
+ VM_PAGER_FAIL;
+ } else {
+ rv = vm_pager_get(object->pager, m, TRUE);
+ }
if (rv == VM_PAGER_OK) {
/*
* Found the page.
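
The pager call above is now a three-way dispatch on faultcount. Read as
a standalone sketch (the pager ops and result codes below are stand-ins,
not the kernel's):

#include <stddef.h>

enum pager_rv { PAGER_OK, PAGER_FAIL };		/* stand-in result codes */

static enum pager_rv get_one(void *m) { (void)m; return PAGER_OK; }
static enum pager_rv get_multi(void **ma, int n, int req)
{ (void)ma; (void)n; (void)req; return PAGER_OK; }

/*
 * faultcount == 1: no clustering happened; use the single-page path.
 * faultcount == 0: the pager does not have the requested page at all,
 *	so report failure and let the fault fall through to zero fill
 *	or to the next object in the shadow chain.
 * faultcount  > 1: issue one clustered read for the whole window.
 */
static enum pager_rv
pager_dispatch(void **marray, int faultcount, int reqpage, void *m)
{
	if (faultcount == 1)
		return get_one(m);
	return faultcount ? get_multi(marray, faultcount, reqpage)
	    : PAGER_FAIL;
}

int
main(void)
{
	void *marray[6] = { 0 };

	return pager_dispatch(marray, 0, 0, NULL) == PAGER_FAIL ? 0 : 1;
}
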
@@ -378,7 +401,7 @@ thread_wakeup(&vm_pages_needed); /* XXX */
m = vm_page_lookup(object, offset);
vm_stat.pageins++;
- m->fake = FALSE;
+ m->flags &= ~PG_FAKE;
pmap_clear_modify(VM_PAGE_TO_PHYS(m));
break;
}
@@ -438,6 +461,8 @@ thread_wakeup(&vm_pages_needed); /* XXX */
*/
if (object != first_object) {
object->paging_in_progress--;
+ if (object->paging_in_progress == 0)
+ wakeup((caddr_t) object);
vm_object_unlock(object);
object = first_object;
@@ -449,21 +474,24 @@ thread_wakeup(&vm_pages_needed); /* XXX */
vm_page_zero_fill(m);
vm_stat.zero_fill_count++;
- m->fake = FALSE;
- m->absent = FALSE;
+ m->flags &= ~(PG_FAKE|PG_ABSENT);
break;
}
else {
vm_object_lock(next_object);
- if (object != first_object)
+ if (object != first_object) {
object->paging_in_progress--;
+ if (object->paging_in_progress == 0)
+ wakeup((caddr_t) object);
+ }
vm_object_unlock(object);
object = next_object;
object->paging_in_progress++;
}
}
- if (m->absent || m->active || m->inactive || !m->busy)
+ if (((m->flags & (PG_ABSENT|PG_ACTIVE|PG_INACTIVE)) != 0) ||
+ (m->flags & PG_BUSY) == 0)
panic("vm_fault: absent or active or inactive or not busy after main loop");
/*
@@ -511,8 +539,7 @@ thread_wakeup(&vm_pages_needed); /* XXX */
*/
vm_page_copy(m, first_m);
- first_m->fake = FALSE;
- first_m->absent = FALSE;
+ first_m->flags &= ~(PG_FAKE|PG_ABSENT);
/*
* If another map is truly sharing this
@@ -528,9 +555,11 @@ thread_wakeup(&vm_pages_needed); /* XXX */
*/
vm_page_lock_queues();
+
vm_page_activate(m);
- vm_page_deactivate(m);
pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
+ if ((m->flags & PG_CLEAN) == 0)
+ m->flags |= PG_LAUNDRY;
vm_page_unlock_queues();
/*
@@ -538,6 +567,8 @@ thread_wakeup(&vm_pages_needed); /* XXX */
*/
PAGE_WAKEUP(m);
object->paging_in_progress--;
+ if (object->paging_in_progress == 0)
+ wakeup((caddr_t) object);
vm_object_unlock(object);
/*
@@ -559,16 +590,18 @@ thread_wakeup(&vm_pages_needed); /* XXX */
* paging_in_progress to do that...
*/
object->paging_in_progress--;
+ if (object->paging_in_progress == 0)
+ wakeup((caddr_t) object);
vm_object_collapse(object);
object->paging_in_progress++;
}
else {
prot &= (~VM_PROT_WRITE);
- m->copy_on_write = TRUE;
+ m->flags |= PG_COPY_ON_WRITE;
}
}
- if (m->active || m->inactive)
+ if (m->flags & (PG_ACTIVE|PG_INACTIVE))
panic("vm_fault: active or inactive before copy object handling");
/*
@@ -586,7 +619,7 @@ thread_wakeup(&vm_pages_needed); /* XXX */
*/
if ((fault_type & VM_PROT_WRITE) == 0) {
prot &= ~VM_PROT_WRITE;
- m->copy_on_write = TRUE;
+ m->flags |= PG_COPY_ON_WRITE;
}
else {
/*
@@ -613,40 +646,19 @@ thread_wakeup(&vm_pages_needed); /* XXX */
- copy_object->shadow_offset;
copy_m = vm_page_lookup(copy_object, copy_offset);
if (page_exists = (copy_m != NULL)) {
- if (copy_m->busy) {
-#ifdef DOTHREADS
- int wait_result;
-
+ if (copy_m->flags & PG_BUSY) {
/*
* If the page is being brought
* in, wait for it and then retry.
*/
- PAGE_ASSERT_WAIT(copy_m, !change_wiring);
+ PAGE_ASSERT_WAIT(copy_m, !change_wiring);
RELEASE_PAGE(m);
copy_object->ref_count--;
vm_object_unlock(copy_object);
UNLOCK_THINGS;
- thread_block();
- wait_result = current_thread()->wait_result;
+ thread_block("fltcpy");
vm_object_deallocate(first_object);
- if (wait_result != THREAD_AWAKENED)
- return(KERN_SUCCESS);
goto RetryFault;
-#else
- /*
- * If the page is being brought
- * in, wait for it and then retry.
- */
- PAGE_ASSERT_WAIT(copy_m, !change_wiring);
- RELEASE_PAGE(m);
- copy_object->ref_count--;
- vm_object_unlock(copy_object);
- UNLOCK_THINGS;
-thread_wakeup(&vm_pages_needed); /* XXX */
- thread_block();
- vm_object_deallocate(first_object);
- goto RetryFault;
-#endif
}
}
@@ -668,8 +680,7 @@ thread_wakeup(&vm_pages_needed); /* XXX */
* found that the copy_object's pager
* doesn't have the page...
*/
- copy_m = vm_page_alloc(copy_object,
- copy_offset);
+ copy_m = vm_page_alloc(copy_object, copy_offset);
if (copy_m == NULL) {
/*
* Wait for a page, then retry.
@@ -730,8 +741,7 @@ thread_wakeup(&vm_pages_needed); /* XXX */
* Must copy page into copy-object.
*/
vm_page_copy(m, copy_m);
- copy_m->fake = FALSE;
- copy_m->absent = FALSE;
+ copy_m->flags &= ~(PG_FAKE|PG_ABSENT);
/*
* Things to remember:
@@ -744,10 +754,16 @@ thread_wakeup(&vm_pages_needed); /* XXX */
* pmaps use it.)
*/
vm_page_lock_queues();
+
+ vm_page_activate(old_m);
+
+
pmap_page_protect(VM_PAGE_TO_PHYS(old_m),
VM_PROT_NONE);
- copy_m->clean = FALSE;
- vm_page_activate(copy_m); /* XXX */
+ if ((old_m->flags & PG_CLEAN) == 0)
+ old_m->flags |= PG_LAUNDRY;
+ copy_m->flags &= ~PG_CLEAN;
+ vm_page_activate(copy_m);
vm_page_unlock_queues();
PAGE_WAKEUP(copy_m);
@@ -761,11 +777,11 @@ thread_wakeup(&vm_pages_needed); /* XXX */
*/
copy_object->ref_count--;
vm_object_unlock(copy_object);
- m->copy_on_write = FALSE;
+ m->flags &= ~PG_COPY_ON_WRITE;
}
}
- if (m->active || m->inactive)
+ if (m->flags & (PG_ACTIVE|PG_INACTIVE))
panic("vm_fault: active or inactive before retrying lookup");
/*
@@ -830,7 +846,7 @@ thread_wakeup(&vm_pages_needed); /* XXX */
* can't mark the page write-enabled after all.
*/
prot &= retry_prot;
- if (m->copy_on_write)
+ if (m->flags & PG_COPY_ON_WRITE)
prot &= ~VM_PROT_WRITE;
}
@@ -841,15 +857,16 @@ thread_wakeup(&vm_pages_needed); /* XXX */
/* XXX This distorts the meaning of the copy_on_write bit */
- if (prot & VM_PROT_WRITE)
- m->copy_on_write = FALSE;
+ if (prot & VM_PROT_WRITE) {
+ m->flags &= ~PG_COPY_ON_WRITE;
+ }
/*
* It's critically important that a wired-down page be faulted
* only once in each map for which it is wired.
*/
- if (m->active || m->inactive)
+ if (m->flags & (PG_ACTIVE|PG_INACTIVE))
panic("vm_fault: active or inactive before pmap_enter");
vm_object_unlock(object);
@@ -862,8 +879,7 @@ thread_wakeup(&vm_pages_needed); /* XXX */
* that the page-out daemon won't find us (yet).
*/
- pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m),
- prot & ~(m->page_lock), wired);
+ pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m), prot, wired);
/*
* If the page is not wired down, then put it where the
@@ -877,8 +893,10 @@ thread_wakeup(&vm_pages_needed); /* XXX */
else
vm_page_unwire(m);
}
- else
+ else {
vm_page_activate(m);
+ vm_pageout_deact_bump(m);
+ }
vm_page_unlock_queues();
/*
@@ -897,7 +915,8 @@ thread_wakeup(&vm_pages_needed); /* XXX */
*
* Wire down a range of virtual addresses in a map.
*/
-void vm_fault_wire(map, start, end)
+void
+vm_fault_wire(map, start, end)
vm_map_t map;
vm_offset_t start, end;
{
@@ -931,7 +950,8 @@ void vm_fault_wire(map, start, end)
*
* Unwire a range of virtual addresses in a map.
*/
-void vm_fault_unwire(map, start, end)
+void
+vm_fault_unwire(map, start, end)
vm_map_t map;
vm_offset_t start, end;
{
@@ -980,13 +1000,13 @@ void vm_fault_unwire(map, start, end)
* entry corresponding to a main map entry that is wired down).
*/
-void vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
+void
+vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
vm_map_t dst_map;
vm_map_t src_map;
vm_map_entry_t dst_entry;
vm_map_entry_t src_entry;
{
-
vm_object_t dst_object;
vm_object_t src_object;
vm_offset_t dst_offset;
@@ -1069,5 +1089,209 @@ void vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
PAGE_WAKEUP(dst_m);
vm_object_unlock(dst_object);
}
+}
+
+
+/*
+ * looks page up in shadow chain
+ */
+
+int
+vm_fault_page_lookup(object, offset, rtobject, rtoffset, rtm)
+ vm_object_t object;
+ vm_offset_t offset;
+ vm_object_t *rtobject;
+ vm_offset_t *rtoffset;
+ vm_page_t *rtm;
+{
+ vm_page_t m;
+ vm_object_t first_object = object;
+
+ *rtm = 0;
+ *rtobject = 0;
+ *rtoffset = 0;
+
+
+ while (!(m=vm_page_lookup(object, offset))) {
+ if (object->pager) {
+ if (vm_pager_has_page(object->pager, object->paging_offset+offset)) {
+ *rtobject = object;
+ *rtoffset = offset;
+ return 1;
+ }
+ }
+
+ if (!object->shadow)
+ return 0;
+ else {
+ offset += object->shadow_offset;
+ object = object->shadow;
+ }
+ }
+ *rtobject = object;
+ *rtoffset = offset;
+ *rtm = m;
+ return 1;
+}
+
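
vm_fault_page_lookup() walks the shadow chain front to back, adding each
object's shadow_offset as it descends, and reports either a resident
page (*rtm set), a pager that can supply one (*rtm zero, *rtobject set),
or nothing (return 0). The offset translation is the subtle part; a
standalone model of just that walk, where struct obj and the two-level
chain are illustrative stand-ins:

#include <stdio.h>
#include <stddef.h>

struct obj {
	struct obj *shadow;		/* backing object, or NULL */
	unsigned long shadow_offset;	/* our window's offset into it */
	const char *name;
};

/*
 * Walk from the front object toward its backers, translating the
 * offset at each level, as vm_fault_page_lookup() does.  A real
 * lookup would probe each level's resident pages and pager before
 * descending; here we just descend to the bottom object.
 */
static struct obj *
chain_lookup(struct obj *o, unsigned long off, unsigned long *roff)
{
	while (o != NULL) {
		if (o->shadow == NULL) {
			*roff = off;
			return o;
		}
		off += o->shadow_offset;
		o = o->shadow;
	}
	return NULL;
}

int
main(void)
{
	struct obj file = { NULL, 0, "file" };
	struct obj cow  = { &file, 0x2000, "cow" };	/* COW layer over it */
	unsigned long roff;
	struct obj *o = chain_lookup(&cow, 0x1000, &roff);

	/* 0x1000 in the COW layer is 0x3000 in the file object */
	printf("%s @ %#lx\n", o->name, roff);
	return 0;
}
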
+/*
+ * This routine checks around the requested page for other pages that
+ * might be able to be faulted in.
+ *
+ * Inputs:
+ * first_object, first_offset, m, rbehind, rahead
+ *
+ * Outputs:
+ * marray (array of vm_page_t), reqpage (index of requested page)
+ *
+ * Return value:
+ * number of pages in marray
+ */
+int
+vm_fault_additional_pages(first_object, first_offset, m, rbehind, raheada, marray, reqpage)
+ vm_object_t first_object;
+ vm_offset_t first_offset;
+ vm_page_t m;
+ int rbehind;
+ int raheada;
+ vm_page_t *marray;
+ int *reqpage;
+{
+ int i;
+ vm_page_t tmpm;
+ vm_object_t object;
+ vm_offset_t offset, startoffset, endoffset, toffset, size;
+ vm_object_t rtobject;
+ vm_page_t rtm;
+ vm_offset_t rtoffset;
+ vm_offset_t offsetdiff;
+ int rahead;
+ int treqpage;
+
+ object = m->object;
+ offset = m->offset;
+
+ offsetdiff = offset - first_offset;
+
+ /*
+ * if the requested page is not available, then give up now
+ */
+
+ if (!vm_pager_has_page(object->pager, object->paging_offset+offset))
+ return 0;
+
+ /*
+ * if there is no getmulti routine for this pager, then just allow
+ * one page to be read.
+ */
+ if (!object->pager->pg_ops->pgo_getmulti) {
+ *reqpage = 0;
+ marray[0] = m;
+ return 1;
+ }
+
+ /*
+ * try to do any readahead that we might have free pages for.
+ */
+ rahead = raheada;
+ if (rahead > (vm_page_free_count - vm_page_free_reserved)) {
+ rahead = vm_page_free_count - vm_page_free_reserved;
+ rbehind = 0;
+ }
+ if (vm_page_free_count < vm_page_free_min) {
+ if (rahead > VM_FAULT_READ_AHEAD_MIN)
+ rahead = VM_FAULT_READ_AHEAD_MIN;
+ rbehind = 0;
+ }
+
+ /*
+ * if we don't have any free pages, then just read one page.
+ */
+ if (rahead <= 0) {
+ *reqpage = 0;
+ marray[0] = m;
+ return 1;
+ }
+
+ /*
+ * scan backward for the read behind pages --
+ * in memory or on disk not in same object
+ */
+ toffset = offset - NBPG;
+ if( rbehind*NBPG > offset)
+ rbehind = offset / NBPG;
+ startoffset = offset - rbehind*NBPG;
+ while (toffset >= startoffset) {
+ if (!vm_fault_page_lookup(first_object, toffset - offsetdiff, &rtobject, &rtoffset, &rtm) ||
+ rtm != 0 || rtobject != object) {
+ startoffset = toffset + NBPG;
+ break;
+ }
+ if( toffset == 0)
+ break;
+ toffset -= NBPG;
+ }
+
+ /*
+ * scan forward for the read ahead pages --
+ * in memory or on disk not in same object
+ */
+ toffset = offset + NBPG;
+ endoffset = offset + (rahead+1)*NBPG;
+ while (toffset < object->size && toffset < endoffset) {
+ if (!vm_fault_page_lookup(first_object, toffset - offsetdiff, &rtobject, &rtoffset, &rtm) ||
+ rtm != 0 || rtobject != object) {
+ break;
+ }
+ toffset += NBPG;
+ }
+ endoffset = toffset;
+
+ /* calculate number of bytes of pages */
+ size = (endoffset - startoffset) / NBPG;
+
+ /* calculate the page offset of the required page */
+ treqpage = (offset - startoffset) / NBPG;
+
+ /* see if we have space (again) */
+ if (vm_page_free_count >= vm_page_free_reserved + size) {
+ bzero(marray, (rahead + rbehind + 1) * sizeof(vm_page_t));
+ /*
+ * get our pages and don't block for them
+ */
+ for (i = 0; i < size; i++) {
+ if (i != treqpage)
+ rtm = vm_page_alloc(object, startoffset + i * NBPG);
+ else
+ rtm = m;
+ marray[i] = rtm;
+ }
+
+ for (i = 0; i < size; i++) {
+ if (marray[i] == 0)
+ break;
+ }
+
+ /*
+ * if we could not get our block of pages, then
+ * free the readahead/readbehind pages.
+ */
+ if (i < size) {
+ for (i = 0; i < size; i++) {
+ if (i != treqpage && marray[i])
+ FREE_PAGE(marray[i]);
+ }
+ *reqpage = 0;
+ marray[0] = m;
+ return 1;
+ }
+
+ *reqpage = treqpage;
+ return size;
+ }
+ *reqpage = 0;
+ marray[0] = m;
+ return 1;
}
+
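
The tail of vm_fault_additional_pages() allocates the whole cluster and
then degrades to the single requested page if any allocation failed. A
sketch of that back-out pattern, with alloc_page()/free_page() standing
in for vm_page_alloc()/FREE_PAGE():

#include <stdlib.h>
#include <string.h>

static void *alloc_page(void) { return malloc(4096); }	/* stand-in */
static void free_page(void *p) { free(p); }		/* stand-in */

/*
 * Try to build a cluster of 'size' pages around the already-held page
 * 'reqm' at index 'treq'.  If any allocation fails, release everything
 * we allocated and degrade to the single requested page, the same
 * back-out shape used above.
 */
static int
build_cluster(void **marray, int size, void *reqm, int treq)
{
	int i;

	memset(marray, 0, size * sizeof(marray[0]));	/* mirrors the bzero() */
	for (i = 0; i < size; i++)
		marray[i] = (i == treq) ? reqm : alloc_page();

	for (i = 0; i < size; i++)
		if (marray[i] == NULL)
			break;

	if (i < size) {		/* partial failure: back out the extras */
		for (i = 0; i < size; i++)
			if (i != treq && marray[i] != NULL)
				free_page(marray[i]);
		marray[0] = reqm;
		return 1;	/* single-page fallback */
	}
	return size;
}

int
main(void)
{
	void *marray[6];
	void *reqm = alloc_page();

	/* returns 6 on full success, 1 after the fallback */
	return build_cluster(marray, 6, reqm, 2) == 6 ? 0 : 1;
}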