Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/vm_extern.h     3
-rw-r--r--  sys/vm/vm_fault.c     73
-rw-r--r--  sys/vm/vm_glue.c      18
-rw-r--r--  sys/vm/vm_kern.c      32
-rw-r--r--  sys/vm/vm_map.c      413
-rw-r--r--  sys/vm/vm_map.h       24
-rw-r--r--  sys/vm/vm_meter.c      4
-rw-r--r--  sys/vm/vm_mmap.c      33
-rw-r--r--  sys/vm/vm_object.c    72
-rw-r--r--  sys/vm/vm_page.c      55
-rw-r--r--  sys/vm/vm_pageout.c   26
-rw-r--r--  sys/vm/vm_pageout.h   35
-rw-r--r--  sys/vm/vnode_pager.c   4
13 files changed, 506 insertions, 286 deletions
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index 5a706917de747..3d5a335278528 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vm_extern.h 8.2 (Berkeley) 1/12/94
- * $Id: vm_extern.h,v 1.26 1996/09/14 11:54:54 bde Exp $
+ * $Id: vm_extern.h,v 1.27 1996/09/15 11:24:21 bde Exp $
*/
#ifndef _VM_EXTERN_H_
@@ -80,6 +80,7 @@ int vm_fault __P((vm_map_t, vm_offset_t, vm_prot_t, boolean_t));
void vm_fault_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t));
void vm_fault_unwire __P((vm_map_t, vm_offset_t, vm_offset_t));
int vm_fault_wire __P((vm_map_t, vm_offset_t, vm_offset_t));
+int vm_fault_user_wire __P((vm_map_t, vm_offset_t, vm_offset_t));
int vm_fork __P((struct proc *, struct proc *));
int vm_mmap __P((vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, caddr_t, vm_ooffset_t));
vm_offset_t vm_page_alloc_contig __P((vm_offset_t, vm_offset_t, vm_offset_t, vm_offset_t));
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index e7fdd373e34bf..561b496b4ffcd 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -66,7 +66,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_fault.c,v 1.57 1996/09/08 20:44:37 dyson Exp $
+ * $Id: vm_fault.c,v 1.57.2.1 1996/12/15 09:57:11 davidg Exp $
*/
/*
@@ -197,11 +197,37 @@ RetryFault:;
return (result);
}
- if (entry->nofault) {
+ if (entry->eflags & MAP_ENTRY_NOFAULT) {
panic("vm_fault: fault on nofault entry, addr: %lx",
vaddr);
}
+ /*
+	 * If we are user-wiring an r/w segment, and it is COW, then
+	 * we need to do the COW operation. Note that we don't
+	 * currently COW RO sections, because it is NOT desirable
+	 * to COW .text. We simply keep .text from ever being COW'ed
+	 * and take the heat that one cannot debug wired .text sections.
+ */
+ if ((change_wiring == VM_FAULT_USER_WIRE) && (entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
+ if(entry->protection & VM_PROT_WRITE) {
+ int tresult;
+ vm_map_lookup_done(map, entry);
+
+ tresult = vm_map_lookup(&map, vaddr, VM_PROT_READ|VM_PROT_WRITE,
+ &entry, &first_object, &first_pindex, &prot, &wired, &su);
+ if (tresult != KERN_SUCCESS)
+ return tresult;
+ } else {
+ /*
+ * If we don't COW now, on a user wire, the user will never
+ * be able to write to the mapping. If we don't make this
+ * restriction, the bookkeeping would be nearly impossible.
+ */
+ entry->max_protection &= ~VM_PROT_WRITE;
+ }
+ }
+
vp = vnode_pager_lock(first_object);
lookup_still_valid = TRUE;
@@ -839,7 +865,48 @@ vm_fault_wire(map, start, end)
*/
for (va = start; va < end; va += PAGE_SIZE) {
- rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE, TRUE);
+ rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE,
+ VM_FAULT_CHANGE_WIRING);
+ if (rv) {
+ if (va != start)
+ vm_fault_unwire(map, start, va);
+ return (rv);
+ }
+ }
+ return (KERN_SUCCESS);
+}
+
+/*
+ * vm_fault_user_wire:
+ *
+ * Wire down a range of virtual addresses in a map. This
+ * is for user mode though, so we only ask for read access
+ * on currently read only sections.
+ */
+int
+vm_fault_user_wire(map, start, end)
+ vm_map_t map;
+ vm_offset_t start, end;
+{
+
+ register vm_offset_t va;
+ register pmap_t pmap;
+ int rv;
+
+ pmap = vm_map_pmap(map);
+
+ /*
+ * Inform the physical mapping system that the range of addresses may
+ * not fault, so that page tables and such can be locked down as well.
+ */
+ pmap_pageable(pmap, start, end, FALSE);
+
+ /*
+ * We simulate a fault to get the page and enter it in the physical
+ * map.
+ */
+ for (va = start; va < end; va += PAGE_SIZE) {
+ rv = vm_fault(map, va, VM_PROT_READ, VM_FAULT_USER_WIRE);
if (rv) {
if (va != start)
vm_fault_unwire(map, start, va);
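
The vm_fault.c hunks above add vm_fault_user_wire(), which follows the same shape as vm_fault_wire(): walk the range one page at a time, simulate a fault on each page, and on any failure unwire whatever was already wired before returning the error. Below is a minimal, self-contained sketch of that wire-or-unwind pattern; the types and the fault_one_page()/unwire_range() helpers are placeholders for illustration, not kernel interfaces.

#include <stdio.h>

#define PAGE_SIZE	4096u
#define KERN_SUCCESS	0

typedef unsigned long vm_offset_t;

/* Placeholder: pretend pages at or above this address cannot be faulted in. */
static vm_offset_t fail_at = 3 * PAGE_SIZE;

/* Placeholder for vm_fault(..., VM_FAULT_USER_WIRE): 0 on success. */
static int
fault_one_page(vm_offset_t va)
{
	return (va >= fail_at) ? 1 : KERN_SUCCESS;
}

/* Placeholder for vm_fault_unwire(): release [start, end). */
static void
unwire_range(vm_offset_t start, vm_offset_t end)
{
	printf("unwiring [%#lx, %#lx)\n", start, end);
}

/* The wire-or-unwind loop shared by vm_fault_wire()/vm_fault_user_wire(). */
static int
wire_range(vm_offset_t start, vm_offset_t end)
{
	vm_offset_t va;
	int rv;

	for (va = start; va < end; va += PAGE_SIZE) {
		rv = fault_one_page(va);
		if (rv) {
			/* Back out pages wired before the failure. */
			if (va != start)
				unwire_range(start, va);
			return (rv);
		}
	}
	return (KERN_SUCCESS);
}

int
main(void)
{
	int rv = wire_range(0, 5 * PAGE_SIZE);
	printf("wire_range returned %d\n", rv);
	return (0);
}
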
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 22091d772737f..71bcb7c67c4b4 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -59,7 +59,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_glue.c,v 1.55.2.1 1996/12/22 23:21:25 joerg Exp $
+ * $Id: vm_glue.c,v 1.55.2.2 1997/02/13 08:17:31 bde Exp $
*/
#include "opt_rlimit.h"
@@ -126,7 +126,9 @@ kernacc(addr, len, rw)
saddr = trunc_page(addr);
eaddr = round_page(addr + len);
+ vm_map_lock_read(kernel_map);
rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
+ vm_map_unlock_read(kernel_map);
return (rv == TRUE);
}
@@ -137,6 +139,8 @@ useracc(addr, len, rw)
{
boolean_t rv;
vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;
+ vm_map_t map;
+ vm_map_entry_t save_hint;
/*
* XXX - check separately to disallow access to user area and user
@@ -151,8 +155,18 @@ useracc(addr, len, rw)
|| (vm_offset_t) addr + len < (vm_offset_t) addr) {
return (FALSE);
}
- rv = vm_map_check_protection(&curproc->p_vmspace->vm_map,
+ map = &curproc->p_vmspace->vm_map;
+ vm_map_lock_read(map);
+ /*
+ * We save the map hint, and restore it. Useracc appears to distort
+ * the map hint unnecessarily.
+ */
+ save_hint = map->hint;
+ rv = vm_map_check_protection(map,
trunc_page(addr), round_page(addr + len), prot);
+ map->hint = save_hint;
+ vm_map_unlock_read(map);
+
return (rv == TRUE);
}
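
The useracc() change takes the map's read lock around vm_map_check_protection() and restores map->hint afterwards, since the permission walk would otherwise leave the hint pointing at whatever entry it last visited. A small userland sketch of the save/restore idiom follows; the toy map structure and names are assumptions made only for illustration.

#include <stdbool.h>
#include <stdio.h>

struct entry { int id; };

/* Toy stand-in for vm_map: a cached lookup hint plus some entries. */
struct map {
	struct entry entries[4];
	struct entry *hint;	/* last entry a lookup landed on */
};

/* A lookup that, as a side effect, moves the hint (like the range check). */
static bool
check_range(struct map *m, int from, int to)
{
	for (int i = from; i < to; i++)
		m->hint = &m->entries[i];
	return true;
}

/* The useracc()-style pattern: do the check, then put the hint back. */
static bool
check_range_preserving_hint(struct map *m, int from, int to)
{
	struct entry *save_hint = m->hint;
	bool ok = check_range(m, from, to);
	m->hint = save_hint;
	return ok;
}

int
main(void)
{
	struct map m = { .entries = { {0}, {1}, {2}, {3} } };
	m.hint = &m.entries[0];
	check_range_preserving_hint(&m, 1, 4);
	printf("hint still points at entry %d\n", m.hint->id);
	return 0;
}
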
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 21939f6e4da97..c5096cb5c93c8 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_kern.c,v 1.27 1996/07/02 02:08:02 dyson Exp $
+ * $Id: vm_kern.c,v 1.27.2.1 1997/01/17 19:28:38 davidg Exp $
*/
/*
@@ -89,17 +89,17 @@
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
-vm_map_t buffer_map;
-vm_map_t kernel_map;
-vm_map_t kmem_map;
-vm_map_t mb_map;
-int mb_map_full;
-vm_map_t io_map;
-vm_map_t clean_map;
-vm_map_t phys_map;
-vm_map_t exec_map;
-vm_map_t exech_map;
-vm_map_t u_map;
+vm_map_t kernel_map=0;
+vm_map_t kmem_map=0;
+vm_map_t exec_map=0;
+vm_map_t exech_map=0;
+vm_map_t clean_map=0;
+vm_map_t u_map=0;
+vm_map_t buffer_map=0;
+vm_map_t mb_map=0;
+int mb_map_full=0;
+vm_map_t io_map=0;
+vm_map_t phys_map=0;
/*
* kmem_alloc_pageable:
@@ -199,11 +199,6 @@ kmem_alloc(map, size)
(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, FALSE);
- /*
- * Try to coalesce the map
- */
- vm_map_simplify(map, addr);
-
return (addr);
}
@@ -362,6 +357,8 @@ retry:
panic("kmem_malloc: entry not found or misaligned");
entry->wired_count++;
+ vm_map_simplify_entry(map, entry);
+
/*
* Loop thru pages, entering them in the pmap. (We cannot add them to
* the wired count without wrapping the vm_page_queue_lock in
@@ -377,7 +374,6 @@ retry:
}
vm_map_unlock(map);
- vm_map_simplify(map, addr);
return (addr);
}
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index b66b770110fd9..2ec6926b5baf8 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_map.c,v 1.57.2.2 1997/01/26 03:14:59 dyson Exp $
+ * $Id: vm_map.c,v 1.57.2.3 1997/01/31 04:17:20 dyson Exp $
*/
/*
@@ -153,6 +153,7 @@ vm_size_t kentry_data_size;
static vm_map_entry_t kentry_free;
static vm_map_t kmap_free;
extern char kstack[];
+extern int inmprotect;
static int kentry_count;
static vm_offset_t mapvm_start, mapvm, mapvmmax;
@@ -170,7 +171,6 @@ static void vm_map_entry_dispose __P((vm_map_t, vm_map_entry_t));
static void vm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
static void vm_map_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t,
vm_map_entry_t));
-static void vm_map_simplify_entry __P((vm_map_t, vm_map_entry_t));
void
vm_map_startup()
@@ -606,6 +606,7 @@ vm_map_insert(map, object, offset, start, end, prot, max, cow)
register vm_map_entry_t prev_entry;
vm_map_entry_t temp_entry;
vm_object_t prev_object;
+ u_char protoeflags;
if ((object != NULL) && (cow & MAP_NOFAULT)) {
panic("vm_map_insert: paradoxical MAP_NOFAULT request");
@@ -637,48 +638,61 @@ vm_map_insert(map, object, offset, start, end, prot, max, cow)
(prev_entry->next->start < end))
return (KERN_NO_SPACE);
- if ((prev_entry != &map->header) &&
- (prev_entry->end == start) &&
- (object == NULL) &&
- (prev_entry->is_a_map == FALSE) &&
- (prev_entry->is_sub_map == FALSE) &&
- (prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
- (prev_entry->protection == prot) &&
- (prev_entry->max_protection == max) &&
- (prev_entry->wired_count == 0)) {
+ protoeflags = 0;
+ if (cow & MAP_COPY_NEEDED)
+ protoeflags |= MAP_ENTRY_NEEDS_COPY;
+
+ if (cow & MAP_COPY_ON_WRITE)
+ protoeflags |= MAP_ENTRY_COW;
+
+ if (cow & MAP_NOFAULT)
+ protoeflags |= MAP_ENTRY_NOFAULT;
-
/*
* See if we can avoid creating a new entry by extending one of our
- * neighbors.
+ * neighbors. Or at least extend the object.
*/
- u_char needs_copy = (cow & MAP_COPY_NEEDED) != 0;
- u_char copy_on_write = (cow & MAP_COPY_ON_WRITE) != 0;
- u_char nofault = (cow & MAP_NOFAULT) != 0;
-
- if ((needs_copy == prev_entry->needs_copy) &&
- (copy_on_write == prev_entry->copy_on_write) &&
- (nofault == prev_entry->nofault) &&
- (nofault || vm_object_coalesce(prev_entry->object.vm_object,
- OFF_TO_IDX(prev_entry->offset),
- (vm_size_t) (prev_entry->end
- - prev_entry->start),
- (vm_size_t) (end - prev_entry->end)))) {
- /*
- * Coalesced the two objects - can extend the
- * previous map entry to include the new
- * range.
- */
+ if ((object == NULL) &&
+ (prev_entry != &map->header) &&
+ (( prev_entry->eflags & (MAP_ENTRY_IS_A_MAP | MAP_ENTRY_IS_SUB_MAP)) == 0) &&
+ (prev_entry->end == start) &&
+ (prev_entry->wired_count == 0)) {
+
+
+ if ((protoeflags == prev_entry->eflags) &&
+ ((cow & MAP_NOFAULT) ||
+ vm_object_coalesce(prev_entry->object.vm_object,
+ OFF_TO_IDX(prev_entry->offset),
+ (vm_size_t) (prev_entry->end - prev_entry->start),
+ (vm_size_t) (end - prev_entry->end)))) {
+
+ /*
+ * Coalesced the two objects. Can we extend the
+ * previous map entry to include the new range?
+ */
+ if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
+ (prev_entry->protection == prot) &&
+ (prev_entry->max_protection == max)) {
+
map->size += (end - prev_entry->end);
prev_entry->end = end;
- if (!nofault) {
+ if ((cow & MAP_NOFAULT) == 0) {
prev_object = prev_entry->object.vm_object;
default_pager_convert_to_swapq(prev_object);
}
return (KERN_SUCCESS);
}
+ else {
+ object = prev_entry->object.vm_object;
+ offset = prev_entry->offset + (prev_entry->end -
+ prev_entry->start);
+
+ vm_object_reference(object);
+ }
+ }
}
+
/*
* Create a new entry
*/
@@ -687,26 +701,10 @@ vm_map_insert(map, object, offset, start, end, prot, max, cow)
new_entry->start = start;
new_entry->end = end;
- new_entry->is_a_map = FALSE;
- new_entry->is_sub_map = FALSE;
+ new_entry->eflags = protoeflags;
new_entry->object.vm_object = object;
new_entry->offset = offset;
- if (cow & MAP_COPY_NEEDED)
- new_entry->needs_copy = TRUE;
- else
- new_entry->needs_copy = FALSE;
-
- if (cow & MAP_COPY_ON_WRITE)
- new_entry->copy_on_write = TRUE;
- else
- new_entry->copy_on_write = FALSE;
-
- if (cow & MAP_NOFAULT)
- new_entry->nofault = TRUE;
- else
- new_entry->nofault = FALSE;
-
if (map->is_main_map) {
new_entry->inheritance = VM_INHERIT_DEFAULT;
new_entry->protection = prot;
@@ -838,28 +836,19 @@ vm_map_find(map, object, offset, addr, length, find_space, prot, max, cow)
}
/*
- * vm_map_simplify_entry: [ internal use only ]
+ * vm_map_simplify_entry:
*
- * Simplify the given map entry by:
- * removing extra sharing maps
- * [XXX maybe later] merging with a neighbor
+ * Simplify the given map entry by merging with either neighbor.
*/
-static void
+void
vm_map_simplify_entry(map, entry)
vm_map_t map;
vm_map_entry_t entry;
{
vm_map_entry_t next, prev;
- vm_size_t nextsize, prevsize, esize;
-
- /*
- * If this entry corresponds to a sharing map, then see if we can
- * remove the level of indirection. If it's not a sharing map, then it
- * points to a VM object, so see if we can merge with either of our
- * neighbors.
- */
+ vm_size_t prevsize, esize;
- if (entry->is_sub_map || entry->is_a_map || entry->wired_count)
+ if (entry->eflags & (MAP_ENTRY_IS_SUB_MAP|MAP_ENTRY_IS_A_MAP))
return;
prev = entry->prev;
@@ -870,14 +859,11 @@ vm_map_simplify_entry(map, entry)
(!prev->object.vm_object || (prev->object.vm_object->behavior == entry->object.vm_object->behavior)) &&
(!prev->object.vm_object ||
(prev->offset + prevsize == entry->offset)) &&
- (prev->needs_copy == entry->needs_copy) &&
- (prev->copy_on_write == entry->copy_on_write) &&
+ (prev->eflags == entry->eflags) &&
(prev->protection == entry->protection) &&
(prev->max_protection == entry->max_protection) &&
(prev->inheritance == entry->inheritance) &&
- (prev->is_a_map == FALSE) &&
- (prev->is_sub_map == FALSE) &&
- (prev->wired_count == 0)) {
+ (prev->wired_count == entry->wired_count)) {
if (map->first_free == prev)
map->first_free = entry;
if (map->hint == prev)
@@ -893,21 +879,17 @@ vm_map_simplify_entry(map, entry)
next = entry->next;
if (next != &map->header) {
- nextsize = next->end - next->start;
esize = entry->end - entry->start;
if ((entry->end == next->start) &&
(next->object.vm_object == entry->object.vm_object) &&
(!next->object.vm_object || (next->object.vm_object->behavior == entry->object.vm_object->behavior)) &&
(!entry->object.vm_object ||
(entry->offset + esize == next->offset)) &&
- (next->needs_copy == entry->needs_copy) &&
- (next->copy_on_write == entry->copy_on_write) &&
+ (next->eflags == entry->eflags) &&
(next->protection == entry->protection) &&
(next->max_protection == entry->max_protection) &&
(next->inheritance == entry->inheritance) &&
- (next->is_a_map == FALSE) &&
- (next->is_sub_map == FALSE) &&
- (next->wired_count == 0)) {
+ (next->wired_count == entry->wired_count)) {
if (map->first_free == next)
map->first_free = entry;
if (map->hint == next)
@@ -962,7 +944,7 @@ _vm_map_clip_start(map, entry, start)
vm_map_entry_link(map, entry->prev, new_entry);
- if (entry->is_a_map || entry->is_sub_map)
+ if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
vm_map_reference(new_entry->object.share_map);
else
vm_object_reference(new_entry->object.vm_object);
@@ -1006,7 +988,7 @@ _vm_map_clip_end(map, entry, end)
vm_map_entry_link(map, entry, new_entry);
- if (entry->is_a_map || entry->is_sub_map)
+ if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
vm_map_reference(new_entry->object.share_map);
else
vm_object_reference(new_entry->object.vm_object);
@@ -1068,11 +1050,9 @@ vm_map_submap(map, start, end, submap)
vm_map_clip_end(map, entry, end);
if ((entry->start == start) && (entry->end == end) &&
- (!entry->is_a_map) &&
- (entry->object.vm_object == NULL) &&
- (!entry->copy_on_write)) {
- entry->is_a_map = FALSE;
- entry->is_sub_map = TRUE;
+ ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_COW)) == 0) &&
+ (entry->object.vm_object == NULL)) {
+ entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
vm_map_reference(entry->object.sub_map = submap);
result = KERN_SUCCESS;
}
@@ -1106,8 +1086,9 @@ vm_map_protect(map, start, end, new_prot, set_max)
if (vm_map_lookup_entry(map, start, &entry)) {
vm_map_clip_start(map, entry, start);
- } else
+ } else {
entry = entry->next;
+ }
/*
* Make a first pass to check for protection violations.
@@ -1115,7 +1096,7 @@ vm_map_protect(map, start, end, new_prot, set_max)
current = entry;
while ((current != &map->header) && (current->start < end)) {
- if (current->is_sub_map) {
+ if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
vm_map_unlock(map);
return (KERN_INVALID_ARGUMENT);
}
@@ -1152,11 +1133,11 @@ vm_map_protect(map, start, end, new_prot, set_max)
*/
if (current->protection != old_prot) {
-#define MASK(entry) ((entry)->copy_on_write ? ~VM_PROT_WRITE : \
+#define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
VM_PROT_ALL)
#define max(a,b) ((a) > (b) ? (a) : (b))
- if (current->is_a_map) {
+ if (current->eflags & MAP_ENTRY_IS_A_MAP) {
vm_map_entry_t share_entry;
vm_offset_t share_end;
@@ -1229,9 +1210,22 @@ vm_map_madvise(map, pmap, start, end, advise)
for(current = entry;
(current != &map->header) && (current->start < end);
current = current->next) {
- if (current->is_a_map || current->is_sub_map) {
+ vm_size_t size = current->end - current->start;
+
+ if (current->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
continue;
}
+
+ /*
+ * Create an object if needed
+ */
+ if (current->object.vm_object == NULL) {
+ vm_object_t object;
+ object = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(size));
+ current->object.vm_object = object;
+ current->offset = 0;
+ }
+
vm_map_clip_end(map, current, end);
switch (advise) {
case MADV_NORMAL:
@@ -1252,7 +1246,7 @@ vm_map_madvise(map, pmap, start, end, advise)
{
vm_pindex_t pindex;
int count;
- vm_size_t size = current->end - current->start;
+ size = current->end - current->start;
pindex = OFF_TO_IDX(entry->offset);
count = OFF_TO_IDX(size);
/*
@@ -1268,7 +1262,7 @@ vm_map_madvise(map, pmap, start, end, advise)
{
vm_pindex_t pindex;
int count;
- vm_size_t size = current->end - current->start;
+ size = current->end - current->start;
pindex = OFF_TO_IDX(current->offset);
count = OFF_TO_IDX(size);
vm_object_madvise(current->object.vm_object,
@@ -1341,6 +1335,137 @@ vm_map_inherit(map, start, end, new_inheritance)
}
/*
+ * Implement the semantics of mlock
+ */
+int
+vm_map_user_pageable(map, start, end, new_pageable)
+ register vm_map_t map;
+ register vm_offset_t start;
+ register vm_offset_t end;
+ register boolean_t new_pageable;
+{
+ register vm_map_entry_t entry;
+ vm_map_entry_t start_entry;
+ register vm_offset_t failed = 0;
+ int rv;
+
+ vm_map_lock(map);
+ VM_MAP_RANGE_CHECK(map, start, end);
+
+ if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
+ vm_map_unlock(map);
+ return (KERN_INVALID_ADDRESS);
+ }
+
+ if (new_pageable) {
+
+ entry = start_entry;
+ vm_map_clip_start(map, entry, start);
+
+ /*
+ * Now decrement the wiring count for each region. If a region
+ * becomes completely unwired, unwire its physical pages and
+ * mappings.
+ */
+ lock_set_recursive(&map->lock);
+
+ entry = start_entry;
+ while ((entry != &map->header) && (entry->start < end)) {
+ if (entry->eflags & MAP_ENTRY_USER_WIRED) {
+ vm_map_clip_end(map, entry, end);
+ entry->eflags &= ~MAP_ENTRY_USER_WIRED;
+ entry->wired_count--;
+ if (entry->wired_count == 0)
+ vm_fault_unwire(map, entry->start, entry->end);
+ }
+ entry = entry->next;
+ }
+ vm_map_simplify_entry(map, start_entry);
+ lock_clear_recursive(&map->lock);
+ } else {
+
+ /*
+		 * Because of the possibility of blocking, etc., we restart
+		 * through the process's map entries from the beginning so
+		 * that we don't end up depending on a map entry that could
+		 * have changed.
+ */
+ rescan:
+
+ entry = start_entry;
+
+ while ((entry != &map->header) && (entry->start < end)) {
+
+ if (entry->eflags & MAP_ENTRY_USER_WIRED) {
+ entry = entry->next;
+ continue;
+ }
+
+ if (entry->wired_count != 0) {
+ entry->wired_count++;
+ entry->eflags |= MAP_ENTRY_USER_WIRED;
+ entry = entry->next;
+ continue;
+ }
+
+ /* Here on entry being newly wired */
+
+ if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
+ int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
+ if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) {
+
+ vm_object_shadow(&entry->object.vm_object,
+ &entry->offset,
+ OFF_TO_IDX(entry->end
+ - entry->start));
+ entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
+
+ } else if (entry->object.vm_object == NULL) {
+
+ entry->object.vm_object =
+ vm_object_allocate(OBJT_DEFAULT,
+ OFF_TO_IDX(entry->end - entry->start));
+ entry->offset = (vm_offset_t) 0;
+
+ }
+ default_pager_convert_to_swapq(entry->object.vm_object);
+ }
+
+ vm_map_clip_start(map, entry, start);
+ vm_map_clip_end(map, entry, end);
+
+ entry->wired_count++;
+ entry->eflags |= MAP_ENTRY_USER_WIRED;
+
+ /* First we need to allow map modifications */
+ lock_set_recursive(&map->lock);
+ lock_write_to_read(&map->lock);
+
+ rv = vm_fault_user_wire(map, entry->start, entry->end);
+ if (rv) {
+
+ entry->wired_count--;
+ entry->eflags &= ~MAP_ENTRY_USER_WIRED;
+
+ lock_clear_recursive(&map->lock);
+ vm_map_unlock(map);
+
+ (void) vm_map_user_pageable(map, start, entry->start, TRUE);
+ return rv;
+ }
+
+ lock_clear_recursive(&map->lock);
+ vm_map_unlock(map);
+ vm_map_lock(map);
+
+ goto rescan;
+ }
+ }
+ vm_map_unlock(map);
+ return KERN_SUCCESS;
+}
+
+/*
* vm_map_pageable:
*
* Sets the pageability of the specified address
@@ -1467,8 +1592,8 @@ vm_map_pageable(map, start, end, new_pageable)
* point to sharing maps, because we won't
* hold the lock on the sharing map.
*/
- if (!entry->is_a_map && !entry->is_sub_map) {
- int copyflag = entry->needs_copy;
+ if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
+ int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
if (copyflag &&
((entry->protection & VM_PROT_WRITE) != 0)) {
@@ -1476,7 +1601,7 @@ vm_map_pageable(map, start, end, new_pageable)
&entry->offset,
OFF_TO_IDX(entry->end
- entry->start));
- entry->needs_copy = FALSE;
+ entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
} else if (entry->object.vm_object == NULL) {
entry->object.vm_object =
vm_object_allocate(OBJT_DEFAULT,
@@ -1568,6 +1693,7 @@ vm_map_pageable(map, start, end, new_pageable)
(void) vm_map_pageable(map, start, failed, TRUE);
return (rv);
}
+ vm_map_simplify_entry(map, start_entry);
}
vm_map_unlock(map);
@@ -1608,7 +1734,7 @@ vm_map_clean(map, start, end, syncio, invalidate)
* Make a first pass to check for holes.
*/
for (current = entry; current->start < end; current = current->next) {
- if (current->is_sub_map) {
+ if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
vm_map_unlock_read(map);
return (KERN_INVALID_ARGUMENT);
}
@@ -1627,7 +1753,7 @@ vm_map_clean(map, start, end, syncio, invalidate)
for (current = entry; current->start < end; current = current->next) {
offset = current->offset + (start - current->start);
size = (end <= current->end ? end : current->end) - start;
- if (current->is_a_map || current->is_sub_map) {
+ if (current->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
register vm_map_t smap;
vm_map_entry_t tentry;
vm_size_t tsize;
@@ -1718,7 +1844,7 @@ vm_map_entry_delete(map, entry)
vm_map_entry_unlink(map, entry);
map->size -= entry->end - entry->start;
- if (entry->is_a_map || entry->is_sub_map) {
+ if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
vm_map_deallocate(entry->object.share_map);
} else {
vm_object_deallocate(entry->object.vm_object);
@@ -1915,7 +2041,8 @@ vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
vm_map_t src_map, dst_map;
register vm_map_entry_t src_entry, dst_entry;
{
- if (src_entry->is_sub_map || dst_entry->is_sub_map)
+ if ((dst_entry->eflags|src_entry->eflags) &
+ (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
return;
if (src_entry->wired_count == 0) {
@@ -1924,7 +2051,7 @@ vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
* If the source entry is marked needs_copy, it is already
* write-protected.
*/
- if (!src_entry->needs_copy) {
+ if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
boolean_t su;
@@ -1957,11 +2084,8 @@ vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
src_entry->object.vm_object->type == OBJT_SWAP))
vm_object_collapse(src_entry->object.vm_object);
++src_entry->object.vm_object->ref_count;
- src_entry->copy_on_write = TRUE;
- src_entry->needs_copy = TRUE;
-
- dst_entry->needs_copy = TRUE;
- dst_entry->copy_on_write = TRUE;
+ src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
+ dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
dst_entry->object.vm_object =
src_entry->object.vm_object;
dst_entry->offset = src_entry->offset;
@@ -2015,14 +2139,14 @@ vmspace_fork(vm1)
old_entry = old_map->header.next;
while (old_entry != &old_map->header) {
- if (old_entry->is_sub_map)
+ if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
panic("vm_map_fork: encountered a submap");
switch (old_entry->inheritance) {
case VM_INHERIT_NONE:
break;
- case VM_INHERIT_SHARE:
+ case VM_INHERIT_SHARE:
/*
* Clone the entry, creating the shared object if necessary.
*/
@@ -2033,13 +2157,13 @@ vmspace_fork(vm1)
old_entry->start));
old_entry->object.vm_object = object;
old_entry->offset = (vm_offset_t) 0;
- } else if (old_entry->needs_copy) {
+ } else if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
vm_object_shadow(&old_entry->object.vm_object,
&old_entry->offset,
OFF_TO_IDX(old_entry->end -
- old_entry->start));
+ old_entry->start));
- old_entry->needs_copy = 0;
+ old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
object = old_entry->object.vm_object;
}
@@ -2077,7 +2201,7 @@ vmspace_fork(vm1)
*new_entry = *old_entry;
new_entry->wired_count = 0;
new_entry->object.vm_object = NULL;
- new_entry->is_a_map = FALSE;
+ new_entry->eflags &= ~MAP_ENTRY_IS_A_MAP;
vm_map_entry_link(new_map, new_map->header.prev,
new_entry);
vm_map_copy_entry(old_map, new_map, old_entry,
@@ -2173,11 +2297,12 @@ RetryLookup:;
entry = tmp_entry;
*out_entry = entry;
}
+
/*
* Handle submaps.
*/
- if (entry->is_sub_map) {
+ if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
vm_map_t old_map = map;
*var_map = map = entry->object.sub_map;
@@ -2205,7 +2330,7 @@ RetryLookup:;
* If we don't already have a VM object, track it down.
*/
- su = !entry->is_a_map;
+ su = (entry->eflags & MAP_ENTRY_IS_A_MAP) == 0;
if (su) {
share_map = map;
share_offset = vaddr;
@@ -2237,7 +2362,7 @@ RetryLookup:;
* If the entry was copy-on-write, we either ...
*/
- if (entry->needs_copy) {
+ if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
/*
* If we want to write the page, we may as well handle that
* now since we've got the sharing map locked.
@@ -2264,7 +2389,7 @@ RetryLookup:;
&entry->offset,
OFF_TO_IDX(entry->end - entry->start));
- entry->needs_copy = FALSE;
+ entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
lock_write_to_read(&share_map->lock);
} else {
@@ -2333,7 +2458,7 @@ vm_map_lookup_done(map, entry)
* If this entry references a map, unlock it first.
*/
- if (entry->is_a_map)
+ if (entry->eflags & MAP_ENTRY_IS_A_MAP)
vm_map_unlock_read(entry->object.share_map);
/*
@@ -2343,62 +2468,6 @@ vm_map_lookup_done(map, entry)
vm_map_unlock_read(map);
}
-/*
- * Routine: vm_map_simplify
- * Purpose:
- * Attempt to simplify the map representation in
- * the vicinity of the given starting address.
- * Note:
- * This routine is intended primarily to keep the
- * kernel maps more compact -- they generally don't
- * benefit from the "expand a map entry" technology
- * at allocation time because the adjacent entry
- * is often wired down.
- */
-void
-vm_map_simplify(map, start)
- vm_map_t map;
- vm_offset_t start;
-{
- vm_map_entry_t this_entry;
- vm_map_entry_t prev_entry;
-
- vm_map_lock(map);
- if ((vm_map_lookup_entry(map, start, &this_entry)) &&
- ((prev_entry = this_entry->prev) != &map->header) &&
- (prev_entry->end == start) &&
- (prev_entry->object.vm_object == this_entry->object.vm_object) &&
- ((prev_entry->offset + (prev_entry->end - prev_entry->start))
- == this_entry->offset) &&
-
- (map->is_main_map) &&
-
- (prev_entry->is_a_map == FALSE) &&
- (prev_entry->is_sub_map == FALSE) &&
-
- (this_entry->is_a_map == FALSE) &&
- (this_entry->is_sub_map == FALSE) &&
-
- (prev_entry->inheritance == this_entry->inheritance) &&
- (prev_entry->protection == this_entry->protection) &&
- (prev_entry->max_protection == this_entry->max_protection) &&
- (prev_entry->wired_count == this_entry->wired_count) &&
-
- (prev_entry->copy_on_write == this_entry->copy_on_write) &&
- (prev_entry->needs_copy == this_entry->needs_copy)) {
- if (map->first_free == this_entry)
- map->first_free = prev_entry;
- if (map->hint == this_entry)
- SAVE_HINT(map, prev_entry);
- vm_map_entry_unlink(map, this_entry);
- prev_entry->end = this_entry->end;
- if (this_entry->object.vm_object)
- vm_object_deallocate(this_entry->object.vm_object);
- vm_map_entry_dispose(map, this_entry);
- }
- vm_map_unlock(map);
-}
-
#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>
@@ -2440,12 +2509,12 @@ DB_SHOW_COMMAND(map, vm_map_print)
if (entry->wired_count != 0)
db_printf("wired, ");
}
- if (entry->is_a_map || entry->is_sub_map) {
+ if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
db_printf("share=0x%x, offset=0x%x\n",
(int) entry->object.share_map,
(int) entry->offset);
if ((entry->prev == &map->header) ||
- (!entry->prev->is_a_map) ||
+ ((entry->prev->eflags & MAP_ENTRY_IS_A_MAP) == 0) ||
(entry->prev->object.share_map !=
entry->object.share_map)) {
db_indent += 2;
@@ -2457,13 +2526,13 @@ DB_SHOW_COMMAND(map, vm_map_print)
db_printf("object=0x%x, offset=0x%x",
(int) entry->object.vm_object,
(int) entry->offset);
- if (entry->copy_on_write)
+ if (entry->eflags & MAP_ENTRY_COW)
db_printf(", copy (%s)",
- entry->needs_copy ? "needed" : "done");
+ (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
db_printf("\n");
if ((entry->prev == &map->header) ||
- (entry->prev->is_a_map) ||
+ (entry->prev->eflags & MAP_ENTRY_IS_A_MAP) ||
(entry->prev->object.vm_object !=
entry->object.vm_object)) {
db_indent += 2;
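
The bulk of the vm_map.c change replaces the per-entry bitfields (is_a_map, is_sub_map, copy_on_write, needs_copy, nofault) with a single eflags byte, which lets vm_map_insert() and vm_map_simplify_entry() decide merge eligibility with one flags comparison instead of several boolean checks. The sketch below mirrors only that idea with a simplified entry structure; it omits the object and offset checks the real code also performs, and none of the names are the kernel's.

#include <stdbool.h>
#include <stdio.h>

/* Simplified flag bits, mirroring the new MAP_ENTRY_* style. */
#define ENTRY_IS_A_MAP		0x01
#define ENTRY_IS_SUB_MAP	0x02
#define ENTRY_COW		0x04
#define ENTRY_NEEDS_COPY	0x08
#define ENTRY_NOFAULT		0x10

/* Simplified map entry: just the fields the merge test looks at. */
struct entry {
	unsigned long start, end;
	unsigned char eflags;
	int protection, max_protection, inheritance;
	int wired_count;
};

/*
 * Can prev be extended to absorb next?  One flags compare replaces the
 * old series of per-bit comparisons.
 */
static bool
can_merge(const struct entry *prev, const struct entry *next)
{
	if (prev->eflags & (ENTRY_IS_A_MAP | ENTRY_IS_SUB_MAP))
		return false;
	return prev->end == next->start &&
	    prev->eflags == next->eflags &&
	    prev->protection == next->protection &&
	    prev->max_protection == next->max_protection &&
	    prev->inheritance == next->inheritance &&
	    prev->wired_count == next->wired_count;
}

int
main(void)
{
	struct entry a = { 0, 4096, ENTRY_COW | ENTRY_NEEDS_COPY, 3, 7, 0, 0 };
	struct entry b = { 4096, 8192, ENTRY_COW | ENTRY_NEEDS_COPY, 3, 7, 0, 0 };
	printf("mergeable: %s\n", can_merge(&a, &b) ? "yes" : "no");
	return 0;
}
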
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index e0fde407aa8c4..5c745e7c4459a 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_map.h,v 1.15 1996/07/30 03:08:11 dyson Exp $
+ * $Id: vm_map.h,v 1.15.2.1 1996/12/15 09:57:14 davidg Exp $
*/
/*
@@ -104,11 +104,7 @@ struct vm_map_entry {
vm_offset_t end; /* end address */
union vm_map_object object; /* object I point to */
vm_ooffset_t offset; /* offset into object */
- u_char is_a_map:1, /* Is "object" a map? */
- is_sub_map:1, /* Is "object" a submap? */
- copy_on_write:1, /* is data copy-on-write */
- needs_copy:1, /* does object need to be copied */
- nofault:1; /* should never fault */
+ u_char eflags; /* map entry flags */
/* Only in task maps: */
vm_prot_t protection; /* protection code */
vm_prot_t max_protection; /* maximum protection */
@@ -116,6 +112,13 @@ struct vm_map_entry {
int wired_count; /* can be paged if = 0 */
};
+#define MAP_ENTRY_IS_A_MAP 0x1
+#define MAP_ENTRY_IS_SUB_MAP 0x2
+#define MAP_ENTRY_COW 0x4
+#define MAP_ENTRY_NEEDS_COPY 0x8
+#define MAP_ENTRY_NOFAULT 0x10
+#define MAP_ENTRY_USER_WIRED 0x20
+
/*
* Maps are doubly-linked lists of map entries, kept sorted
* by address. A single hint is provided to start
@@ -210,6 +213,13 @@ typedef struct {
#define MAP_COPY_ON_WRITE 0x2
#define MAP_NOFAULT 0x4
+/*
+ * vm_fault option flags
+ */
+#define VM_FAULT_NORMAL 0
+#define VM_FAULT_CHANGE_WIRING 1
+#define VM_FAULT_USER_WIRE 2
+
#ifdef KERNEL
extern vm_offset_t kentry_data;
extern vm_size_t kentry_data_size;
@@ -230,6 +240,7 @@ int vm_map_lookup __P((vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_
void vm_map_lookup_done __P((vm_map_t, vm_map_entry_t));
boolean_t vm_map_lookup_entry __P((vm_map_t, vm_offset_t, vm_map_entry_t *));
int vm_map_pageable __P((vm_map_t, vm_offset_t, vm_offset_t, boolean_t));
+int vm_map_user_pageable __P((vm_map_t, vm_offset_t, vm_offset_t, boolean_t));
int vm_map_clean __P((vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t));
int vm_map_protect __P((vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
void vm_map_reference __P((vm_map_t));
@@ -238,6 +249,7 @@ void vm_map_simplify __P((vm_map_t, vm_offset_t));
void vm_map_startup __P((void));
int vm_map_submap __P((vm_map_t, vm_offset_t, vm_offset_t, vm_map_t));
void vm_map_madvise __P((vm_map_t, pmap_t, vm_offset_t, vm_offset_t, int));
+void vm_map_simplify_entry __P((vm_map_t, vm_map_entry_t));
#endif
#endif /* _VM_MAP_ */
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index faed27f801346..54d66789c4180 100644
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vm_meter.c 8.4 (Berkeley) 1/4/94
- * $Id: vm_meter.c,v 1.15 1996/05/18 03:37:47 dyson Exp $
+ * $Id: vm_meter.c,v 1.16 1996/09/08 20:44:39 dyson Exp $
*/
#include <sys/param.h>
@@ -179,7 +179,7 @@ vmtotal SYSCTL_HANDLER_ARGS
paging = 0;
for (map = &p->p_vmspace->vm_map, entry = map->header.next;
entry != &map->header; entry = entry->next) {
- if (entry->is_a_map || entry->is_sub_map ||
+ if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) ||
entry->object.vm_object == NULL)
continue;
entry->object.vm_object->flags |= OBJ_ACTIVE;
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index 16a0dfbbce1b0..6d2b9a6a65a50 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -38,7 +38,7 @@
* from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
*
* @(#)vm_mmap.c 8.4 (Berkeley) 1/12/94
- * $Id: vm_mmap.c,v 1.53 1996/10/29 22:07:11 dyson Exp $
+ * $Id: vm_mmap.c,v 1.53.2.1 1996/12/22 23:21:26 joerg Exp $
*/
/*
@@ -475,6 +475,10 @@ mprotect(p, uap, retval)
addr = (vm_offset_t) uap->addr;
size = uap->len;
prot = uap->prot & VM_PROT_ALL;
+#if defined(VM_PROT_READ_IS_EXEC)
+ if (prot & VM_PROT_READ)
+ prot |= VM_PROT_EXECUTE;
+#endif
pageoff = (addr & PAGE_MASK);
addr -= pageoff;
@@ -648,7 +652,7 @@ mincore(p, uap, retval)
/*
* ignore submaps (for now) or null objects
*/
- if (current->is_a_map || current->is_sub_map ||
+ if ((current->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) ||
current->object.vm_object == NULL)
continue;
@@ -787,7 +791,7 @@ mlock(p, uap, retval)
return (error);
#endif
- error = vm_map_pageable(&p->p_vmspace->vm_map, addr, addr + size, FALSE);
+ error = vm_map_user_pageable(&p->p_vmspace->vm_map, addr, addr + size, FALSE);
return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
@@ -825,7 +829,7 @@ munlock(p, uap, retval)
return (error);
#endif
- error = vm_map_pageable(&p->p_vmspace->vm_map, addr, addr + size, TRUE);
+ error = vm_map_user_pageable(&p->p_vmspace->vm_map, addr, addr + size, TRUE);
return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
@@ -905,9 +909,14 @@ vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
type = OBJT_VNODE;
}
}
- object = vm_pager_allocate(type, handle, OFF_TO_IDX(objsize), prot, foff);
- if (object == NULL)
- return (type == OBJT_DEVICE ? EINVAL : ENOMEM);
+
+ if (handle == NULL) {
+ object = NULL;
+ } else {
+ object = vm_pager_allocate(type, handle, OFF_TO_IDX(objsize), prot, foff);
+ if (object == NULL)
+ return (type == OBJT_DEVICE ? EINVAL : ENOMEM);
+ }
/*
* Force device mappings to be shared.
@@ -922,6 +931,14 @@ vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
docow = MAP_COPY_ON_WRITE | MAP_COPY_NEEDED;
}
+#if defined(VM_PROT_READ_IS_EXEC)
+ if (prot & VM_PROT_READ)
+ prot |= VM_PROT_EXECUTE;
+
+ if (maxprot & VM_PROT_READ)
+ maxprot |= VM_PROT_EXECUTE;
+#endif
+
rv = vm_map_find(map, object, foff, addr, size, fitit,
prot, maxprot, docow);
@@ -939,7 +956,7 @@ vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
/*
* "Pre-fault" resident pages.
*/
- if ((type == OBJT_VNODE) && (map->pmap != NULL)) {
+ if ((type == OBJT_VNODE) && (map->pmap != NULL) && (object != NULL)) {
pmap_object_init_pt(map->pmap, *addr,
object, (vm_pindex_t) OFF_TO_IDX(foff), size, 1);
}
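
Both the mprotect() and vm_mmap() hunks promote VM_PROT_READ to also grant VM_PROT_EXECUTE when VM_PROT_READ_IS_EXEC is defined, presumably for machines where a readable page is necessarily executable. A tiny sketch of that promotion, using placeholder protection bits rather than the kernel's VM_PROT_* values:

#include <stdio.h>

#define PROT_READ	0x1
#define PROT_WRITE	0x2
#define PROT_EXECUTE	0x4

/* Define this for architectures where readable pages are always executable. */
#define PROT_READ_IS_EXEC

/* Fold execute into any protection that grants read, as the hunks above do. */
static int
promote_prot(int prot)
{
#if defined(PROT_READ_IS_EXEC)
	if (prot & PROT_READ)
		prot |= PROT_EXECUTE;
#endif
	return prot;
}

int
main(void)
{
	printf("READ promotes to %#x\n", promote_prot(PROT_READ));
	printf("WRITE stays      %#x\n", promote_prot(PROT_WRITE));
	return 0;
}
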
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 3c6ceeb437db7..133d0edcba11e 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_object.c,v 1.81 1996/09/14 11:54:57 bde Exp $
+ * $Id: vm_object.c,v 1.82 1996/09/28 03:33:26 dyson Exp $
*/
/*
@@ -681,6 +681,8 @@ vm_object_pmap_remove(object, start, end)
if (p->pindex >= start && p->pindex < end)
vm_page_protect(p, VM_PROT_NONE);
}
+ if ((start == 0) && (object->size == end))
+ object->flags &= ~OBJ_WRITEABLE;
}
/*
@@ -695,7 +697,9 @@ vm_object_madvise(object, pindex, count, advise)
int count;
int advise;
{
- vm_pindex_t end;
+ int s;
+ vm_pindex_t end, tpindex;
+ vm_object_t tobject;
vm_page_t m;
if (object == NULL)
@@ -704,34 +708,60 @@ vm_object_madvise(object, pindex, count, advise)
end = pindex + count;
for (; pindex < end; pindex += 1) {
- m = vm_page_lookup(object, pindex);
+
+relookup:
+ tobject = object;
+ tpindex = pindex;
+shadowlookup:
+ m = vm_page_lookup(tobject, tpindex);
+ if (m == NULL) {
+ if (tobject->type != OBJT_DEFAULT) {
+ continue;
+ }
+
+ tobject = tobject->backing_object;
+ if ((tobject == NULL) || (tobject->ref_count != 1)) {
+ continue;
+ }
+ tpindex += OFF_TO_IDX(tobject->backing_object_offset);
+ goto shadowlookup;
+ }
/*
* If the page is busy or not in a normal active state,
* we skip it. Things can break if we mess with pages
* in any of the below states.
*/
- if (m == NULL || m->busy || (m->flags & PG_BUSY) ||
- m->hold_count || m->wire_count ||
- m->valid != VM_PAGE_BITS_ALL)
+ if (m->hold_count || m->wire_count ||
+ m->valid != VM_PAGE_BITS_ALL) {
continue;
+ }
+
+ if (m->busy || (m->flags & PG_BUSY)) {
+ s = splvm();
+ if (m->busy || (m->flags & PG_BUSY)) {
+ m->flags |= PG_WANTED;
+ tsleep(m, PVM, "madvpw", 0);
+ }
+ splx(s);
+ goto relookup;
+ }
if (advise == MADV_WILLNEED) {
if (m->queue != PQ_ACTIVE)
vm_page_activate(m);
- } else if ((advise == MADV_DONTNEED) ||
- ((advise == MADV_FREE) &&
- ((object->type != OBJT_DEFAULT) &&
- (object->type != OBJT_SWAP)))) {
+ } else if (advise == MADV_DONTNEED) {
vm_page_deactivate(m);
} else if (advise == MADV_FREE) {
+ pmap_clear_modify(VM_PAGE_TO_PHYS(m));
+ m->dirty = 0;
/*
- * Force a demand-zero on next ref
+			 * Force a demand zero on an attempt to read from swap.
+ * We currently don't handle vnode files correctly,
+ * and will reread stale contents unnecessarily.
*/
if (object->type == OBJT_SWAP)
- swap_pager_dmzspace(object, m->pindex, 1);
- vm_page_protect(m, VM_PROT_NONE);
- vm_page_free(m);
+ swap_pager_dmzspace(tobject, m->pindex, 1);
}
}
}
@@ -853,6 +883,7 @@ vm_object_qcollapse(object)
swap_pager_freespace(backing_object,
backing_object_paging_offset_index + p->pindex, 1);
vm_page_rename(p, object, new_pindex);
+ vm_page_protect(p, VM_PROT_NONE);
p->dirty = VM_PAGE_BITS_ALL;
}
}
@@ -968,7 +999,9 @@ vm_object_collapse(object)
PAGE_WAKEUP(p);
vm_page_free(p);
} else {
+ vm_page_protect(p, VM_PROT_NONE);
vm_page_rename(p, object, new_pindex);
+ p->dirty = VM_PAGE_BITS_ALL;
}
}
}
@@ -1299,13 +1332,18 @@ vm_object_coalesce(prev_object, prev_pindex, prev_size, next_size)
* pages not mapped to prev_entry may be in use anyway)
*/
- if (prev_object->ref_count > 1 ||
- prev_object->backing_object != NULL) {
+ if (prev_object->backing_object != NULL) {
return (FALSE);
}
prev_size >>= PAGE_SHIFT;
next_size >>= PAGE_SHIFT;
+
+ if ((prev_object->ref_count > 1) &&
+ (prev_object->size != prev_pindex + prev_size)) {
+ return (FALSE);
+ }
+
/*
* Remove any pages that may still be in the object from a previous
* deallocation.
@@ -1360,7 +1398,7 @@ _vm_object_in_map(map, object, entry)
}
tmpe = tmpe->next;
}
- } else if (entry->is_sub_map || entry->is_a_map) {
+ } else if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
tmpm = entry->object.share_map;
tmpe = tmpm->header.next;
entcount = tmpm->nentries;
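
vm_object_madvise() now retries busy pages and, when a page is not resident in the top object, walks down the backing-object chain (the relookup/shadowlookup labels) before giving up. The sketch below shows only that chain walk over a toy object structure; the field and helper names are illustrative and do not match the kernel's vm_object layout.

#include <stddef.h>
#include <stdio.h>

struct page { long pindex; };

/* A toy object: a handful of resident pages plus a backing object. */
struct object {
	struct page pages[4];
	int npages;
	struct object *backing_object;
	long backing_offset;	/* page-index offset into the backing object */
	int ref_count;
};

static struct page *
page_lookup(struct object *obj, long pindex)
{
	for (int i = 0; i < obj->npages; i++)
		if (obj->pages[i].pindex == pindex)
			return &obj->pages[i];
	return NULL;
}

/*
 * Follow the shadow chain, adjusting the index, until the page is found
 * or the chain ends / the backing object is shared, as the madvise loop does.
 */
static struct page *
shadow_lookup(struct object *obj, long pindex)
{
	while (obj != NULL) {
		struct page *m = page_lookup(obj, pindex);
		if (m != NULL)
			return m;
		if (obj->backing_object == NULL ||
		    obj->backing_object->ref_count != 1)
			return NULL;
		pindex += obj->backing_offset;
		obj = obj->backing_object;
	}
	return NULL;
}

int
main(void)
{
	struct object backing = { { {0}, {1}, {2}, {3} }, 4, NULL, 0, 1 };
	struct object front = { { {0} }, 0, &backing, 0, 1 };
	struct page *m = shadow_lookup(&front, 2);
	printf("found page index %ld\n", m ? m->pindex : -1L);
	return 0;
}
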
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index d768359259bb4..0c4a001390cd0 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
- * $Id: vm_page.c,v 1.69.2.1 1996/11/09 21:16:08 phk Exp $
+ * $Id: vm_page.c,v 1.69.2.2 1996/11/12 09:10:16 phk Exp $
*/
/*
@@ -98,16 +98,16 @@ static struct pglist *vm_page_buckets; /* Array of buckets */
static int vm_page_bucket_count; /* How big is array? */
static int vm_page_hash_mask; /* Mask for hash function */
-struct pglist vm_page_queue_free[PQ_L2_SIZE];
-struct pglist vm_page_queue_zero[PQ_L2_SIZE];
-struct pglist vm_page_queue_active;
-struct pglist vm_page_queue_inactive;
-struct pglist vm_page_queue_cache[PQ_L2_SIZE];
+struct pglist vm_page_queue_free[PQ_L2_SIZE] = {0};
+struct pglist vm_page_queue_zero[PQ_L2_SIZE] = {0};
+struct pglist vm_page_queue_active = {0};
+struct pglist vm_page_queue_inactive = {0};
+struct pglist vm_page_queue_cache[PQ_L2_SIZE] = {0};
-int no_queue;
+int no_queue=0;
-struct vpgqueues vm_page_queues[PQ_COUNT];
-int pqcnt[PQ_COUNT];
+struct vpgqueues vm_page_queues[PQ_COUNT] = {0};
+int pqcnt[PQ_COUNT] = {0};
static void
vm_page_queue_init(void) {
@@ -142,13 +142,13 @@ vm_page_queue_init(void) {
}
}
-vm_page_t vm_page_array;
-int vm_page_array_size;
-long first_page;
+vm_page_t vm_page_array = 0;
+int vm_page_array_size = 0;
+long first_page = 0;
static long last_page;
static vm_size_t page_mask;
static int page_shift;
-int vm_page_zero_count;
+int vm_page_zero_count = 0;
/*
* map of contiguous valid DEV_BSIZE chunks in a page
@@ -734,7 +734,7 @@ vm_page_alloc(object, pindex, page_req)
{
register vm_page_t m;
struct vpgqueues *pq;
- int queue;
+ int queue, qtype;
int s;
#ifdef DIAGNOSTIC
@@ -835,15 +835,16 @@ vm_page_alloc(object, pindex, page_req)
}
queue = m->queue;
- if (queue == PQ_ZERO)
+ qtype = queue - m->pc;
+ if (qtype == PQ_ZERO)
--vm_page_zero_count;
pq = &vm_page_queues[queue];
TAILQ_REMOVE(pq->pl, m, pageq);
--(*pq->cnt);
--(*pq->lcnt);
- if ((m->queue - m->pc) == PQ_ZERO) {
+ if (qtype == PQ_ZERO) {
m->flags = PG_ZERO|PG_BUSY;
- } else if ((m->queue - m->pc) == PQ_CACHE) {
+ } else if (qtype == PQ_CACHE) {
vm_page_remove(m);
m->flags = PG_BUSY;
} else {
@@ -874,6 +875,26 @@ vm_page_alloc(object, pindex, page_req)
return (m);
}
+void
+vm_wait()
+{
+ int s;
+
+ s = splvm();
+ if (curproc == pageproc) {
+ vm_pageout_pages_needed = 1;
+ tsleep(&vm_pageout_pages_needed, PSWP, "vmwait", 0);
+ } else {
+ if (!vm_pages_needed) {
+ vm_pages_needed++;
+ wakeup(&vm_pages_needed);
+ }
+ tsleep(&cnt.v_free_count, PVM, "vmwait", 0);
+ }
+ splx(s);
+}
+
+
/*
* vm_page_activate:
*
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index c6888bf1c50b9..9a0b87f9680c5 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_pageout.c,v 1.86 1996/09/28 03:33:40 dyson Exp $
+ * $Id: vm_pageout.c,v 1.86.2.1 1997/02/13 08:17:32 bde Exp $
*/
/*
@@ -221,6 +221,7 @@ vm_pageout_clean(m, sync)
if (!sync && object->backing_object) {
vm_object_collapse(object);
}
+
mc[vm_pageout_page_count] = m;
pageout_count = 1;
page_base = vm_pageout_page_count;
@@ -517,7 +518,7 @@ vm_pageout_map_deactivate_pages(map, desired)
*/
tmpe = map->header.next;
while (tmpe != &map->header) {
- if ((tmpe->is_sub_map == 0) && (tmpe->is_a_map == 0)) {
+ if ((tmpe->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
obj = tmpe->object.vm_object;
if ((obj != NULL) && (obj->shadow_count <= 1) &&
((bigobj == NULL) ||
@@ -539,7 +540,7 @@ vm_pageout_map_deactivate_pages(map, desired)
while (tmpe != &map->header) {
if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
break;
- if ((tmpe->is_sub_map == 0) && (tmpe->is_a_map == 0)) {
+ if ((tmpe->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
obj = tmpe->object.vm_object;
if (obj)
vm_pageout_object_deactivate_pages(map, obj, desired, 0);
@@ -810,10 +811,12 @@ rescan0:
if (vm_pageout_algorithm_lru ||
(m->object->ref_count == 0) || (m->act_count == 0)) {
--page_shortage;
- vm_page_protect(m, VM_PROT_NONE);
- if ((m->dirty == 0) &&
- (m->object->ref_count == 0)) {
- vm_page_cache(m);
+ if (m->object->ref_count == 0) {
+ vm_page_protect(m, VM_PROT_NONE);
+ if (m->dirty == 0)
+ vm_page_cache(m);
+ else
+ vm_page_deactivate(m);
} else {
vm_page_deactivate(m);
}
@@ -1013,6 +1016,15 @@ vm_pageout()
}
}
+void
+pagedaemon_wakeup()
+{
+ if (!vm_pages_needed && curproc != pageproc) {
+ vm_pages_needed++;
+ wakeup(&vm_pages_needed);
+ }
+}
+
#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon()
diff --git a/sys/vm/vm_pageout.h b/sys/vm/vm_pageout.h
index f17720b778d11..469482910baa7 100644
--- a/sys/vm/vm_pageout.h
+++ b/sys/vm/vm_pageout.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_pageout.h,v 1.16 1995/11/20 12:19:22 phk Exp $
+ * $Id: vm_pageout.h,v 1.17 1995/11/21 12:55:26 bde Exp $
*/
#ifndef _VM_VM_PAGEOUT_H_
@@ -91,38 +91,9 @@ extern int vm_pageout_pages_needed;
* Signal pageout-daemon and wait for it.
*/
-static void pagedaemon_wakeup __P((void));
-static inline void
-pagedaemon_wakeup()
-{
- if (!vm_pages_needed && curproc != pageproc) {
- vm_pages_needed++;
- wakeup(&vm_pages_needed);
- }
-}
-
+extern void pagedaemon_wakeup __P((void));
#define VM_WAIT vm_wait()
-
-static void vm_wait __P((void));
-static inline void
-vm_wait()
-{
- int s;
-
- s = splhigh();
- if (curproc == pageproc) {
- vm_pageout_pages_needed = 1;
- tsleep(&vm_pageout_pages_needed, PSWP, "vmwait", 0);
- } else {
- if (!vm_pages_needed) {
- vm_pages_needed++;
- wakeup(&vm_pages_needed);
- }
- tsleep(&cnt.v_free_count, PVM, "vmwait", 0);
- }
- splx(s);
-}
-
+extern void vm_wait __P((void));
#ifdef KERNEL
void vm_pageout_page __P((vm_page_t, vm_object_t));
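
vm_wait() and pagedaemon_wakeup() move from inline functions in this header to out-of-line definitions in vm_page.c and vm_pageout.c, keeping the same hand-off: a thread short of pages notes the shortage, wakes the page daemon, and sleeps until pages are freed. The userspace analogue below uses pthreads in place of the kernel's tsleep()/wakeup() and only mirrors that hand-off; it is a sketch, not the kernel mechanism.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t freed = PTHREAD_COND_INITIALIZER;
static pthread_cond_t needed = PTHREAD_COND_INITIALIZER;
static int free_pages = 0;
static int pages_needed = 0;

/* vm_wait() analogue: note the shortage, poke the daemon, sleep for pages. */
static void
vm_wait_sketch(void)
{
	pthread_mutex_lock(&lock);
	if (!pages_needed) {
		pages_needed = 1;
		pthread_cond_signal(&needed);	/* pagedaemon_wakeup() analogue */
	}
	while (free_pages == 0)
		pthread_cond_wait(&freed, &lock);
	pthread_mutex_unlock(&lock);
}

/* Page daemon analogue: wait to be needed, free pages, wake the sleepers. */
static void *
pagedaemon(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!pages_needed)
		pthread_cond_wait(&needed, &lock);
	free_pages = 32;
	pages_needed = 0;
	pthread_cond_broadcast(&freed);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, pagedaemon, NULL);
	vm_wait_sketch();
	printf("resumed with %d free pages\n", free_pages);
	pthread_join(t, NULL);
	return 0;
}
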
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index 7581086d71830..3badd6ee8e3b4 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -38,7 +38,7 @@
* SUCH DAMAGE.
*
* from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
- * $Id: vnode_pager.c,v 1.64 1996/09/10 05:28:23 dyson Exp $
+ * $Id: vnode_pager.c,v 1.65 1996/10/17 02:49:35 dyson Exp $
*/
/*
@@ -148,6 +148,8 @@ vnode_pager_alloc(handle, size, prot, offset)
else
object->flags = 0;
+ if (vp->v_usecount == 0)
+ panic("vnode_pager_alloc: no vnode reference");
/*
* Hold a reference to the vnode and initialize object data.
*/