Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/device_pager.c  |  36
-rw-r--r--  sys/vm/swap_pager.c    |  50
-rw-r--r--  sys/vm/vm_extern.h     |   6
-rw-r--r--  sys/vm/vm_fault.c      |  20
-rw-r--r--  sys/vm/vm_glue.c       |  20
-rw-r--r--  sys/vm/vm_map.c        | 217
-rw-r--r--  sys/vm/vm_map.h        |   9
-rw-r--r--  sys/vm/vm_meter.c      | 104
-rw-r--r--  sys/vm/vm_mmap.c       |  20
-rw-r--r--  sys/vm/vm_object.c     | 115
-rw-r--r--  sys/vm/vm_page.c       | 203
-rw-r--r--  sys/vm/vm_page.h       |  11
-rw-r--r--  sys/vm/vm_pageout.c    |  24
-rw-r--r--  sys/vm/vm_pager.c      |   7
-rw-r--r--  sys/vm/vm_swap.c       |  11
-rw-r--r--  sys/vm/vm_zone.c       |  21
-rw-r--r--  sys/vm/vm_zone.h       |  12
-rw-r--r--  sys/vm/vnode_pager.c   |   4
18 files changed, 269 insertions(+), 621 deletions(-)
diff --git a/sys/vm/device_pager.c b/sys/vm/device_pager.c
index a200b9c2a8db..3783be436893 100644
--- a/sys/vm/device_pager.c
+++ b/sys/vm/device_pager.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)device_pager.c 8.1 (Berkeley) 6/11/93
- * $Id: device_pager.c,v 1.36 1998/12/07 21:58:50 archie Exp $
+ * $Id: device_pager.c,v 1.31 1998/07/15 02:32:35 bde Exp $
*/
#include <sys/param.h>
@@ -50,7 +50,6 @@
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
-#include <vm/vm_zone.h>
static void dev_pager_init __P((void));
static vm_object_t dev_pager_alloc __P((void *, vm_ooffset_t, vm_prot_t,
@@ -65,8 +64,8 @@ static boolean_t dev_pager_haspage __P((vm_object_t, vm_pindex_t, int *,
/* list of device pager objects */
static struct pagerlst dev_pager_object_list;
-static vm_zone_t fakepg_zone;
-static struct vm_zone fakepg_zone_store;
+/* list of available vm_page_t's */
+static TAILQ_HEAD(, vm_page) dev_pager_fakelist;
static vm_page_t dev_pager_getfake __P((vm_offset_t));
static void dev_pager_putfake __P((vm_page_t));
@@ -87,8 +86,7 @@ static void
dev_pager_init()
{
TAILQ_INIT(&dev_pager_object_list);
- fakepg_zone = &fakepg_zone_store;
- zinitna(fakepg_zone, NULL, "DP fakepg", sizeof(struct vm_page), 0, 0, 2);
+ TAILQ_INIT(&dev_pager_fakelist);
}
static vm_object_t
@@ -97,8 +95,7 @@ dev_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t fo
dev_t dev;
d_mmap_t *mapfunc;
vm_object_t object;
- unsigned int npages;
- vm_offset_t off;
+ unsigned int npages, off;
/*
* Make sure this device can be mapped.
@@ -207,8 +204,11 @@ dev_pager_getpages(object, m, count, reqpage)
if (mapfunc == NULL || mapfunc == (d_mmap_t *)nullop)
panic("dev_pager_getpage: no map function");
- paddr = pmap_phys_address((*mapfunc) ((dev_t) dev, (vm_offset_t) offset << PAGE_SHIFT, prot));
- KASSERT(paddr != -1,("dev_pager_getpage: map function returns error"));
+ paddr = pmap_phys_address((*mapfunc) ((dev_t) dev, (int) offset << PAGE_SHIFT, prot));
+#ifdef DIAGNOSTIC
+ if (paddr == -1)
+ panic("dev_pager_getpage: map function returns error");
+#endif
/*
* Replace the passed in reqpage page with our own fake page and free up the
* all of the original pages.
@@ -255,15 +255,23 @@ dev_pager_getfake(paddr)
vm_offset_t paddr;
{
vm_page_t m;
-
- m = zalloc(fakepg_zone);
+ int i;
+
+ if (TAILQ_FIRST(&dev_pager_fakelist) == NULL) {
+ m = (vm_page_t) malloc(PAGE_SIZE * 2, M_VMPGDATA, M_WAITOK);
+ for (i = (PAGE_SIZE * 2) / sizeof(*m); i > 0; i--) {
+ TAILQ_INSERT_TAIL(&dev_pager_fakelist, m, pageq);
+ m++;
+ }
+ }
+ m = TAILQ_FIRST(&dev_pager_fakelist);
+ TAILQ_REMOVE(&dev_pager_fakelist, m, pageq);
m->flags = PG_BUSY | PG_FICTITIOUS;
m->valid = VM_PAGE_BITS_ALL;
m->dirty = 0;
m->busy = 0;
m->queue = PQ_NONE;
- m->object = NULL;
m->wire_count = 1;
m->hold_count = 0;
@@ -278,5 +286,5 @@ dev_pager_putfake(m)
{
if (!(m->flags & PG_FICTITIOUS))
panic("dev_pager_putfake: bad page");
- zfree(fakepg_zone, m);
+ TAILQ_INSERT_TAIL(&dev_pager_fakelist, m, pageq);
}
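
Note on the allocation pattern this file reverts to: rather than drawing fake vm_page structures from a vm_zone, the older device pager keeps a private TAILQ free list and refills it in bulk from malloc() whenever it runs dry. Below is a minimal userland sketch of that batch-refill free list; struct fake_page, phys_addr, and REFILL_BYTES are hypothetical stand-ins for the kernel's struct vm_page and PAGE_SIZE * 2. As in dev_pager_putfake(), entries are recycled onto the list and never returned to malloc(), so the pool only grows.

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

#define REFILL_BYTES 8192          /* stand-in for PAGE_SIZE * 2 */

struct fake_page {                 /* hypothetical stand-in for struct vm_page */
	TAILQ_ENTRY(fake_page) pageq;
	unsigned long phys_addr;
};

static TAILQ_HEAD(, fake_page) fakelist = TAILQ_HEAD_INITIALIZER(fakelist);

static struct fake_page *
getfake(unsigned long paddr)
{
	struct fake_page *m;
	size_t i;

	/* Refill in bulk when empty, as dev_pager_getfake() does. */
	if (TAILQ_FIRST(&fakelist) == NULL) {
		m = malloc(REFILL_BYTES);
		if (m == NULL)
			return (NULL);
		for (i = REFILL_BYTES / sizeof(*m); i > 0; i--) {
			TAILQ_INSERT_TAIL(&fakelist, m, pageq);
			m++;
		}
	}
	m = TAILQ_FIRST(&fakelist);
	TAILQ_REMOVE(&fakelist, m, pageq);
	m->phys_addr = paddr;
	return (m);
}

static void
putfake(struct fake_page *m)
{
	/* Recycle rather than free, mirroring dev_pager_putfake(). */
	TAILQ_INSERT_TAIL(&fakelist, m, pageq);
}

int
main(void)
{
	struct fake_page *m = getfake(0x1000);

	printf("fake page at %p for paddr %#lx\n", (void *)m, m->phys_addr);
	putfake(m);
	return (0);
}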
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 16911684c998..6c58e0fff287 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -39,7 +39,7 @@
* from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
*
* @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
- * $Id: swap_pager.c,v 1.106 1999/01/08 17:31:23 eivind Exp $
+ * $Id: swap_pager.c,v 1.101 1998/09/04 08:06:56 dfr Exp $
*/
/*
@@ -81,6 +81,7 @@
static int nswiodone;
int swap_pager_full;
extern int vm_swap_size;
+static int suggest_more_swap = 0;
static int no_swap_space = 1;
static int max_pageout_cluster;
struct rlisthdr swaplist;
@@ -397,6 +398,11 @@ swap_pager_getswapspace(object, amount, rtval)
unsigned location;
vm_swap_size -= amount;
+ if (!suggest_more_swap && (vm_swap_size < btodb(cnt.v_page_count * PAGE_SIZE))) {
+ printf("swap_pager: suggest more swap space: %d MB\n",
+ (2 * cnt.v_page_count * (PAGE_SIZE / 1024)) / 1000);
+ suggest_more_swap = 1;
+ }
if (!rlist_alloc(&swaplist, amount, &location)) {
vm_swap_size += amount;
@@ -1122,6 +1128,22 @@ swap_pager_getpages(object, m, count, reqpage)
}
m[reqpage]->object->last_read = m[count-1]->pindex;
+
+ /*
+ * If we're out of swap space, then attempt to free
+ * some whenever multiple pages are brought in. We
+ * must set the dirty bits so that the page contents
+ * will be preserved.
+ */
+ if (SWAPLOW ||
+ (vm_swap_size < btodb((cnt.v_page_count - cnt.v_wire_count)) * PAGE_SIZE)) {
+ for (i = 0; i < count; i++) {
+ m[i]->dirty = VM_PAGE_BITS_ALL;
+ }
+ swap_pager_freespace(object,
+ m[0]->pindex + paging_offset, count);
+ }
+
} else {
swap_pager_ridpages(m, count, reqpage);
}
@@ -1297,7 +1319,7 @@ swap_pager_putpages(object, m, count, sync, rtvals)
swb[i]->swb_locked--;
}
-#ifdef INVARIANTS
+#if defined(DIAGNOSTIC)
for (i = firstidx; i < lastidx; i++) {
if (reqaddr[i] == SWB_EMPTY) {
printf("I/O to empty block???? -- pindex: %d, i: %d\n",
@@ -1348,9 +1370,11 @@ swap_pager_putpages(object, m, count, sync, rtvals)
}
spc = TAILQ_FIRST(&swap_pager_free);
- KASSERT(spc != NULL,
- ("swap_pager_putpages: free queue is empty, %d expected\n",
- swap_pager_free_count));
+#if defined(DIAGNOSTIC)
+ if (spc == NULL)
+ panic("swap_pager_putpages: free queue is empty, %d expected\n",
+ swap_pager_free_count);
+#endif
TAILQ_REMOVE(&swap_pager_free, spc, spc_list);
swap_pager_free_count--;
@@ -1358,7 +1382,7 @@ swap_pager_putpages(object, m, count, sync, rtvals)
bp = spc->spc_bp;
bzero(bp, sizeof *bp);
bp->b_spc = spc;
- bp->b_xflags = 0;
+ bp->b_vnbufs.le_next = NOLIST;
bp->b_data = (caddr_t) kva;
} else {
spc = NULL;
@@ -1511,14 +1535,12 @@ swap_pager_putpages(object, m, count, sync, rtvals)
}
}
- if (spc != NULL) {
- if (bp->b_rcred != NOCRED)
- crfree(bp->b_rcred);
- if (bp->b_wcred != NOCRED)
- crfree(bp->b_wcred);
- spc_free(spc);
- } else
- relpbuf(bp);
+ if (bp->b_rcred != NOCRED)
+ crfree(bp->b_rcred);
+ if (bp->b_wcred != NOCRED)
+ crfree(bp->b_wcred);
+
+ spc_free(spc);
if (swap_pager_free_pending)
swap_pager_sync();
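
A pattern repeated throughout this commit: KASSERT(expr, (fmt, ...)) checks, which FreeBSD compiles in only under `options INVARIANTS`, are replaced by their older open-coded equivalent, a panic() wrapped in #if defined(DIAGNOSTIC). The two forms are equivalent guarded assertions. A rough userland sketch of both styles, with abort() standing in for the kernel's panic(); compile with -DINVARIANTS or -DDIAGNOSTIC to enable the checks:

#include <stdio.h>
#include <stdlib.h>

/* Newer style: KASSERT compiles away unless INVARIANTS is defined. */
#ifdef INVARIANTS
#define KASSERT(exp, msg) do {          \
	if (!(exp)) {                   \
		printf msg;             \
		abort();                /* panic() in the kernel */ \
	}                               \
} while (0)
#else
#define KASSERT(exp, msg) do {} while (0)
#endif

static void
newer_style(void *spc, int count)
{
	KASSERT(spc != NULL,
	    ("free queue is empty, %d expected\n", count));
}

/* Older style, which this commit reverts to: an open-coded check. */
static void
older_style(void *spc, int count)
{
#if defined(DIAGNOSTIC)
	if (spc == NULL) {
		printf("free queue is empty, %d expected\n", count);
		abort();
	}
#endif
}

int
main(void)
{
	int dummy;

	newer_style(&dummy, 1);
	older_style(&dummy, 1);
	return (0);
}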
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index ca5a53e9f186..34deba7d0f78 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vm_extern.h 8.2 (Berkeley) 1/12/94
- * $Id: vm_extern.h,v 1.38 1998/06/07 17:13:09 dfr Exp $
+ * $Id: vm_extern.h,v 1.37 1998/01/22 17:30:32 dyson Exp $
*/
#ifndef _VM_EXTERN_H_
@@ -61,11 +61,7 @@ int swapon __P((struct proc *, void *, int *));
#endif
void faultin __P((struct proc *p));
-#ifndef VM_STACK
int grow __P((struct proc *, size_t));
-#else
-int grow_stack __P((struct proc *, size_t));
-#endif
int kernacc __P((caddr_t, int, int));
vm_offset_t kmem_alloc __P((vm_map_t, vm_size_t));
vm_offset_t kmem_alloc_pageable __P((vm_map_t, vm_size_t));
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index e3d64f92108d..898ba8c9166e 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -66,7 +66,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_fault.c,v 1.92 1999/01/08 17:31:24 eivind Exp $
+ * $Id: vm_fault.c,v 1.87 1998/08/24 08:39:37 dfr Exp $
*/
/*
@@ -183,6 +183,7 @@ vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags)
vm_page_t marray[VM_FAULT_READ];
int hardfault;
int faultcount;
+ struct proc *p = curproc; /* XXX */
struct faultstate fs;
cnt.v_vm_faults++; /* needs lock XXX */
@@ -275,7 +276,7 @@ RetryFault:;
fs.m = vm_page_lookup(fs.object, fs.pindex);
if (fs.m != NULL) {
- int queue, s;
+ int queue;
/*
* If the page is being brought in, wait for it and
* then retry.
@@ -283,6 +284,8 @@ RetryFault:;
if ((fs.m->flags & PG_BUSY) ||
(fs.m->busy &&
(fs.m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
+ int s;
+
unlock_things(&fs);
s = splvm();
if ((fs.m->flags & PG_BUSY) ||
@@ -298,9 +301,7 @@ RetryFault:;
}
queue = fs.m->queue;
- s = splvm();
vm_page_unqueue_nowakeup(fs.m);
- splx(s);
/*
* Mark page busy for other processes, and the pagedaemon.
@@ -527,8 +528,11 @@ readrest:
vm_object_pip_add(fs.object, 1);
}
}
- KASSERT((fs.m->flags & PG_BUSY) != 0,
- ("vm_fault: not busy after main loop"));
+
+#if defined(DIAGNOSTIC)
+ if ((fs.m->flags & PG_BUSY) == 0)
+ panic("vm_fault: not busy after main loop");
+#endif
/*
* PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
@@ -735,7 +739,7 @@ readrest:
if (wired)
vm_page_wire(fs.m);
else
- vm_page_unwire(fs.m, 1);
+ vm_page_unwire(fs.m);
} else {
vm_page_activate(fs.m);
}
@@ -867,7 +871,7 @@ vm_fault_unwire(map, start, end)
pa = pmap_extract(pmap, va);
if (pa != (vm_offset_t) 0) {
pmap_change_wiring(pmap, va, FALSE);
- vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
+ vm_page_unwire(PHYS_TO_VM_PAGE(pa));
}
}
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index ec844dbba246..e73862d9fa9c 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -59,7 +59,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_glue.c,v 1.79 1998/12/19 08:23:31 julian Exp $
+ * $Id: vm_glue.c,v 1.76 1998/09/29 17:33:59 abial Exp $
*/
#include "opt_rlimit.h"
@@ -229,7 +229,6 @@ vm_fork(p1, p2, flags)
up = p2->p_addr;
-#ifndef COMPAT_LINUX_THREADS
/*
* p_stats and p_sigacts currently point at fields in the user struct
* but not at &u, instead at p_addr. Copy p_sigacts and parts of
@@ -238,23 +237,6 @@ vm_fork(p1, p2, flags)
p2->p_stats = &up->u_stats;
p2->p_sigacts = &up->u_sigacts;
up->u_sigacts = *p1->p_sigacts;
-#else
- /*
- * p_stats currently points at fields in the user struct
- * but not at &u, instead at p_addr. Copy parts of
- * p_stats; zero the rest of p_stats (statistics).
- *
- * If procsig->ps_refcnt is 1 and p2->p_sigacts is NULL we dont' need
- * to share sigacts, so we use the up->u_sigacts.
- */
- p2->p_stats = &up->u_stats;
- if (p2->p_sigacts == NULL) {
- if (p2->p_procsig->ps_refcnt != 1)
- printf ("PID:%d NULL sigacts with refcnt not 1!\n",p2->p_pid);
- p2->p_sigacts = &up->u_sigacts;
- up->u_sigacts = *p1->p_sigacts;
- }
-#endif /* COMPAT_LINUX_THREADS */
bzero(&up->u_stats.pstat_startzero,
(unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
(caddr_t) &up->u_stats.pstat_startzero));
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 829548a2250d..7dbad01728f5 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_map.c,v 1.138 1998/10/25 17:44:58 phk Exp $
+ * $Id: vm_map.c,v 1.136 1998/10/01 20:46:41 jdp Exp $
*/
/*
@@ -75,9 +75,6 @@
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
-#ifdef VM_STACK
-#include <sys/resourcevar.h>
-#endif
#include <vm/vm.h>
#include <vm/vm_param.h>
@@ -95,6 +92,8 @@
#include <vm/swap_pager.h>
#include <vm/vm_zone.h>
+static MALLOC_DEFINE(M_VMMAP, "VM map", "VM map structures");
+
/*
* Virtual memory maps provide for the mapping, protection,
* and sharing of virtual memory objects. In addition,
@@ -541,10 +540,6 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
new_entry->eflags = protoeflags;
new_entry->object.vm_object = object;
new_entry->offset = offset;
-#ifdef VM_STACK
- new_entry->avail_ssize = 0;
-#endif
-
if (object) {
if ((object->ref_count > 1) || (object->shadow_count != 0)) {
vm_object_clear_flag(object, OBJ_ONEMAPPING);
@@ -577,204 +572,6 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
return (KERN_SUCCESS);
}
-#ifdef VM_STACK
-int
-vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
- vm_prot_t prot, vm_prot_t max, int cow)
-{
- vm_map_entry_t prev_entry;
- vm_map_entry_t new_stack_entry;
- vm_size_t init_ssize;
- int rv;
-
- if (VM_MIN_ADDRESS > 0 && addrbos < VM_MIN_ADDRESS)
- return (KERN_NO_SPACE);
-
- if (max_ssize < SGROWSIZ)
- init_ssize = max_ssize;
- else
- init_ssize = SGROWSIZ;
-
- vm_map_lock(map);
-
- /* If addr is already mapped, no go */
- if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
- vm_map_unlock(map);
- return (KERN_NO_SPACE);
- }
-
- /* If we can't accomodate max_ssize in the current mapping,
- * no go. However, we need to be aware that subsequent user
- * mappings might map into the space we have reserved for
- * stack, and currently this space is not protected.
- *
- * Hopefully we will at least detect this condition
- * when we try to grow the stack.
- */
- if ((prev_entry->next != &map->header) &&
- (prev_entry->next->start < addrbos + max_ssize)) {
- vm_map_unlock(map);
- return (KERN_NO_SPACE);
- }
-
- /* We initially map a stack of only init_ssize. We will
- * grow as needed later. Since this is to be a grow
- * down stack, we map at the top of the range.
- *
- * Note: we would normally expect prot and max to be
- * VM_PROT_ALL, and cow to be 0. Possibly we should
- * eliminate these as input parameters, and just
- * pass these values here in the insert call.
- */
- rv = vm_map_insert(map, NULL, 0, addrbos + max_ssize - init_ssize,
- addrbos + max_ssize, prot, max, cow);
-
- /* Now set the avail_ssize amount */
- if (rv == KERN_SUCCESS){
- new_stack_entry = prev_entry->next;
- if (new_stack_entry->end != addrbos + max_ssize ||
- new_stack_entry->start != addrbos + max_ssize - init_ssize)
- panic ("Bad entry start/end for new stack entry");
- else
- new_stack_entry->avail_ssize = max_ssize - init_ssize;
- }
-
- vm_map_unlock(map);
- return (rv);
-}
-
-/* Attempts to grow a vm stack entry. Returns KERN_SUCCESS if the
- * desired address is already mapped, or if we successfully grow
- * the stack. Also returns KERN_SUCCESS if addr is outside the
- * stack range (this is strange, but preserves compatibility with
- * the grow function in vm_machdep.c).
- */
-int
-vm_map_growstack (struct proc *p, vm_offset_t addr)
-{
- vm_map_entry_t prev_entry;
- vm_map_entry_t stack_entry;
- vm_map_entry_t new_stack_entry;
- struct vmspace *vm = p->p_vmspace;
- vm_map_t map = &vm->vm_map;
- vm_offset_t end;
- int grow_amount;
- int rv;
- int is_procstack = 0;
-
- vm_map_lock(map);
-
- /* If addr is already in the entry range, no need to grow.*/
- if (vm_map_lookup_entry(map, addr, &prev_entry)) {
- vm_map_unlock(map);
- return (KERN_SUCCESS);
- }
-
- if ((stack_entry = prev_entry->next) == &map->header) {
- vm_map_unlock(map);
- return (KERN_SUCCESS);
- }
- if (prev_entry == &map->header)
- end = stack_entry->start - stack_entry->avail_ssize;
- else
- end = prev_entry->end;
-
- /* This next test mimics the old grow function in vm_machdep.c.
- * It really doesn't quite make sense, but we do it anyway
- * for compatibility.
- *
- * If not growable stack, return success. This signals the
- * caller to proceed as he would normally with normal vm.
- */
- if (stack_entry->avail_ssize < 1 ||
- addr >= stack_entry->start ||
- addr < stack_entry->start - stack_entry->avail_ssize) {
- vm_map_unlock(map);
- return (KERN_SUCCESS);
- }
-
- /* Find the minimum grow amount */
- grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE);
- if (grow_amount > stack_entry->avail_ssize) {
- vm_map_unlock(map);
- return (KERN_NO_SPACE);
- }
-
- /* If there is no longer enough space between the entries
- * nogo, and adjust the available space. Note: this
- * should only happen if the user has mapped into the
- * stack area after the stack was created, and is
- * probably an error.
- *
- * This also effectively destroys any guard page the user
- * might have intended by limiting the stack size.
- */
- if (grow_amount > stack_entry->start - end) {
- stack_entry->avail_ssize = stack_entry->start - end;
- vm_map_unlock(map);
- return (KERN_NO_SPACE);
- }
-
- if (addr >= (vm_offset_t)vm->vm_maxsaddr)
- is_procstack = 1;
-
- /* If this is the main process stack, see if we're over the
- * stack limit.
- */
- if (is_procstack && (vm->vm_ssize + grow_amount >
- p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
- vm_map_unlock(map);
- return (KERN_NO_SPACE);
- }
-
- /* Round up the grow amount modulo SGROWSIZ */
- grow_amount = roundup (grow_amount, SGROWSIZ);
- if (grow_amount > stack_entry->avail_ssize) {
- grow_amount = stack_entry->avail_ssize;
- }
- if (is_procstack && (vm->vm_ssize + grow_amount >
- p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
- grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
- vm->vm_ssize;
- }
-
- /* Get the preliminary new entry start value */
- addr = stack_entry->start - grow_amount;
-
- /* If this puts us into the previous entry, cut back our growth
- * to the available space. Also, see the note above.
- */
- if (addr < end) {
- stack_entry->avail_ssize = stack_entry->start - end;
- addr = end;
- }
-
- rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
- stack_entry->protection,
- stack_entry->max_protection,
- 0);
-
- /* Adjust the available stack space by the amount we grew. */
- if (rv == KERN_SUCCESS) {
- new_stack_entry = prev_entry->next;
- if (new_stack_entry->end != stack_entry->start ||
- new_stack_entry->start != addr)
- panic ("Bad stack grow start/end in new stack entry");
- else {
- new_stack_entry->avail_ssize = stack_entry->avail_ssize -
- (new_stack_entry->end -
- new_stack_entry->start);
- vm->vm_ssize += new_stack_entry->end -
- new_stack_entry->start;
- }
- }
-
- vm_map_unlock(map);
- return (rv);
-
-}
-#endif
-
/*
* Find sufficient space for `length' bytes in the given map, starting at
* `start'. The map must be locked. Returns 0 on success, 1 on no space.
@@ -2957,9 +2754,9 @@ vm_freeze_copyopts(object, froma, toa)
vm_object_t object;
vm_pindex_t froma, toa;
{
- int rv;
- vm_object_t robject;
- vm_pindex_t idx;
+ int s, rv;
+ vm_object_t robject, robjectn;
+ vm_pindex_t idx, from, to;
if ((object == NULL) ||
((object->flags & OBJ_OPT) == 0))
@@ -2987,10 +2784,12 @@ vm_freeze_copyopts(object, froma, toa)
for (idx = 0; idx < robject->size; idx++) {
+m_outretry:
m_out = vm_page_grab(robject, idx,
VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
if (m_out->valid == 0) {
+m_inretry:
m_in = vm_page_grab(object, bo_pindex + idx,
VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
if (m_in->valid == 0) {
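
The large block removed above is the VM_STACK machinery: vm_map_stack() reserves max_ssize of address space but maps only init_ssize at the top, recording the remaining headroom in avail_ssize, and vm_map_growstack() later inserts new entries below the stack as faults land in the reserved gap. A much-simplified sketch of the growth arithmetic, using plain integers and hypothetical names in place of real map entries, locking, and rlimit checks:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define SGROWSIZ  (128UL * 1024)   /* default stack growth increment */

#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

/* Hypothetical, simplified mirror of the removed vm_map_entry fields. */
struct stack_entry {
	unsigned long start;        /* bottom of the mapped region */
	unsigned long end;          /* top of the stack */
	unsigned long avail_ssize;  /* headroom left for growth */
};

/*
 * Sketch of the removed vm_map_growstack() arithmetic: a fault at
 * `addr`, below the mapped region but within the reserved headroom,
 * grows the mapping downward in SGROWSIZ-rounded steps.
 */
static int
growstack(struct stack_entry *se, unsigned long addr)
{
	unsigned long grow;

	if (addr >= se->start || addr < se->start - se->avail_ssize)
		return (0);     /* outside the growable range: nothing to do */

	grow = ROUNDUP(se->start - addr, PAGE_SIZE);  /* minimum needed */
	grow = ROUNDUP(grow, SGROWSIZ);               /* grow in chunks */
	if (grow > se->avail_ssize)
		grow = se->avail_ssize;

	se->start -= grow;
	se->avail_ssize -= grow;
	return (1);
}

int
main(void)
{
	/* 1 MB reserved, one SGROWSIZ chunk initially mapped at the top. */
	struct stack_entry se = {
		.start = 0x800000UL - SGROWSIZ,
		.end = 0x800000UL,
		.avail_ssize = 0x100000UL - SGROWSIZ,
	};

	growstack(&se, se.start - 1);   /* fault one byte below the stack */
	printf("new start %#lx, headroom %#lx\n", se.start, se.avail_ssize);
	return (0);
}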
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index 4d61a3f8efba..b7c6cd571b01 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_map.h,v 1.32 1998/01/22 17:30:38 dyson Exp $
+ * $Id: vm_map.h,v 1.31 1998/01/17 09:16:52 dyson Exp $
*/
/*
@@ -102,9 +102,6 @@ struct vm_map_entry {
struct vm_map_entry *next; /* next entry */
vm_offset_t start; /* start address */
vm_offset_t end; /* end address */
-#ifdef VM_STACK
- vm_offset_t avail_ssize; /* amt can grow if this is a stack */
-#endif
union vm_map_object object; /* object I point to */
vm_ooffset_t offset; /* offset into object */
u_char eflags; /* map entry flags */
@@ -338,10 +335,6 @@ void vm_map_simplify_entry __P((vm_map_t, vm_map_entry_t));
void vm_init2 __P((void));
int vm_uiomove __P((vm_map_t, vm_object_t, off_t, int, vm_offset_t, int *));
void vm_freeze_copyopts __P((vm_object_t, vm_pindex_t, vm_pindex_t));
-#ifdef VM_STACK
-int vm_map_stack __P((vm_map_t, vm_offset_t, vm_size_t, vm_prot_t, vm_prot_t, int));
-int vm_map_growstack __P((struct proc *p, vm_offset_t addr));
-#endif
#endif
#endif /* _VM_MAP_ */
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index 5bc74bde8e94..4879535c73de 100644
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vm_meter.c 8.4 (Berkeley) 1/4/94
- * $Id: vm_meter.c,v 1.26 1998/08/24 08:39:37 dfr Exp $
+ * $Id: vm_meter.c,v 1.25 1998/03/30 09:56:49 phk Exp $
*/
#include <sys/param.h>
@@ -42,8 +42,6 @@
#include <sys/vmmeter.h>
#include <vm/vm.h>
-#include <vm/vm_prot.h>
-#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
@@ -217,103 +215,3 @@ vmtotal SYSCTL_HANDLER_ARGS
SYSCTL_PROC(_vm, VM_METER, vmmeter, CTLTYPE_OPAQUE|CTLFLAG_RD,
0, sizeof(struct vmtotal), vmtotal, "S,vmtotal", "");
-SYSCTL_NODE(_vm, OID_AUTO, stats, CTLFLAG_RW, 0, "VM meter stats");
-SYSCTL_NODE(_vm_stats, OID_AUTO, sys, CTLFLAG_RW, 0, "VM meter sys stats");
-SYSCTL_NODE(_vm_stats, OID_AUTO, vm, CTLFLAG_RW, 0, "VM meter vm stats");
-SYSCTL_NODE(_vm_stats, OID_AUTO, misc, CTLFLAG_RW, 0, "VM meter misc stats");
-SYSCTL_INT(_vm_stats_sys, OID_AUTO,
- v_swtch, CTLFLAG_RD, &cnt.v_swtch, 0, "Context switches");
-SYSCTL_INT(_vm_stats_sys, OID_AUTO,
- v_trap, CTLFLAG_RD, &cnt.v_trap, 0, "Traps");
-SYSCTL_INT(_vm_stats_sys, OID_AUTO,
- v_syscall, CTLFLAG_RD, &cnt.v_syscall, 0, "Syscalls");
-SYSCTL_INT(_vm_stats_sys, OID_AUTO,
- v_intr, CTLFLAG_RD, &cnt.v_intr, 0, "HW intr");
-SYSCTL_INT(_vm_stats_sys, OID_AUTO,
- v_soft, CTLFLAG_RD, &cnt.v_soft, 0, "SW intr");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_vm_faults, CTLFLAG_RD, &cnt.v_vm_faults, 0, "VM faults");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_cow_faults, CTLFLAG_RD, &cnt.v_cow_faults, 0, "COW faults");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_cow_optim, CTLFLAG_RD, &cnt.v_cow_optim, 0, "Optimized COW faults");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_zfod, CTLFLAG_RD, &cnt.v_zfod, 0, "Zero fill");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_ozfod, CTLFLAG_RD, &cnt.v_ozfod, 0, "Optimized zero fill");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_swapin, CTLFLAG_RD, &cnt.v_swapin, 0, "Swapin operations");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_swapout, CTLFLAG_RD, &cnt.v_swapout, 0, "Swapout operations");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_swappgsin, CTLFLAG_RD, &cnt.v_swappgsin, 0, "Swapin pages");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_swappgsout, CTLFLAG_RD, &cnt.v_swappgsout, 0, "Swapout pages");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_vnodein, CTLFLAG_RD, &cnt.v_vnodein, 0, "Vnodein operations");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_vnodeout, CTLFLAG_RD, &cnt.v_vnodeout, 0, "Vnodeout operations");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_vnodepgsin, CTLFLAG_RD, &cnt.v_vnodepgsin, 0, "Vnodein pages");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_vnodepgsout, CTLFLAG_RD, &cnt.v_vnodepgsout, 0, "Vnodeout pages");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_intrans, CTLFLAG_RD, &cnt.v_intrans, 0, "In transit page blocking");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_reactivated, CTLFLAG_RD, &cnt.v_reactivated, 0, "Reactivated pages");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_pdwakeups, CTLFLAG_RD, &cnt.v_pdwakeups, 0, "Pagedaemon wakeups");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_pdpages, CTLFLAG_RD, &cnt.v_pdpages, 0, "Pagedaemon page scans");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_dfree, CTLFLAG_RD, &cnt.v_dfree, 0, "");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_pfree, CTLFLAG_RD, &cnt.v_pfree, 0, "");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_tfree, CTLFLAG_RD, &cnt.v_tfree, 0, "");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_page_size, CTLFLAG_RD, &cnt.v_page_size, 0, "");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_page_count, CTLFLAG_RD, &cnt.v_page_count, 0, "");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_free_reserved, CTLFLAG_RD, &cnt.v_free_reserved, 0, "");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_free_target, CTLFLAG_RD, &cnt.v_free_target, 0, "");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_free_min, CTLFLAG_RD, &cnt.v_free_min, 0, "");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_free_count, CTLFLAG_RD, &cnt.v_free_count, 0, "");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_wire_count, CTLFLAG_RD, &cnt.v_wire_count, 0, "");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_active_count, CTLFLAG_RD, &cnt.v_active_count, 0, "");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_inactive_target, CTLFLAG_RD, &cnt.v_inactive_target, 0, "");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_inactive_count, CTLFLAG_RD, &cnt.v_inactive_count, 0, "");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_cache_count, CTLFLAG_RD, &cnt.v_cache_count, 0, "");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_cache_min, CTLFLAG_RD, &cnt.v_cache_min, 0, "");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_cache_max, CTLFLAG_RD, &cnt.v_cache_max, 0, "");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_pageout_free_min, CTLFLAG_RD, &cnt.v_pageout_free_min, 0, "");
-SYSCTL_INT(_vm_stats_vm, OID_AUTO,
- v_interrupt_free_min, CTLFLAG_RD, &cnt.v_interrupt_free_min, 0, "");
-SYSCTL_INT(_vm_stats_misc, OID_AUTO,
- zero_page_count, CTLFLAG_RD, &vm_page_zero_count, 0, "");
-#if 0
-SYSCTL_INT(_vm_stats_misc, OID_AUTO,
- page_mask, CTLFLAG_RD, &page_mask, 0, "");
-SYSCTL_INT(_vm_stats_misc, OID_AUTO,
- page_shift, CTLFLAG_RD, &page_shift, 0, "");
-SYSCTL_INT(_vm_stats_misc, OID_AUTO,
- first_page, CTLFLAG_RD, &first_page, 0, "");
-SYSCTL_INT(_vm_stats_misc, OID_AUTO,
- last_page, CTLFLAG_RD, &last_page, 0, "");
-SYSCTL_INT(_vm_stats_misc, OID_AUTO,
- vm_page_bucket_count, CTLFLAG_RD, &vm_page_bucket_count, 0, "");
-SYSCTL_INT(_vm_stats_misc, OID_AUTO,
- vm_page_hash_mask, CTLFLAG_RD, &vm_page_hash_mask, 0, "");
-#endif
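
The block removed above is the vm.stats sysctl tree, which exported the vmmeter counters individually; the SYSCTL_PROC for the aggregate vmtotal structure survives this commit. For reference, userland reads any of the integer nodes with sysctlbyname(3). The sketch below uses one of the names removed here, so it only works on kernels that carry the vm.stats tree:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	unsigned int faults;
	size_t len = sizeof(faults);

	/*
	 * "vm.stats.vm.v_vm_faults" is one of the SYSCTL_INT nodes
	 * removed by this commit.
	 */
	if (sysctlbyname("vm.stats.vm.v_vm_faults", &faults, &len,
	    NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("VM faults since boot: %u\n", faults);
	return (0);
}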
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index ba36e41fc632..6ea214a73526 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -38,7 +38,7 @@
* from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
*
* @(#)vm_mmap.c 8.4 (Berkeley) 1/12/94
- * $Id: vm_mmap.c,v 1.85 1998/12/09 20:22:21 dt Exp $
+ * $Id: vm_mmap.c,v 1.83 1998/09/04 08:06:57 dfr Exp $
*/
/*
@@ -173,19 +173,11 @@ mmap(p, uap)
pos = uap->pos;
/* make sure mapping fits into numeric range etc */
- if ((ssize_t) uap->len < 0 ||
+ if ((pos + size > (vm_offset_t)-PAGE_SIZE) ||
+ (ssize_t) uap->len < 0 ||
((flags & MAP_ANON) && uap->fd != -1))
return (EINVAL);
-#ifdef VM_STACK
- if (flags & MAP_STACK) {
- if ((uap->fd != -1) ||
- ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
- return (EINVAL);
- flags |= MAP_ANON;
- pos = 0;
- }
-#endif
/*
* Align the file position to a page boundary,
* and save its page offset component.
@@ -1025,12 +1017,6 @@ vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
*addr = pmap_addr_hint(object, *addr, size);
}
-#ifdef VM_STACK
- if (flags & MAP_STACK)
- rv = vm_map_stack (map, *addr, size, prot,
- maxprot, docow);
- else
-#endif
rv = vm_map_find(map, object, foff, addr, size, fitit,
prot, maxprot, docow);
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index a1477f222822..f419e2f51282 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_object.c,v 1.137 1999/01/08 17:31:26 eivind Exp $
+ * $Id: vm_object.c,v 1.128 1998/09/04 08:06:57 dfr Exp $
*/
/*
@@ -91,6 +91,7 @@
#include <vm/vm_zone.h>
static void vm_object_qcollapse __P((vm_object_t object));
+static void vm_object_dispose __P((vm_object_t));
/*
* Virtual memory objects maintain the actual data
@@ -119,9 +120,7 @@ static void vm_object_qcollapse __P((vm_object_t object));
*/
struct object_q vm_object_list;
-#ifndef NULL_SIMPLELOCKS
static struct simplelock vm_object_list_lock;
-#endif
static long vm_object_count; /* count of all objects */
vm_object_t kernel_object;
vm_object_t kmem_object;
@@ -242,8 +241,10 @@ vm_object_reference(object)
if (object == NULL)
return;
- KASSERT(!(object->flags & OBJ_DEAD),
- ("vm_object_reference: attempting to reference dead obj"));
+#if defined(DIAGNOSTIC)
+ if (object->flags & OBJ_DEAD)
+ panic("vm_object_reference: attempting to reference dead obj");
+#endif
object->ref_count++;
if (object->type == OBJT_VNODE) {
@@ -260,11 +261,11 @@ vm_object_vndeallocate(object)
vm_object_t object;
{
struct vnode *vp = (struct vnode *) object->handle;
-
- KASSERT(object->type == OBJT_VNODE,
- ("vm_object_vndeallocate: not a vnode object"));
- KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
-#ifdef INVARIANTS
+#if defined(DIAGNOSTIC)
+ if (object->type != OBJT_VNODE)
+ panic("vm_object_vndeallocate: not a vnode object");
+ if (vp == NULL)
+ panic("vm_object_vndeallocate: missing vp");
if (object->ref_count == 0) {
vprint("vm_object_vndeallocate", vp);
panic("vm_object_vndeallocate: bad object reference count");
@@ -294,6 +295,7 @@ void
vm_object_deallocate(object)
vm_object_t object;
{
+ int s;
vm_object_t temp;
while (object != NULL) {
@@ -326,10 +328,12 @@ vm_object_deallocate(object)
vm_object_t robject;
robject = TAILQ_FIRST(&object->shadow_head);
- KASSERT(robject != NULL,
- ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
- object->ref_count,
- object->shadow_count));
+#if defined(DIAGNOSTIC)
+ if (robject == NULL)
+ panic("vm_object_deallocate: ref_count: %d,"
+ " shadow_count: %d",
+ object->ref_count, object->shadow_count);
+#endif
if ((robject->handle == NULL) &&
(robject->type == OBJT_DEFAULT ||
robject->type == OBJT_SWAP)) {
@@ -414,8 +418,10 @@ vm_object_terminate(object)
*/
vm_object_pip_wait(object, "objtrm");
- KASSERT(!object->paging_in_progress,
- ("vm_object_terminate: pageout in progress"));
+#if defined(DIAGNOSTIC)
+ if (object->paging_in_progress != 0)
+ panic("vm_object_terminate: pageout in progress");
+#endif
/*
* Clean and free the pages, as appropriate. All references to the
@@ -436,51 +442,58 @@ vm_object_terminate(object)
vp = (struct vnode *) object->handle;
vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
+
+ /*
+ * Let the pager know object is dead.
+ */
+ vm_pager_deallocate(object);
+
}
- if (object->ref_count != 0)
- panic("vm_object_terminate: object with references, ref_count=%d", object->ref_count);
+ if ((object->type != OBJT_VNODE) && (object->ref_count == 0)) {
- /*
- * Now free any remaining pages. For internal objects, this also
- * removes them from paging queues. Don't free wired pages, just
- * remove them from the object.
- */
- s = splvm();
- while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
+ /*
+ * Now free the pages. For internal objects, this also removes them
+ * from paging queues.
+ */
+ while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
#if !defined(MAX_PERF)
- if (p->busy || (p->flags & PG_BUSY))
- printf("vm_object_terminate: freeing busy page\n");
+ if (p->busy || (p->flags & PG_BUSY))
+ printf("vm_object_terminate: freeing busy page\n");
#endif
- if (p->wire_count == 0) {
vm_page_busy(p);
vm_page_free(p);
cnt.v_pfree++;
- } else {
- vm_page_busy(p);
- vm_page_remove(p);
}
- }
- splx(s);
-
- /*
- * Let the pager know object is dead.
- */
- vm_pager_deallocate(object);
+ /*
+ * Let the pager know object is dead.
+ */
+ vm_pager_deallocate(object);
- /*
- * Remove the object from the global object list.
- */
- simple_lock(&vm_object_list_lock);
- TAILQ_REMOVE(&vm_object_list, object, object_list);
- simple_unlock(&vm_object_list_lock);
+ }
- wakeup(object);
+ if ((object->ref_count == 0) && (object->resident_page_count == 0))
+ vm_object_dispose(object);
+}
- /*
- * Free the space for the object.
- */
- zfree(obj_zone, object);
+/*
+ * vm_object_dispose
+ *
+ * Dispose the object.
+ */
+static void
+vm_object_dispose(object)
+ vm_object_t object;
+{
+ simple_lock(&vm_object_list_lock);
+ TAILQ_REMOVE(&vm_object_list, object, object_list);
+ vm_object_count--;
+ simple_unlock(&vm_object_list_lock);
+ /*
+ * Free the space for the object.
+ */
+ zfree(obj_zone, object);
+ wakeup(object);
}
/*
@@ -516,6 +529,7 @@ vm_object_page_clean(object, start, end, flags)
vm_page_t mab[vm_pageout_page_count];
vm_page_t ma[vm_pageout_page_count];
int curgeneration;
+ struct proc *pproc = curproc; /* XXX */
if (object->type != OBJT_VNODE ||
(object->flags & OBJ_MIGHTBEDIRTY) == 0)
@@ -771,6 +785,7 @@ vm_object_madvise(object, pindex, count, advise)
int count;
int advise;
{
+ int s;
vm_pindex_t end, tpindex;
vm_object_t tobject;
vm_page_t m;
@@ -1292,7 +1307,7 @@ vm_object_page_remove(object, start, end, clean_only)
{
register vm_page_t p, next;
unsigned int size;
- int all;
+ int s, all;
if (object == NULL)
return;
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index c953559da668..46f192389e6f 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
- * $Id: vm_page.c,v 1.115 1999/01/08 17:31:27 eivind Exp $
+ * $Id: vm_page.c,v 1.106 1998/08/24 08:39:38 dfr Exp $
*/
/*
@@ -352,7 +352,6 @@ vm_page_startup(starta, enda, vaddr)
* Distributes the object/offset key pair among hash buckets.
*
* NOTE: This macro depends on vm_page_bucket_count being a power of 2.
- * This routine may not block.
*/
static __inline int
vm_page_hash(object, pindex)
@@ -365,15 +364,10 @@ vm_page_hash(object, pindex)
/*
* vm_page_insert: [ internal use only ]
*
- * Inserts the given mem entry into the object and object list.
- *
- * The pagetables are not updated but will presumably fault the page
- * in if necessary, or if a kernel page the caller will at some point
- * enter the page into the kernel's pmap. We are not allowed to block
- * here so we *can't* do this anyway.
+ * Inserts the given mem entry into the object/object-page
+ * table and object list.
*
* The object and page must be locked, and must be splhigh.
- * This routine may not block.
*/
void
@@ -384,8 +378,10 @@ vm_page_insert(m, object, pindex)
{
register struct pglist *bucket;
- if (m->object != NULL)
+#if !defined(MAX_PERF)
+ if (m->flags & PG_TABLED)
panic("vm_page_insert: already inserted");
+#endif
/*
* Record the object/offset pair in this page
@@ -407,6 +403,7 @@ vm_page_insert(m, object, pindex)
*/
TAILQ_INSERT_TAIL(&object->memq, m, listq);
+ vm_page_flag_set(m, PG_TABLED);
m->object->page_hint = m;
m->object->generation++;
@@ -431,9 +428,6 @@ vm_page_insert(m, object, pindex)
* table and the object page list.
*
* The object and page must be locked, and at splhigh.
- * This routine may not block.
- *
- * I do not think the underlying pmap entry (if any) is removed here.
*/
void
@@ -443,7 +437,7 @@ vm_page_remove(m)
register struct pglist *bucket;
vm_object_t object;
- if (m->object == NULL)
+ if (!(m->flags & PG_TABLED))
return;
#if !defined(MAX_PERF)
@@ -488,8 +482,9 @@ vm_page_remove(m)
object->resident_page_count--;
object->generation++;
-
m->object = NULL;
+
+ vm_page_flag_clear(m, PG_TABLED);
}
/*
@@ -499,7 +494,6 @@ vm_page_remove(m)
* pair specified; if none is found, NULL is returned.
*
* The object must be locked. No side effects.
- * This routine may not block.
*/
vm_page_t
@@ -510,6 +504,7 @@ vm_page_lookup(object, pindex)
register vm_page_t m;
register struct pglist *bucket;
int generation;
+ int s;
/*
* Search the hash table for this object/offset pair
@@ -542,11 +537,7 @@ retry:
* current object to the specified target object/offset.
*
* The object must be locked.
- * This routine may not block.
- *
- * Note: this routine will raise itself to splvm(), the caller need not.
*/
-
void
vm_page_rename(m, new_object, new_pindex)
register vm_page_t m;
@@ -562,14 +553,8 @@ vm_page_rename(m, new_object, new_pindex)
}
/*
- * vm_page_unqueue_nowakeup:
- *
- * vm_page_unqueue() without any wakeup
- *
- * This routine must be called at splhigh().
- * This routine may not block.
+ * vm_page_unqueue without any wakeup
*/
-
void
vm_page_unqueue_nowakeup(m)
vm_page_t m;
@@ -590,14 +575,8 @@ vm_page_unqueue_nowakeup(m)
}
/*
- * vm_page_unqueue:
- *
- * Remove a page from its queue.
- *
- * This routine must be called at splhigh().
- * This routine may not block.
+ * vm_page_unqueue must be called at splhigh();
*/
-
void
vm_page_unqueue(m)
vm_page_t m;
@@ -621,12 +600,7 @@ vm_page_unqueue(m)
}
/*
- * vm_page_list_find:
- *
- * Find a page on the specified queue with color optimization.
- *
- * This routine must be called at splvm().
- * This routine may not block.
+ * Find a page on the specified queue with color optimization.
*/
vm_page_t
vm_page_list_find(basequeue, index)
@@ -680,12 +654,7 @@ vm_page_list_find(basequeue, index)
}
/*
- * vm_page_select:
- *
- * Find a page on the specified queue with color optimization.
- *
- * This routine must be called at splvm().
- * This routine may not block.
+ * Find a page on the specified queue with color optimization.
*/
vm_page_t
vm_page_select(object, pindex, basequeue)
@@ -706,14 +675,9 @@ vm_page_select(object, pindex, basequeue)
}
/*
- * vm_page_select_cache:
- *
- * Find a page on the cache queue with color optimization. As pages
- * might be found, but not applicable, they are deactivated. This
- * keeps us from using potentially busy cached pages.
- *
- * This routine must be called at splvm().
- * This routine may not block.
+ * Find a page on the cache queue with color optimization. As pages
+ * might be found, but not applicable, they are deactivated. This
+ * keeps us from using potentially busy cached pages.
*/
vm_page_t
vm_page_select_cache(object, pindex)
@@ -741,14 +705,8 @@ vm_page_select_cache(object, pindex)
}
/*
- * vm_page_select_free:
- *
- * Find a free or zero page, with specified preference.
- *
- * This routine must be called at splvm().
- * This routine may not block.
+ * Find a free or zero page, with specified preference.
*/
-
static vm_page_t
vm_page_select_free(object, pindex, prefqueue)
vm_object_t object;
@@ -852,11 +810,6 @@ vm_page_select_free(object, pindex, prefqueue)
* VM_ALLOC_ZERO zero page
*
* Object must be locked.
- * This routine may not block.
- *
- * Additional special handling is required when called from an
- * interrupt (VM_ALLOC_INTERRUPT). We are not allowed to mess with
- * the page cache in this case.
*/
vm_page_t
vm_page_alloc(object, pindex, page_req)
@@ -870,8 +823,11 @@ vm_page_alloc(object, pindex, page_req)
int queue, qtype;
int s;
- KASSERT(!vm_page_lookup(object, pindex),
- ("vm_page_alloc: page already allocated"));
+#ifdef DIAGNOSTIC
+ m = vm_page_lookup(object, pindex);
+ if (m)
+ panic("vm_page_alloc: page already allocated");
+#endif
if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
page_req = VM_ALLOC_SYSTEM;
@@ -884,7 +840,10 @@ vm_page_alloc(object, pindex, page_req)
case VM_ALLOC_NORMAL:
if (cnt.v_free_count >= cnt.v_free_reserved) {
m = vm_page_select_free(object, pindex, PQ_FREE);
- KASSERT(m != NULL, ("vm_page_alloc(NORMAL): missing page on free queue\n"));
+#if defined(DIAGNOSTIC)
+ if (m == NULL)
+ panic("vm_page_alloc(NORMAL): missing page on free queue\n");
+#endif
} else {
m = vm_page_select_cache(object, pindex);
if (m == NULL) {
@@ -903,7 +862,10 @@ vm_page_alloc(object, pindex, page_req)
case VM_ALLOC_ZERO:
if (cnt.v_free_count >= cnt.v_free_reserved) {
m = vm_page_select_free(object, pindex, PQ_ZERO);
- KASSERT(m != NULL, ("vm_page_alloc(ZERO): missing page on free queue\n"));
+#if defined(DIAGNOSTIC)
+ if (m == NULL)
+ panic("vm_page_alloc(ZERO): missing page on free queue\n");
+#endif
} else {
m = vm_page_select_cache(object, pindex);
if (m == NULL) {
@@ -924,7 +886,10 @@ vm_page_alloc(object, pindex, page_req)
((cnt.v_cache_count == 0) &&
(cnt.v_free_count >= cnt.v_interrupt_free_min))) {
m = vm_page_select_free(object, pindex, PQ_FREE);
- KASSERT(m != NULL, ("vm_page_alloc(SYSTEM): missing page on free queue\n"));
+#if defined(DIAGNOSTIC)
+ if (m == NULL)
+ panic("vm_page_alloc(SYSTEM): missing page on free queue\n");
+#endif
} else {
m = vm_page_select_cache(object, pindex);
if (m == NULL) {
@@ -943,7 +908,10 @@ vm_page_alloc(object, pindex, page_req)
case VM_ALLOC_INTERRUPT:
if (cnt.v_free_count > 0) {
m = vm_page_select_free(object, pindex, PQ_FREE);
- KASSERT(m != NULL, ("vm_page_alloc(INTERRUPT): missing page on free queue\n"));
+#if defined(DIAGNOSTIC)
+ if (m == NULL)
+ panic("vm_page_alloc(INTERRUPT): missing page on free queue\n");
+#endif
} else {
splx(s);
vm_pageout_deficit++;
@@ -986,13 +954,7 @@ vm_page_alloc(object, pindex, page_req)
m->dirty = 0;
m->queue = PQ_NONE;
- /*
- * vm_page_insert() is safe prior to the splx(). Note also that
- * inserting a page here does not insert it into the pmap (which
- * could cause us to block allocating memory). We cannot block
- * anywhere.
- */
-
+ /* XXX before splx until vm_page_insert is safe */
vm_page_insert(m, object, pindex);
/*
@@ -1022,12 +984,6 @@ vm_page_alloc(object, pindex, page_req)
return (m);
}
-/*
- * vm_wait: (also see VM_WAIT macro)
- *
- * Block until free pages are available for allocation
- */
-
void
vm_wait()
{
@@ -1047,14 +1003,9 @@ vm_wait()
splx(s);
}
-/*
- * vm_page_sleep:
- *
- * Block until page is no longer busy.
- */
-
int
vm_page_sleep(vm_page_t m, char *msg, char *busy) {
+ vm_object_t object = m->object;
int slept = 0;
if ((busy && *busy) || (m->flags & PG_BUSY)) {
int s;
@@ -1075,13 +1026,14 @@ vm_page_sleep(vm_page_t m, char *msg, char *busy) {
* Put the specified page on the active list (if appropriate).
*
* The page queues must be locked.
- * This routine may not block.
*/
void
vm_page_activate(m)
register vm_page_t m;
{
int s;
+ vm_page_t np;
+ vm_object_t object;
s = splvm();
if (m->queue != PQ_ACTIVE) {
@@ -1107,9 +1059,7 @@ vm_page_activate(m)
}
/*
- * helper routine for vm_page_free and vm_page_free_zero.
- *
- * This routine may not block.
+ * helper routine for vm_page_free and vm_page_free_zero
*/
static int
vm_page_freechk_and_unqueue(m)
@@ -1149,7 +1099,6 @@ vm_page_freechk_and_unqueue(m)
m->wire_count, m->pindex);
}
#endif
- printf("vm_page_free: freeing wired page\n");
m->wire_count = 0;
if (m->object)
m->object->wire_count--;
@@ -1176,9 +1125,7 @@ vm_page_freechk_and_unqueue(m)
}
/*
- * helper routine for vm_page_free and vm_page_free_zero.
- *
- * This routine may not block.
+ * helper routine for vm_page_free and vm_page_free_zero
*/
static __inline void
vm_page_free_wakeup()
@@ -1211,7 +1158,6 @@ vm_page_free_wakeup()
* disassociating it with any VM object.
*
* Object and page must be locked prior to entry.
- * This routine may not block.
*/
void
vm_page_free(m)
@@ -1284,7 +1230,6 @@ vm_page_free_zero(m)
* as necessary.
*
* The page queues must be locked.
- * This routine may not block.
*/
void
vm_page_wire(m)
@@ -1292,16 +1237,16 @@ vm_page_wire(m)
{
int s;
- s = splvm();
if (m->wire_count == 0) {
+ s = splvm();
vm_page_unqueue(m);
+ splx(s);
cnt.v_wire_count++;
if (m->object)
m->object->wire_count++;
}
- m->wire_count++;
- splx(s);
(*vm_page_queues[PQ_NONE].lcnt)++;
+ m->wire_count++;
vm_page_flag_set(m, PG_MAPPED);
}
@@ -1312,12 +1257,10 @@ vm_page_wire(m)
* enabling it to be paged again.
*
* The page queues must be locked.
- * This routine may not block.
*/
void
-vm_page_unwire(m, activate)
+vm_page_unwire(m)
register vm_page_t m;
- int activate;
{
int s;
@@ -1329,17 +1272,10 @@ vm_page_unwire(m, activate)
if (m->object)
m->object->wire_count--;
cnt.v_wire_count--;
- if (activate) {
- TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
- m->queue = PQ_ACTIVE;
- (*vm_page_queues[PQ_ACTIVE].lcnt)++;
- cnt.v_active_count++;
- } else {
- TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
- m->queue = PQ_INACTIVE;
- (*vm_page_queues[PQ_INACTIVE].lcnt)++;
- cnt.v_inactive_count++;
- }
+ TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
+ m->queue = PQ_ACTIVE;
+ (*vm_page_queues[PQ_ACTIVE].lcnt)++;
+ cnt.v_active_count++;
}
} else {
#if !defined(MAX_PERF)
@@ -1351,9 +1287,13 @@ vm_page_unwire(m, activate)
/*
- * Move the specified page to the inactive queue.
+ * vm_page_deactivate:
+ *
+ * Returns the given page to the inactive list,
+ * indicating that no physical maps have access
+ * to this page. [Used by the physical mapping system.]
*
- * This routine may not block.
+ * The page queues must be locked.
*/
void
vm_page_deactivate(m)
@@ -1362,7 +1302,11 @@ vm_page_deactivate(m)
int s;
/*
- * Ignore if already inactive.
+ * Only move active pages -- ignore locked or already inactive ones.
+ *
+ * XXX: sometimes we get pages which aren't wired down or on any queue -
+ * we need to put them on the inactive queue also, otherwise we lose
+ * track of them. Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
*/
if (m->queue == PQ_INACTIVE)
return;
@@ -1383,8 +1327,7 @@ vm_page_deactivate(m)
/*
* vm_page_cache
*
- * Put the specified page onto the page cache queue (if appropriate).
- * This routine may not block.
+ * Put the specified page onto the page cache queue (if appropriate).
*/
void
vm_page_cache(m)
@@ -1422,8 +1365,6 @@ vm_page_cache(m)
* Grab a page, waiting until we are waken up due to the page
* changing state. We keep on waiting, if the page continues
* to be in the object. If the page doesn't exist, allocate it.
- *
- * This routine may block.
*/
vm_page_t
vm_page_grab(object, pindex, allocflags)
@@ -1471,7 +1412,7 @@ retrylookup:
/*
* mapping function for valid bits or for dirty bits in
- * a page. May not block.
+ * a page
*/
__inline int
vm_page_bits(int base, int size)
@@ -1493,7 +1434,7 @@ vm_page_bits(int base, int size)
}
/*
- * set a page valid and clean. May not block.
+ * set a page valid and clean
*/
void
vm_page_set_validclean(m, base, size)
@@ -1509,7 +1450,7 @@ vm_page_set_validclean(m, base, size)
}
/*
- * set a page (partially) invalid. May not block.
+ * set a page (partially) invalid
*/
void
vm_page_set_invalid(m, base, size)
@@ -1526,7 +1467,7 @@ vm_page_set_invalid(m, base, size)
}
/*
- * is (partial) page valid? May not block.
+ * is (partial) page valid?
*/
int
vm_page_is_valid(m, base, size)
@@ -1542,10 +1483,6 @@ vm_page_is_valid(m, base, size)
return 0;
}
-/*
- * update dirty bits from pmap/mmu. May not block.
- */
-
void
vm_page_test_dirty(m)
vm_page_t m;
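
One thread running through the vm_page.c hunks: membership of a page in an object is again tracked with an explicit PG_TABLED flag, set in vm_page_insert() and cleared in vm_page_remove(), instead of being inferred from m->object != NULL. A toy sketch of the flag-based bookkeeping, with hypothetical structures standing in for vm_page and vm_object:

#include <stdio.h>
#include <stdlib.h>

#define PG_TABLED 0x04            /* page is in an object */

struct object { int id; };        /* hypothetical stand-in for vm_object */

struct page {                     /* hypothetical stand-in for vm_page */
	struct object *object;
	unsigned int flags;
};

static void
page_insert(struct page *m, struct object *obj)
{
	if (m->flags & PG_TABLED) {
		fprintf(stderr, "page_insert: already inserted\n");
		abort();          /* panic() in the kernel */
	}
	m->object = obj;
	m->flags |= PG_TABLED;    /* the flag is authoritative, not the pointer */
}

static void
page_remove(struct page *m)
{
	if (!(m->flags & PG_TABLED))
		return;           /* not in any object: nothing to undo */
	m->object = NULL;
	m->flags &= ~PG_TABLED;
}

int
main(void)
{
	struct object obj = { 1 };
	struct page m = { NULL, 0 };

	page_insert(&m, &obj);
	page_remove(&m);
	page_remove(&m);          /* second removal is a harmless no-op */
	printf("flags after removal: %#x\n", m.flags);
	return (0);
}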
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 3149391d9127..32c83410f73f 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_page.h,v 1.48 1998/10/28 13:37:02 dg Exp $
+ * $Id: vm_page.h,v 1.45 1998/09/01 17:12:19 wollman Exp $
*/
/*
@@ -203,6 +203,7 @@ extern struct vpgqueues {
*/
#define PG_BUSY 0x01 /* page is in transit (O) */
#define PG_WANTED 0x02 /* someone is waiting for page (O) */
+#define PG_TABLED 0x04 /* page is in an object (O) */
#define PG_FICTITIOUS 0x08 /* physical page doesn't exist (O) */
#define PG_WRITEABLE 0x10 /* page is mapped writeable */
#define PG_MAPPED 0x20 /* page is mapped */
@@ -361,7 +362,7 @@ vm_page_t vm_page_lookup __P((vm_object_t, vm_pindex_t));
void vm_page_remove __P((vm_page_t));
void vm_page_rename __P((vm_page_t, vm_object_t, vm_pindex_t));
vm_offset_t vm_page_startup __P((vm_offset_t, vm_offset_t, vm_offset_t));
-void vm_page_unwire __P((vm_page_t, int));
+void vm_page_unwire __P((vm_page_t));
void vm_page_wire __P((vm_page_t));
void vm_page_unqueue __P((vm_page_t));
void vm_page_unqueue_nowakeup __P((vm_page_t));
@@ -391,8 +392,12 @@ vm_page_hold(vm_page_t mem)
static __inline void
vm_page_unhold(vm_page_t mem)
{
+#ifdef DIAGNOSTIC
+ if (--mem->hold_count < 0)
+ panic("vm_page_unhold: hold count < 0!!!");
+#else
--mem->hold_count;
- KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
+#endif
}
static __inline void
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 606981f819e4..a9e9cfbe1df5 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_pageout.c,v 1.128 1998/10/25 17:44:59 phk Exp $
+ * $Id: vm_pageout.c,v 1.126 1998/09/04 08:06:57 dfr Exp $
*/
/*
@@ -155,19 +155,19 @@ static int vm_swap_idle_enabled=0;
#endif
SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
- CTLFLAG_RW, &vm_pageout_algorithm_lru, 0, "LRU page mgmt");
+ CTLFLAG_RW, &vm_pageout_algorithm_lru, 0, "");
SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
- CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");
+ CTLFLAG_RW, &vm_pageout_stats_max, 0, "");
SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
- CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");
+ CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "");
SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
- CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");
+ CTLFLAG_RW, &vm_pageout_stats_interval, 0, "");
SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
- CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");
+ CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "");
#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
@@ -176,19 +176,19 @@ SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
- CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
+ CTLFLAG_RW, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
- CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
+ CTLFLAG_RW, &vm_swap_idle_enabled, 0, "");
#endif
SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
- CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");
+ CTLFLAG_RW, &defer_swap_pageouts, 0, "");
SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
- CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
+ CTLFLAG_RW, &disable_swap_pageouts, 0, "");
SYSCTL_INT(_vm, OID_AUTO, max_page_launder,
- CTLFLAG_RW, &max_page_launder, 0, "Maximum number of pages to clean per pass");
+ CTLFLAG_RW, &max_page_launder, 0, "");
#define VM_PAGEOUT_PAGE_COUNT 16
@@ -1221,6 +1221,7 @@ vm_pageout()
* The pageout daemon is never done, so loop forever.
*/
while (TRUE) {
+ int inactive_target;
int error;
int s = splvm();
if (!vm_pages_needed ||
@@ -1274,6 +1275,7 @@ vm_req_vmdaemon()
static void
vm_daemon()
{
+ vm_object_t object;
struct proc *p;
while (TRUE) {
diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c
index 18df05d88e66..21267e00fb19 100644
--- a/sys/vm/vm_pager.c
+++ b/sys/vm/vm_pager.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_pager.c,v 1.39 1998/10/31 15:31:29 peter Exp $
+ * $Id: vm_pager.c,v 1.37 1998/03/16 01:56:01 dyson Exp $
*/
/*
@@ -71,7 +71,6 @@
#include <sys/param.h>
#include <sys/systm.h>
-#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/malloc.h>
@@ -214,7 +213,7 @@ vm_pager_bufferinit()
for (i = 0; i < nswbuf; i++, bp++) {
TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
bp->b_rcred = bp->b_wcred = NOCRED;
- bp->b_xflags = 0;
+ bp->b_vnbufs.le_next = NOLIST;
}
swapbkva = kmem_alloc_pageable(pager_map, nswbuf * MAXPHYS);
@@ -337,7 +336,7 @@ initpbuf(struct buf *bp) {
bp->b_data = (caddr_t) (MAXPHYS * (bp - swbuf)) + swapbkva;
bp->b_kvabase = bp->b_data;
bp->b_kvasize = MAXPHYS;
- bp->b_xflags = 0;
+ bp->b_vnbufs.le_next = NOLIST;
}
/*
diff --git a/sys/vm/vm_swap.c b/sys/vm/vm_swap.c
index bfcebdc028c9..10488d9a2184 100644
--- a/sys/vm/vm_swap.c
+++ b/sys/vm/vm_swap.c
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vm_swap.c 8.5 (Berkeley) 2/17/94
- * $Id: vm_swap.c,v 1.56 1998/07/04 22:30:26 julian Exp $
+ * $Id: vm_swap.c,v 1.55 1998/07/04 20:45:42 julian Exp $
*/
#include "opt_devfs.h"
@@ -203,8 +203,8 @@ swapon(p, uap)
switch (vp->v_type) {
case VBLK:
- dev = vp->v_rdev;
- if (major(dev) >= nblkdev || bdevsw[major(dev)] == NULL) {
+ dev = (dev_t) vp->v_rdev;
+ if (major(dev) >= nblkdev) {
error = ENXIO;
break;
}
@@ -264,11 +264,14 @@ swaponvp(p, vp, dev, nblks)
}
return EINVAL;
found:
+ if (dev != NODEV && (major(dev) >= nblkdev))
+ return (ENXIO);
+
error = VOP_OPEN(vp, FREAD | FWRITE, p->p_ucred, p);
if (error)
return (error);
- if (nblks == 0 && dev != NODEV && (bdevsw[major(dev)]->d_psize == 0 ||
+ if (nblks == 0 && (bdevsw[major(dev)]->d_psize == 0 ||
(nblks = (*bdevsw[major(dev)]->d_psize) (dev)) == -1)) {
(void) VOP_CLOSE(vp, FREAD | FWRITE, p->p_ucred, p);
return (ENXIO);
diff --git a/sys/vm/vm_zone.c b/sys/vm/vm_zone.c
index 0d684a74d321..11a7ae079708 100644
--- a/sys/vm/vm_zone.c
+++ b/sys/vm/vm_zone.c
@@ -11,7 +11,7 @@
* 2. Absolutely no warranty of function or purpose is made by the author
* John S. Dyson.
*
- * $Id: vm_zone.c,v 1.25 1999/01/08 17:31:29 eivind Exp $
+ * $Id: vm_zone.c,v 1.21 1998/04/25 04:50:01 dyson Exp $
*/
#include <sys/param.h>
@@ -194,7 +194,7 @@ zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems)
z->zitems = NULL;
for (i = 0; i < nitems; i++) {
((void **) item)[0] = z->zitems;
-#ifdef INVARIANTS
+#if defined(DIAGNOSTIC)
((void **) item)[1] = (void *) ZENTRY_FREE;
#endif
z->zitems = item;
@@ -357,7 +357,7 @@ _zget(vm_zone_t z)
nitems -= 1;
for (i = 0; i < nitems; i++) {
((void **) item)[0] = z->zitems;
-#ifdef INVARIANTS
+#if defined(DIAGNOSTIC)
((void **) item)[1] = (void *) ZENTRY_FREE;
#endif
z->zitems = item;
@@ -367,7 +367,7 @@ _zget(vm_zone_t z)
} else if (z->zfreecnt > 0) {
item = z->zitems;
z->zitems = ((void **) item)[0];
-#ifdef INVARIANTS
+#if defined(DIAGNOSTIC)
if (((void **) item)[1] != (void *) ZENTRY_FREE)
zerror(ZONE_ERROR_NOTFREE);
((void **) item)[1] = 0;
@@ -388,8 +388,7 @@ sysctl_vm_zone SYSCTL_HANDLER_ARGS
char tmpbuf[128];
char tmpname[14];
- snprintf(tmpbuf, sizeof(tmpbuf),
- "\nITEM SIZE LIMIT USED FREE REQUESTS\n");
+ sprintf(tmpbuf, "\nITEM SIZE LIMIT USED FREE REQUESTS\n");
error = SYSCTL_OUT(req, tmpbuf, strlen(tmpbuf));
if (error)
return (error);
@@ -414,7 +413,7 @@ sysctl_vm_zone SYSCTL_HANDLER_ARGS
tmpbuf[0] = '\n';
}
- snprintf(tmpbuf + offset, sizeof(tmpbuf) - offset,
+ sprintf(tmpbuf + offset,
"%s %6.6u, %8.8u, %6.6u, %6.6u, %8.8u\n",
tmpname, curzone->zsize, curzone->zmax,
(curzone->ztotal - curzone->zfreecnt),
@@ -432,7 +431,7 @@ sysctl_vm_zone SYSCTL_HANDLER_ARGS
return (0);
}
-#ifdef INVARIANT_SUPPORT
+#if defined(DIAGNOSTIC)
void
zerror(int error)
{
@@ -460,8 +459,8 @@ SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD, \
NULL, 0, sysctl_vm_zone, "A", "Zone Info");
SYSCTL_INT(_vm, OID_AUTO, zone_kmem_pages,
- CTLFLAG_RD, &zone_kmem_pages, 0, "Number of interrupt safe pages allocated by zone");
+ CTLFLAG_RD, &zone_kmem_pages, 0, "");
SYSCTL_INT(_vm, OID_AUTO, zone_kmem_kvaspace,
- CTLFLAG_RD, &zone_kmem_kvaspace, 0, "KVA space allocated by zone");
+ CTLFLAG_RD, &zone_kmem_kvaspace, 0, "");
SYSCTL_INT(_vm, OID_AUTO, zone_kern_pages,
- CTLFLAG_RD, &zone_kern_pages, 0, "Number of non-interrupt safe pages allocated by zone");
+ CTLFLAG_RD, &zone_kern_pages, 0, "");
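
The ((void **) item)[0] / [1] expressions that keep switching guard macros in this file are the zone allocator's intrusive free list: word 0 of a free item points at the next free item, and when checking is compiled in, word 1 holds a ZENTRY_FREE canary so freeing an already-free item, or handing out a corrupted one, is caught. A userland sketch of the idiom, with an arbitrary canary value; compile with -DDIAGNOSTIC to enable the checks:

#include <stdio.h>
#include <stdlib.h>

#define ZENTRY_FREE ((void *)0xdecafbadUL)  /* arbitrary canary value */

static void *zitems;                        /* head of the intrusive free list */

static void
zfree_sketch(void *item)
{
#if defined(DIAGNOSTIC)
	if (((void **)item)[1] == ZENTRY_FREE) {
		fprintf(stderr, "zone: freeing already-free entry\n");
		abort();
	}
	((void **)item)[1] = ZENTRY_FREE;   /* poison word 1 */
#endif
	((void **)item)[0] = zitems;        /* word 0 links the list */
	zitems = item;
}

static void *
zalloc_sketch(void)
{
	void *item = zitems;

	if (item == NULL)
		return (NULL);              /* list empty */
#if defined(DIAGNOSTIC)
	if (((void **)item)[1] != ZENTRY_FREE) {
		fprintf(stderr, "zone: entry was not marked free\n");
		abort();
	}
	((void **)item)[1] = NULL;          /* clear the canary */
#endif
	zitems = ((void **)item)[0];        /* unlink */
	return (item);
}

int
main(void)
{
	void *a = malloc(2 * sizeof(void *));  /* room for both words */

	zfree_sketch(a);
	printf("allocated back: %p\n", zalloc_sketch());
	free(a);
	return (0);
}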
diff --git a/sys/vm/vm_zone.h b/sys/vm/vm_zone.h
index 8fe91d1e1aaf..55c54d37f4f0 100644
--- a/sys/vm/vm_zone.h
+++ b/sys/vm/vm_zone.h
@@ -11,10 +11,10 @@
* 2. Absolutely no warranty of function or purpose is made by the author
* John S. Dyson.
*
- * $Id: vm_zone.h,v 1.11 1999/01/08 17:31:30 eivind Exp $
+ * $Id: vm_zone.c,v 1.20 1998/04/15 17:47:40 bde Exp $
*/
-#ifndef _SYS_ZONE_H
+#if !defined(_SYS_ZONE_H)
#define _SYS_ZONE_H
@@ -76,7 +76,7 @@ _zalloc(vm_zone_t z)
{
void *item;
-#ifdef INVARIANTS
+#if defined(DIAGNOSTIC)
if (z == 0)
zerror(ZONE_ERROR_INVALID);
#endif
@@ -86,7 +86,7 @@ _zalloc(vm_zone_t z)
item = z->zitems;
z->zitems = ((void **) item)[0];
-#ifdef INVARIANTS
+#if defined(DIAGNOSTIC)
if (((void **) item)[1] != (void *) ZENTRY_FREE)
zerror(ZONE_ERROR_NOTFREE);
((void **) item)[1] = 0;
@@ -101,7 +101,7 @@ static __inline__ void
_zfree(vm_zone_t z, void *item)
{
((void **) item)[0] = z->zitems;
-#ifdef INVARIANTS
+#if defined(DIAGNOSTIC)
if (((void **) item)[1] == (void *) ZENTRY_FREE)
zerror(ZONE_ERROR_ALREADYFREE);
((void **) item)[1] = (void *) ZENTRY_FREE;
@@ -123,7 +123,7 @@ zalloc(vm_zone_t z)
static __inline__ void
zfree(vm_zone_t z, void *item)
{
-#ifdef SMP
+#if defined(SMP)
zfreei(z, item);
#else
_zfree(z, item);
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index fba7e2fbec27..23affb7cce1d 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -38,7 +38,7 @@
* SUCH DAMAGE.
*
* from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
- * $Id: vnode_pager.c,v 1.100 1998/10/13 08:24:44 dg Exp $
+ * $Id: vnode_pager.c,v 1.99 1998/09/28 23:58:10 rvb Exp $
*/
/*
@@ -489,7 +489,7 @@ vnode_pager_input_old(object, m)
auio.uio_segflg = UIO_SYSSPACE;
auio.uio_rw = UIO_READ;
auio.uio_resid = size;
- auio.uio_procp = curproc;
+ auio.uio_procp = (struct proc *) 0;
error = VOP_READ(object->handle, &auio, 0, curproc->p_ucred);
if (!error) {