Diffstat (limited to 'sys')
-rw-r--r--  sys/kern/vfs_bio.c      60
-rw-r--r--  sys/kern/vfs_cluster.c  21
2 files changed, 71 insertions, 10 deletions
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index c4f0ceaea7e0..db24553d5309 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -18,7 +18,7 @@
* 5. Modifications may be freely made to this file if the above conditions
* are met.
*
- * $Id: vfs_bio.c,v 1.62 1995/09/04 00:20:13 dyson Exp $
+ * $Id: vfs_bio.c,v 1.63 1995/09/09 18:10:14 davidg Exp $
*/
/*
@@ -485,10 +485,23 @@ brelse(struct buf * bp)
m->flags &= ~PG_WANTED;
}
vm_page_test_dirty(m);
- if ((m->dirty & m->valid) == 0 &&
+ /*
+ * if page isn't valid, no sense in keeping it around
+ */
+ if (m->valid == 0) {
+ pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
+ vm_page_free(m);
+ /*
+ * if page isn't dirty and hasn't been referenced by
+ * a process, then cache it
+ */
+ } else if ((m->dirty & m->valid) == 0 &&
(m->flags & PG_REFERENCED) == 0 &&
!pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
vm_page_cache(m);
+ /*
+ * otherwise activate it
+ */
} else if ((m->flags & PG_ACTIVE) == 0) {
vm_page_activate(m);
m->act_count = 0;
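
Editorial note: the brelse() hunk above turns a two-way choice (cache or activate) into a three-way page disposition: free pages with no valid data, cache pages that are clean and unreferenced, activate everything else. Below is a minimal userland sketch of just that decision order; the struct and the printed actions are hypothetical stand-ins for the kernel's vm_page machinery, not the real interfaces.

#include <stdio.h>

struct page {
        unsigned valid;         /* bitmask of valid 512-byte chunks */
        unsigned dirty;         /* bitmask of dirty 512-byte chunks */
        int referenced;         /* PG_REFERENCED or pmap-referenced */
        int active;             /* already on the active queue */
};

static void
dispose(struct page *m)
{
        if (m->valid == 0) {
                /* nothing valid: no sense keeping the page around */
                printf("free\n");
        } else if ((m->dirty & m->valid) == 0 && !m->referenced) {
                /* clean and unreferenced: move to the cache queue */
                printf("cache\n");
        } else if (!m->active) {
                /* dirty or recently used: keep it active */
                printf("activate\n");
        }
}

int
main(void)
{
        struct page invalid = { 0, 0, 0, 0 };
        struct page clean = { 0xff, 0, 0, 0 };
        struct page dirty = { 0xff, 0x01, 0, 0 };

        dispose(&invalid);      /* free */
        dispose(&clean);        /* cache */
        dispose(&dirty);        /* activate */
        return (0);
}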
@@ -871,11 +884,50 @@ loop:
}
bp->b_flags |= B_BUSY | B_CACHE;
bremfree(bp);
+
/*
- * check for size inconsistancies
+ * check for size inconsistencies (note that they shouldn't happen,
+ * but they do when filesystems don't handle size changes correctly.)
+ * We are conservative with metadata and don't just extend the buffer,
+ * but instead write it out and re-constitute it.
*/
if (bp->b_bcount != size) {
- allocbuf(bp, size);
+ if (bp->b_flags & B_VMIO) {
+ allocbuf(bp, size);
+ } else {
+ bp->b_flags |= B_NOCACHE;
+ VOP_BWRITE(bp);
+ goto loop;
+ }
+ }
+ /*
+ * make sure that all pages in the buffer are valid, if they
+ * aren't, clear the cache flag.
+ * ASSUMPTION:
+ * if the buffer is greater than 1 page in size, it is assumed
+ * that the buffer address starts on a page boundary...
+ */
+ if (bp->b_flags & B_VMIO) {
+ int szleft, i;
+ szleft = size;
+ for (i = 0; i < bp->b_npages; i++) {
+ if (szleft > PAGE_SIZE) {
+ if ((bp->b_pages[i]->valid & VM_PAGE_BITS_ALL) !=
+ VM_PAGE_BITS_ALL) {
+ bp->b_flags &= ~B_CACHE;
+ break;
+ }
+ szleft -= PAGE_SIZE;
+ } else {
+ if (!vm_page_is_valid(bp->b_pages[i],
+ (((vm_offset_t) bp->b_data) & PAGE_MASK),
+ szleft)) {
+ bp->b_flags &= ~B_CACHE;
+ break;
+ }
+ szleft = 0;
+ }
+ }
}
splx(s);
return (bp);
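
Editorial note: the validity scan added to getblk() walks the buffer's pages, demanding fully valid pages for every whole page of the request and a partial-range check on the tail. The standalone sketch below models that walk; PAGE_SIZE, the per-page valid bitmap (one bit per 512-byte chunk, so 0xff plays the role of VM_PAGE_BITS_ALL on 4K pages), and page_is_valid() are simplified stand-ins, and the tail-offset handling leans on the same page-alignment assumption the patch's comment states.

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096
#define PAGE_MASK (PAGE_SIZE - 1)
#define PAGE_BITS_ALL 0xff      /* 8 x 512-byte chunks per 4K page */

/* is the byte range [off, off + len) of a page valid? */
static int
page_is_valid(unsigned valid, size_t off, size_t len)
{
        unsigned need = 0;
        size_t i;

        for (i = off / 512; i <= (off + len - 1) / 512; i++)
                need |= 1u << i;
        return ((valid & need) == need);
}

/* returns 1 when every byte of the buffer is valid (keep B_CACHE) */
static int
buf_fully_valid(const unsigned *pagebits, int npages, size_t data_off,
    size_t size)
{
        size_t szleft = size;
        int i;

        for (i = 0; i < npages; i++) {
                if (szleft > PAGE_SIZE) {
                        /* a fully spanned page must be entirely valid */
                        if (pagebits[i] != PAGE_BITS_ALL)
                                return (0);
                        szleft -= PAGE_SIZE;
                } else {
                        /* tail page: only the covered range must be valid */
                        return (page_is_valid(pagebits[i], data_off, szleft));
                }
        }
        return (1);
}

int
main(void)
{
        unsigned pages[2] = { 0xff, 0x0f };     /* 2nd page half valid */

        /* 4K + 2K request: tail needs only the low four bits -> 1 */
        printf("%d\n", buf_fully_valid(pages, 2, 0, PAGE_SIZE + 2048));
        /* 4K + 3K request: tail needs six bits -> 0 */
        printf("%d\n", buf_fully_valid(pages, 2, 0, PAGE_SIZE + 3072));
        return (0);
}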
diff --git a/sys/kern/vfs_cluster.c b/sys/kern/vfs_cluster.c
index a78a5e8ac83b..b0aac13f349a 100644
--- a/sys/kern/vfs_cluster.c
+++ b/sys/kern/vfs_cluster.c
@@ -33,7 +33,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_cluster.c 8.7 (Berkeley) 2/13/94
- * $Id: vfs_cluster.c,v 1.19 1995/09/03 20:32:52 dyson Exp $
+ * $Id: vfs_cluster.c,v 1.20 1995/09/04 00:20:15 dyson Exp $
*/
#include <sys/param.h>
@@ -290,7 +290,7 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run)
panic("cluster_rbuild: size %d != filesize %d\n",
size, vp->v_mount->mnt_stat.f_iosize);
#endif
- if (size * (lbn + run + 1) > filesize)
+ if (size * (lbn + run) > filesize)
--run;
tbp = getblk(vp, lbn, size, 0, 0);
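
Editorial note: the one-character change above fixes an off-by-one in the read-ahead trim. With block b covering bytes [b*size, (b+1)*size), the old test size*(lbn+run+1) > filesize gave up one block of read-ahead that still fit entirely inside the file. A sketch of the boundary arithmetic follows; the function name and the worked numbers are illustrative only.

#include <stdio.h>

/* trim a read-ahead run so it never extends past end of file */
static int
trim_run(long lbn, int run, long size, long filesize)
{
        /*
         * the old test was size * (lbn + run + 1) > filesize, which
         * trimmed even when the run still fit inside the file
         */
        if (size * (lbn + run) > filesize)
                --run;
        return (run);
}

int
main(void)
{
        /* 8K blocks, 80K file (blocks 0 through 9) */
        printf("%d\n", trim_run(5, 5, 8192, 81920));    /* 5: fits */
        printf("%d\n", trim_run(5, 6, 8192, 81920));    /* trimmed: 5 */
        return (0);
}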
@@ -313,8 +313,8 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run)
bp->b_lblkno = lbn;
pbgetvp(vp, bp);
- b_save = malloc(sizeof(struct buf *) * run + sizeof(struct cluster_save),
- M_SEGMENT, M_WAITOK);
+ b_save = malloc(sizeof(struct buf *) * run +
+ sizeof(struct cluster_save), M_SEGMENT, M_WAITOK);
b_save->bs_nchildren = 0;
b_save->bs_children = (struct buf **) (b_save + 1);
bp->b_saveaddr = b_save;
@@ -328,6 +328,7 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run)
if (i != 0) {
if ((bp->b_npages * PAGE_SIZE) + size > MAXPHYS)
break;
+
if (incore(vp, lbn + i))
break;
tbp = getblk(vp, lbn + i, size, 0, 0);
@@ -345,6 +346,10 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run)
}
if (j != tbp->b_npages) {
+ /*
+ * force buffer to be re-constituted later
+ */
+ tbp->b_flags |= B_RELBUF;
brelse(tbp);
break;
}
@@ -364,13 +369,17 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run)
m = tbp->b_pages[j];
++m->busy;
++m->object->paging_in_progress;
- if (m->valid == VM_PAGE_BITS_ALL) {
+ if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) {
m = bogus_page;
}
if ((bp->b_npages == 0) ||
- (bp->b_pages[bp->b_npages - 1] != m)) {
+ (bp->b_bufsize & PAGE_MASK) == 0) {
bp->b_pages[bp->b_npages] = m;
bp->b_npages++;
+ } else {
+ if (tbp->b_npages > 1) {
+ panic("cluster_rbuild: page unaligned filesystems not supported");
+ }
}
}
bp->b_bcount += tbp->b_bcount;
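
Editorial note: the final hunk changes when a component buffer's page is appended to the cluster's page list. Instead of comparing against the previous page pointer, the code appends only when the cluster built so far ends on a page boundary; if it ends mid-page, the incoming buffer's first page is already on the list, and a multi-page buffer in that state cannot be merged at all, hence the panic. A toy demonstration of that predicate, with PAGE_SIZE fixed at 4K for the example:

#include <stdio.h>

#define PAGE_SIZE 4096
#define PAGE_MASK (PAGE_SIZE - 1)

static void
classify(long bufsize)
{
        if ((bufsize & PAGE_MASK) == 0)
                printf("%ld: page-aligned, append the new page\n", bufsize);
        else
                printf("%ld: ends mid-page, first page is shared\n", bufsize);
}

int
main(void)
{
        classify(4 * PAGE_SIZE);        /* cluster ends page-aligned */
        classify(4 * PAGE_SIZE + 2048); /* ends mid-page (2K fragment) */
        return (0);
}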