Diffstat (limited to 'sys/kern')
-rw-r--r-- | sys/kern/vfs_bio.c     | 10
-rw-r--r-- | sys/kern/vfs_cluster.c |  2
2 files changed, 4 insertions, 8 deletions
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 0b4a643f0c19..e1c0f9eb9413 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -882,13 +882,10 @@ bwrite(struct buf * bp)
int rtval = bufwait(bp);
brelse(bp);
return (rtval);
- } else if ((oldflags & B_NOWDRAIN) == 0) {
+ } else {
/*
* don't allow the async write to saturate the I/O
- * system. Deadlocks can occur only if a device strategy
- * routine (like in MD) turns around and issues another
- * high-level write, in which case B_NOWDRAIN is expected
- * to be set. Otherwise we will not deadlock here because
+ * system. We will not deadlock here because
* we are blocking waiting for I/O that is already in-progress
* to complete.
*/
@@ -1461,8 +1458,7 @@ brelse(struct buf * bp)
if (bp->b_bufsize || bp->b_kvasize)
bufspacewakeup();
- bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF |
- B_DIRECT | B_NOWDRAIN);
+ bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF | B_DIRECT);
if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
panic("brelse: not dirty");
/* unlock */
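
The comment retained in bwrite() above describes the throttle that remains after B_NOWDRAIN goes away: an async write may block until enough already-issued I/O completes, and that cannot deadlock because the completions arrive on their own from the I/O done path. The fragment below is a minimal userspace model of that idea, not the kernel code; it borrows the runningbufspace/hirunningspace names for readability, and the threshold, sizes, and threading are assumptions made for the example.

/*
 * Simplified model of the async-write throttle described in bwrite()'s
 * comment.  Names are borrowed from the kernel for readability; the
 * values and the pthread machinery are illustrative only.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t rb_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  rb_cv   = PTHREAD_COND_INITIALIZER;
static long runningbufspace;                    /* async write bytes in flight */
static const long hirunningspace = 256 * 1024;  /* saturation threshold */

/* Completion path: drop the in-flight count and wake throttled writers. */
static void
write_done(long size)
{
        pthread_mutex_lock(&rb_lock);
        runningbufspace -= size;
        pthread_cond_broadcast(&rb_cv);
        pthread_mutex_unlock(&rb_lock);
}

/* Issue path: account for the write, then block while I/O is saturated. */
static void
issue_async_write(long size)
{
        pthread_mutex_lock(&rb_lock);
        runningbufspace += size;
        /* The write itself would be handed to the device at this point. */
        while (runningbufspace > hirunningspace)
                pthread_cond_wait(&rb_cv, &rb_lock);
        pthread_mutex_unlock(&rb_lock);
}

/* Stand-in for an I/O completion arriving from another context. */
static void *
completion_thread(void *arg)
{
        usleep(10000);                  /* pretend the device takes a while */
        write_done(*(long *)arg);
        return (NULL);
}

int
main(void)
{
        static long size = 512 * 1024;  /* larger than hirunningspace */
        pthread_t tid;

        pthread_create(&tid, NULL, completion_thread, &size);
        issue_async_write(size);        /* blocks until the completion fires */
        pthread_join(tid, NULL);
        printf("in flight after completion: %ld\n", runningbufspace);
        return (0);
}

The completion side here stands in for the buffer I/O done path; because it runs independently of the issuer, the wait in the issue path always makes progress, which is exactly the "I/O that is already in-progress" argument in the comment above.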
diff --git a/sys/kern/vfs_cluster.c b/sys/kern/vfs_cluster.c
index 85fb8b339ffc..72e80d63ece7 100644
--- a/sys/kern/vfs_cluster.c
+++ b/sys/kern/vfs_cluster.c
@@ -839,7 +839,7 @@ cluster_wbuild(vp, size, start_lbn, len)
bp->b_data = (char *)((vm_offset_t)bp->b_data |
((vm_offset_t)tbp->b_data & PAGE_MASK));
bp->b_flags |= B_CLUSTER |
- (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT | B_NOWDRAIN));
+ (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
bp->b_iodone = cluster_callback;
pbgetvp(vp, bp);
/*
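
For context on the vfs_cluster.c hunk: when a cluster buffer is assembled, only an explicit subset of the first component buffer's flags is carried over into the cluster header, and this change drops B_NOWDRAIN from that subset. The standalone fragment below illustrates that whitelist-style flag inheritance; the flag values are invented for the example and do not match the kernel's definitions.

/*
 * Illustration of whitelist-style flag inheritance, as in cluster_wbuild()
 * above.  Flag values are made up for this sketch.
 */
#include <stdio.h>

#define B_CLUSTER       0x01
#define B_VMIO          0x02
#define B_NEEDCOMMIT    0x04
#define B_ASYNC         0x08    /* deliberately not inherited */

static unsigned
cluster_flags(unsigned tbp_flags)
{
        /* Mark the header as a cluster, then copy only the allowed bits. */
        return (B_CLUSTER | (tbp_flags & (B_VMIO | B_NEEDCOMMIT)));
}

int
main(void)
{
        unsigned tbp_flags = B_VMIO | B_ASYNC;

        printf("cluster header flags: %#x\n", cluster_flags(tbp_flags));
        return (0);
}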