Diffstat (limited to 'sys/kern')
-rw-r--r--   sys/kern/kern_mib.c      | 25
-rw-r--r--   sys/kern/kern_physio.c   | 21
-rw-r--r--   sys/kern/kern_sendfile.c |  4
-rw-r--r--   sys/kern/subr_param.c    | 24
-rw-r--r--   sys/kern/vfs_aio.c       | 19
-rw-r--r--   sys/kern/vfs_bio.c       | 89
-rw-r--r--   sys/kern/vfs_cluster.c   |  2
-rw-r--r--   sys/kern/vfs_default.c   |  4
8 files changed, 131 insertions(+), 57 deletions(-)
diff --git a/sys/kern/kern_mib.c b/sys/kern/kern_mib.c
index 07db75ae753d..abd04b47023b 100644
--- a/sys/kern/kern_mib.c
+++ b/sys/kern/kern_mib.c
@@ -146,8 +146,29 @@ char kernelname[MAXPATHLEN] = PATH_KERNEL;	/* XXX bloat */
 SYSCTL_STRING(_kern, KERN_BOOTFILE, bootfile, CTLFLAG_RW | CTLFLAG_MPSAFE,
     kernelname, sizeof kernelname, "Name of kernel file booted");
 
-SYSCTL_INT(_kern, KERN_MAXPHYS, maxphys, CTLFLAG_RD | CTLFLAG_CAPRD,
-    SYSCTL_NULL_INT_PTR, MAXPHYS, "Maximum block I/O access size");
+#ifdef COMPAT_FREEBSD12
+static int
+sysctl_maxphys(SYSCTL_HANDLER_ARGS)
+{
+	u_long lvalue;
+	int ivalue;
+
+	lvalue = maxphys;
+	if (sizeof(int) == sizeof(u_long) || req->oldlen >= sizeof(u_long))
+		return (sysctl_handle_long(oidp, &lvalue, 0, req));
+	if (lvalue > INT_MAX)
+		return (sysctl_handle_long(oidp, &lvalue, 0, req));
+	ivalue = lvalue;
+	return (sysctl_handle_int(oidp, &ivalue, 0, req));
+}
+SYSCTL_PROC(_kern, KERN_MAXPHYS, maxphys, CTLTYPE_LONG | CTLFLAG_RDTUN |
+    CTLFLAG_NOFETCH | CTLFLAG_CAPRD | CTLFLAG_MPSAFE,
+    NULL, 0, sysctl_maxphys, "UL", "Maximum block I/O access size");
+#else
+SYSCTL_ULONG(_kern, KERN_MAXPHYS, maxphys,
+    CTLFLAG_RDTUN | CTLFLAG_NOFETCH | CTLFLAG_CAPRD,
+    &maxphys, 0, "Maximum block I/O access size");
+#endif
 
 SYSCTL_INT(_hw, HW_NCPU, ncpu, CTLFLAG_RD|CTLFLAG_CAPRD,
     &mp_ncpus, 0, "Number of active CPUs");
diff --git a/sys/kern/kern_physio.c b/sys/kern/kern_physio.c
index 6e7ceeb11e0c..2b0f4d14b41e 100644
--- a/sys/kern/kern_physio.c
+++ b/sys/kern/kern_physio.c
@@ -69,7 +69,7 @@ physio(struct cdev *dev, struct uio *uio, int ioflag)
	 * need to reject any requests that will not fit into one buffer.
	 */
	if (dev->si_flags & SI_NOSPLIT &&
-	    (uio->uio_resid > dev->si_iosize_max || uio->uio_resid > MAXPHYS ||
+	    (uio->uio_resid > dev->si_iosize_max || uio->uio_resid > maxphys ||
	    uio->uio_iovcnt > 1)) {
		/*
		 * Tell the user why his I/O was rejected.
@@ -78,10 +78,10 @@ physio(struct cdev *dev, struct uio *uio, int ioflag)
			uprintf("%s: request size=%zd > si_iosize_max=%d; "
			    "cannot split request\n", devtoname(dev),
			    uio->uio_resid, dev->si_iosize_max);
-		if (uio->uio_resid > MAXPHYS)
-			uprintf("%s: request size=%zd > MAXPHYS=%d; "
+		if (uio->uio_resid > maxphys)
+			uprintf("%s: request size=%zd > maxphys=%lu; "
			    "cannot split request\n", devtoname(dev),
-			    uio->uio_resid, MAXPHYS);
+			    uio->uio_resid, maxphys);
		if (uio->uio_iovcnt > 1)
			uprintf("%s: request vectors=%d > 1; "
			    "cannot split request\n", devtoname(dev),
@@ -101,12 +101,13 @@ physio(struct cdev *dev, struct uio *uio, int ioflag)
		pages = NULL;
	} else if ((dev->si_flags & SI_UNMAPPED) && unmapped_buf_allowed) {
		pbuf = NULL;
-		maxpages = btoc(MIN(uio->uio_resid, MAXPHYS)) + 1;
+		maxpages = btoc(MIN(uio->uio_resid, maxphys)) + 1;
		pages = malloc(sizeof(*pages) * maxpages, M_DEVBUF, M_WAITOK);
	} else {
		pbuf = uma_zalloc(pbuf_zone, M_WAITOK);
+		MPASS((pbuf->b_flags & B_MAXPHYS) != 0);
		sa = pbuf->b_data;
-		maxpages = btoc(MAXPHYS);
+		maxpages = btoc(maxphys);
		pages = pbuf->b_pages;
	}
	prot = VM_PROT_READ;
@@ -144,13 +145,13 @@ physio(struct cdev *dev, struct uio *uio, int ioflag)
			bp->bio_length = uio->uio_iov[i].iov_len;
			if (bp->bio_length > dev->si_iosize_max)
				bp->bio_length = dev->si_iosize_max;
-			if (bp->bio_length > MAXPHYS)
-				bp->bio_length = MAXPHYS;
+			if (bp->bio_length > maxphys)
+				bp->bio_length = maxphys;

			/*
			 * Make sure the pbuf can map the request.
-			 * The pbuf has kvasize = MAXPHYS, so a request
-			 * larger than MAXPHYS - PAGE_SIZE must be
+			 * The pbuf has kvasize = maxphys, so a request
+			 * larger than maxphys - PAGE_SIZE must be
			 * page aligned or it will be fragmented.
			 */
			poff = (vm_offset_t)base & PAGE_MASK;
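The COMPAT_FREEBSD12 handler above is what keeps old binaries working: a FreeBSD 12 consumer passes an int-sized buffer, a current one passes a u_long-sized buffer, and the handler picks the reply width from req->oldlen, answering with the long form (and thus a short-buffer error) once the value no longer fits in an int. A minimal userland sketch, not part of the change, showing the two request shapes the handler has to serve:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	u_long lval;
	int ival;
	size_t len;

	len = sizeof(lval);	/* current ABI: u_long reply */
	if (sysctlbyname("kern.maxphys", &lval, &len, NULL, 0) == 0)
		printf("maxphys (u_long): %lu\n", lval);

	len = sizeof(ival);	/* FreeBSD 12 ABI: int reply */
	if (sysctlbyname("kern.maxphys", &ival, &len, NULL, 0) == 0)
		printf("maxphys (int): %d\n", ival);
	return (0);
}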
diff --git a/sys/kern/kern_sendfile.c b/sys/kern/kern_sendfile.c
index 3f6bbf816138..e0b9b0e261d4 100644
--- a/sys/kern/kern_sendfile.c
+++ b/sys/kern/kern_sendfile.c
@@ -885,7 +885,7 @@ retry_space:
	 * do any heuristics and use exactly the value supplied by
	 * application. Otherwise, we allow readahead up to "rem".
	 * If application wants more, let it be, but there is no
-	 * reason to go above MAXPHYS. Also check against "obj_size",
+	 * reason to go above maxphys. Also check against "obj_size",
	 * since vm_pager_has_page() can hint beyond EOF.
	 */
	if (flags & SF_USER_READAHEAD) {
@@ -895,7 +895,7 @@ retry_space:
		    npages;
		rhpages += SF_READAHEAD(flags);
	}
-	rhpages = min(howmany(MAXPHYS, PAGE_SIZE), rhpages);
+	rhpages = min(howmany(maxphys, PAGE_SIZE), rhpages);
	rhpages = min(howmany(obj_size - trunc_page(off), PAGE_SIZE) -
	    npages, rhpages);
diff --git a/sys/kern/subr_param.c b/sys/kern/subr_param.c
index 032edd4229e1..39ec48a32cb3 100644
--- a/sys/kern/subr_param.c
+++ b/sys/kern/subr_param.c
@@ -41,6 +41,7 @@ __FBSDID("$FreeBSD$");

 #include "opt_param.h"
 #include "opt_msgbuf.h"
+#include "opt_maxphys.h"
 #include "opt_maxusers.h"

 #include <sys/param.h>
@@ -95,14 +96,15 @@ int	maxprocperuid;		/* max # of procs per user */
 int	maxfiles;			/* sys. wide open files limit */
 int	maxfilesperproc;		/* per-proc open files limit */
 int	msgbufsize;			/* size of kernel message buffer */
-int	nbuf;
+int	nbuf;				/* number of bcache bufs */
 int	bio_transient_maxcnt;
 int	ngroups_max;			/* max # groups per process */
 int	nswbuf;
 pid_t	pid_max = PID_MAX;
-long	maxswzone;			/* max swmeta KVA storage */
-long	maxbcache;			/* max buffer cache KVA storage */
-long	maxpipekva;			/* Limit on pipe KVA */
+u_long	maxswzone;			/* max swmeta KVA storage */
+u_long	maxbcache;			/* max buffer cache KVA storage */
+u_long	maxpipekva;			/* Limit on pipe KVA */
+u_long	maxphys;			/* max raw I/O transfer size */
 int	vm_guest = VM_GUEST_NO;		/* Running as virtual machine guest? */
 u_long	maxtsiz;			/* max text size */
 u_long	dfldsiz;			/* initial data size limit */
@@ -294,6 +296,18 @@ init_param2(long physpages)
		nbuf = NBUF;
	TUNABLE_INT_FETCH("kern.nbuf", &nbuf);
	TUNABLE_INT_FETCH("kern.bio_transient_maxcnt", &bio_transient_maxcnt);
+	maxphys = MAXPHYS;
+	TUNABLE_ULONG_FETCH("kern.maxphys", &maxphys);
+	if (maxphys == 0) {
+		maxphys = MAXPHYS;
+	} else if (__bitcountl(maxphys) != 1) {	/* power of two */
+		if (flsl(maxphys) == NBBY * sizeof(maxphys))
+			maxphys = MAXPHYS;
+		else
+			maxphys = 1UL << flsl(maxphys);
+	}
+	if (maxphys < PAGE_SIZE)
+		maxphys = MAXPHYS;

	/*
	 * Physical buffers are pre-allocated buffers (struct buf) that
@@ -305,7 +319,7 @@ init_param2(long physpages)
	 * The default for maxpipekva is min(1/64 of the kernel address space,
	 * max(1/64 of main memory, 512KB)). See sys_pipe.c for more details.
	 */
-	maxpipekva = (physpages / 64) * PAGE_SIZE;
+	maxpipekva = ptoa(physpages / 64);
	TUNABLE_LONG_FETCH("kern.ipc.maxpipekva", &maxpipekva);
	if (maxpipekva < 512 * 1024)
		maxpipekva = 512 * 1024;
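subr_param.c is where the new tunable is actually validated: init_param2() fetches kern.maxphys, and a zero value, a value below PAGE_SIZE, or a non-power-of-two that cannot be rounded up without overflowing falls back to the compiled-in MAXPHYS; any other non-power-of-two is rounded up to the next power of two. A standalone restatement of those rules (a sketch with stand-in constants and flsl() open-coded, not the kernel code itself):

#include <limits.h>

#define MAXPHYS_DEF	(128 * 1024UL)	/* stand-in for MAXPHYS */
#define PAGE_SIZE_DEF	4096UL		/* stand-in for PAGE_SIZE */

static int
flsl_sketch(unsigned long x)	/* 1-based index of the highest set bit */
{
	int i;

	for (i = 0; x != 0; x >>= 1)
		i++;
	return (i);
}

static unsigned long
sanitize_maxphys(unsigned long v)
{
	if (v == 0)
		return (MAXPHYS_DEF);
	if ((v & (v - 1)) != 0) {		/* not a power of two */
		if (flsl_sketch(v) == (int)(CHAR_BIT * sizeof(v)))
			return (MAXPHYS_DEF);	/* rounding would overflow */
		v = 1UL << flsl_sketch(v);	/* round up to a power of two */
	}
	if (v < PAGE_SIZE_DEF)
		return (MAXPHYS_DEF);
	return (v);
}

For example, sanitize_maxphys(96 * 1024) rounds up to 128K, while sanitize_maxphys(1UL << 20) passes through unchanged.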
diff --git a/sys/kern/vfs_aio.c b/sys/kern/vfs_aio.c
index 18a9f8aeac7a..c91f17794599 100644
--- a/sys/kern/vfs_aio.c
+++ b/sys/kern/vfs_aio.c
@@ -1252,14 +1252,16 @@ aio_qbio(struct proc *p, struct kaiocb *job)
	ki = p->p_aioinfo;
	poff = (vm_offset_t)cb->aio_buf & PAGE_MASK;
	if ((dev->si_flags & SI_UNMAPPED) && unmapped_buf_allowed) {
-		if (cb->aio_nbytes > MAXPHYS) {
+		if (cb->aio_nbytes > maxphys) {
			error = -1;
			goto unref;
		}

		pbuf = NULL;
+		job->pages = malloc(sizeof(vm_page_t) * (atop(round_page(
+		    cb->aio_nbytes)) + 1), M_TEMP, M_WAITOK | M_ZERO);
	} else {
-		if (cb->aio_nbytes > MAXPHYS - poff) {
+		if (cb->aio_nbytes > maxphys) {
			error = -1;
			goto unref;
		}
@@ -1273,6 +1275,7 @@ aio_qbio(struct proc *p, struct kaiocb *job)
		AIO_LOCK(ki);
		ki->kaio_buffer_count++;
		AIO_UNLOCK(ki);
+		job->pages = pbuf->b_pages;
	}

	job->bp = bp = g_alloc_bio();
@@ -1289,7 +1292,7 @@ aio_qbio(struct proc *p, struct kaiocb *job)
		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
	job->npages = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
	    (vm_offset_t)cb->aio_buf, bp->bio_length, prot, job->pages,
-	    nitems(job->pages));
+	    atop(maxphys) + 1);
	if (job->npages < 0) {
		error = EFAULT;
		goto doerror;
@@ -1320,6 +1323,8 @@ doerror:
		AIO_UNLOCK(ki);
		uma_zfree(pbuf_zone, pbuf);
		job->pbuf = NULL;
+	} else {
+		free(job->pages, M_TEMP);
	}
	g_destroy_bio(bp);
	job->bp = NULL;
@@ -2342,7 +2347,8 @@ aio_biowakeup(struct bio *bp)
	/* Release mapping into kernel space. */
	userp = job->userproc;
	ki = userp->p_aioinfo;
-	if (job->pbuf) {
+	vm_page_unhold_pages(job->pages, job->npages);
+	if (job->pbuf != NULL) {
		pmap_qremove((vm_offset_t)job->pbuf->b_data, job->npages);
		uma_zfree(pbuf_zone, job->pbuf);
		job->pbuf = NULL;
@@ -2350,9 +2356,10 @@ aio_biowakeup(struct bio *bp)
		AIO_LOCK(ki);
		ki->kaio_buffer_count--;
		AIO_UNLOCK(ki);
-	} else
+	} else {
+		free(job->pages, M_TEMP);
		atomic_subtract_int(&num_unmapped_aio, 1);
-	vm_page_unhold_pages(job->pages, job->npages);
+	}

	bp = job->bp;
	job->bp = NULL;
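In vfs_aio.c the fixed job->pages[] array that nitems() used to measure becomes a pointer: the unmapped path mallocs an array sized for this request only, the mapped path reuses the maxphys-sized array embedded in the pbuf, and the hold count is bounded by atop(maxphys) + 1 instead of the old compile-time array length. A toy-type sketch of that selection (names and types are illustrative, not the kernel's):

#include <stdlib.h>

struct page;				/* stand-in for what vm_page_t points at */
struct pbuf { struct page **b_pages; };	/* stand-in for struct buf */

#define PGSZ		4096UL
#define ROUND_PG(x)	(((x) + PGSZ - 1) & ~(PGSZ - 1))
#define ATOP(x)		((x) / PGSZ)

static struct page **
pick_page_array(int unmapped_ok, size_t nbytes, struct pbuf *pb)
{
	if (unmapped_ok) {
		/* per-request array; +1 page covers an unaligned start */
		return (calloc(ATOP(ROUND_PG(nbytes)) + 1,
		    sizeof(struct page *)));
	}
	return (pb->b_pages);	/* fixed array sized for maxphys */
}

The matching free() is the other half of the change: both the error path in aio_qbio() and the completion path in aio_biowakeup() now release the malloc'ed array.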
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 706e97106c67..2f18221e9270 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -147,8 +147,14 @@ struct bufdomain {
 #define	BD_RUN_UNLOCK(bd)	mtx_unlock(BD_RUN_LOCKPTR((bd)))
 #define	BD_DOMAIN(bd)		(bd - bdomain)

-static struct buf *buf;		/* buffer header pool */
-extern struct buf *swbuf;	/* Swap buffer header pool. */
+static char *buf;		/* buffer header pool */
+static struct buf *
+nbufp(unsigned i)
+{
+	return ((struct buf *)(buf + (sizeof(struct buf) +
+	    sizeof(vm_page_t) * atop(maxbcachebuf)) * i));
+}
+
 caddr_t __read_mostly unmapped_buf;

 /* Used below and for softdep flushing threads in ufs/ffs/ffs_softdep.c */
@@ -994,8 +1000,8 @@ maxbcachebuf_adjust(void)
		maxbcachebuf = i;
	if (maxbcachebuf < MAXBSIZE)
		maxbcachebuf = MAXBSIZE;
-	if (maxbcachebuf > MAXPHYS)
-		maxbcachebuf = MAXPHYS;
+	if (maxbcachebuf > maxphys)
+		maxbcachebuf = maxphys;
	if (bootverbose != 0 && maxbcachebuf != MAXBCACHEBUF)
		printf("maxbcachebuf=%d\n", maxbcachebuf);
 }
@@ -1113,10 +1119,10 @@ kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
		biotmap_sz = buf_sz / TRANSIENT_DENOM;
		buf_sz -= biotmap_sz;
	}
-	if (biotmap_sz / INT_MAX > MAXPHYS)
+	if (biotmap_sz / INT_MAX > maxphys)
		bio_transient_maxcnt = INT_MAX;
	else
-		bio_transient_maxcnt = biotmap_sz / MAXPHYS;
+		bio_transient_maxcnt = biotmap_sz / maxphys;
	/*
	 * Artificially limit to 1024 simultaneous in-flight I/Os
	 * using the transient mapping.
@@ -1136,10 +1142,11 @@ kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
	/*
	 * Reserve space for the buffer cache buffers
	 */
-	buf = (void *)v;
-	v = (caddr_t)(buf + nbuf);
+	buf = (char *)v;
+	v = (caddr_t)buf + (sizeof(struct buf) + sizeof(vm_page_t) *
+	    atop(maxbcachebuf)) * nbuf;

-	return(v);
+	return (v);
 }

 /* Initialize the buffer subsystem.  Called before use of any buffers. */
@@ -1157,12 +1164,12 @@ bufinit(void)
	mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF);
	mtx_init(&bdirtylock, "dirty buf lock", NULL, MTX_DEF);

-	unmapped_buf = (caddr_t)kva_alloc(MAXPHYS);
+	unmapped_buf = (caddr_t)kva_alloc(maxphys);

	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
-		bp = &buf[i];
-		bzero(bp, sizeof *bp);
+		bp = nbufp(i);
+		bzero(bp, sizeof(*bp) + sizeof(vm_page_t) * atop(maxbcachebuf));
		bp->b_flags = B_INVAL;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
@@ -1246,7 +1253,8 @@ bufinit(void)
	/* Setup the kva and free list allocators. */
	vmem_set_reclaim(buffer_arena, bufkva_reclaim);

-	buf_zone = uma_zcache_create("buf free cache", sizeof(struct buf),
+	buf_zone = uma_zcache_create("buf free cache",
+	    sizeof(struct buf) + sizeof(vm_page_t) * atop(maxbcachebuf),
	    NULL, NULL, NULL, NULL, buf_import, buf_release, NULL, 0);

	/*
@@ -1295,7 +1303,7 @@ vfs_buf_check_mapped(struct buf *bp)
	KASSERT(bp->b_data != unmapped_buf,
	    ("mapped buf: b_data was not updated %p", bp));
	KASSERT(bp->b_data < unmapped_buf || bp->b_data >= unmapped_buf +
-	    MAXPHYS, ("b_data + b_offset unmapped %p", bp));
+	    maxphys, ("b_data + b_offset unmapped %p", bp));
 }

 static inline void
@@ -1330,7 +1338,7 @@ bufshutdown(int show_busybufs)
 {
	static int first_buf_printf = 1;
	struct buf *bp;
-	int iter, nbusy, pbusy;
+	int i, iter, nbusy, pbusy;
 #ifndef PREEMPTION
	int subiter;
 #endif
@@ -1348,9 +1356,11 @@ bufshutdown(int show_busybufs)
	 */
	for (iter = pbusy = 0; iter < 20; iter++) {
		nbusy = 0;
-		for (bp = &buf[nbuf]; --bp >= buf; )
+		for (i = nbuf - 1; i >= 0; i--) {
+			bp = nbufp(i);
			if (isbufbusy(bp))
				nbusy++;
+		}
		if (nbusy == 0) {
			if (first_buf_printf)
				printf("All buffers synced.");
@@ -1391,7 +1401,8 @@ bufshutdown(int show_busybufs)
	 * a fsck if we're just a client of a wedged NFS server
	 */
	nbusy = 0;
-	for (bp = &buf[nbuf]; --bp >= buf; ) {
+	for (i = nbuf - 1; i >= 0; i--) {
+		bp = nbufp(i);
		if (isbufbusy(bp)) {
 #if 0
 /* XXX: This is bogus.  We should probably have a BO_REMOTE flag instead */
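The nbufp() accessor in the first vfs_bio.c hunk is the core of this file's change: each buffer header is now followed in memory by its vm_page_t array (sized for maxbcachebuf), so the pool becomes a char array walked with an explicit stride, and loops such as the ones in bufshutdown() above switch from struct-pointer arithmetic to indexed access. A self-contained sketch of the same layout arithmetic, with toy types:

#include <stdlib.h>

struct page;				/* stand-in for what vm_page_t points at */
struct hdr { int flags; };		/* stand-in for struct buf */

#define PAGES_PER_BUF	16		/* stand-in for atop(maxbcachebuf) */
#define STRIDE		(sizeof(struct hdr) + \
			    sizeof(struct page *) * PAGES_PER_BUF)

static char *pool;			/* stand-in for the 'buf' pool */

static struct hdr *
hdrp(unsigned i)			/* analogue of nbufp(i) */
{
	return ((struct hdr *)(pool + STRIDE * i));
}

int
main(void)
{
	pool = calloc(64, STRIDE);	/* 64 headers, each with page array */
	if (pool == NULL)
		return (1);
	hdrp(3)->flags = 1;		/* fourth header in the pool */
	free(pool);
	return (0);
}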
@@ -1571,6 +1582,7 @@ buf_free(struct buf *bp)
		buf_deallocate(bp);
	bufkva_free(bp);
	atomic_add_int(&bufdomain(bp)->bd_freebuffers, 1);
+	MPASS((bp->b_flags & B_MAXPHYS) == 0);
	BUF_UNLOCK(bp);
	uma_zfree(buf_zone, bp);
 }
@@ -1674,6 +1686,7 @@ buf_alloc(struct bufdomain *bd)
	    ("bp: %p still has %d vm pages\n", bp, bp->b_npages));
	KASSERT(bp->b_kvasize == 0, ("bp: %p still has kva\n", bp));
	KASSERT(bp->b_bufsize == 0, ("bp: %p still has bufspace\n", bp));
+	MPASS((bp->b_flags & B_MAXPHYS) == 0);

	bp->b_domain = BD_DOMAIN(bd);
	bp->b_flags = 0;
@@ -2018,6 +2031,9 @@ bufkva_alloc(struct buf *bp, int maxsize, int gbflags)
	KASSERT((gbflags & GB_UNMAPPED) == 0 || (gbflags & GB_KVAALLOC) != 0,
	    ("Invalid gbflags 0x%x in %s", gbflags, __func__));
+	MPASS((bp->b_flags & B_MAXPHYS) == 0);
+	KASSERT(maxsize <= maxbcachebuf,
+	    ("bufkva_alloc kva too large %d %u", maxsize, maxbcachebuf));

	bufkva_free(bp);
@@ -3036,6 +3052,10 @@ vfs_vmio_extend(struct buf *bp, int desiredpages, int size)
	 */
	obj = bp->b_bufobj->bo_object;
	if (bp->b_npages < desiredpages) {
+		KASSERT(desiredpages <= atop(maxbcachebuf),
+		    ("vfs_vmio_extend past maxbcachebuf %p %d %u",
+		    bp, desiredpages, maxbcachebuf));
+
		/*
		 * We must allocate system pages since blocking
		 * here could interfere with paging I/O, no
@@ -3163,7 +3183,7 @@ vfs_bio_awrite(struct buf *bp)
	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
		size = vp->v_mount->mnt_stat.f_iosize;
-		maxcl = MAXPHYS / size;
+		maxcl = maxphys / size;

		BO_RLOCK(bo);
		for (i = 1; i < maxcl; i++)
@@ -4853,6 +4873,10 @@ vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
	to = round_page(to);
	from = round_page(from);
	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
+	MPASS((bp->b_flags & B_MAXPHYS) == 0);
+	KASSERT(to - from <= maxbcachebuf,
+	    ("vm_hold_load_pages too large %p %#jx %#jx %u",
+	    bp, (uintmax_t)from, (uintmax_t)to, maxbcachebuf));

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
		/*
@@ -4912,12 +4936,13 @@ vmapbuf(struct buf *bp, void *uaddr, size_t len, int mapbuf)
	vm_prot_t prot;
	int pidx;

+	MPASS((bp->b_flags & B_MAXPHYS) != 0);
	prot = VM_PROT_READ;
	if (bp->b_iocmd == BIO_READ)
		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
-	if ((pidx = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
-	    (vm_offset_t)uaddr, len, prot, bp->b_pages,
-	    btoc(MAXPHYS))) < 0)
+	pidx = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
+	    (vm_offset_t)uaddr, len, prot, bp->b_pages, PBUF_PAGES);
+	if (pidx < 0)
		return (-1);
	bp->b_bufsize = len;
	bp->b_npages = pidx;
@@ -4927,7 +4952,7 @@ vmapbuf(struct buf *bp, void *uaddr, size_t len, int mapbuf)
		bp->b_data = bp->b_kvabase + bp->b_offset;
	} else
		bp->b_data = unmapped_buf;
-	return(0);
+	return (0);
 }

 /*
@@ -5398,19 +5423,23 @@ DB_SHOW_COMMAND(bufqueues, bufqueues)
		db_printf("\n");
		cnt = 0;
		total = 0;
-		for (j = 0; j < nbuf; j++)
-			if (buf[j].b_domain == i && BUF_ISLOCKED(&buf[j])) {
+		for (j = 0; j < nbuf; j++) {
+			bp = nbufp(j);
+			if (bp->b_domain == i && BUF_ISLOCKED(bp)) {
				cnt++;
-				total += buf[j].b_bufsize;
+				total += bp->b_bufsize;
			}
+		}
		db_printf("\tLocked buffers: %d space %ld\n", cnt, total);
		cnt = 0;
		total = 0;
-		for (j = 0; j < nbuf; j++)
-			if (buf[j].b_domain == i) {
+		for (j = 0; j < nbuf; j++) {
+			bp = nbufp(j);
+			if (bp->b_domain == i) {
				cnt++;
-				total += buf[j].b_bufsize;
+				total += bp->b_bufsize;
			}
+		}
		db_printf("\tTotal buffers: %d space %ld\n", cnt, total);
	}
 }
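vmapbuf() above now passes PBUF_PAGES as the hold limit; the assumption here (the macro is defined outside the files shown) is that it expands to atop(maxphys) + 1, the worst case for a transfer of maxphys bytes that does not start on a page boundary. The B_MAXPHYS assertions enforce the same split: pbufs carry page arrays sized for maxphys, ordinary bcache buffers only for maxbcachebuf. A worked example of that pbuf sizing:

#include <stdio.h>

#define PGSZ	4096UL			/* stand-in for PAGE_SIZE */

static unsigned long
pbuf_pages(unsigned long maxphys_v)	/* assumed PBUF_PAGES formula */
{
	return (maxphys_v / PGSZ + 1);
}

int
main(void)
{
	printf("%lu\n", pbuf_pages(128 * 1024UL));	/* default: 33 */
	printf("%lu\n", pbuf_pages(1024 * 1024UL));	/* 1M tunable: 257 */
	return (0);
}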
@@ -5421,7 +5450,7 @@ DB_SHOW_COMMAND(lockedbufs, lockedbufs)
	int i;

	for (i = 0; i < nbuf; i++) {
-		bp = &buf[i];
+		bp = nbufp(i);
		if (BUF_ISLOCKED(bp)) {
			db_show_buffer((uintptr_t)bp, 1, 0, NULL);
			db_printf("\n");
@@ -5464,7 +5493,7 @@ DB_COMMAND(countfreebufs, db_coundfreebufs)
	}

	for (i = 0; i < nbuf; i++) {
-		bp = &buf[i];
+		bp = nbufp(i);
		if (bp->b_qindex == QUEUE_EMPTY)
			nfree++;
		else
diff --git a/sys/kern/vfs_cluster.c b/sys/kern/vfs_cluster.c
index 5dfbb9f113be..6b77adf5df34 100644
--- a/sys/kern/vfs_cluster.c
+++ b/sys/kern/vfs_cluster.c
@@ -386,6 +386,7 @@ cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
	bp = uma_zalloc(cluster_pbuf_zone, M_NOWAIT);
	if (bp == NULL)
		return tbp;
+	MPASS((bp->b_flags & B_MAXPHYS) != 0);

	/*
	 * We are synthesizing a buffer out of vm_page_t's, but
@@ -871,6 +872,7 @@ cluster_wbuild(struct vnode *vp, long size, daddr_t start_lbn, int len,
			--len;
			continue;
		}
+		MPASS((bp->b_flags & B_MAXPHYS) != 0);

		/*
		 * We got a pbuf to make the cluster in.
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index d9f44e1dc6b9..4b96d9522ce3 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -974,8 +974,8 @@ vop_stdallocate(struct vop_allocate_args *ap)
	iosize = vap->va_blocksize;
	if (iosize == 0)
		iosize = BLKDEV_IOSIZE;
-	if (iosize > MAXPHYS)
-		iosize = MAXPHYS;
+	if (iosize > maxphys)
+		iosize = maxphys;
	buf = malloc(iosize, M_TEMP, M_WAITOK);

 #ifdef __notyet__
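vop_stdallocate() shows the pattern consumers of the tunable follow: derive an I/O size from the natural block size, clamp it to maxphys, and loop over the range with one bounce buffer. A hedged sketch of that shape (all names here are illustrative, not kernel API):

#include <stdlib.h>
#include <string.h>

#define BLKDEV_IOSIZE_DEF	(64 * 1024UL)	/* stand-in default */

static size_t maxphys_val = 1024 * 1024;	/* stand-in for maxphys */

static int
zero_range(size_t blocksize, size_t len)
{
	size_t iosize, chunk;
	char *buf;

	iosize = blocksize != 0 ? blocksize : BLKDEV_IOSIZE_DEF;
	if (iosize > maxphys_val)
		iosize = maxphys_val;	/* the clamp from the hunk above */
	buf = malloc(iosize);
	if (buf == NULL)
		return (-1);
	memset(buf, 0, iosize);
	for (; len > 0; len -= chunk) {
		chunk = len < iosize ? len : iosize;
		/* write 'chunk' bytes from buf at the current offset */
	}
	free(buf);
	return (0);
}

Because the sysctl is CTLFLAG_RDTUN | CTLFLAG_NOFETCH, the value is only taken at boot, e.g. kern.maxphys="1048576" in loader.conf(5); at runtime it is read-only.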