diff options
| author | John Polstra <jdp@FreeBSD.org> | 1998-09-22 01:30:55 +0000 |
|---|---|---|
| committer | John Polstra <jdp@FreeBSD.org> | 1998-09-22 01:30:55 +0000 |
| commit | d026663988045416f8b8feb9c7150afe5f019279 (patch) | |
| tree | 955054514e0dbf77f592949a533a8faa9a3b0fdf /sys/miscfs/procfs | |
| parent | bd0da250336d886bb7f726252035dd66b2d1c815 (diff) | |
Notes
Diffstat (limited to 'sys/miscfs/procfs')
| -rw-r--r-- | sys/miscfs/procfs/procfs_mem.c | 140 |
1 file changed, 60 insertions, 80 deletions
diff --git a/sys/miscfs/procfs/procfs_mem.c b/sys/miscfs/procfs/procfs_mem.c index 089c490ecce5..ac4bc9b8bda6 100644 --- a/sys/miscfs/procfs/procfs_mem.c +++ b/sys/miscfs/procfs/procfs_mem.c @@ -37,7 +37,7 @@ * * @(#)procfs_mem.c 8.4 (Berkeley) 1/21/94 * - * $Id: procfs_mem.c,v 1.20 1996/10/24 02:47:05 dyson Exp $ + * $Id: procfs_mem.c,v 1.20.2.1 1997/08/12 04:45:23 sef Exp $ */ /* @@ -74,10 +74,11 @@ procfs_rwmem(p, uio) int error; int writing; struct vmspace *vm; - int fix_prot = 0; vm_map_t map; vm_object_t object = NULL; vm_offset_t pageno = 0; /* page number */ + vm_prot_t reqprot; + vm_offset_t kva; /* * if the vmspace is in the midst of being deallocated or the @@ -94,6 +95,9 @@ procfs_rwmem(p, uio) map = &vm->vm_map; writing = uio->uio_rw == UIO_WRITE; + reqprot = writing ? (VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE) : VM_PROT_READ; + + kva = kmem_alloc_pageable(kernel_map, PAGE_SIZE); /* * Only map in one page at a time. We don't have to, but it @@ -101,7 +105,6 @@ procfs_rwmem(p, uio) */ do { vm_map_t tmap; - vm_offset_t kva = 0; vm_offset_t uva; int page_offset; /* offset into page */ vm_map_entry_t out_entry; @@ -109,8 +112,8 @@ procfs_rwmem(p, uio) boolean_t wired, single_use; vm_pindex_t pindex; u_int len; + vm_page_t m; - fix_prot = 0; object = NULL; uva = (vm_offset_t) uio->uio_offset; @@ -127,6 +130,8 @@ procfs_rwmem(p, uio) len = min(PAGE_SIZE - page_offset, uio->uio_resid); if (uva >= VM_MAXUSER_ADDRESS) { + vm_offset_t tkva; + if (writing || (uva >= (VM_MAXUSER_ADDRESS + UPAGES * PAGE_SIZE))) { error = 0; break; @@ -148,10 +153,10 @@ procfs_rwmem(p, uio) fill_eproc (p, &p->p_addr->u_kproc.kp_eproc); /* locate the in-core address */ - kva = (u_int)p->p_addr + uva - VM_MAXUSER_ADDRESS; + tkva = (u_int)p->p_addr + uva - VM_MAXUSER_ADDRESS; /* transfer it */ - error = uiomove((caddr_t)kva, len, uio); + error = uiomove((caddr_t)tkva, len, uio); /* let the pages go */ PRELE(p); @@ -160,33 +165,12 @@ procfs_rwmem(p, uio) } /* - * Check the permissions for 
the area we're interested - * in. + * Fault the page on behalf of the process */ - if (writing) { - fix_prot = !vm_map_check_protection(map, pageno, - pageno + PAGE_SIZE, VM_PROT_WRITE); - - if (fix_prot) { - /* - * If the page is not writable, we make it so. - * XXX It is possible that a page may *not* be - * read/executable, if a process changes that! - * We will assume, for now, that a page is either - * VM_PROT_ALL, or VM_PROT_READ|VM_PROT_EXECUTE. - */ - error = vm_map_protect(map, pageno, - pageno + PAGE_SIZE, VM_PROT_ALL, 0); - if (error) { - /* - * We don't have to undo something - * that didn't work, so we clear the - * flag. - */ - fix_prot = 0; - break; - } - } + error = vm_fault(map, pageno, reqprot, FALSE); + if (error) { + error = EFAULT; + break; } /* @@ -196,88 +180,84 @@ procfs_rwmem(p, uio) * vm_map_lookup() can change the map argument. */ tmap = map; - error = vm_map_lookup(&tmap, pageno, - writing ? VM_PROT_WRITE : VM_PROT_READ, + error = vm_map_lookup(&tmap, pageno, reqprot, &out_entry, &object, &pindex, &out_prot, &wired, &single_use); if (error) { + error = EFAULT; + /* * Make sure that there is no residue in 'object' from * an error return on vm_map_lookup. */ object = NULL; + break; } - /* - * We're done with tmap now. - * But reference the object first, so that we won't loose - * it. - */ - vm_object_reference(object); - vm_map_lookup_done(tmap, out_entry); + m = vm_page_lookup(object, pindex); - /* - * Fault the page in... 
- */ - if (writing && object->backing_object) { - error = vm_fault(map, pageno, - VM_PROT_WRITE, FALSE); - if (error) - break; + /* Allow fallback to backing objects if we are reading */ + + while (m == NULL && !writing && object->backing_object) { + + pindex += OFF_TO_IDX(object->backing_object_offset); + object = object->backing_object; + + m = vm_page_lookup(object, pindex); } - /* Find space in kernel_map for the page we're interested in */ - error = vm_map_find(kernel_map, object, - IDX_TO_OFF(pindex), &kva, PAGE_SIZE, 1, - VM_PROT_ALL, VM_PROT_ALL, 0); - if (error) { + if (m == NULL) { + error = EFAULT; + + /* + * Make sure that there is no residue in 'object' from + * an error return on vm_map_lookup. + */ + object = NULL; + + vm_map_lookup_done(tmap, out_entry); + break; } /* - * Mark the page we just found as pageable. + * Wire the page into memory */ - error = vm_map_pageable(kernel_map, kva, - kva + PAGE_SIZE, 0); - if (error) { - vm_map_remove(kernel_map, kva, kva + PAGE_SIZE); - object = NULL; - break; - } + vm_page_wire(m); /* - * Now do the i/o move. + * We're done with tmap now. + * But reference the object first, so that we won't loose + * it. */ - error = uiomove((caddr_t)(kva + page_offset), - len, uio); + vm_object_reference(object); + vm_map_lookup_done(tmap, out_entry); + + pmap_kenter(kva, VM_PAGE_TO_PHYS(m)); /* - * vm_map_remove gets rid of the object reference, so - * we need to get rid of our 'object' pointer if there - * is subsequently an error. + * Now do the i/o move. */ - vm_map_remove(kernel_map, kva, kva + PAGE_SIZE); - object = NULL; + error = uiomove((caddr_t)(kva + page_offset), len, uio); + + pmap_kremove(kva); /* - * Undo the protection 'damage'. 
+ * release the page and the object */ - if (fix_prot) { - vm_map_protect(map, pageno, pageno + PAGE_SIZE, - VM_PROT_READ|VM_PROT_EXECUTE, 0); - fix_prot = 0; - } + vm_page_unwire(m); + vm_object_deallocate(object); + + object = NULL; + } while (error == 0 && uio->uio_resid > 0); if (object) vm_object_deallocate(object); - if (fix_prot) - vm_map_protect(map, pageno, pageno + PAGE_SIZE, - VM_PROT_READ|VM_PROT_EXECUTE, 0); - + kmem_free(kernel_map, kva, PAGE_SIZE); vmspace_free(vm); return (error); } |
