author     John Baldwin <jhb@FreeBSD.org>	2016-01-19 21:37:51 +0000
committer  John Baldwin <jhb@FreeBSD.org>	2016-01-19 21:37:51 +0000
commit     8a4dc40ff46ef766bff63a8328d6dd838547f860 (patch)
tree       487bad23399744527b32d7512d35abaf220f0d77 /sys
parent     f2e7f06a0de5a6f8f1cdf6e5f4b9b35f193eaa13 (diff)
Diffstat (limited to 'sys')
-rw-r--r--	sys/kern/vfs_aio.c	119
-rw-r--r--	sys/vm/vm_extern.h	  1
-rw-r--r--	sys/vm/vm_map.c	 45
3 files changed, 80 insertions, 85 deletions
diff --git a/sys/kern/vfs_aio.c b/sys/kern/vfs_aio.c
index f79e2bb9cb0b..4a4c6125918f 100644
--- a/sys/kern/vfs_aio.c
+++ b/sys/kern/vfs_aio.c
@@ -1048,6 +1048,13 @@ notification_done:
 	}
 }
 
+static void
+aio_switch_vmspace(struct aiocblist *aiocbe)
+{
+
+	vmspace_switch_aio(aiocbe->userproc->p_vmspace);
+}
+
 /*
  * The AIO daemon, most of the actual work is done in aio_process_*,
  * but the setup (and address space mgmt) is done in this routine.
@@ -1058,18 +1065,20 @@ aio_daemon(void *_id)
 	struct aiocblist *aiocbe;
 	struct aiothreadlist *aiop;
 	struct kaioinfo *ki;
-	struct proc *curcp, *mycp, *userp;
-	struct vmspace *myvm, *tmpvm;
+	struct proc *p, *userp;
+	struct vmspace *myvm;
 	struct thread *td = curthread;
 	int id = (intptr_t)_id;
 
 	/*
-	 * Local copies of curproc (cp) and vmspace (myvm)
+	 * Grab an extra reference on the daemon's vmspace so that it
+	 * doesn't get freed by jobs that switch to a different
+	 * vmspace.
 	 */
-	mycp = td->td_proc;
-	myvm = mycp->p_vmspace;
+	p = td->td_proc;
+	myvm = vmspace_acquire_ref(p);
 
-	KASSERT(mycp->p_textvp == NULL, ("kthread has a textvp"));
+	KASSERT(p->p_textvp == NULL, ("kthread has a textvp"));
 
 	/*
 	 * Allocate and ready the aio control info. There is one aiop structure
@@ -1088,12 +1097,6 @@ aio_daemon(void *_id)
 	mtx_lock(&aio_job_mtx);
 	for (;;) {
 		/*
-		 * curcp is the current daemon process context.
-		 * userp is the current user process context.
-		 */
-		curcp = mycp;
-
-		/*
 		 * Take daemon off of free queue
 		 */
 		if (aiop->aiothreadflags & AIOP_FREE) {
@@ -1111,34 +1114,7 @@ aio_daemon(void *_id)
 			/*
 			 * Connect to process address space for user program.
 			 */
-			if (userp != curcp) {
-				/*
-				 * Save the current address space that we are
-				 * connected to.
-				 */
-				tmpvm = mycp->p_vmspace;
-
-				/*
-				 * Point to the new user address space, and
-				 * refer to it.
-				 */
-				mycp->p_vmspace = userp->p_vmspace;
-				atomic_add_int(&mycp->p_vmspace->vm_refcnt, 1);
-
-				/* Activate the new mapping. */
-				pmap_activate(FIRST_THREAD_IN_PROC(mycp));
-
-				/*
-				 * If the old address space wasn't the daemons
-				 * own address space, then we need to remove the
-				 * daemon's reference from the other process
-				 * that it was acting on behalf of.
-				 */
-				if (tmpvm != myvm) {
-					vmspace_free(tmpvm);
-				}
-				curcp = userp;
-			}
+			aio_switch_vmspace(aiocbe);
 
 			ki = userp->p_aioinfo;
 
@@ -1172,34 +1148,13 @@ aio_daemon(void *_id)
 		/*
 		 * Disconnect from user address space.
 		 */
-		if (curcp != mycp) {
-
+		if (p->p_vmspace != myvm) {
 			mtx_unlock(&aio_job_mtx);
-
-			/* Get the user address space to disconnect from. */
-			tmpvm = mycp->p_vmspace;
-
-			/* Get original address space for daemon. */
-			mycp->p_vmspace = myvm;
-
-			/* Activate the daemon's address space. */
-			pmap_activate(FIRST_THREAD_IN_PROC(mycp));
-#ifdef DIAGNOSTIC
-			if (tmpvm == myvm) {
-				printf("AIOD: vmspace problem -- %d\n",
-				    mycp->p_pid);
-			}
-#endif
-			/* Remove our vmspace reference. */
-			vmspace_free(tmpvm);
-
-			curcp = mycp;
-
+			vmspace_switch_aio(myvm);
 			mtx_lock(&aio_job_mtx);
 			/*
 			 * We have to restart to avoid race, we only sleep if
-			 * no job can be selected, that should be
-			 * curcp == mycp.
+			 * no job can be selected.
 			 */
 			continue;
 		}
@@ -1214,29 +1169,23 @@ aio_daemon(void *_id)
 		 * thereby freeing resources.
 		 */
 		if (msleep(aiop->aiothread, &aio_job_mtx, PRIBIO, "aiordy",
-		    aiod_lifetime)) {
-			if (TAILQ_EMPTY(&aio_jobs)) {
-				if ((aiop->aiothreadflags & AIOP_FREE) &&
-				    (num_aio_procs > target_aio_procs)) {
-					TAILQ_REMOVE(&aio_freeproc, aiop, list);
-					num_aio_procs--;
-					mtx_unlock(&aio_job_mtx);
-					uma_zfree(aiop_zone, aiop);
-					free_unr(aiod_unr, id);
-#ifdef DIAGNOSTIC
-					if (mycp->p_vmspace->vm_refcnt <= 1) {
-						printf("AIOD: bad vm refcnt for"
-						    " exiting daemon: %d\n",
-						    mycp->p_vmspace->vm_refcnt);
-					}
-#endif
-					kproc_exit(0);
-				}
-			}
-		}
+		    aiod_lifetime) == EWOULDBLOCK && TAILQ_EMPTY(&aio_jobs) &&
+		    (aiop->aiothreadflags & AIOP_FREE) &&
+		    num_aio_procs > target_aio_procs)
+			break;
 	}
+	TAILQ_REMOVE(&aio_freeproc, aiop, list);
+	num_aio_procs--;
 	mtx_unlock(&aio_job_mtx);
-	panic("shouldn't be here\n");
+	uma_zfree(aiop_zone, aiop);
+	free_unr(aiod_unr, id);
+	vmspace_free(myvm);
+
+	KASSERT(p->p_vmspace == myvm,
+	    ("AIOD: bad vmspace for exiting daemon"));
+	KASSERT(myvm->vm_refcnt > 1,
+	    ("AIOD: bad vm refcnt for exiting daemon: %d", myvm->vm_refcnt));
+	kproc_exit(0);
 }
 
 /*
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index 922c3280302c..240001806f2e 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -106,6 +106,7 @@ void vmspace_exit(struct thread *);
 struct vmspace *vmspace_acquire_ref(struct proc *);
 void vmspace_free(struct vmspace *);
 void vmspace_exitfree(struct proc *);
+void vmspace_switch_aio(struct vmspace *);
 void vnode_pager_setsize(struct vnode *, vm_ooffset_t);
 int vslock(void *, size_t);
 void vsunlock(void *, size_t);
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 2e69b876b072..5fdc62fe08b4 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -451,6 +451,51 @@ vmspace_acquire_ref(struct proc *p)
 	return (vm);
 }
 
+/*
+ * Switch between vmspaces in an AIO kernel process.
+ *
+ * The AIO kernel processes switch to and from a user process's
+ * vmspace while performing an I/O operation on behalf of a user
+ * process. The new vmspace is either the vmspace of a user process
+ * obtained from an active AIO request or the initial vmspace of the
+ * AIO kernel process (when it is idling). Because user processes
+ * will block to drain any active AIO requests before proceeding in
+ * exit() or execve(), the vmspace reference count for these vmspaces
+ * can never be 0. This allows for a much simpler implementation than
+ * the loop in vmspace_acquire_ref() above. Similarly, AIO kernel
+ * processes hold an extra reference on their initial vmspace for the
+ * life of the process so that this guarantee is true for any vmspace
+ * passed as 'newvm'.
+ */
+void
+vmspace_switch_aio(struct vmspace *newvm)
+{
+	struct vmspace *oldvm;
+
+	/* XXX: Need some way to assert that this is an aio daemon. */
+
+	KASSERT(newvm->vm_refcnt > 0,
+	    ("vmspace_switch_aio: newvm unreferenced"));
+
+	oldvm = curproc->p_vmspace;
+	if (oldvm == newvm)
+		return;
+
+	/*
+	 * Point to the new address space and refer to it.
+	 */
+	curproc->p_vmspace = newvm;
+	atomic_add_int(&newvm->vm_refcnt, 1);
+
+	/* Activate the new mapping. */
+	pmap_activate(curthread);
+
+	/* Remove the daemon's reference to the old address space. */
+	KASSERT(oldvm->vm_refcnt > 1,
+	    ("vmspace_switch_aio: oldvm dropping last reference"));
+	vmspace_free(oldvm);
+}
+
 void
 _vm_map_lock(vm_map_t map, const char *file, int line)
 {
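For orientation, the refactoring above reduces the daemon's address-space handling to the pattern sketched below. This is a condensed, illustrative sketch distilled from the hunks in this commit, not code from the tree: aio_next_job() is a hypothetical placeholder for the existing job-selection logic in vfs_aio.c, and queue accounting, signal handling, and the daemon-exit path are omitted.

/*
 * Condensed sketch of the per-job address-space handling in the
 * rewritten aio_daemon(). Illustrative only: aio_next_job() is a
 * placeholder for the real job-selection code.
 */
static void
aio_daemon_vmspace_sketch(void)
{
	struct proc *p = curthread->td_proc;
	struct vmspace *myvm;
	struct aiocblist *aiocbe;

	/* Pin the daemon's own vmspace for the life of the process. */
	myvm = vmspace_acquire_ref(p);

	mtx_lock(&aio_job_mtx);
	for (;;) {
		while ((aiocbe = aio_next_job()) != NULL) {
			mtx_unlock(&aio_job_mtx);

			/* Borrow the user process's address space. */
			aio_switch_vmspace(aiocbe);

			/* ... perform the I/O via aio_process_*() ... */

			mtx_lock(&aio_job_mtx);
		}

		/* Idle: drop back to the daemon's own vmspace. */
		if (p->p_vmspace != myvm) {
			mtx_unlock(&aio_job_mtx);
			vmspace_switch_aio(myvm);
			mtx_lock(&aio_job_mtx);
			/* Re-check the queue to avoid a race. */
			continue;
		}

		/* ... msleep() and leave the loop when the daemon should retire ... */
		break;
	}
	mtx_unlock(&aio_job_mtx);

	/* Release the reference taken by vmspace_acquire_ref() above. */
	vmspace_free(myvm);
	kproc_exit(0);
}

The invariant that makes this work is the one spelled out in the vm_map.c comment: every vmspace handed to vmspace_switch_aio() already holds a reference that cannot reach zero while the daemon is using it, so the unconditional atomic_add_int() on the new vmspace and the vmspace_free() of the old one are safe without the retry loop used by vmspace_acquire_ref().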