author     Mark Johnston <markj@FreeBSD.org>    2018-11-24 17:06:01 +0000
committer  Mark Johnston <markj@FreeBSD.org>    2018-11-24 17:06:01 +0000
commit     792843c38f819a0966b6169fc86af05e88bc6ecd (patch)
tree       58895a7e84fbc57c7263d9bba157eb10746b3cc3
parent     36c4960ef829cf675aee22da292ea6002bd0d429 (diff)
-rw-r--r--  sys/compat/linux/linux_event.c |  2
-rw-r--r--  sys/kern/kern_event.c          | 45
-rw-r--r--  sys/kern/vfs_aio.c             |  5
-rw-r--r--  sys/sys/event.h                |  2
4 files changed, 26 insertions, 28 deletions
diff --git a/sys/compat/linux/linux_event.c b/sys/compat/linux/linux_event.c
index bb4825574ed5..f10cccec30a4 100644
--- a/sys/compat/linux/linux_event.c
+++ b/sys/compat/linux/linux_event.c
@@ -504,7 +504,7 @@ linux_epoll_ctl(struct thread *td, struct linux_epoll_ctl_args *args)
 		 * and the EV_ADD flag is not set.
 		 */
 		kev[0].flags &= ~EV_ADD;
-		error = kqfd_register(args->epfd, &kev[0], td, 1);
+		error = kqfd_register(args->epfd, &kev[0], td, M_WAITOK);
 		if (error != ENOENT) {
 			error = EEXIST;
 			goto leave0;
diff --git a/sys/kern/kern_event.c b/sys/kern/kern_event.c
index c1070073610c..1c487cdebda7 100644
--- a/sys/kern/kern_event.c
+++ b/sys/kern/kern_event.c
@@ -102,13 +102,13 @@ TASKQUEUE_DEFINE_THREAD(kqueue_ctx);
 static int	kevent_copyout(void *arg, struct kevent *kevp, int count);
 static int	kevent_copyin(void *arg, struct kevent *kevp, int count);
 static int	kqueue_register(struct kqueue *kq, struct kevent *kev,
-		    struct thread *td, int waitok);
+		    struct thread *td, int mflag);
 static int	kqueue_acquire(struct file *fp, struct kqueue **kqp);
 static void	kqueue_release(struct kqueue *kq, int locked);
 static void	kqueue_destroy(struct kqueue *kq);
 static void	kqueue_drain(struct kqueue *kq, struct thread *td);
 static int	kqueue_expand(struct kqueue *kq, struct filterops *fops,
-		    uintptr_t ident, int waitok);
+		    uintptr_t ident, int mflag);
 static void	kqueue_task(void *arg, int pending);
 static int	kqueue_scan(struct kqueue *kq, int maxevents,
 		    struct kevent_copyops *k_ops,
@@ -150,7 +150,7 @@ static void knote_drop_detached(struct knote *kn, struct thread *td);
 static void	knote_enqueue(struct knote *kn);
 static void	knote_dequeue(struct knote *kn);
 static void	knote_init(void);
-static struct knote *knote_alloc(int waitok);
+static struct knote *knote_alloc(int mflag);
 static void	knote_free(struct knote *kn);
 
 static void	filt_kqdetach(struct knote *kn);
@@ -582,7 +582,7 @@ knote_fork(struct knlist *list, int pid)
 		kev.fflags = kn->kn_sfflags;
 		kev.data = kn->kn_id;		/* parent */
 		kev.udata = kn->kn_kevent.udata;/* preserve udata */
-		error = kqueue_register(kq, &kev, NULL, 0);
+		error = kqueue_register(kq, &kev, NULL, M_NOWAIT);
 		if (error)
 			kn->kn_fflags |= NOTE_TRACKERR;
 
@@ -596,7 +596,7 @@ knote_fork(struct knlist *list, int pid)
 		kev.fflags = kn->kn_sfflags;
 		kev.data = kn->kn_id;		/* parent */
 		kev.udata = kn->kn_kevent.udata;/* preserve udata */
-		error = kqueue_register(kq, &kev, NULL, 0);
+		error = kqueue_register(kq, &kev, NULL, M_NOWAIT);
 		if (error)
 			kn->kn_fflags |= NOTE_TRACKERR;
 		if (kn->kn_fop->f_event(kn, NOTE_FORK))
@@ -1229,7 +1229,7 @@ kqueue_kevent(struct kqueue *kq, struct thread *td, int nchanges, int nevents,
 			if (!kevp->filter)
 				continue;
 			kevp->flags &= ~EV_SYSFLAGS;
-			error = kqueue_register(kq, kevp, td, 1);
+			error = kqueue_register(kq, kevp, td, M_WAITOK);
 			if (error || (kevp->flags & EV_RECEIPT)) {
 				if (nevents == 0)
 					return (error);
@@ -1370,12 +1370,11 @@ kqueue_fo_release(int filt)
 }
 
 /*
- * A ref to kq (obtained via kqueue_acquire) must be held.  waitok will
- * influence if memory allocation should wait.  Make sure it is 0 if you
- * hold any mutexes.
+ * A ref to kq (obtained via kqueue_acquire) must be held.
  */
 static int
-kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int waitok)
+kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td,
+    int mflag)
 {
 	struct filterops *fops;
 	struct file *fp;
@@ -1405,7 +1404,7 @@ kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int wa
 		 * allocation failures are handled in the loop, only
 		 * if the spare knote appears to be actually required.
 		 */
-		tkn = knote_alloc(waitok);
+		tkn = knote_alloc(mflag);
 	} else {
 		tkn = NULL;
 	}
@@ -1421,11 +1420,11 @@ findkn:
 			goto done;
 		if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
-		    kev->ident, 0) != 0) {
+		    kev->ident, M_NOWAIT) != 0) {
 			/* try again */
 			fdrop(fp, td);
 			fp = NULL;
-			error = kqueue_expand(kq, fops, kev->ident, waitok);
+			error = kqueue_expand(kq, fops, kev->ident, mflag);
 			if (error)
 				goto done;
 			goto findkn;
 		}
@@ -1462,7 +1461,7 @@ findkn:
 		}
 	} else {
 		if ((kev->flags & EV_ADD) == EV_ADD) {
-			error = kqueue_expand(kq, fops, kev->ident, waitok);
+			error = kqueue_expand(kq, fops, kev->ident, mflag);
 			if (error != 0)
 				goto done;
 		}
@@ -1693,12 +1692,11 @@ kqueue_schedtask(struct kqueue *kq)
  */
 static int
 kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
-    int waitok)
+    int mflag)
 {
 	struct klist *list, *tmp_knhash, *to_free;
 	u_long tmp_knhashmask;
 	int error, fd, size;
-	int mflag = waitok ? M_WAITOK : M_NOWAIT;
 
 	KQ_NOTOWNED(kq);
 
@@ -1737,8 +1735,8 @@ kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
 	} else {
 		if (kq->kq_knhashmask == 0) {
 			tmp_knhash = hashinit_flags(KN_HASHSIZE, M_KQUEUE,
-			    &tmp_knhashmask,
-			    waitok ? HASH_WAITOK : HASH_NOWAIT);
+			    &tmp_knhashmask, (mflag & M_WAITOK) != 0 ?
+			    HASH_WAITOK : HASH_NOWAIT);
 			if (tmp_knhash == NULL)
 				return (ENOMEM);
 			KQ_LOCK(kq);
@@ -1827,7 +1825,7 @@ kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
 			asbt = -1;
 	} else
 		asbt = 0;
-	marker = knote_alloc(1);
+	marker = knote_alloc(M_WAITOK);
 	marker->kn_status = KN_MARKER;
 	KQ_LOCK(kq);
 
@@ -2703,11 +2701,10 @@ knote_init(void)
 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);
 
 static struct knote *
-knote_alloc(int waitok)
+knote_alloc(int mflag)
 {
 
-	return (uma_zalloc(knote_zone, (waitok ? M_WAITOK : M_NOWAIT) |
-	    M_ZERO));
+	return (uma_zalloc(knote_zone, mflag | M_ZERO));
 }
 
 static void
@@ -2721,7 +2718,7 @@ knote_free(struct knote *kn)
  * Register the kev w/ the kq specified by fd.
  */
 int
-kqfd_register(int fd, struct kevent *kev, struct thread *td, int waitok)
+kqfd_register(int fd, struct kevent *kev, struct thread *td, int mflag)
 {
 	struct kqueue *kq;
 	struct file *fp;
@@ -2734,7 +2731,7 @@ kqfd_register(int fd, struct kevent *kev, struct thread *td, int waitok)
 	if ((error = kqueue_acquire(fp, &kq)) != 0)
 		goto noacquire;
 
-	error = kqueue_register(kq, kev, td, waitok);
+	error = kqueue_register(kq, kev, td, mflag);
 	kqueue_release(kq, 0);
 
 noacquire:
diff --git a/sys/kern/vfs_aio.c b/sys/kern/vfs_aio.c
index a8bce0ee2500..5506e6ed14c7 100644
--- a/sys/kern/vfs_aio.c
+++ b/sys/kern/vfs_aio.c
@@ -1595,7 +1595,7 @@ aio_aqueue(struct thread *td, struct aiocb *ujob, struct aioliojob *lj,
 	kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1 | evflags;
 	kev.data = (intptr_t)job;
 	kev.udata = job->uaiocb.aio_sigevent.sigev_value.sival_ptr;
-	error = kqfd_register(kqfd, &kev, td, 1);
+	error = kqfd_register(kqfd, &kev, td, M_WAITOK);
 	if (error)
 		goto aqueue_fail;
 
@@ -2164,7 +2164,8 @@ kern_lio_listio(struct thread *td, int mode, struct aiocb * const *uacb_list,
 		/* pass user defined sigval data */
 		kev.udata = lj->lioj_signal.sigev_value.sival_ptr;
 		error = kqfd_register(
-		    lj->lioj_signal.sigev_notify_kqueue, &kev, td, 1);
+		    lj->lioj_signal.sigev_notify_kqueue, &kev, td,
+		    M_WAITOK);
 		if (error) {
 			uma_zfree(aiolio_zone, lj);
 			return (error);
diff --git a/sys/sys/event.h b/sys/sys/event.h
index c3063e3b8cb5..e2e80f37946e 100644
--- a/sys/sys/event.h
+++ b/sys/sys/event.h
@@ -348,7 +348,7 @@ void knlist_cleardel(struct knlist *knl, struct thread *td,
 		knlist_cleardel((knl), (td), (islocked), 1)
 void	knote_fdclose(struct thread *p, int fd);
 int	kqfd_register(int fd, struct kevent *kev, struct thread *p,
-	    int waitok);
+	    int mflag);
 int	kqueue_add_filteropts(int filt, struct filterops *filtops);
 int	kqueue_del_filteropts(int filt);
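
After this change the last argument of kqfd_register(), kqueue_register(), kqueue_expand() and knote_alloc() is a malloc(9) flag rather than a boolean, so callers say M_WAITOK or M_NOWAIT directly and the flag is passed straight through to the allocator (as knote_alloc() now does with uma_zalloc()), removing the waitok-to-flag translation that kqueue_expand() used to perform. Below is a minimal sketch of an in-kernel caller of the updated interface; the helper kq_watch_fd() and its parameters are hypothetical and are not part of this commit.

#include <sys/param.h>
#include <sys/event.h>
#include <sys/malloc.h>
#include <sys/proc.h>

/*
 * Hypothetical helper (illustration only): watch `fd' for reads on the
 * kqueue named by `kqfd'.  `canwait' should be false whenever the caller
 * holds a mutex, in which case the allocation must not sleep.
 */
static int
kq_watch_fd(struct thread *td, int kqfd, int fd, bool canwait)
{
	struct kevent kev;

	EV_SET(&kev, fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);

	/* The final argument is now a malloc(9) flag, not a boolean. */
	return (kqfd_register(kqfd, &kev, td, canwait ? M_WAITOK : M_NOWAIT));
}

This mirrors the callers touched by the diff: the sleepable paths (kqueue_kevent(), aio_aqueue(), kern_lio_listio(), linux_epoll_ctl()) pass M_WAITOK, while knote_fork(), which runs with locks held, passes M_NOWAIT.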