-/* $OpenBSD: kern_event.c,v 1.169 2021/07/24 09:16:51 mpi Exp $ */
+/* $OpenBSD: kern_event.c,v 1.170 2021/11/06 05:48:47 visa Exp $ */
/*-
* Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
void filt_kqdetach(struct knote *kn);
int filt_kqueue(struct knote *kn, long hint);
+int filt_kqueuemodify(struct kevent *kev, struct knote *kn);
+int filt_kqueueprocess(struct knote *kn, struct kevent *kev);
+int filt_kqueue_common(struct knote *kn, struct kqueue *kq);
int filt_procattach(struct knote *kn);
void filt_procdetach(struct knote *kn);
int filt_proc(struct knote *kn, long hint);
void filt_seltruedetach(struct knote *kn);
const struct filterops kqread_filtops = {
- .f_flags = FILTEROP_ISFD,
+ .f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE,
.f_attach = NULL,
.f_detach = filt_kqdetach,
.f_event = filt_kqueue,
+ .f_modify = filt_kqueuemodify,
+ .f_process = filt_kqueueprocess,
};
const struct filterops proc_filtops = {
struct pool knote_pool;
struct pool kqueue_pool;
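+/*
+ * Lock shared by the kq_sel.si_note klists of all kqueues; assumed here
+ * from its use in klist_init_mutex() and the wakeup path below.
+ */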
+struct mutex kqueue_klist_lock = MUTEX_INITIALIZER(IPL_MPFLOOR);
int kq_ntimeouts = 0;
int kq_timeoutmax = (4 * 1024);
free(kq->kq_knlist, M_KEVENT, kq->kq_knlistsize *
sizeof(struct knlist));
hashfree(kq->kq_knhash, KN_HASHSIZE, M_KEVENT);
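+	/* Tear down the klist that was set up with klist_init_mutex(). */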
+ klist_free(&kq->kq_sel.si_note);
pool_put(&kqueue_pool, kq);
}
return (EINVAL);
kn->kn_fop = &kqread_filtops;
- klist_insert_locked(&kq->kq_sel.si_note, kn);
+ klist_insert(&kq->kq_sel.si_note, kn);
return (0);
}
{
struct kqueue *kq = kn->kn_fp->f_data;
- klist_remove_locked(&kq->kq_sel.si_note, kn);
+ klist_remove(&kq->kq_sel.si_note, kn);
+}
+
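+/*
+ * Shared readiness check for the kqueue read filter: the knote is
+ * active when the monitored kqueue has queued events.  The caller
+ * must hold the monitored kqueue's kq_lock.
+ */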
+int
+filt_kqueue_common(struct knote *kn, struct kqueue *kq)
+{
+ MUTEX_ASSERT_LOCKED(&kq->kq_lock);
+
+ kn->kn_data = kq->kq_count;
+
+ return (kn->kn_data > 0);
}
int
filt_kqueue(struct knote *kn, long hint)
{
struct kqueue *kq = kn->kn_fp->f_data;
+ int active;
mtx_enter(&kq->kq_lock);
- kn->kn_data = kq->kq_count;
+ active = filt_kqueue_common(kn, kq);
mtx_leave(&kq->kq_lock);
- return (kn->kn_data > 0);
+
+ return (active);
+}
+
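+/*
+ * f_modify callback: apply the incoming kevent to the knote and
+ * re-evaluate readiness, all under kq_lock.
+ */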
+int
+filt_kqueuemodify(struct kevent *kev, struct knote *kn)
+{
+ struct kqueue *kq = kn->kn_fp->f_data;
+ int active;
+
+ mtx_enter(&kq->kq_lock);
+ knote_modify(kev, kn);
+ active = filt_kqueue_common(kn, kq);
+ mtx_leave(&kq->kq_lock);
+
+ return (active);
+}
+
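+/*
+ * f_process callback: report the event if it is still active.  When
+ * delivering to userspace (kev != NULL), EV_ONESHOT events are
+ * reported without re-checking the kqueue's state.
+ */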
+int
+filt_kqueueprocess(struct knote *kn, struct kevent *kev)
+{
+ struct kqueue *kq = kn->kn_fp->f_data;
+ int active;
+
+ mtx_enter(&kq->kq_lock);
+ if (kev != NULL && (kn->kn_flags & EV_ONESHOT))
+ active = 1;
+ else
+ active = filt_kqueue_common(kn, kq);
+ if (active)
+ knote_submit(kn, kev);
+ mtx_leave(&kq->kq_lock);
+
+ return (active);
}
int
TAILQ_INIT(&kq->kq_head);
mtx_init(&kq->kq_lock, IPL_HIGH);
task_set(&kq->kq_task, kqueue_task, kq);
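+	/* Initialize the klist with the shared kqueue_klist_lock as its lock. */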
+ klist_init_mutex(&kq->kq_sel.si_note, &kqueue_klist_lock);
return (kq);
}
/* Kernel lock is needed inside selwakeup(). */
KERNEL_ASSERT_LOCKED();
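+	/* Hold the klist lock while notifying kq_sel.si_note. */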
+ mtx_enter(&kqueue_klist_lock);
mtx_enter(&kq->kq_lock);
if (kq->kq_state & KQ_SEL) {
kq->kq_state &= ~KQ_SEL;
mtx_leave(&kq->kq_lock);
KNOTE(&kq->kq_sel.si_note, 0);
}
+ mtx_leave(&kqueue_klist_lock);
KQRELE(kq);
}