author      Visa Hankala <visa@cvs.openbsd.org>    2020-05-17 10:53:15 +0000
committer   Visa Hankala <visa@cvs.openbsd.org>    2020-05-17 10:53:15 +0000
commit      808f67e34191db07bdd18aeec6dbfd3b75f3d665
tree        8617b40b86ffdced68446cd25ea81eaf258dad3f
parent      a2641293a79c45152258468d4a3f85f51ad25414
Add kqueue_scan_state struct
The struct keeps track of the end point of an event queue scan by
persisting the end marker. This will be needed when kqueue_scan() is
called repeatedly to complete a scan in a piecewise fashion. The end
marker has to be preserved between calls because otherwise the scan
might collect an event more than once. If a collected event gets
reactivated during scanning, it will be added at the tail of the queue,
out of reach because of the end marker.

OK mpi@
-rw-r--r--  sys/kern/kern_event.c |  98
-rw-r--r--  sys/sys/event.h       |  13
2 files changed, 84 insertions, 27 deletions
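The new helpers simply bracket the existing kqueue_scan() call. A minimal
sketch of the pattern used by sys_kevent() in the diff below (argument names
abbreviated, error handling and the surrounding KQREF/KQRELE elided):

	struct kqueue_scan_state scan;
	int error, n;

	kqueue_scan_setup(&scan, kq);	/* zero the state, bind it to kq */
	error = kqueue_scan(&scan, nevents, eventlist, tsp, p, &n);
	kqueue_scan_finish(&scan);	/* remove the end marker from the queue */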
diff --git a/sys/kern/kern_event.c b/sys/kern/kern_event.c
index 3613f30fad0..0bed8e5f671 100644
--- a/sys/kern/kern_event.c
+++ b/sys/kern/kern_event.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_event.c,v 1.131 2020/04/07 13:27:51 visa Exp $ */
+/* $OpenBSD: kern_event.c,v 1.132 2020/05/17 10:53:14 visa Exp $ */
/*-
* Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
@@ -62,7 +62,7 @@ void KQREF(struct kqueue *);
void KQRELE(struct kqueue *);
int kqueue_sleep(struct kqueue *, struct timespec *);
-int kqueue_scan(struct kqueue *kq, int maxevents,
+int kqueue_scan(struct kqueue_scan_state *scan, int maxevents,
struct kevent *ulistp, struct timespec *timeout,
struct proc *p, int *retval);
@@ -529,6 +529,7 @@ out:
int
sys_kevent(struct proc *p, void *v, register_t *retval)
{
+ struct kqueue_scan_state scan;
	struct filedesc *fdp = p->p_fd;
struct sys_kevent_args /* {
syscallarg(int) fd;
@@ -612,8 +613,10 @@ sys_kevent(struct proc *p, void *v, register_t *retval)
KQREF(kq);
FRELE(fp, p);
- error = kqueue_scan(kq, SCARG(uap, nevents), SCARG(uap, eventlist),
+ kqueue_scan_setup(&scan, kq);
+ error = kqueue_scan(&scan, SCARG(uap, nevents), SCARG(uap, eventlist),
tsp, p, &n);
+ kqueue_scan_finish(&scan);
KQRELE(kq);
*retval = n;
return (error);
@@ -870,11 +873,12 @@ kqueue_sleep(struct kqueue *kq, struct timespec *tsp)
}
int
-kqueue_scan(struct kqueue *kq, int maxevents, struct kevent *ulistp,
- struct timespec *tsp, struct proc *p, int *retval)
+kqueue_scan(struct kqueue_scan_state *scan, int maxevents,
+    struct kevent *ulistp, struct timespec *tsp, struct proc *p, int *retval)
{
struct kevent *kevp;
- struct knote mend, mstart, *kn;
+ struct knote *kn;
+	struct kqueue *kq = scan->kqs_kq;
int s, count, nkev = 0, error = 0;
struct kevent kev[KQ_NEVENTS];
@@ -882,9 +886,6 @@ kqueue_scan(struct kqueue *kq, int maxevents, struct kevent *ulistp,
if (count == 0)
goto done;
- memset(&mstart, 0, sizeof(mstart));
- memset(&mend, 0, sizeof(mend));
-
retry:
if (kq->kq_state & KQ_DYING) {
error = EBADF;
@@ -894,7 +895,8 @@ retry:
kevp = &kev[0];
s = splhigh();
if (kq->kq_count == 0) {
- if (tsp != NULL && !timespecisset(tsp)) {
+ if ((tsp != NULL && !timespecisset(tsp)) ||
+ scan->kqs_nevent != 0) {
splx(s);
error = 0;
goto done;
@@ -910,27 +912,40 @@ retry:
goto done;
}
- mstart.kn_filter = EVFILT_MARKER;
- mstart.kn_status = KN_PROCESSING;
- TAILQ_INSERT_HEAD(&kq->kq_head, &mstart, kn_tqe);
- mend.kn_filter = EVFILT_MARKER;
- mend.kn_status = KN_PROCESSING;
- TAILQ_INSERT_TAIL(&kq->kq_head, &mend, kn_tqe);
+ /*
+ * Put the end marker in the queue to limit the scan to the events
+ * that are currently active. This prevents events from being
+ * recollected if they reactivate during scan.
+ *
+ * If a partial scan has been performed already but no events have
+ * been collected, reposition the end marker to make any new events
+ * reachable.
+ */
+ if (!scan->kqs_queued) {
+ TAILQ_INSERT_TAIL(&kq->kq_head, &scan->kqs_end, kn_tqe);
+ scan->kqs_queued = 1;
+ } else if (scan->kqs_nevent == 0) {
+ TAILQ_REMOVE(&kq->kq_head, &scan->kqs_end, kn_tqe);
+ TAILQ_INSERT_TAIL(&kq->kq_head, &scan->kqs_end, kn_tqe);
+ }
+
+ TAILQ_INSERT_HEAD(&kq->kq_head, &scan->kqs_start, kn_tqe);
while (count) {
- kn = TAILQ_NEXT(&mstart, kn_tqe);
+ kn = TAILQ_NEXT(&scan->kqs_start, kn_tqe);
if (kn->kn_filter == EVFILT_MARKER) {
- if (kn == &mend) {
- TAILQ_REMOVE(&kq->kq_head, &mend, kn_tqe);
- TAILQ_REMOVE(&kq->kq_head, &mstart, kn_tqe);
+ if (kn == &scan->kqs_end) {
+ TAILQ_REMOVE(&kq->kq_head, &scan->kqs_start,
+ kn_tqe);
splx(s);
- if (count == maxevents)
+ if (scan->kqs_nevent == 0)
goto retry;
goto done;
}
/* Move start marker past another thread's marker. */
- TAILQ_REMOVE(&kq->kq_head, &mstart, kn_tqe);
- TAILQ_INSERT_AFTER(&kq->kq_head, kn, &mstart, kn_tqe);
+ TAILQ_REMOVE(&kq->kq_head, &scan->kqs_start, kn_tqe);
+ TAILQ_INSERT_AFTER(&kq->kq_head, kn, &scan->kqs_start,
+ kn_tqe);
continue;
}
@@ -958,6 +973,9 @@ retry:
*kevp = kn->kn_kevent;
kevp++;
nkev++;
+ count--;
+ scan->kqs_nevent++;
+
if (kn->kn_flags & EV_ONESHOT) {
splx(s);
kn->kn_fop->f_detach(kn);
@@ -983,7 +1001,6 @@ retry:
knote_release(kn);
}
kqueue_check(kq);
- count--;
if (nkev == KQ_NEVENTS) {
splx(s);
#ifdef KTRACE
@@ -1000,8 +1017,7 @@ retry:
break;
}
}
- TAILQ_REMOVE(&kq->kq_head, &mend, kn_tqe);
- TAILQ_REMOVE(&kq->kq_head, &mstart, kn_tqe);
+ TAILQ_REMOVE(&kq->kq_head, &scan->kqs_start, kn_tqe);
splx(s);
done:
if (nkev != 0) {
@@ -1016,6 +1032,36 @@ done:
return (error);
}
+void
+kqueue_scan_setup(struct kqueue_scan_state *scan, struct kqueue *kq)
+{
+ memset(scan, 0, sizeof(*scan));
+ scan->kqs_kq = kq;
+ scan->kqs_start.kn_filter = EVFILT_MARKER;
+ scan->kqs_start.kn_status = KN_PROCESSING;
+ scan->kqs_end.kn_filter = EVFILT_MARKER;
+ scan->kqs_end.kn_status = KN_PROCESSING;
+}
+
+void
+kqueue_scan_finish(struct kqueue_scan_state *scan)
+{
+ struct kqueue *kq = scan->kqs_kq;
+ int s;
+
+ KASSERT(scan->kqs_start.kn_filter == EVFILT_MARKER);
+ KASSERT(scan->kqs_start.kn_status == KN_PROCESSING);
+ KASSERT(scan->kqs_end.kn_filter == EVFILT_MARKER);
+ KASSERT(scan->kqs_end.kn_status == KN_PROCESSING);
+
+ if (scan->kqs_queued) {
+ scan->kqs_queued = 0;
+ s = splhigh();
+ TAILQ_REMOVE(&kq->kq_head, &scan->kqs_end, kn_tqe);
+ splx(s);
+ }
+}
+
/*
* XXX
* This could be expanded to call kqueue_scan, if desired.
diff --git a/sys/sys/event.h b/sys/sys/event.h
index 3f42b89aaa7..51487835ced 100644
--- a/sys/sys/event.h
+++ b/sys/sys/event.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: event.h,v 1.36 2020/05/10 00:56:06 guenther Exp $ */
+/* $OpenBSD: event.h,v 1.37 2020/05/17 10:53:14 visa Exp $ */
/*-
* Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
@@ -189,6 +189,15 @@ struct knote {
#define kn_fp kn_ptr.p_fp
};
+struct kqueue_scan_state {
+	struct kqueue *kqs_kq;	/* kqueue of this scan */
+	struct knote kqs_start;	/* start marker */
+	struct knote kqs_end;	/* end marker */
+	int kqs_nevent;		/* number of events collected */
+	int kqs_queued;		/* if set, end marker is
+				 * in queue */
+};
+
struct proc;
extern const struct filterops sig_filtops;
@@ -200,6 +209,8 @@ extern void knote_fdclose(struct proc *p, int fd);
extern void knote_processexit(struct proc *);
extern int kqueue_register(struct kqueue *kq,
struct kevent *kev, struct proc *p);
+extern void kqueue_scan_setup(struct kqueue_scan_state *, struct kqueue *);
+extern void kqueue_scan_finish(struct kqueue_scan_state *);
extern int filt_seltrue(struct knote *kn, long hint);
extern int seltrue_kqfilter(dev_t, struct knote *);
extern void klist_insert(struct klist *, struct knote *);
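
The piecewise scanning that the commit message anticipates would presumably
reuse one scan state across several kqueue_scan() calls, so that the preserved
end marker keeps already-collected events out of reach. A hypothetical sketch
(no such caller exists in this change; the loop condition is illustrative):

	struct kqueue_scan_state scan;
	int error, n;

	kqueue_scan_setup(&scan, kq);
	do {
		error = kqueue_scan(&scan, nevents, ulistp, tsp, p, &n);
		/* copy out or process the n events collected in this pass */
	} while (error == 0 && n > 0);
	kqueue_scan_finish(&scan);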