author    Paolo Bonzini <pbonzini@redhat.com>    2014-07-09 11:53:05 +0200
committer Stefan Hajnoczi <stefanha@redhat.com>  2014-08-29 10:46:58 +0100
commit    e4c7e2d12d7b1c4501ab3397218a206d4953e633 (patch)
tree      e6953b841050e558d58554c2db9b066869322209 /aio-posix.c
parent    3672fa50837c1700deb1f86f0068c22c7e49aa22 (diff)
AioContext: export and use aio_dispatch
So far, aio_poll's scheme was dispatch/poll/dispatch, where the first
dispatch phase was used only in the GSource case in order to avoid a
blocking poll. Earlier patches changed it to dispatch/prepare/poll/dispatch,
where prepare is aio_compute_timeout.

By making aio_dispatch public, we can remove the first dispatch phase
altogether, so that both aio_poll and the GSource use the same
prepare/poll/dispatch scheme.

This patch breaks the invariant that aio_poll(..., true) will not block
the first time it returns false. This used to be fundamental for
qemu_aio_flush's implementation as "while (qemu_aio_wait()) {}" but no
code in QEMU relies on this invariant anymore. The return value of
aio_poll() is now comparable with that of g_main_context_iteration.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
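
[Editor's note: for readers new to the prepare/poll/dispatch model the commit
message refers to, below is a minimal generic sketch of one such iteration
built on plain poll(2). The names fd_handler, loop_prepare, loop_dispatch,
loop_iterate and on_readable are invented for this illustration and are not
QEMU or glib identifiers; in QEMU the corresponding pieces are
aio_compute_timeout for the prepare phase, the poll itself, and the
now-exported aio_dispatch.]

/* Illustrative only: a generic prepare/poll/dispatch iteration.
 * All identifiers below are invented for this sketch. */
#include <poll.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

typedef struct {
    struct pollfd pfd;          /* fd and requested events */
    void (*handler)(int fd);    /* callback run in the dispatch phase */
} fd_handler;

/* prepare: compute how long poll() may block (cf. aio_compute_timeout) */
static int loop_prepare(bool blocking)
{
    return blocking ? -1 : 0;   /* -1 = block indefinitely, 0 = return at once */
}

/* dispatch: run the callback of every fd that poll() reported as ready */
static bool loop_dispatch(fd_handler *handlers, int n)
{
    bool progress = false;
    for (int i = 0; i < n; i++) {
        if (handlers[i].pfd.revents) {
            handlers[i].handler(handlers[i].pfd.fd);
            progress = true;
        }
    }
    return progress;
}

/* one iteration: prepare -> poll -> dispatch; the return value says
 * whether anything was dispatched, like g_main_context_iteration() */
static bool loop_iterate(fd_handler *handlers, int n, bool blocking)
{
    struct pollfd pfds[16];     /* assumes n <= 16 for this sketch */
    for (int i = 0; i < n; i++) {
        pfds[i] = handlers[i].pfd;
        pfds[i].revents = 0;
    }
    int ready = poll(pfds, n, loop_prepare(blocking));
    for (int i = 0; i < n; i++) {
        handlers[i].pfd.revents = ready > 0 ? pfds[i].revents : 0;
    }
    return loop_dispatch(handlers, n);
}

static void on_readable(int fd)
{
    char buf[64];
    ssize_t len = read(fd, buf, sizeof(buf));
    printf("dispatched, read %zd bytes\n", len);
}

int main(void)
{
    int fds[2];
    if (pipe(fds) < 0) {
        return 1;
    }
    fd_handler h = {
        .pfd = { .fd = fds[0], .events = POLLIN },
        .handler = on_readable,
    };
    (void)write(fds[1], "x", 1);
    printf("progress: %d\n", loop_iterate(&h, 1, true));   /* dispatches the byte */
    printf("progress: %d\n", loop_iterate(&h, 1, false));  /* nothing left, does not block */
    return 0;
}

As with g_main_context_iteration, the iteration function reports whether any
callback actually ran; that is the sense in which the commit message says
aio_poll's return value is now comparable.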
Diffstat (limited to 'aio-posix.c')
-rw-r--r--    aio-posix.c    55
1 file changed, 13 insertions(+), 42 deletions(-)
diff --git a/aio-posix.c b/aio-posix.c
index 798a3ff532..0936b4ff9d 100644
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -119,12 +119,21 @@ bool aio_pending(AioContext *ctx)
     return false;
 }
 
-static bool aio_dispatch(AioContext *ctx)
+bool aio_dispatch(AioContext *ctx)
 {
     AioHandler *node;
     bool progress = false;
 
     /*
+     * If there are callbacks left that have been queued, we need to call them.
+     * Do not call select in this case, because it is possible that the caller
+     * does not need a complete flush (as is the case for aio_poll loops).
+     */
+    if (aio_bh_poll(ctx)) {
+        progress = true;
+    }
+
+    /*
      * We have to walk very carefully in case aio_set_fd_handler is
      * called while we're walking.
      */
@@ -184,22 +193,9 @@ bool aio_poll(AioContext *ctx, bool blocking)
 
     /* aio_notify can avoid the expensive event_notifier_set if
      * everything (file descriptors, bottom halves, timers) will
-     * be re-evaluated before the next blocking poll(). This happens
-     * in two cases:
-     *
-     * 1) when aio_poll is called with blocking == false
-     *
-     * 2) when we are called after poll(). If we are called before
-     *    poll(), bottom halves will not be re-evaluated and we need
-     *    aio_notify() if blocking == true.
-     *
-     * The first aio_dispatch() only does something when AioContext is
-     * running as a GSource, and in that case aio_poll is used only
-     * with blocking == false, so this optimization is already quite
-     * effective. However, the code is ugly and should be restructured
-     * to have a single aio_dispatch() call. To do this, we need to
-     * reorganize aio_poll into a prepare/poll/dispatch model like
-     * glib's.
+     * be re-evaluated before the next blocking poll(). This is
+     * already true when aio_poll is called with blocking == false;
+     * if blocking == true, it is only true after poll() returns.
      *
      * If we're in a nested event loop, ctx->dispatching might be true.
      * In that case we can restore it just before returning, but we
@@ -207,26 +203,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
      */
     aio_set_dispatching(ctx, !blocking);
 
-    /*
-     * If there are callbacks left that have been queued, we need to call them.
-     * Do not call select in this case, because it is possible that the caller
-     * does not need a complete flush (as is the case for aio_poll loops).
-     */
-    if (aio_bh_poll(ctx)) {
-        blocking = false;
-        progress = true;
-    }
-
-    /* Re-evaluate condition (1) above. */
-    aio_set_dispatching(ctx, !blocking);
-    if (aio_dispatch(ctx)) {
-        progress = true;
-    }
-
-    if (progress && !blocking) {
-        goto out;
-    }
-
     ctx->walking_handlers++;
 
     g_array_set_size(ctx->pollfds, 0);
@@ -264,15 +240,10 @@ bool aio_poll(AioContext *ctx, bool blocking)
 
     /* Run dispatch even if there were no readable fds to run timers */
     aio_set_dispatching(ctx, true);
-    if (aio_bh_poll(ctx)) {
-        progress = true;
-    }
-
     if (aio_dispatch(ctx)) {
         progress = true;
     }
 
-out:
     aio_set_dispatching(ctx, was_dispatching);
     return progress;
 }