author     Paolo Bonzini <pbonzini@redhat.com>   2012-06-09 04:01:51 +0200
committer  Paolo Bonzini <pbonzini@redhat.com>   2012-10-30 09:30:53 +0100
commit     f42b22077bc63a482d7a8755b54e33475ab78541 (patch)
tree       33f248df458d15ca3a816be0f2ef24ffa8af17a7
parent     cd9ba1ebcf0439457f22b75b38533f6634f23c5f (diff)
aio: add Win32 implementation
The Win32 implementation will only accept EventNotifiers, thus a few drivers are disabled under Windows. EventNotifiers are a good match for the GSource implementation, too, because the Win32 port of glib allows placing their HANDLEs in a GPollFD.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
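The GPollFD remark above can be made concrete: on the Win32 port of glib, GPollFD.fd can hold a Windows HANDLE, so an EventNotifier can be watched by a GSource just as a file descriptor would be on POSIX. A minimal sketch of that wiring, assuming the event_notifier_get_handle() helper used by the patch below and glib's g_source_add_poll(); the watch_notifier() function and the header path are illustrative, not part of this commit:

    #include <glib.h>
    #include "qemu-common.h"
    #include "event_notifier.h"   /* EventNotifier, event_notifier_get_handle() */

    static GPollFD pfd;           /* must stay alive as long as it is registered */

    /* Hand an EventNotifier's HANDLE to a GSource for polling. */
    static void watch_notifier(GSource *source, EventNotifier *e)
    {
        /* glib's Win32 port stores a HANDLE in GPollFD.fd. */
        pfd.fd = (uintptr_t)event_notifier_get_handle(e);
        pfd.events = G_IO_IN;
        g_source_add_poll(source, &pfd);
    }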
-rw-r--r--  Makefile.objs                       6
-rw-r--r--  aio-posix.c (renamed from aio.c)    0
-rw-r--r--  aio-win32.c                       209
-rw-r--r--  block/Makefile.objs                 6
-rw-r--r--  main-loop.c                         2
5 files changed, 218 insertions, 5 deletions
diff --git a/Makefile.objs b/Makefile.objs
index 98046fc64a..a8ade04c02 100644
--- a/Makefile.objs
+++ b/Makefile.objs
@@ -42,12 +42,12 @@ coroutine-obj-$(CONFIG_WIN32) += coroutine-win32.o
# block-obj-y is code used by both qemu system emulation and qemu-img
block-obj-y = iov.o cache-utils.o qemu-option.o module.o async.o
-block-obj-y += nbd.o block.o blockjob.o aio.o aes.o qemu-config.o
+block-obj-y += nbd.o block.o blockjob.o aes.o qemu-config.o
block-obj-y += qemu-progress.o qemu-sockets.o uri.o notify.o
block-obj-y += $(coroutine-obj-y) $(qobject-obj-y) $(version-obj-y)
block-obj-$(CONFIG_POSIX) += posix-aio-compat.o
-block-obj-$(CONFIG_POSIX) += event_notifier-posix.o
-block-obj-$(CONFIG_WIN32) += event_notifier-win32.o
+block-obj-$(CONFIG_POSIX) += event_notifier-posix.o aio-posix.o
+block-obj-$(CONFIG_WIN32) += event_notifier-win32.o aio-win32.o
block-obj-$(CONFIG_LINUX_AIO) += linux-aio.o
block-obj-y += block/
block-obj-y += $(qapi-obj-y) qapi-types.o qapi-visit.o
diff --git a/aio.c b/aio-posix.c
index 44247224e2..44247224e2 100644
--- a/aio.c
+++ b/aio-posix.c
diff --git a/aio-win32.c b/aio-win32.c
new file mode 100644
index 0000000000..9881fdbca7
--- /dev/null
+++ b/aio-win32.c
@@ -0,0 +1,209 @@
+/*
+ * QEMU aio implementation
+ *
+ * Copyright IBM Corp., 2008
+ * Copyright Red Hat Inc., 2012
+ *
+ * Authors:
+ *  Anthony Liguori <aliguori@us.ibm.com>
+ *  Paolo Bonzini <pbonzini@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#include "qemu-common.h"
+#include "block.h"
+#include "qemu-queue.h"
+#include "qemu_socket.h"
+
+struct AioHandler {
+    EventNotifier *e;
+    EventNotifierHandler *io_notify;
+    AioFlushEventNotifierHandler *io_flush;
+    GPollFD pfd;
+    int deleted;
+    QLIST_ENTRY(AioHandler) node;
+};
+
+void aio_set_event_notifier(AioContext *ctx,
+                            EventNotifier *e,
+                            EventNotifierHandler *io_notify,
+                            AioFlushEventNotifierHandler *io_flush)
+{
+    AioHandler *node;
+
+    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+        if (node->e == e && !node->deleted) {
+            break;
+        }
+    }
+
+    /* Are we deleting the fd handler? */
+    if (!io_notify) {
+        if (node) {
+            /* If the lock is held, just mark the node as deleted */
+            if (ctx->walking_handlers) {
+                node->deleted = 1;
+                node->pfd.revents = 0;
+            } else {
+                /* Otherwise, delete it for real. We can't just mark it as
+                 * deleted because deleted nodes are only cleaned up after
+                 * releasing the walking_handlers lock.
+                 */
+                QLIST_REMOVE(node, node);
+                g_free(node);
+            }
+        }
+    } else {
+        if (node == NULL) {
+            /* Alloc and insert if it's not already there */
+            node = g_malloc0(sizeof(AioHandler));
+            node->e = e;
+            node->pfd.fd = (uintptr_t)event_notifier_get_handle(e);
+            node->pfd.events = G_IO_IN;
+            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
+        }
+        /* Update handler with latest information */
+        node->io_notify = io_notify;
+        node->io_flush = io_flush;
+    }
+}
+
+bool aio_pending(AioContext *ctx)
+{
+    AioHandler *node;
+
+    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+        if (node->pfd.revents && node->io_notify) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+bool aio_poll(AioContext *ctx, bool blocking)
+{
+    AioHandler *node;
+    HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
+    bool busy, progress;
+    int count;
+
+    progress = false;
+
+    /*
+     * If there are callbacks left that have been queued, we need to call them.
+     * Do not call select in this case, because it is possible that the caller
+     * does not need a complete flush (as is the case for qemu_aio_wait loops).
+     */
+    if (aio_bh_poll(ctx)) {
+        blocking = false;
+        progress = true;
+    }
+
+    /*
+     * Then dispatch any pending callbacks from the GSource.
+     *
+     * We have to walk very carefully in case qemu_aio_set_fd_handler is
+     * called while we're walking.
+     */
+    node = QLIST_FIRST(&ctx->aio_handlers);
+    while (node) {
+        AioHandler *tmp;
+
+        ctx->walking_handlers++;
+
+        if (node->pfd.revents && node->io_notify) {
+            node->pfd.revents = 0;
+            node->io_notify(node->e);
+            progress = true;
+        }
+
+        tmp = node;
+        node = QLIST_NEXT(node, node);
+
+        ctx->walking_handlers--;
+
+        if (!ctx->walking_handlers && tmp->deleted) {
+            QLIST_REMOVE(tmp, node);
+            g_free(tmp);
+        }
+    }
+
+    if (progress && !blocking) {
+        return true;
+    }
+
+    ctx->walking_handlers++;
+
+    /* fill fd sets */
+    busy = false;
+    count = 0;
+    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+        /* If there aren't pending AIO operations, don't invoke callbacks.
+         * Otherwise, if there are no AIO requests, qemu_aio_wait() would
+         * wait indefinitely.
+         */
+        if (!node->deleted && node->io_flush) {
+            if (node->io_flush(node->e) == 0) {
+                continue;
+            }
+            busy = true;
+        }
+        if (!node->deleted && node->io_notify) {
+            events[count++] = event_notifier_get_handle(node->e);
+        }
+    }
+
+    ctx->walking_handlers--;
+
+    /* No AIO operations? Get us out of here */
+    if (!busy) {
+        return progress;
+    }
+
+    /* wait until next event */
+    for (;;) {
+        int timeout = blocking ? INFINITE : 0;
+        int ret = WaitForMultipleObjects(count, events, FALSE, timeout);
+
+        /* if no event was signaled, we are done; otherwise dispatch it below */
+        if ((DWORD) (ret - WAIT_OBJECT_0) >= count) {
+            break;
+        }
+
+        blocking = false;
+
+        /* we have to walk very carefully in case
+         * qemu_aio_set_fd_handler is called while we're walking */
+        node = QLIST_FIRST(&ctx->aio_handlers);
+        while (node) {
+            AioHandler *tmp;
+
+            ctx->walking_handlers++;
+
+            if (!node->deleted &&
+                event_notifier_get_handle(node->e) == events[ret - WAIT_OBJECT_0] &&
+                node->io_notify) {
+                node->io_notify(node->e);
+                progress = true;
+            }
+
+            tmp = node;
+            node = QLIST_NEXT(node, node);
+
+            ctx->walking_handlers--;
+
+            if (!ctx->walking_handlers && tmp->deleted) {
+                QLIST_REMOVE(tmp, node);
+                g_free(tmp);
+            }
+        }
+    }
+
+    return progress;
+}
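To show how the API added in aio-win32.c is meant to be driven, here is a rough usage sketch of aio_set_event_notifier() plus aio_poll(). It assumes aio_context_new() and the EventNotifier helpers from the surrounding tree (event_notifier_init(), event_notifier_set(), event_notifier_test_and_clear(), event_notifier_cleanup()); the example_* names are invented and the header paths follow the pre-1.3 layout:

    #include "qemu-common.h"
    #include "qemu-aio.h"          /* AioContext, aio_set_event_notifier(), aio_poll() */
    #include "event_notifier.h"

    static bool example_done;

    /* Completion callback: runs once the notifier's HANDLE is signaled. */
    static void example_cb(EventNotifier *e)
    {
        event_notifier_test_and_clear(e);   /* re-arm the notifier */
        example_done = true;
    }

    /* "Flush" callback: nonzero while requests are in flight, so that
     * aio_poll() keeps waiting on this notifier. */
    static int example_flush(EventNotifier *e)
    {
        return !example_done;
    }

    static void example(void)
    {
        AioContext *ctx = aio_context_new();
        EventNotifier e;

        event_notifier_init(&e, 0);
        aio_set_event_notifier(ctx, &e, example_cb, example_flush);

        event_notifier_set(&e);             /* pretend a request just completed */
        while (!example_done) {
            aio_poll(ctx, true);            /* blocks in WaitForMultipleObjects */
        }

        aio_set_event_notifier(ctx, &e, NULL, NULL);   /* unregister */
        event_notifier_cleanup(&e);
    }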
diff --git a/block/Makefile.objs b/block/Makefile.objs
index 554f429d05..684765bf63 100644
--- a/block/Makefile.objs
+++ b/block/Makefile.objs
@@ -2,13 +2,17 @@ block-obj-y += raw.o cow.o qcow.o vdi.o vmdk.o cloop.o dmg.o bochs.o vpc.o vvfat
block-obj-y += qcow2.o qcow2-refcount.o qcow2-cluster.o qcow2-snapshot.o qcow2-cache.o
block-obj-y += qed.o qed-gencb.o qed-l2-cache.o qed-table.o qed-cluster.o
block-obj-y += qed-check.o
-block-obj-y += parallels.o nbd.o blkdebug.o sheepdog.o blkverify.o
+block-obj-y += parallels.o blkdebug.o blkverify.o
block-obj-$(CONFIG_WIN32) += raw-win32.o
block-obj-$(CONFIG_POSIX) += raw-posix.o
+
+ifeq ($(CONFIG_POSIX),y)
+block-obj-y += nbd.o sheepdog.o
block-obj-$(CONFIG_LIBISCSI) += iscsi.o
block-obj-$(CONFIG_CURL) += curl.o
block-obj-$(CONFIG_RBD) += rbd.o
block-obj-$(CONFIG_GLUSTERFS) += gluster.o
+endif
common-obj-y += stream.o
common-obj-y += commit.o
diff --git a/main-loop.c b/main-loop.c
index 1fdc3bdf2e..a86c275149 100644
--- a/main-loop.c
+++ b/main-loop.c
@@ -537,6 +537,7 @@ bool qemu_aio_wait(void)
     return aio_poll(qemu_aio_context, true);
 }
+#ifdef CONFIG_POSIX
 void qemu_aio_set_fd_handler(int fd,
                              IOHandler *io_read,
                              IOHandler *io_write,
@@ -549,7 +550,6 @@ void qemu_aio_set_fd_handler(int fd,
     qemu_set_fd_handler2(fd, NULL, io_read, io_write, opaque);
 }
-#ifdef CONFIG_POSIX
 void qemu_aio_set_event_notifier(EventNotifier *notifier,
                                  EventNotifierHandler *io_read,
                                  AioFlushEventNotifierHandler *io_flush)
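The effect of this last hunk is that qemu_aio_set_fd_handler() is now compiled only on POSIX, while the EventNotifier entry point is available on every host and qemu_aio_wait() simply drives aio_poll() on the global context. A short sketch of the portable pattern a driver can follow; the example_* callbacks are invented and the header paths are assumed from the pre-1.3 tree:

    #include "qemu-common.h"
    #include "qemu-aio.h"       /* qemu_aio_set_event_notifier(), qemu_aio_wait() */
    #include "event_notifier.h"

    static bool example_pending;

    /* Completion handler for the notifier. */
    static void example_complete(EventNotifier *e)
    {
        event_notifier_test_and_clear(e);
        example_pending = false;
    }

    /* Report whether this driver still has requests in flight. */
    static int example_flush(EventNotifier *e)
    {
        return example_pending;
    }

    /* Register once, e.g. at open time; works on both POSIX and Win32 builds,
     * whereas the fd-based qemu_aio_set_fd_handler() is now POSIX-only. */
    static void example_attach(EventNotifier *notifier)
    {
        qemu_aio_set_event_notifier(notifier, example_complete, example_flush);
    }

    /* Synchronously wait for outstanding requests to finish;
     * qemu_aio_wait() is just aio_poll(qemu_aio_context, true). */
    static void example_drain(void)
    {
        while (example_pending) {
            qemu_aio_wait();
        }
    }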