-rw-r--r--  aio.c                  |  2
-rw-r--r--  async.c                | 30
-rw-r--r--  hw/hw.h                |  1
-rw-r--r--  iohandler.c            |  1
-rw-r--r--  linux-aio.c            |  1
-rw-r--r--  main-loop.c            | 18
-rw-r--r--  main-loop.h            | 55
-rw-r--r--  qemu-aio.h             | 79
-rw-r--r--  qemu-char.h            |  1
-rw-r--r--  qemu-common.h          |  1
-rw-r--r--  qemu-coroutine-lock.c  |  2
11 files changed, 119 insertions(+), 72 deletions(-)
diff --git a/aio.c b/aio.c
index 44214e1ffc..7e3fe708d2 100644
--- a/aio.c
+++ b/aio.c
@@ -18,8 +18,6 @@
#include "qemu-queue.h"
#include "qemu_socket.h"
-typedef struct AioHandler AioHandler;
-
/* The list of registered AIO handlers */
static QLIST_HEAD(, AioHandler) aio_handlers;
diff --git a/async.c b/async.c
index 85cc6410c5..189ee1beb5 100644
--- a/async.c
+++ b/async.c
@@ -26,9 +26,6 @@
#include "qemu-aio.h"
#include "main-loop.h"
-/* Anchor of the list of Bottom Halves belonging to the context */
-static struct QEMUBH *first_bh;
-
/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */
@@ -41,27 +38,26 @@ struct QEMUBH {
bool deleted;
};
-QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque)
+QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
QEMUBH *bh;
bh = g_malloc0(sizeof(QEMUBH));
bh->cb = cb;
bh->opaque = opaque;
- bh->next = first_bh;
- first_bh = bh;
+ bh->next = ctx->first_bh;
+ ctx->first_bh = bh;
return bh;
}
-int qemu_bh_poll(void)
+int aio_bh_poll(AioContext *ctx)
{
QEMUBH *bh, **bhp, *next;
int ret;
- static int nesting = 0;
- nesting++;
+ ctx->walking_bh++;
ret = 0;
- for (bh = first_bh; bh; bh = next) {
+ for (bh = ctx->first_bh; bh; bh = next) {
next = bh->next;
if (!bh->deleted && bh->scheduled) {
bh->scheduled = 0;
@@ -72,11 +68,11 @@ int qemu_bh_poll(void)
}
}
- nesting--;
+ ctx->walking_bh--;
/* remove deleted bhs */
- if (!nesting) {
- bhp = &first_bh;
+ if (!ctx->walking_bh) {
+ bhp = &ctx->first_bh;
while (*bhp) {
bh = *bhp;
if (bh->deleted) {
@@ -120,11 +116,11 @@ void qemu_bh_delete(QEMUBH *bh)
bh->deleted = 1;
}
-void qemu_bh_update_timeout(uint32_t *timeout)
+void aio_bh_update_timeout(AioContext *ctx, uint32_t *timeout)
{
QEMUBH *bh;
- for (bh = first_bh; bh; bh = bh->next) {
+ for (bh = ctx->first_bh; bh; bh = bh->next) {
if (!bh->deleted && bh->scheduled) {
if (bh->idle) {
/* idle bottom halves will be polled at least
@@ -140,3 +136,7 @@ void qemu_bh_update_timeout(uint32_t *timeout)
}
}
+AioContext *aio_context_new(void)
+{
+ return g_new0(AioContext, 1);
+}
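
Note on the async.c hunks: the file-scope first_bh list and the static nesting counter become per-AioContext state (ctx->first_bh, ctx->walking_bh). Below is a minimal usage sketch under that API; the callback and counter are invented for illustration, and it assumes the QEMU-internal "qemu-aio.h" introduced by this patch:

    #include "qemu-aio.h"

    /* Invented example callback and payload; not part of the patch. */
    static void tick_cb(void *opaque)
    {
        int *counter = opaque;
        (*counter)++;
    }

    static void bh_example(void)
    {
        AioContext *ctx = aio_context_new();
        int counter = 0;

        /* The bottom half is now anchored in ctx->first_bh, not in a global. */
        QEMUBH *bh = aio_bh_new(ctx, tick_cb, &counter);
        qemu_bh_schedule(bh);

        /* aio_bh_poll() walks ctx->first_bh; ctx->walking_bh defers the
         * actual unlinking of deleted bottom halves until no walk is in
         * progress. */
        aio_bh_poll(ctx);

        qemu_bh_delete(bh);
    }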
diff --git a/hw/hw.h b/hw/hw.h
index b337ee3042..f530f6f41a 100644
--- a/hw/hw.h
+++ b/hw/hw.h
@@ -10,6 +10,7 @@
#include "ioport.h"
#include "irq.h"
+#include "qemu-aio.h"
#include "qemu-file.h"
#include "vmstate.h"
#include "qemu-log.h"
diff --git a/iohandler.c b/iohandler.c
index a2d871bb91..60460a6f88 100644
--- a/iohandler.c
+++ b/iohandler.c
@@ -26,6 +26,7 @@
#include "qemu-common.h"
#include "qemu-char.h"
#include "qemu-queue.h"
+#include "qemu-aio.h"
#include "main-loop.h"
#ifndef _WIN32
diff --git a/linux-aio.c b/linux-aio.c
index ce9b5d4be8..f3d8ef33ca 100644
--- a/linux-aio.c
+++ b/linux-aio.c
@@ -9,6 +9,7 @@
*/
#include "qemu-common.h"
#include "qemu-aio.h"
+#include "qemu-queue.h"
#include "block/raw-posix-aio.h"
#include <sys/eventfd.h>
diff --git a/main-loop.c b/main-loop.c
index baefe413d1..40fdbd3770 100644
--- a/main-loop.c
+++ b/main-loop.c
@@ -26,6 +26,7 @@
#include "qemu-timer.h"
#include "slirp/slirp.h"
#include "main-loop.h"
+#include "qemu-aio.h"
#ifndef _WIN32
@@ -199,6 +200,8 @@ static int qemu_signal_init(void)
}
#endif
+static AioContext *qemu_aio_context;
+
int qemu_init_main_loop(void)
{
int ret;
@@ -218,6 +221,7 @@ int qemu_init_main_loop(void)
return ret;
}
+ qemu_aio_context = aio_context_new();
return 0;
}
@@ -481,7 +485,7 @@ int main_loop_wait(int nonblocking)
if (nonblocking) {
timeout = 0;
} else {
- qemu_bh_update_timeout(&timeout);
+ aio_bh_update_timeout(qemu_aio_context, &timeout);
}
/* poll any events */
@@ -510,3 +514,15 @@ int main_loop_wait(int nonblocking)
return ret;
}
+
+/* Functions to operate on the main QEMU AioContext. */
+
+QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque)
+{
+ return aio_bh_new(qemu_aio_context, cb, opaque);
+}
+
+int qemu_bh_poll(void)
+{
+ return aio_bh_poll(qemu_aio_context);
+}
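
Note on the main-loop.c hunks: the main loop now owns a single qemu_aio_context and keeps the old qemu_bh_new()/qemu_bh_poll() entry points as thin wrappers, so existing callers are unaffected. A sketch of such an unchanged caller; the device callback and init function are invented, only the qemu_bh_* calls come from the patch:

    #include "qemu-common.h"
    #include "main-loop.h"

    /* Invented device callback; deferred completion work runs from the
     * main loop. */
    static void my_device_bh(void *opaque)
    {
    }

    void my_device_init(void *dev)
    {
        /* Same legacy call as before the patch; internally it now forwards
         * to aio_bh_new(qemu_aio_context, my_device_bh, dev). */
        QEMUBH *bh = qemu_bh_new(my_device_bh, dev);
        qemu_bh_schedule(bh);
    }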
diff --git a/main-loop.h b/main-loop.h
index 91a0aff9c7..1d1a56b858 100644
--- a/main-loop.h
+++ b/main-loop.h
@@ -25,6 +25,8 @@
#ifndef QEMU_MAIN_LOOP_H
#define QEMU_MAIN_LOOP_H 1
+#include "qemu-aio.h"
+
#define SIG_IPI SIGUSR1
/**
@@ -163,7 +165,6 @@ void qemu_del_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque);
typedef void IOReadHandler(void *opaque, const uint8_t *buf, int size);
typedef int IOCanReadHandler(void *opaque);
-typedef void IOHandler(void *opaque);
/**
* qemu_set_fd_handler2: Register a file descriptor with the main loop
@@ -244,56 +245,6 @@ int qemu_set_fd_handler(int fd,
IOHandler *fd_write,
void *opaque);
-typedef struct QEMUBH QEMUBH;
-typedef void QEMUBHFunc(void *opaque);
-
-/**
- * qemu_bh_new: Allocate a new bottom half structure.
- *
- * Bottom halves are lightweight callbacks whose invocation is guaranteed
- * to be wait-free, thread-safe and signal-safe. The #QEMUBH structure
- * is opaque and must be allocated prior to its use.
- */
-QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque);
-
-/**
- * qemu_bh_schedule: Schedule a bottom half.
- *
- * Scheduling a bottom half interrupts the main loop and causes the
- * execution of the callback that was passed to qemu_bh_new.
- *
- * Bottom halves that are scheduled from a bottom half handler are instantly
- * invoked. This can create an infinite loop if a bottom half handler
- * schedules itself.
- *
- * @bh: The bottom half to be scheduled.
- */
-void qemu_bh_schedule(QEMUBH *bh);
-
-/**
- * qemu_bh_cancel: Cancel execution of a bottom half.
- *
- * Canceling execution of a bottom half undoes the effect of calls to
- * qemu_bh_schedule without freeing its resources yet. While cancellation
- * itself is also wait-free and thread-safe, it can of course race with the
- * loop that executes bottom halves unless you are holding the iothread
- * mutex. This makes it mostly useless if you are not holding the mutex.
- *
- * @bh: The bottom half to be canceled.
- */
-void qemu_bh_cancel(QEMUBH *bh);
-
-/**
- *qemu_bh_delete: Cancel execution of a bottom half and free its resources.
- *
- * Deleting a bottom half frees the memory that was allocated for it by
- * qemu_bh_new. It also implies canceling the bottom half if it was
- * scheduled.
- *
- * @bh: The bottom half to be deleted.
- */
-void qemu_bh_delete(QEMUBH *bh);
-
#ifdef CONFIG_POSIX
/**
* qemu_add_child_watch: Register a child process for reaping.
@@ -349,8 +300,8 @@ void qemu_fd_register(int fd);
void qemu_iohandler_fill(int *pnfds, fd_set *readfds, fd_set *writefds, fd_set *xfds);
void qemu_iohandler_poll(fd_set *readfds, fd_set *writefds, fd_set *xfds, int rc);
+QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque);
void qemu_bh_schedule_idle(QEMUBH *bh);
int qemu_bh_poll(void);
-void qemu_bh_update_timeout(uint32_t *timeout);
#endif
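
Note on the main-loop.h hunks: the QEMUBH documentation and the IOHandler typedef move out, but the new #include "qemu-aio.h" keeps both visible to code that only includes main-loop.h. A small compile-check sketch under that assumption; the handler and function names are invented:

    #include "main-loop.h"   /* now pulls in qemu-aio.h for QEMUBH/IOHandler */

    /* Invented no-op handler, just to show the declarations still resolve. */
    static void idle_cb(void *opaque)
    {
    }

    void register_idle_bh(void)
    {
        QEMUBH *bh = qemu_bh_new(idle_cb, NULL);
        qemu_bh_schedule_idle(bh);   /* still declared in main-loop.h */
    }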
diff --git a/qemu-aio.h b/qemu-aio.h
index dc416a5239..2ed6ad3723 100644
--- a/qemu-aio.h
+++ b/qemu-aio.h
@@ -15,7 +15,6 @@
#define QEMU_AIO_H
#include "qemu-common.h"
-#include "qemu-char.h"
#include "event_notifier.h"
typedef struct BlockDriverAIOCB BlockDriverAIOCB;
@@ -39,9 +38,87 @@ void *qemu_aio_get(AIOPool *pool, BlockDriverState *bs,
BlockDriverCompletionFunc *cb, void *opaque);
void qemu_aio_release(void *p);
+typedef struct AioHandler AioHandler;
+typedef void QEMUBHFunc(void *opaque);
+typedef void IOHandler(void *opaque);
+
+typedef struct AioContext {
+ /* Anchor of the list of Bottom Halves belonging to the context */
+ struct QEMUBH *first_bh;
+
+ /* A simple lock used to protect the first_bh list, and ensure that
+ * no callbacks are removed while we're walking and dispatching callbacks.
+ */
+ int walking_bh;
+} AioContext;
+
/* Returns 1 if there are still outstanding AIO requests; 0 otherwise */
typedef int (AioFlushEventNotifierHandler)(EventNotifier *e);
+/**
+ * aio_context_new: Allocate a new AioContext.
+ *
+ * An AioContext provides a mini event loop that can be waited on synchronously.
+ * It also provides bottom halves, a service to execute a piece of code
+ * as soon as possible.
+ */
+AioContext *aio_context_new(void);
+
+/**
+ * aio_bh_new: Allocate a new bottom half structure.
+ *
+ * Bottom halves are lightweight callbacks whose invocation is guaranteed
+ * to be wait-free, thread-safe and signal-safe. The #QEMUBH structure
+ * is opaque and must be allocated prior to its use.
+ */
+QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
+
+/**
+ * aio_bh_poll: Poll bottom halves for an AioContext.
+ *
+ * These are internal functions used by the QEMU main loop.
+ */
+int aio_bh_poll(AioContext *ctx);
+void aio_bh_update_timeout(AioContext *ctx, uint32_t *timeout);
+
+/**
+ * qemu_bh_schedule: Schedule a bottom half.
+ *
+ * Scheduling a bottom half interrupts the main loop and causes the
+ * execution of the callback that was passed to qemu_bh_new.
+ *
+ * Bottom halves that are scheduled from a bottom half handler are instantly
+ * invoked. This can create an infinite loop if a bottom half handler
+ * schedules itself.
+ *
+ * @bh: The bottom half to be scheduled.
+ */
+void qemu_bh_schedule(QEMUBH *bh);
+
+/**
+ * qemu_bh_cancel: Cancel execution of a bottom half.
+ *
+ * Canceling execution of a bottom half undoes the effect of calls to
+ * qemu_bh_schedule without freeing its resources yet. While cancellation
+ * itself is also wait-free and thread-safe, it can of course race with the
+ * loop that executes bottom halves unless you are holding the iothread
+ * mutex. This makes it mostly useless if you are not holding the mutex.
+ *
+ * @bh: The bottom half to be canceled.
+ */
+void qemu_bh_cancel(QEMUBH *bh);
+
+/**
+ * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
+ *
+ * Deleting a bottom half frees the memory that was allocated for it by
+ * qemu_bh_new. It also implies canceling the bottom half if it was
+ * scheduled.
+ *
+ * @bh: The bottom half to be deleted.
+ */
+void qemu_bh_delete(QEMUBH *bh);
+
/* Flush any pending AIO operation. This function will block until all
* outstanding AIO operations have been completed or cancelled. */
void qemu_aio_flush(void);
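
Note on the qemu-aio.h hunks: walking_bh is what makes qemu_bh_delete() safe to call from inside a bottom-half callback; deletion only sets bh->deleted, and aio_bh_poll() prunes the list once the counter drops back to zero. A sketch of that pattern with an invented one-shot bottom half:

    #include "qemu-aio.h"

    /* Invented one-shot bottom half; deleting it from its own callback is
     * the case ctx->walking_bh is there to make safe. */
    static QEMUBH *one_shot_bh;

    static void one_shot_cb(void *opaque)
    {
        /* Only marks the BH as deleted; aio_bh_poll() unlinks and frees it
         * after the walk, once walking_bh is back to zero. */
        qemu_bh_delete(one_shot_bh);
    }

    void install_one_shot(AioContext *ctx)
    {
        one_shot_bh = aio_bh_new(ctx, one_shot_cb, NULL);
        qemu_bh_schedule(one_shot_bh);
    }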
diff --git a/qemu-char.h b/qemu-char.h
index 486644b3bd..5087168bd7 100644
--- a/qemu-char.h
+++ b/qemu-char.h
@@ -5,6 +5,7 @@
#include "qemu-queue.h"
#include "qemu-option.h"
#include "qemu-config.h"
+#include "qemu-aio.h"
#include "qobject.h"
#include "qstring.h"
#include "main-loop.h"
diff --git a/qemu-common.h b/qemu-common.h
index 36ce522066..24e13ccb5e 100644
--- a/qemu-common.h
+++ b/qemu-common.h
@@ -14,6 +14,7 @@
typedef struct QEMUTimer QEMUTimer;
typedef struct QEMUFile QEMUFile;
+typedef struct QEMUBH QEMUBH;
typedef struct DeviceState DeviceState;
struct Monitor;
diff --git a/qemu-coroutine-lock.c b/qemu-coroutine-lock.c
index 26ad76bf50..9dda3f86c9 100644
--- a/qemu-coroutine-lock.c
+++ b/qemu-coroutine-lock.c
@@ -26,7 +26,7 @@
#include "qemu-coroutine.h"
#include "qemu-coroutine-int.h"
#include "qemu-queue.h"
-#include "main-loop.h"
+#include "qemu-aio.h"
#include "trace.h"
static QTAILQ_HEAD(, Coroutine) unlock_bh_queue =