/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu-common.h"
#include "block.h"
#include "qemu-queue.h"
#include "qemu_socket.h"

/* One registered file descriptor plus its callbacks.  Nodes live on
 * AioContext::aio_handlers and are walked by aio_wait(); removal while
 * a walk is in progress only sets 'deleted' (see aio_set_fd_handler). */
struct AioHandler
{
    int fd;                       /* file descriptor being monitored */
    IOHandler *io_read;           /* called when fd is readable */
    IOHandler *io_write;          /* called when fd is writable */
    AioFlushHandler *io_flush;    /* returns 0 when no requests are pending on fd */
    int deleted;                  /* node pending removal; skipped by walkers */
    void *opaque;                 /* user argument passed to all three callbacks */
    QLIST_ENTRY(AioHandler) node; /* linkage in ctx->aio_handlers */
};
31
Paolo Bonzinia915f4b2012-09-13 12:28:51 +020032static AioHandler *find_aio_handler(AioContext *ctx, int fd)
aliguoria76bab42008-09-22 19:17:18 +000033{
34 AioHandler *node;
35
Paolo Bonzinia915f4b2012-09-13 12:28:51 +020036 QLIST_FOREACH(node, &ctx->aio_handlers, node) {
aliguoria76bab42008-09-22 19:17:18 +000037 if (node->fd == fd)
Alexander Graf79d5ca52009-05-06 02:58:48 +020038 if (!node->deleted)
39 return node;
aliguoria76bab42008-09-22 19:17:18 +000040 }
41
42 return NULL;
43}
44
Paolo Bonzinia915f4b2012-09-13 12:28:51 +020045void aio_set_fd_handler(AioContext *ctx,
46 int fd,
47 IOHandler *io_read,
48 IOHandler *io_write,
49 AioFlushHandler *io_flush,
50 void *opaque)
aliguoria76bab42008-09-22 19:17:18 +000051{
52 AioHandler *node;
53
Paolo Bonzinia915f4b2012-09-13 12:28:51 +020054 node = find_aio_handler(ctx, fd);
aliguoria76bab42008-09-22 19:17:18 +000055
56 /* Are we deleting the fd handler? */
57 if (!io_read && !io_write) {
58 if (node) {
59 /* If the lock is held, just mark the node as deleted */
Paolo Bonzinia915f4b2012-09-13 12:28:51 +020060 if (ctx->walking_handlers)
aliguoria76bab42008-09-22 19:17:18 +000061 node->deleted = 1;
62 else {
63 /* Otherwise, delete it for real. We can't just mark it as
64 * deleted because deleted nodes are only cleaned up after
65 * releasing the walking_handlers lock.
66 */
Blue Swirl72cf2d42009-09-12 07:36:22 +000067 QLIST_REMOVE(node, node);
Anthony Liguori7267c092011-08-20 22:09:37 -050068 g_free(node);
aliguoria76bab42008-09-22 19:17:18 +000069 }
70 }
71 } else {
72 if (node == NULL) {
73 /* Alloc and insert if it's not already there */
Anthony Liguori7267c092011-08-20 22:09:37 -050074 node = g_malloc0(sizeof(AioHandler));
aliguoria76bab42008-09-22 19:17:18 +000075 node->fd = fd;
Paolo Bonzinia915f4b2012-09-13 12:28:51 +020076 QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
aliguoria76bab42008-09-22 19:17:18 +000077 }
78 /* Update handler with latest information */
79 node->io_read = io_read;
80 node->io_write = io_write;
81 node->io_flush = io_flush;
82 node->opaque = opaque;
83 }
aliguoria76bab42008-09-22 19:17:18 +000084}
85
Paolo Bonzinia915f4b2012-09-13 12:28:51 +020086void aio_set_event_notifier(AioContext *ctx,
87 EventNotifier *notifier,
88 EventNotifierHandler *io_read,
89 AioFlushEventNotifierHandler *io_flush)
Paolo Bonzini9958c352012-06-09 03:44:00 +020090{
Paolo Bonzinia915f4b2012-09-13 12:28:51 +020091 aio_set_fd_handler(ctx, event_notifier_get_fd(notifier),
92 (IOHandler *)io_read, NULL,
93 (AioFlushHandler *)io_flush, notifier);
Paolo Bonzini9958c352012-06-09 03:44:00 +020094}
95
/* Run one iteration of the event loop for @ctx.
 *
 * Returns true if progress was made: either a queued bottom half ran,
 * or at least one fd had pending requests (io_flush returned non-zero)
 * and we blocked in select() for it.  Returns false when no handler
 * reported pending work, letting qemu_aio_wait()-style callers exit
 * their loop.
 */
bool aio_wait(AioContext *ctx)
{
    AioHandler *node;
    fd_set rdfds, wrfds;
    int max_fd = -1;     /* select() nfds argument; stays -1 if no fd is armed */
    int ret;
    bool busy;

    /*
     * If there are callbacks left that have been queued, we need to call
     * them.  Do not call select in this case, because it is possible that
     * the caller does not need a complete flush (as is the case for
     * qemu_aio_wait loops).
     */
    if (aio_bh_poll(ctx)) {
        return true;
    }

    /* Take the "walk in progress" lock so concurrent aio_set_fd_handler()
     * calls only mark nodes deleted instead of freeing them under us. */
    ctx->walking_handlers++;

    FD_ZERO(&rdfds);
    FD_ZERO(&wrfds);

    /* fill fd sets */
    busy = false;
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        /* If there aren't pending AIO operations, don't invoke callbacks.
         * Otherwise, if there are no AIO requests, qemu_aio_wait() would
         * wait indefinitely.
         */
        if (!node->deleted && node->io_flush) {
            if (node->io_flush(node->opaque) == 0) {
                /* Nothing pending on this fd: leave it out of the sets. */
                continue;
            }
            busy = true;
        }
        if (!node->deleted && node->io_read) {
            FD_SET(node->fd, &rdfds);
            max_fd = MAX(max_fd, node->fd + 1);
        }
        if (!node->deleted && node->io_write) {
            FD_SET(node->fd, &wrfds);
            max_fd = MAX(max_fd, node->fd + 1);
        }
    }

    ctx->walking_handlers--;

    /* No AIO operations? Get us out of here */
    if (!busy) {
        return false;
    }

    /* wait until next event */
    ret = select(max_fd, &rdfds, &wrfds, NULL, NULL);

    /* if we have any readable fds, dispatch event */
    /* NOTE(review): select() failure (ret < 0, e.g. EINTR) is not handled
     * here; we skip dispatch and still return true — confirm intended. */
    if (ret > 0) {
        /* we have to walk very carefully in case
         * qemu_aio_set_fd_handler is called while we're walking */
        node = QLIST_FIRST(&ctx->aio_handlers);
        while (node) {
            AioHandler *tmp;

            /* Re-take the walk lock per iteration: the callbacks below may
             * recurse into this loop or call aio_set_fd_handler(). */
            ctx->walking_handlers++;

            if (!node->deleted &&
                FD_ISSET(node->fd, &rdfds) &&
                node->io_read) {
                node->io_read(node->opaque);
            }
            if (!node->deleted &&
                FD_ISSET(node->fd, &wrfds) &&
                node->io_write) {
                node->io_write(node->opaque);
            }

            /* Advance before any possible free of the current node. */
            tmp = node;
            node = QLIST_NEXT(node, node);

            ctx->walking_handlers--;

            /* Only the outermost walk may free nodes marked deleted. */
            if (!ctx->walking_handlers && tmp->deleted) {
                QLIST_REMOVE(tmp, node);
                g_free(tmp);
            }
        }
    }

    return true;
}