Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 1 | /* |
| 2 | * QEMU aio implementation |
| 3 | * |
| 4 | * Copyright IBM Corp., 2008 |
| 5 | * Copyright Red Hat Inc., 2012 |
| 6 | * |
| 7 | * Authors: |
| 8 | * Anthony Liguori <aliguori@us.ibm.com> |
| 9 | * Paolo Bonzini <pbonzini@redhat.com> |
| 10 | * |
| 11 | * This work is licensed under the terms of the GNU GPL, version 2. See |
| 12 | * the COPYING file in the top-level directory. |
| 13 | * |
| 14 | * Contributions after 2012-01-13 are licensed under the terms of the |
| 15 | * GNU GPL, version 2 or (at your option) any later version. |
| 16 | */ |
| 17 | |
Peter Maydell | d38ea87 | 2016-01-29 17:50:05 +0000 | [diff] [blame] | 18 | #include "qemu/osdep.h" |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 19 | #include "qemu-common.h" |
Paolo Bonzini | 737e150 | 2012-12-17 18:19:44 +0100 | [diff] [blame] | 20 | #include "block/block.h" |
Paolo Bonzini | 1de7afc | 2012-12-17 18:20:00 +0100 | [diff] [blame] | 21 | #include "qemu/queue.h" |
| 22 | #include "qemu/sockets.h" |
Stefan Hajnoczi | 4a1cba3 | 2016-12-01 19:26:42 +0000 | [diff] [blame] | 23 | #include "qapi/error.h" |
Paolo Bonzini | b92d9a9 | 2017-01-12 19:07:58 +0100 | [diff] [blame] | 24 | #include "qemu/rcu_queue.h" |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 25 | |
/* Per-handler state: one node per registered socket or EventNotifier,
 * linked into ctx->aio_handlers (an RCU-walked list protected by
 * ctx->list_lock).
 */
struct AioHandler {
    EventNotifier *e;                /* notifier whose HANDLE is waited on */
    IOHandler *io_read;              /* socket read callback, may be NULL */
    IOHandler *io_write;             /* socket write callback, may be NULL */
    EventNotifierHandler *io_notify; /* EventNotifier callback, may be NULL */
    GPollFD pfd;                     /* fd + events/revents; registered with
                                      * glib via g_source_add_poll for
                                      * notifier nodes */
    int deleted;                     /* deferred removal: set while the list
                                      * is concurrently walked */
    void *opaque;                    /* argument passed to io_read/io_write */
    bool is_external;                /* consulted through aio_node_check()
                                      * when building the wait list */
    QLIST_ENTRY(AioHandler) node;
};
| 37 | |
Paolo Bonzini | b493317 | 2014-07-09 11:53:10 +0200 | [diff] [blame] | 38 | void aio_set_fd_handler(AioContext *ctx, |
| 39 | int fd, |
Fam Zheng | dca21ef | 2015-10-23 11:08:05 +0800 | [diff] [blame] | 40 | bool is_external, |
Paolo Bonzini | b493317 | 2014-07-09 11:53:10 +0200 | [diff] [blame] | 41 | IOHandler *io_read, |
| 42 | IOHandler *io_write, |
Stefan Hajnoczi | 4a1cba3 | 2016-12-01 19:26:42 +0000 | [diff] [blame] | 43 | AioPollFn *io_poll, |
Paolo Bonzini | b493317 | 2014-07-09 11:53:10 +0200 | [diff] [blame] | 44 | void *opaque) |
| 45 | { |
| 46 | /* fd is a SOCKET in our case */ |
| 47 | AioHandler *node; |
| 48 | |
Paolo Bonzini | b92d9a9 | 2017-01-12 19:07:58 +0100 | [diff] [blame] | 49 | qemu_lockcnt_lock(&ctx->list_lock); |
Paolo Bonzini | b493317 | 2014-07-09 11:53:10 +0200 | [diff] [blame] | 50 | QLIST_FOREACH(node, &ctx->aio_handlers, node) { |
| 51 | if (node->pfd.fd == fd && !node->deleted) { |
| 52 | break; |
| 53 | } |
| 54 | } |
| 55 | |
| 56 | /* Are we deleting the fd handler? */ |
| 57 | if (!io_read && !io_write) { |
| 58 | if (node) { |
Paolo Bonzini | b92d9a9 | 2017-01-12 19:07:58 +0100 | [diff] [blame] | 59 | /* If aio_poll is in progress, just mark the node as deleted */ |
| 60 | if (qemu_lockcnt_count(&ctx->list_lock)) { |
Paolo Bonzini | b493317 | 2014-07-09 11:53:10 +0200 | [diff] [blame] | 61 | node->deleted = 1; |
| 62 | node->pfd.revents = 0; |
| 63 | } else { |
| 64 | /* Otherwise, delete it for real. We can't just mark it as |
| 65 | * deleted because deleted nodes are only cleaned up after |
Paolo Bonzini | b92d9a9 | 2017-01-12 19:07:58 +0100 | [diff] [blame] | 66 | * releasing the list_lock. |
Paolo Bonzini | b493317 | 2014-07-09 11:53:10 +0200 | [diff] [blame] | 67 | */ |
| 68 | QLIST_REMOVE(node, node); |
| 69 | g_free(node); |
| 70 | } |
| 71 | } |
| 72 | } else { |
| 73 | HANDLE event; |
| 74 | |
| 75 | if (node == NULL) { |
| 76 | /* Alloc and insert if it's not already there */ |
Markus Armbruster | 3ba235a | 2014-12-04 13:55:09 +0100 | [diff] [blame] | 77 | node = g_new0(AioHandler, 1); |
Paolo Bonzini | b493317 | 2014-07-09 11:53:10 +0200 | [diff] [blame] | 78 | node->pfd.fd = fd; |
Paolo Bonzini | b92d9a9 | 2017-01-12 19:07:58 +0100 | [diff] [blame] | 79 | QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node); |
Paolo Bonzini | b493317 | 2014-07-09 11:53:10 +0200 | [diff] [blame] | 80 | } |
| 81 | |
| 82 | node->pfd.events = 0; |
| 83 | if (node->io_read) { |
| 84 | node->pfd.events |= G_IO_IN; |
| 85 | } |
| 86 | if (node->io_write) { |
| 87 | node->pfd.events |= G_IO_OUT; |
| 88 | } |
| 89 | |
| 90 | node->e = &ctx->notifier; |
| 91 | |
| 92 | /* Update handler with latest information */ |
| 93 | node->opaque = opaque; |
| 94 | node->io_read = io_read; |
| 95 | node->io_write = io_write; |
Fam Zheng | dca21ef | 2015-10-23 11:08:05 +0800 | [diff] [blame] | 96 | node->is_external = is_external; |
Paolo Bonzini | b493317 | 2014-07-09 11:53:10 +0200 | [diff] [blame] | 97 | |
| 98 | event = event_notifier_get_handle(&ctx->notifier); |
| 99 | WSAEventSelect(node->pfd.fd, event, |
| 100 | FD_READ | FD_ACCEPT | FD_CLOSE | |
| 101 | FD_CONNECT | FD_WRITE | FD_OOB); |
| 102 | } |
| 103 | |
Paolo Bonzini | b92d9a9 | 2017-01-12 19:07:58 +0100 | [diff] [blame] | 104 | qemu_lockcnt_unlock(&ctx->list_lock); |
Paolo Bonzini | b493317 | 2014-07-09 11:53:10 +0200 | [diff] [blame] | 105 | aio_notify(ctx); |
| 106 | } |
| 107 | |
Stefan Hajnoczi | 684e508 | 2016-12-01 19:26:49 +0000 | [diff] [blame] | 108 | void aio_set_fd_poll(AioContext *ctx, int fd, |
| 109 | IOHandler *io_poll_begin, |
| 110 | IOHandler *io_poll_end) |
| 111 | { |
| 112 | /* Not implemented */ |
| 113 | } |
| 114 | |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 115 | void aio_set_event_notifier(AioContext *ctx, |
| 116 | EventNotifier *e, |
Fam Zheng | dca21ef | 2015-10-23 11:08:05 +0800 | [diff] [blame] | 117 | bool is_external, |
Stefan Hajnoczi | 4a1cba3 | 2016-12-01 19:26:42 +0000 | [diff] [blame] | 118 | EventNotifierHandler *io_notify, |
| 119 | AioPollFn *io_poll) |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 120 | { |
| 121 | AioHandler *node; |
| 122 | |
Paolo Bonzini | b92d9a9 | 2017-01-12 19:07:58 +0100 | [diff] [blame] | 123 | qemu_lockcnt_lock(&ctx->list_lock); |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 124 | QLIST_FOREACH(node, &ctx->aio_handlers, node) { |
| 125 | if (node->e == e && !node->deleted) { |
| 126 | break; |
| 127 | } |
| 128 | } |
| 129 | |
| 130 | /* Are we deleting the fd handler? */ |
| 131 | if (!io_notify) { |
| 132 | if (node) { |
Paolo Bonzini | e3713e0 | 2012-09-24 14:57:41 +0200 | [diff] [blame] | 133 | g_source_remove_poll(&ctx->source, &node->pfd); |
| 134 | |
Paolo Bonzini | b92d9a9 | 2017-01-12 19:07:58 +0100 | [diff] [blame] | 135 | /* aio_poll is in progress, just mark the node as deleted */ |
| 136 | if (qemu_lockcnt_count(&ctx->list_lock)) { |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 137 | node->deleted = 1; |
| 138 | node->pfd.revents = 0; |
| 139 | } else { |
| 140 | /* Otherwise, delete it for real. We can't just mark it as |
| 141 | * deleted because deleted nodes are only cleaned up after |
Paolo Bonzini | b92d9a9 | 2017-01-12 19:07:58 +0100 | [diff] [blame] | 142 | * releasing the list_lock. |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 143 | */ |
| 144 | QLIST_REMOVE(node, node); |
| 145 | g_free(node); |
| 146 | } |
| 147 | } |
| 148 | } else { |
| 149 | if (node == NULL) { |
| 150 | /* Alloc and insert if it's not already there */ |
Markus Armbruster | 3ba235a | 2014-12-04 13:55:09 +0100 | [diff] [blame] | 151 | node = g_new0(AioHandler, 1); |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 152 | node->e = e; |
| 153 | node->pfd.fd = (uintptr_t)event_notifier_get_handle(e); |
| 154 | node->pfd.events = G_IO_IN; |
Fam Zheng | dca21ef | 2015-10-23 11:08:05 +0800 | [diff] [blame] | 155 | node->is_external = is_external; |
Paolo Bonzini | b92d9a9 | 2017-01-12 19:07:58 +0100 | [diff] [blame] | 156 | QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node); |
Paolo Bonzini | e3713e0 | 2012-09-24 14:57:41 +0200 | [diff] [blame] | 157 | |
| 158 | g_source_add_poll(&ctx->source, &node->pfd); |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 159 | } |
| 160 | /* Update handler with latest information */ |
| 161 | node->io_notify = io_notify; |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 162 | } |
Paolo Bonzini | 7ed2b24 | 2012-09-25 10:22:39 +0200 | [diff] [blame] | 163 | |
Paolo Bonzini | b92d9a9 | 2017-01-12 19:07:58 +0100 | [diff] [blame] | 164 | qemu_lockcnt_unlock(&ctx->list_lock); |
Paolo Bonzini | 7ed2b24 | 2012-09-25 10:22:39 +0200 | [diff] [blame] | 165 | aio_notify(ctx); |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 166 | } |
| 167 | |
Stefan Hajnoczi | 684e508 | 2016-12-01 19:26:49 +0000 | [diff] [blame] | 168 | void aio_set_event_notifier_poll(AioContext *ctx, |
| 169 | EventNotifier *notifier, |
| 170 | EventNotifierHandler *io_poll_begin, |
| 171 | EventNotifierHandler *io_poll_end) |
| 172 | { |
| 173 | /* Not implemented */ |
| 174 | } |
| 175 | |
Paolo Bonzini | a3462c6 | 2014-07-09 11:53:08 +0200 | [diff] [blame] | 176 | bool aio_prepare(AioContext *ctx) |
| 177 | { |
Paolo Bonzini | b493317 | 2014-07-09 11:53:10 +0200 | [diff] [blame] | 178 | static struct timeval tv0; |
| 179 | AioHandler *node; |
| 180 | bool have_select_revents = false; |
| 181 | fd_set rfds, wfds; |
| 182 | |
Paolo Bonzini | b92d9a9 | 2017-01-12 19:07:58 +0100 | [diff] [blame] | 183 | /* |
| 184 | * We have to walk very carefully in case aio_set_fd_handler is |
| 185 | * called while we're walking. |
| 186 | */ |
| 187 | qemu_lockcnt_inc(&ctx->list_lock); |
| 188 | |
Paolo Bonzini | b493317 | 2014-07-09 11:53:10 +0200 | [diff] [blame] | 189 | /* fill fd sets */ |
| 190 | FD_ZERO(&rfds); |
| 191 | FD_ZERO(&wfds); |
Paolo Bonzini | b92d9a9 | 2017-01-12 19:07:58 +0100 | [diff] [blame] | 192 | QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) { |
Paolo Bonzini | b493317 | 2014-07-09 11:53:10 +0200 | [diff] [blame] | 193 | if (node->io_read) { |
| 194 | FD_SET ((SOCKET)node->pfd.fd, &rfds); |
| 195 | } |
| 196 | if (node->io_write) { |
| 197 | FD_SET ((SOCKET)node->pfd.fd, &wfds); |
| 198 | } |
| 199 | } |
| 200 | |
| 201 | if (select(0, &rfds, &wfds, NULL, &tv0) > 0) { |
Paolo Bonzini | b92d9a9 | 2017-01-12 19:07:58 +0100 | [diff] [blame] | 202 | QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) { |
Paolo Bonzini | b493317 | 2014-07-09 11:53:10 +0200 | [diff] [blame] | 203 | node->pfd.revents = 0; |
| 204 | if (FD_ISSET(node->pfd.fd, &rfds)) { |
| 205 | node->pfd.revents |= G_IO_IN; |
| 206 | have_select_revents = true; |
| 207 | } |
| 208 | |
| 209 | if (FD_ISSET(node->pfd.fd, &wfds)) { |
| 210 | node->pfd.revents |= G_IO_OUT; |
| 211 | have_select_revents = true; |
| 212 | } |
| 213 | } |
| 214 | } |
| 215 | |
Paolo Bonzini | b92d9a9 | 2017-01-12 19:07:58 +0100 | [diff] [blame] | 216 | qemu_lockcnt_dec(&ctx->list_lock); |
Paolo Bonzini | b493317 | 2014-07-09 11:53:10 +0200 | [diff] [blame] | 217 | return have_select_revents; |
Paolo Bonzini | a3462c6 | 2014-07-09 11:53:08 +0200 | [diff] [blame] | 218 | } |
| 219 | |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 220 | bool aio_pending(AioContext *ctx) |
| 221 | { |
| 222 | AioHandler *node; |
Paolo Bonzini | b92d9a9 | 2017-01-12 19:07:58 +0100 | [diff] [blame] | 223 | bool result = false; |
Paolo Bonzini | abf90d3 | 2017-01-12 19:07:56 +0100 | [diff] [blame] | 224 | |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 225 | /* |
Paolo Bonzini | 87f68d3 | 2014-07-07 15:18:02 +0200 | [diff] [blame] | 226 | * We have to walk very carefully in case aio_set_fd_handler is |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 227 | * called while we're walking. |
| 228 | */ |
Paolo Bonzini | b92d9a9 | 2017-01-12 19:07:58 +0100 | [diff] [blame] | 229 | qemu_lockcnt_inc(&ctx->list_lock); |
| 230 | QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) { |
| 231 | if (node->pfd.revents && node->io_notify) { |
| 232 | result = true; |
| 233 | break; |
| 234 | } |
| 235 | |
| 236 | if ((node->pfd.revents & G_IO_IN) && node->io_read) { |
| 237 | result = true; |
| 238 | break; |
| 239 | } |
| 240 | if ((node->pfd.revents & G_IO_OUT) && node->io_write) { |
| 241 | result = true; |
| 242 | break; |
| 243 | } |
| 244 | } |
| 245 | |
| 246 | qemu_lockcnt_dec(&ctx->list_lock); |
| 247 | return result; |
| 248 | } |
| 249 | |
/*
 * Invoke the callbacks of every handler that has something pending.
 *
 * @event is the HANDLE that WaitForMultipleObjects reported as signaled
 * (or INVALID_HANDLE_VALUE when called from aio_dispatch): a notifier
 * callback runs when its handle matches @event or select() left revents
 * set.  Socket read/write callbacks run according to the G_IO_IN /
 * G_IO_OUT bits recorded by aio_prepare.
 *
 * Returns true if any "real" work was done (progress for the caller's
 * event-loop accounting); firing of ctx->notifier alone does not count.
 */
static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
{
    AioHandler *node;
    bool progress = false;
    AioHandler *tmp;

    qemu_lockcnt_inc(&ctx->list_lock);

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
        /* Snapshot revents: the notifier branch below clears the field */
        int revents = node->pfd.revents;

        if (!node->deleted &&
            (revents || event_notifier_get_handle(node->e) == event) &&
            node->io_notify) {
            node->pfd.revents = 0;
            node->io_notify(node->e);

            /* aio_notify() does not count as progress */
            if (node->e != &ctx->notifier) {
                progress = true;
            }
        }

        if (!node->deleted &&
            (node->io_read || node->io_write)) {
            node->pfd.revents = 0;
            if ((revents & G_IO_IN) && node->io_read) {
                node->io_read(node->opaque);
                progress = true;
            }
            if ((revents & G_IO_OUT) && node->io_write) {
                node->io_write(node->opaque);
                progress = true;
            }

            /* if the next select() will return an event, we have progressed */
            if (event == event_notifier_get_handle(&ctx->notifier)) {
                WSANETWORKEVENTS ev;
                WSAEnumNetworkEvents(node->pfd.fd, event, &ev);
                if (ev.lNetworkEvents) {
                    progress = true;
                }
            }
        }

        /* Reap nodes marked for deferred deletion, but only if we can
         * briefly convert our shared reference into the exclusive lock;
         * otherwise another walker still holds a reference and the node
         * will be reaped later.
         */
        if (node->deleted) {
            if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
                QLIST_REMOVE(node, node);
                g_free(node);
                qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
            }
        }
    }

    qemu_lockcnt_dec(&ctx->list_lock);
    return progress;
}
| 311 | |
Stefan Hajnoczi | 721671a | 2016-12-01 19:26:40 +0000 | [diff] [blame] | 312 | bool aio_dispatch(AioContext *ctx, bool dispatch_fds) |
Paolo Bonzini | a398dea | 2014-07-09 11:53:03 +0200 | [diff] [blame] | 313 | { |
| 314 | bool progress; |
| 315 | |
Paolo Bonzini | e4c7e2d | 2014-07-09 11:53:05 +0200 | [diff] [blame] | 316 | progress = aio_bh_poll(ctx); |
Stefan Hajnoczi | 721671a | 2016-12-01 19:26:40 +0000 | [diff] [blame] | 317 | if (dispatch_fds) { |
| 318 | progress |= aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE); |
| 319 | } |
Paolo Bonzini | d397ec99 | 2014-07-09 11:53:02 +0200 | [diff] [blame] | 320 | progress |= timerlistgroup_run_timers(&ctx->tlg); |
Paolo Bonzini | a398dea | 2014-07-09 11:53:03 +0200 | [diff] [blame] | 321 | return progress; |
| 322 | } |
| 323 | |
/*
 * Wait for events on the AioContext and dispatch them.  With
 * @blocking == true the first WaitForMultipleObjects may sleep until an
 * event or timer deadline; subsequent iterations are always
 * non-blocking.  Returns true if any progress was made (callbacks,
 * bottom halves, or timers ran).
 */
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
    bool progress, have_select_revents, first;
    int count;
    int timeout;

    aio_context_acquire(ctx);
    progress = false;

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns,
     * so disable the optimization now.
     */
    if (blocking) {
        atomic_add(&ctx->notify_me, 2);
    }

    qemu_lockcnt_inc(&ctx->list_lock);
    have_select_revents = aio_prepare(ctx);

    /* fill fd sets */
    count = 0;
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->io_notify
            && aio_node_check(ctx, node->is_external)) {
            events[count++] = event_notifier_get_handle(node->e);
        }
    }

    qemu_lockcnt_dec(&ctx->list_lock);
    first = true;

    /* ctx->notifier is always registered.  */
    assert(count > 0);

    /* Multiple iterations, all of them non-blocking except the first,
     * may be necessary to process all pending events.  After the first
     * WaitForMultipleObjects call ctx->notify_me will be decremented.
     */
    do {
        HANDLE event;
        int ret;

        timeout = blocking && !have_select_revents
            ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
        /* Drop the context lock around a potentially blocking wait so
         * other threads can use the context meanwhile.
         */
        if (timeout) {
            aio_context_release(ctx);
        }
        ret = WaitForMultipleObjects(count, events, FALSE, timeout);
        if (blocking) {
            /* blocking is cleared below, so this runs on the first
             * iteration only; undo the notify_me increment from above.
             */
            assert(first);
            atomic_sub(&ctx->notify_me, 2);
        }
        if (timeout) {
            aio_context_acquire(ctx);
        }

        if (first) {
            aio_notify_accept(ctx);
            progress |= aio_bh_poll(ctx);
            first = false;
        }

        /* if we have any signaled events, dispatch event */
        event = NULL;
        if ((DWORD) (ret - WAIT_OBJECT_0) < count) {
            /* Remove the signaled handle from the array (swap with the
             * last entry) so each handle is dispatched at most once.
             */
            event = events[ret - WAIT_OBJECT_0];
            events[ret - WAIT_OBJECT_0] = events[--count];
        } else if (!have_select_revents) {
            break;
        }

        have_select_revents = false;
        blocking = false;

        progress |= aio_dispatch_handlers(ctx, event);
    } while (count > 0);

    progress |= timerlistgroup_run_timers(&ctx->tlg);

    aio_context_release(ctx);
    return progress;
}
Fam Zheng | 37fcee5 | 2015-10-30 12:06:28 +0800 | [diff] [blame] | 412 | |
Cao jin | 7e00346 | 2016-07-15 18:28:44 +0800 | [diff] [blame] | 413 | void aio_context_setup(AioContext *ctx) |
Fam Zheng | 37fcee5 | 2015-10-30 12:06:28 +0800 | [diff] [blame] | 414 | { |
| 415 | } |
Stefan Hajnoczi | 4a1cba3 | 2016-12-01 19:26:42 +0000 | [diff] [blame] | 416 | |
Stefan Hajnoczi | 82a4118 | 2016-12-01 19:26:51 +0000 | [diff] [blame] | 417 | void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns, |
| 418 | int64_t grow, int64_t shrink, Error **errp) |
Stefan Hajnoczi | 4a1cba3 | 2016-12-01 19:26:42 +0000 | [diff] [blame] | 419 | { |
| 420 | error_setg(errp, "AioContext polling is not implemented on Windows"); |
| 421 | } |