#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "block/block.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"

static AioContext *ctx;
static ThreadPool *pool;
static int active;

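/* Per-request bookkeeping shared between the test bodies, the worker
 * functions and the completion callback: the in-flight AIOCB (if any),
 * a progress marker n, and the completion status ret.
 */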
typedef struct {
    BlockAIOCB *aiocb;
    int n;
    int ret;
} WorkerTestData;

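/* Fast worker: atomically increment data->n so the tests can verify
 * that each submitted item ran exactly once.
 */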
static int worker_cb(void *opaque)
{
    WorkerTestData *data = opaque;
    return atomic_fetch_inc(&data->n);
}

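/* Slow worker used by the cancellation tests.  data->n tracks its state:
 * 0 = not started, 1 = running, 3 = finished (1 | 2); do_test_cancel()
 * additionally sets 4 for jobs it managed to cancel before they started.
 */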
static int long_cb(void *opaque)
{
    WorkerTestData *data = opaque;
    if (atomic_cmpxchg(&data->n, 0, 1) == 0) {
        g_usleep(2000000);
        atomic_or(&data->n, 2);
    }
    return 0;
}

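/* Completion callback for the AIO tests: record the final return value,
 * clear the request pointer and account for the finished work item.
 */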
static void done_cb(void *opaque, int ret)
{
    WorkerTestData *data = opaque;
    g_assert(data->ret == -EINPROGRESS || data->ret == -ECANCELED);
    data->ret = ret;
    data->aiocb = NULL;

    /* Callbacks are serialized, so no need to use atomic ops. */
    active--;
}

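/* Submit a single item with the plain (fire-and-forget) API and poll
 * until the worker has run.
 */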
static void test_submit(void)
{
    WorkerTestData data = { .n = 0 };
    thread_pool_submit(pool, worker_cb, &data);
    while (data.n == 0) {
        aio_poll(ctx, true);
    }
    g_assert_cmpint(data.n, ==, 1);
}

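/* Submit a single item with the AIO API and wait for done_cb to report
 * completion.
 */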
static void test_submit_aio(void)
{
    WorkerTestData data = { .n = 0, .ret = -EINPROGRESS };
    data.aiocb = thread_pool_submit_aio(pool, worker_cb, &data,
                                        done_cb, &data);

    /* The callbacks are not called until after the first wait. */
    active = 1;
    g_assert_cmpint(data.ret, ==, -EINPROGRESS);
    while (data.ret == -EINPROGRESS) {
        aio_poll(ctx, true);
    }
    g_assert_cmpint(active, ==, 0);
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.ret, ==, 0);
}

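/* Coroutine half of the submit-co test: thread_pool_submit_co() yields
 * until the worker finishes, at which point aio_poll() in test_submit_co
 * resumes execution here.
 */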
static void co_test_cb(void *opaque)
{
    WorkerTestData *data = opaque;

    active = 1;
    data->n = 0;
    data->ret = -EINPROGRESS;
    thread_pool_submit_co(pool, worker_cb, data);

    /* The test continues in test_submit_co, after qemu_coroutine_enter... */

    g_assert_cmpint(data->n, ==, 1);
    data->ret = 0;
    active--;

    /* The test continues in test_submit_co, after aio_poll... */
}

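/* Main half of the submit-co test: enter the coroutine, then let
 * aio_poll() drive it to completion.
 */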
static void test_submit_co(void)
{
    WorkerTestData data;
    Coroutine *co = qemu_coroutine_create(co_test_cb, &data);

    qemu_coroutine_enter(co);

    /* Back here once the worker has started. */

    g_assert_cmpint(active, ==, 1);
    g_assert_cmpint(data.ret, ==, -EINPROGRESS);

    /* aio_poll will execute the rest of the coroutine. */

    while (data.ret == -EINPROGRESS) {
        aio_poll(ctx, true);
    }

    /* Back here after the coroutine has finished. */

    g_assert_cmpint(active, ==, 0);
    g_assert_cmpint(data.ret, ==, 0);
}

static void test_submit_many(void)
{
    WorkerTestData data[100];
    int i;

    /* Start more work items than there will be threads. */
    for (i = 0; i < 100; i++) {
        data[i].n = 0;
        data[i].ret = -EINPROGRESS;
        thread_pool_submit_aio(pool, worker_cb, &data[i], done_cb, &data[i]);
    }

    active = 100;
    while (active > 0) {
        aio_poll(ctx, true);
    }
    for (i = 0; i < 100; i++) {
        g_assert_cmpint(data[i].n, ==, 1);
        g_assert_cmpint(data[i].ret, ==, 0);
    }
}

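/* Fill the pool, queue 100 slow jobs, then cancel whatever has not
 * started yet, either synchronously or asynchronously depending on
 * the sync argument.
 */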
static void do_test_cancel(bool sync)
{
    WorkerTestData data[100];
    int num_canceled;
    int i;

    /* Start more work items than there will be threads, to ensure
     * the pool is full.
     */
    test_submit_many();

    /* Start long running jobs, to ensure we can cancel some. */
    for (i = 0; i < 100; i++) {
        data[i].n = 0;
        data[i].ret = -EINPROGRESS;
        data[i].aiocb = thread_pool_submit_aio(pool, long_cb, &data[i],
                                               done_cb, &data[i]);
    }

    /* Starting the threads may be left to a bottom half.  Let it
     * run, but do not waste too much time...
     */
    active = 100;
    aio_notify(ctx);
    aio_poll(ctx, false);

    /* Wait some time for the threads to start, with some sanity
     * testing on the behavior of the scheduler...
     */
    g_assert_cmpint(active, ==, 100);
    g_usleep(1000000);
    g_assert_cmpint(active, >, 50);

    /* Cancel the jobs that haven't been started yet. */
    num_canceled = 0;
    for (i = 0; i < 100; i++) {
        if (atomic_cmpxchg(&data[i].n, 0, 4) == 0) {
            data[i].ret = -ECANCELED;
            if (sync) {
                bdrv_aio_cancel(data[i].aiocb);
            } else {
                bdrv_aio_cancel_async(data[i].aiocb);
            }
            num_canceled++;
        }
    }
    g_assert_cmpint(active, >, 0);
    g_assert_cmpint(num_canceled, <, 100);

    for (i = 0; i < 100; i++) {
        if (data[i].aiocb && atomic_read(&data[i].n) < 4) {
            if (sync) {
                /* Canceling the others will be a blocking operation. */
                bdrv_aio_cancel(data[i].aiocb);
            } else {
                bdrv_aio_cancel_async(data[i].aiocb);
            }
        }
    }

    /* Finish execution and execute any remaining callbacks. */
    while (active > 0) {
        aio_poll(ctx, true);
    }
    g_assert_cmpint(active, ==, 0);
    for (i = 0; i < 100; i++) {
        g_assert(data[i].aiocb == NULL);
        switch (data[i].n) {
        case 0:
            fprintf(stderr, "Callback not canceled but never started?\n");
            abort();
        case 3:
            /* Couldn't be canceled asynchronously, must have completed. */
            g_assert_cmpint(data[i].ret, ==, 0);
            break;
        case 4:
            /* Could be canceled asynchronously, never started. */
            g_assert_cmpint(data[i].ret, ==, -ECANCELED);
            break;
        default:
            fprintf(stderr, "Callback aborted while running?\n");
            abort();
        }
    }
}

static void test_cancel(void)
{
    do_test_cancel(true);
}

static void test_cancel_async(void)
{
    do_test_cancel(false);
}

int main(int argc, char **argv)
{
    qemu_init_main_loop(&error_abort);
    ctx = qemu_get_current_aio_context();
    pool = aio_get_thread_pool(ctx);

    g_test_init(&argc, &argv, NULL);
    g_test_add_func("/thread-pool/submit", test_submit);
    g_test_add_func("/thread-pool/submit-aio", test_submit_aio);
    g_test_add_func("/thread-pool/submit-co", test_submit_co);
    g_test_add_func("/thread-pool/submit-many", test_submit_many);
    g_test_add_func("/thread-pool/cancel", test_cancel);
    g_test_add_func("/thread-pool/cancel-async", test_cancel_async);

    return g_test_run();
}