/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "migration/blocker.h"
#include "migration/migration.h"
#include "migration/qemu-file.h"
#include "sysemu/sysemu.h"
#include "block/block.h"
#include "qapi/qmp/qerror.h"
#include "qapi/util.h"
#include "qemu/sockets.h"
#include "qemu/rcu.h"
#include "migration/block.h"
#include "postcopy-ram.h"
#include "qemu/thread.h"
#include "qmp-commands.h"
#include "trace.h"
#include "qapi-event.h"
#include "qom/cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "io/channel-buffer.h"
#include "io/channel-tls.h"
#include "migration/colo.h"

#define MAX_THROTTLE  (32 << 20)      /* Migration transfer speed throttling */

/* Amount of time to allocate to each "chunk" of bandwidth-throttled
 * data. */
#define BUFFER_DELAY     100
#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)
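/* For example: with the default MAX_THROTTLE of 32 MiB/s, the limit handed
 * to qemu_file_set_rate_limit() below is MAX_THROTTLE / XFER_LIMIT_RATIO,
 * i.e. roughly 3.2 MiB of data per 100 ms BUFFER_DELAY interval.
 */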

/* Time in milliseconds we are allowed to stop the source,
 * for sending the last part */
#define DEFAULT_MIGRATE_SET_DOWNTIME 300

/* Maximum migrate downtime set to 2000 seconds */
#define MAX_MIGRATE_DOWNTIME_SECONDS 2000
#define MAX_MIGRATE_DOWNTIME (MAX_MIGRATE_DOWNTIME_SECONDS * 1000)

/* Default compression thread count */
#define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8
/* Default decompression thread count, usually decompression is at
 * least 4 times as fast as compression. */
#define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2
/* 0: no compression, 1: best speed, ..., 9: best compression ratio */
#define DEFAULT_MIGRATE_COMPRESS_LEVEL 1
/* Define default autoconverge cpu throttle migration parameters */
#define DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL 20
#define DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT 10

/* Migration XBZRLE default cache size */
#define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)

/* The delay time (in ms) between two COLO checkpoints
 * Note: Please change this default value to 10000 when we support hybrid mode.
 */
#define DEFAULT_MIGRATE_X_CHECKPOINT_DELAY 200

static NotifierList migration_state_notifiers =
    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);

static bool deferred_incoming;

/* When we add fault tolerance, we could have several
   migrations at once.  For now we don't need dynamic
   creation of migration state. */

/* For outgoing */
MigrationState *migrate_get_current(void)
{
    static bool once;
    static MigrationState current_migration = {
        .state = MIGRATION_STATUS_NONE,
        .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE,
        .mbps = -1,
        .parameters = {
            .compress_level = DEFAULT_MIGRATE_COMPRESS_LEVEL,
            .compress_threads = DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT,
            .decompress_threads = DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT,
            .cpu_throttle_initial = DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL,
            .cpu_throttle_increment = DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT,
            .max_bandwidth = MAX_THROTTLE,
            .downtime_limit = DEFAULT_MIGRATE_SET_DOWNTIME,
            .x_checkpoint_delay = DEFAULT_MIGRATE_X_CHECKPOINT_DELAY,
        },
    };

    if (!once) {
        current_migration.parameters.tls_creds = g_strdup("");
        current_migration.parameters.tls_hostname = g_strdup("");
        once = true;
    }
    return &current_migration;
}

MigrationIncomingState *migration_incoming_get_current(void)
{
    static bool once;
    static MigrationIncomingState mis_current;

    if (!once) {
        /* Zero the structure first so the state assignment is not clobbered */
        memset(&mis_current, 0, sizeof(MigrationIncomingState));
        mis_current.state = MIGRATION_STATUS_NONE;
        QLIST_INIT(&mis_current.loadvm_handlers);
        qemu_mutex_init(&mis_current.rp_mutex);
        qemu_event_init(&mis_current.main_thread_load_event, false);
        once = true;
    }
    return &mis_current;
}

void migration_incoming_state_destroy(void)
{
    struct MigrationIncomingState *mis = migration_incoming_get_current();

    qemu_event_destroy(&mis->main_thread_load_event);
    loadvm_free_handlers(mis);
}


typedef struct {
    bool optional;
    uint32_t size;
    uint8_t runstate[100];
    RunState state;
    bool received;
} GlobalState;
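/* The runstate name travels as a NUL-terminated string in the fixed-size
 * 'runstate' buffer above; global_state_pre_save() records strlen() + 1 in
 * 'size', and the whole structure is sent as the "globalstate" vmstate
 * section registered below.
 */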

static GlobalState global_state;

int global_state_store(void)
{
    if (!runstate_store((char *)global_state.runstate,
                        sizeof(global_state.runstate))) {
        error_report("runstate name too big: %s", global_state.runstate);
        trace_migrate_state_too_big();
        return -EINVAL;
    }
    return 0;
}

void global_state_store_running(void)
{
    const char *state = RunState_lookup[RUN_STATE_RUNNING];
    strncpy((char *)global_state.runstate,
            state, sizeof(global_state.runstate));
}

static bool global_state_received(void)
{
    return global_state.received;
}

static RunState global_state_get_runstate(void)
{
    return global_state.state;
}

void global_state_set_optional(void)
{
    global_state.optional = true;
}

static bool global_state_needed(void *opaque)
{
    GlobalState *s = opaque;
    char *runstate = (char *)s->runstate;

    /* If it is not optional, it is mandatory */

    if (s->optional == false) {
        return true;
    }

    /* If state is running or paused, it is not needed */

    if (strcmp(runstate, "running") == 0 ||
        strcmp(runstate, "paused") == 0) {
        return false;
    }

    /* for any other state it is needed */
    return true;
}

static int global_state_post_load(void *opaque, int version_id)
{
    GlobalState *s = opaque;
    Error *local_err = NULL;
    int r;
    char *runstate = (char *)s->runstate;

    s->received = true;
    trace_migrate_global_state_post_load(runstate);

    r = qapi_enum_parse(RunState_lookup, runstate, RUN_STATE__MAX,
                        -1, &local_err);

    if (r == -1) {
        if (local_err) {
            error_report_err(local_err);
        }
        return -EINVAL;
    }
    s->state = r;

    return 0;
}

static void global_state_pre_save(void *opaque)
{
    GlobalState *s = opaque;

    trace_migrate_global_state_pre_save((char *)s->runstate);
    s->size = strlen((char *)s->runstate) + 1;
}

static const VMStateDescription vmstate_globalstate = {
    .name = "globalstate",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = global_state_post_load,
    .pre_save = global_state_pre_save,
    .needed = global_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(size, GlobalState),
        VMSTATE_BUFFER(runstate, GlobalState),
        VMSTATE_END_OF_LIST()
    },
};

void register_global_state(void)
{
    /* The runstate is used whether or not we receive the section */
    strcpy((char *)&global_state.runstate, "");
    global_state.received = false;
    vmstate_register(NULL, 0, &vmstate_globalstate, &global_state);
}

static void migrate_generate_event(int new_state)
{
    if (migrate_use_events()) {
        qapi_event_send_migration(new_state, &error_abort);
    }
}

/*
 * Called on -incoming with a defer: uri.
 * The migration can be started later after any parameters have been
 * changed.
 */
static void deferred_incoming_migration(Error **errp)
{
    if (deferred_incoming) {
        error_setg(errp, "Incoming migration already deferred");
    }
    deferred_incoming = true;
}

/* Request a range of pages from the source VM at the given
 * start address.
 *   rbname: Name of the RAMBlock to request the page in, if NULL it's the same
 *           as the last request (a name must have been given previously)
 *   Start: Address offset within the RB
 *   Len: Length in bytes required - must be a multiple of pagesize
 */
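/* On the wire (all integers big-endian) the request body is laid out as:
 *   bytes  0..7   start offset within the RAMBlock
 *   bytes  8..11  length in bytes
 *   byte   12     length of the RAMBlock name   (MIG_RP_MSG_REQ_PAGES_ID only)
 *   bytes  13..   RAMBlock name, not NUL-terminated (REQ_PAGES_ID only)
 */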
void migrate_send_rp_req_pages(MigrationIncomingState *mis, const char *rbname,
                               ram_addr_t start, size_t len)
{
    uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 256 */
    size_t msglen = 12; /* start + len */

    *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
    *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);

    if (rbname) {
        int rbname_len = strlen(rbname);
        assert(rbname_len < 256);

        bufc[msglen++] = rbname_len;
        memcpy(bufc + msglen, rbname, rbname_len);
        msglen += rbname_len;
        migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES_ID, msglen, bufc);
    } else {
        migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES, msglen, bufc);
    }
}

void qemu_start_incoming_migration(const char *uri, Error **errp)
{
    const char *p;

    qapi_event_send_migration(MIGRATION_STATUS_SETUP, &error_abort);
    if (!strcmp(uri, "defer")) {
        deferred_incoming_migration(errp);
    } else if (strstart(uri, "tcp:", &p)) {
        tcp_start_incoming_migration(p, errp);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_incoming_migration(p, errp);
#endif
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_incoming_migration(p, errp);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_incoming_migration(p, errp);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_incoming_migration(p, errp);
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
    }
}

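/* Bottom half scheduled from process_incoming_migration_co() once
 * qemu_loadvm_state() has finished: it runs in the main loop, flushes the
 * block layer's cached metadata, announces the guest on the network
 * (qemu_announce_self()) and then either starts the VM or leaves it paused,
 * depending on autostart and the runstate received from the source.
 */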
static void process_incoming_migration_bh(void *opaque)
{
    Error *local_err = NULL;
    MigrationIncomingState *mis = opaque;

    /* Make sure all file formats flush their mutable metadata.
     * If we get an error here, just don't restart the VM yet. */
    bdrv_invalidate_cache_all(&local_err);
    if (local_err) {
        error_report_err(local_err);
        local_err = NULL;
        autostart = false;
    }

    /*
     * This must happen after all error conditions are dealt with and
     * we're sure the VM is going to be running on this host.
     */
    qemu_announce_self();

    /* If global state section was not received or we are in running
       state, we need to obey autostart. Any other state is set with
       runstate_set. */

    if (!global_state_received() ||
        global_state_get_runstate() == RUN_STATE_RUNNING) {
        if (autostart) {
            vm_start();
        } else {
            runstate_set(RUN_STATE_PAUSED);
        }
    } else {
        runstate_set(global_state_get_runstate());
    }
    migrate_decompress_threads_join();
    /*
     * This must happen after any state changes since as soon as an external
     * observer sees this event they might start to prod at the VM assuming
     * it's ready to use.
     */
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_COMPLETED);
    qemu_bh_delete(mis->bh);
    migration_incoming_state_destroy();
}

static void process_incoming_migration_co(void *opaque)
{
    QEMUFile *f = opaque;
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyState ps;
    int ret;

    mis->from_src_file = f;
    mis->largest_page_size = qemu_ram_pagesize_largest();
    postcopy_state_set(POSTCOPY_INCOMING_NONE);
    migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
                      MIGRATION_STATUS_ACTIVE);
    ret = qemu_loadvm_state(f);

    ps = postcopy_state_get();
    trace_process_incoming_migration_co_end(ret, ps);
    if (ps != POSTCOPY_INCOMING_NONE) {
        if (ps == POSTCOPY_INCOMING_ADVISE) {
            /*
             * Where a migration had postcopy enabled (and thus went to advise)
             * but managed to complete within the precopy period, we can use
             * the normal exit.
             */
            postcopy_ram_incoming_cleanup(mis);
        } else if (ret >= 0) {
            /*
             * Postcopy was started, cleanup should happen at the end of the
             * postcopy thread.
             */
            trace_process_incoming_migration_co_postcopy_end_main();
            return;
        }
        /* Else if something went wrong then just fall out of the normal exit */
    }

    /* The loaded state tells us whether we are running in COLO mode */
    if (!ret && migration_incoming_enable_colo()) {
        mis->migration_incoming_co = qemu_coroutine_self();
        qemu_thread_create(&mis->colo_incoming_thread, "COLO incoming",
             colo_process_incoming_thread, mis, QEMU_THREAD_JOINABLE);
        mis->have_colo_incoming_thread = true;
        qemu_coroutine_yield();

        /* Wait for the COLO incoming thread to exit before freeing resources */
        qemu_thread_join(&mis->colo_incoming_thread);
    }

    if (ret < 0) {
        migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                          MIGRATION_STATUS_FAILED);
        error_report("load of migration failed: %s", strerror(-ret));
        migrate_decompress_threads_join();
        exit(EXIT_FAILURE);
    }

    qemu_fclose(f);
    free_xbzrle_decoded_buf();

    mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
    qemu_bh_schedule(mis->bh);
}

void migration_fd_process_incoming(QEMUFile *f)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, f);

    migrate_decompress_threads_create();
    qemu_file_set_blocking(f, false);
    qemu_coroutine_enter(co);
}


void migration_channel_process_incoming(MigrationState *s,
                                        QIOChannel *ioc)
{
    trace_migration_set_incoming_channel(
        ioc, object_get_typename(OBJECT(ioc)));

    if (s->parameters.tls_creds &&
        *s->parameters.tls_creds &&
        !object_dynamic_cast(OBJECT(ioc),
                             TYPE_QIO_CHANNEL_TLS)) {
        Error *local_err = NULL;
        migration_tls_channel_process_incoming(s, ioc, &local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    } else {
        QEMUFile *f = qemu_fopen_channel_input(ioc);
        migration_fd_process_incoming(f);
    }
}


void migration_channel_connect(MigrationState *s,
                               QIOChannel *ioc,
                               const char *hostname)
{
    trace_migration_set_outgoing_channel(
        ioc, object_get_typename(OBJECT(ioc)), hostname);

    if (s->parameters.tls_creds &&
        *s->parameters.tls_creds &&
        !object_dynamic_cast(OBJECT(ioc),
                             TYPE_QIO_CHANNEL_TLS)) {
        Error *local_err = NULL;
        migration_tls_channel_connect(s, ioc, hostname, &local_err);
        if (local_err) {
            migrate_fd_error(s, local_err);
            error_free(local_err);
        }
    } else {
        QEMUFile *f = qemu_fopen_channel_output(ioc);

        s->to_dst_file = f;

        migrate_fd_connect(s);
    }
}


/*
 * Send a message on the return channel back to the source
 * of the migration.
 */
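/* Wire format: a big-endian 16-bit message type, a big-endian 16-bit payload
 * length, then 'len' bytes of payload.  rp_mutex serialises senders so that
 * concurrent messages cannot interleave on the stream.
 */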
void migrate_send_rp_message(MigrationIncomingState *mis,
                             enum mig_rp_message_type message_type,
                             uint16_t len, void *data)
{
    trace_migrate_send_rp_message((int)message_type, len);
    qemu_mutex_lock(&mis->rp_mutex);
    qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
    qemu_put_be16(mis->to_src_file, len);
    qemu_put_buffer(mis->to_src_file, data, len);
    qemu_fflush(mis->to_src_file);
    qemu_mutex_unlock(&mis->rp_mutex);
}

/*
 * Send a 'SHUT' message on the return channel with the given value
 * to indicate that we've finished with the RP.  Non-0 value indicates
 * error.
 */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
}

/*
 * Send a 'PONG' message on the return channel with the given value
 * (normally in response to a 'PING')
 */
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
}

MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
{
    MigrationCapabilityStatusList *head = NULL;
    MigrationCapabilityStatusList *caps;
    MigrationState *s = migrate_get_current();
    int i;

    caps = NULL; /* silence compiler warning */
    for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
#ifndef CONFIG_LIVE_BLOCK_MIGRATION
        if (i == MIGRATION_CAPABILITY_BLOCK) {
            continue;
        }
#endif
        if (i == MIGRATION_CAPABILITY_X_COLO && !colo_supported()) {
            continue;
        }
        if (head == NULL) {
            head = g_malloc0(sizeof(*caps));
            caps = head;
        } else {
            caps->next = g_malloc0(sizeof(*caps));
            caps = caps->next;
        }
        caps->value = g_malloc(sizeof(*caps->value));
        caps->value->capability = i;
        caps->value->state = s->enabled_capabilities[i];
    }

    return head;
}

MigrationParameters *qmp_query_migrate_parameters(Error **errp)
{
    MigrationParameters *params;
    MigrationState *s = migrate_get_current();

    params = g_malloc0(sizeof(*params));
    params->has_compress_level = true;
    params->compress_level = s->parameters.compress_level;
    params->has_compress_threads = true;
    params->compress_threads = s->parameters.compress_threads;
    params->has_decompress_threads = true;
    params->decompress_threads = s->parameters.decompress_threads;
    params->has_cpu_throttle_initial = true;
    params->cpu_throttle_initial = s->parameters.cpu_throttle_initial;
    params->has_cpu_throttle_increment = true;
    params->cpu_throttle_increment = s->parameters.cpu_throttle_increment;
    params->has_tls_creds = !!s->parameters.tls_creds;
    params->tls_creds = g_strdup(s->parameters.tls_creds);
    params->has_tls_hostname = !!s->parameters.tls_hostname;
    params->tls_hostname = g_strdup(s->parameters.tls_hostname);
    params->has_max_bandwidth = true;
    params->max_bandwidth = s->parameters.max_bandwidth;
    params->has_downtime_limit = true;
    params->downtime_limit = s->parameters.downtime_limit;
    params->has_x_checkpoint_delay = true;
    params->x_checkpoint_delay = s->parameters.x_checkpoint_delay;
    params->has_block_incremental = true;
    params->block_incremental = s->parameters.block_incremental;

    return params;
}

/*
 * Return true if we're already in the middle of a migration
 * (i.e. any of the active or setup states)
 */
static bool migration_is_setup_or_active(int state)
{
    switch (state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_SETUP:
        return true;

    default:
        return false;

    }
}

static void get_xbzrle_cache_stats(MigrationInfo *info)
{
    if (migrate_use_xbzrle()) {
        info->has_xbzrle_cache = true;
        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
        info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred();
        info->xbzrle_cache->pages = xbzrle_mig_pages_transferred();
        info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss();
        info->xbzrle_cache->cache_miss_rate = xbzrle_mig_cache_miss_rate();
        info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow();
    }
}

static void populate_ram_info(MigrationInfo *info, MigrationState *s)
{
    info->has_ram = true;
    info->ram = g_malloc0(sizeof(*info->ram));
    info->ram->transferred = ram_bytes_transferred();
    info->ram->total = ram_bytes_total();
    info->ram->duplicate = dup_mig_pages_transferred();
    /* legacy value.  It is not used anymore */
    info->ram->skipped = 0;
    info->ram->normal = norm_mig_pages_transferred();
    info->ram->normal_bytes = norm_mig_pages_transferred() *
        qemu_target_page_size();
    info->ram->mbps = s->mbps;
    info->ram->dirty_sync_count = ram_dirty_sync_count();
    info->ram->postcopy_requests = ram_postcopy_requests();
    info->ram->page_size = qemu_target_page_size();

    if (s->state != MIGRATION_STATUS_COMPLETED) {
        info->ram->remaining = ram_bytes_remaining();
        info->ram->dirty_pages_rate = ram_dirty_pages_rate();
    }
}

MigrationInfo *qmp_query_migrate(Error **errp)
{
    MigrationInfo *info = g_malloc0(sizeof(*info));
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
        /* no migration has happened ever */
        break;
    case MIGRATION_STATUS_SETUP:
        info->has_status = true;
        info->has_total_time = false;
        break;
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_CANCELLING:
        info->has_status = true;
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
            - s->total_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        populate_ram_info(info, s);

        if (blk_mig_active()) {
            info->has_disk = true;
            info->disk = g_malloc0(sizeof(*info->disk));
            info->disk->transferred = blk_mig_bytes_transferred();
            info->disk->remaining = blk_mig_bytes_remaining();
            info->disk->total = blk_mig_bytes_total();
        }

        if (cpu_throttle_active()) {
            info->has_cpu_throttle_percentage = true;
            info->cpu_throttle_percentage = cpu_throttle_get_percentage();
        }

        get_xbzrle_cache_stats(info);
        break;
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
        /* Mostly the same as active; TODO add some postcopy stats */
        info->has_status = true;
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
            - s->total_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        populate_ram_info(info, s);

        if (blk_mig_active()) {
            info->has_disk = true;
            info->disk = g_malloc0(sizeof(*info->disk));
            info->disk->transferred = blk_mig_bytes_transferred();
            info->disk->remaining = blk_mig_bytes_remaining();
            info->disk->total = blk_mig_bytes_total();
        }

        get_xbzrle_cache_stats(info);
        break;
    case MIGRATION_STATUS_COLO:
        info->has_status = true;
        /* TODO: display COLO specific information (checkpoint info etc.) */
        break;
    case MIGRATION_STATUS_COMPLETED:
        get_xbzrle_cache_stats(info);

        info->has_status = true;
        info->has_total_time = true;
        info->total_time = s->total_time;
        info->has_downtime = true;
        info->downtime = s->downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        populate_ram_info(info, s);
        break;
    case MIGRATION_STATUS_FAILED:
        info->has_status = true;
        if (s->error) {
            info->has_error_desc = true;
            info->error_desc = g_strdup(error_get_pretty(s->error));
        }
        break;
    case MIGRATION_STATUS_CANCELLED:
        info->has_status = true;
        break;
    }
    info->status = s->state;

    return info;
}

void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
                                  Error **errp)
{
    MigrationState *s = migrate_get_current();
    MigrationCapabilityStatusList *cap;
    bool old_postcopy_cap = migrate_postcopy_ram();

    if (migration_is_setup_or_active(s->state)) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }

    for (cap = params; cap; cap = cap->next) {
#ifndef CONFIG_LIVE_BLOCK_MIGRATION
        if (cap->value->capability == MIGRATION_CAPABILITY_BLOCK
            && cap->value->state) {
            error_setg(errp, "QEMU compiled without old-style (blk/-b, inc/-i) "
                       "block migration");
            error_append_hint(errp, "Use drive_mirror+NBD instead.\n");
            continue;
        }
#endif
        if (cap->value->capability == MIGRATION_CAPABILITY_X_COLO) {
            if (!colo_supported()) {
                error_setg(errp, "COLO is not currently supported, please"
                           " configure with --enable-colo option in order to"
                           " support COLO feature");
                continue;
            }
        }
        s->enabled_capabilities[cap->value->capability] = cap->value->state;
    }

    if (migrate_postcopy_ram()) {
        if (migrate_use_compression()) {
            /* The decompression threads asynchronously write into RAM
             * rather than use the atomic copies needed to avoid
             * userfaulting.  It should be possible to fix the decompression
             * threads for compatibility in future.
             */
            error_report("Postcopy is not currently compatible with "
                         "compression");
            s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM] =
                false;
        }
        /* This check is reasonably expensive, so only when it's being
         * set the first time, also it's only the destination that needs
         * special support.
         */
        if (!old_postcopy_cap && runstate_check(RUN_STATE_INMIGRATE) &&
            !postcopy_ram_supported_by_host()) {
            /* postcopy_ram_supported_by_host will have emitted a more
             * detailed message
             */
            error_report("Postcopy is not supported");
            s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM] =
                false;
        }
    }
}

void qmp_migrate_set_parameters(MigrationParameters *params, Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (params->has_compress_level &&
        (params->compress_level < 0 || params->compress_level > 9)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
                   "is invalid, it should be in the range of 0 to 9");
        return;
    }
    if (params->has_compress_threads &&
        (params->compress_threads < 1 || params->compress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "compress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return;
    }
    if (params->has_decompress_threads &&
        (params->decompress_threads < 1 || params->decompress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "decompress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return;
    }
    if (params->has_cpu_throttle_initial &&
        (params->cpu_throttle_initial < 1 ||
         params->cpu_throttle_initial > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "cpu_throttle_initial",
                   "an integer in the range of 1 to 99");
        return;
    }
    if (params->has_cpu_throttle_increment &&
        (params->cpu_throttle_increment < 1 ||
         params->cpu_throttle_increment > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "cpu_throttle_increment",
                   "an integer in the range of 1 to 99");
        return;
    }
    if (params->has_max_bandwidth &&
        (params->max_bandwidth < 0 || params->max_bandwidth > SIZE_MAX)) {
        error_setg(errp, "Parameter 'max_bandwidth' expects an integer in the"
                         " range of 0 to %zu bytes/second", SIZE_MAX);
        return;
    }
    if (params->has_downtime_limit &&
        (params->downtime_limit < 0 ||
         params->downtime_limit > MAX_MIGRATE_DOWNTIME)) {
        error_setg(errp, "Parameter 'downtime_limit' expects an integer in "
                         "the range of 0 to %d milliseconds",
                         MAX_MIGRATE_DOWNTIME);
        return;
    }
    if (params->has_x_checkpoint_delay && (params->x_checkpoint_delay < 0)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "x_checkpoint_delay",
                   "is invalid, it should be positive");
        return;
    }

    if (params->has_compress_level) {
        s->parameters.compress_level = params->compress_level;
    }
    if (params->has_compress_threads) {
        s->parameters.compress_threads = params->compress_threads;
    }
    if (params->has_decompress_threads) {
        s->parameters.decompress_threads = params->decompress_threads;
    }
    if (params->has_cpu_throttle_initial) {
        s->parameters.cpu_throttle_initial = params->cpu_throttle_initial;
    }
    if (params->has_cpu_throttle_increment) {
        s->parameters.cpu_throttle_increment = params->cpu_throttle_increment;
    }
    if (params->has_tls_creds) {
        g_free(s->parameters.tls_creds);
        s->parameters.tls_creds = g_strdup(params->tls_creds);
    }
    if (params->has_tls_hostname) {
        g_free(s->parameters.tls_hostname);
        s->parameters.tls_hostname = g_strdup(params->tls_hostname);
    }
    if (params->has_max_bandwidth) {
        s->parameters.max_bandwidth = params->max_bandwidth;
        if (s->to_dst_file) {
            qemu_file_set_rate_limit(s->to_dst_file,
                                s->parameters.max_bandwidth / XFER_LIMIT_RATIO);
        }
    }
    if (params->has_downtime_limit) {
        s->parameters.downtime_limit = params->downtime_limit;
    }

    if (params->has_x_checkpoint_delay) {
        s->parameters.x_checkpoint_delay = params->x_checkpoint_delay;
        if (migration_in_colo_state()) {
            colo_checkpoint_notify(s);
        }
    }
    if (params->has_block_incremental) {
        s->parameters.block_incremental = params->block_incremental;
    }
}


void qmp_migrate_start_postcopy(Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (!migrate_postcopy_ram()) {
        error_setg(errp, "Enable postcopy with migrate_set_capability before"
                         " the start of migration");
        return;
    }

    if (s->state == MIGRATION_STATUS_NONE) {
        error_setg(errp, "Postcopy must be started after migration has been"
                         " started");
        return;
    }
    /*
     * we don't error if migration has finished since that would be racy
     * with issuing this command.
     */
    atomic_set(&s->start_postcopy, true);
}

/* shared migration helpers */

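/* Attempt the old_state -> new_state transition atomically: only the caller
 * whose compare-and-swap succeeds emits the trace point and the QMP event,
 * so racing callers cannot generate duplicate state-change notifications.
 */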
void migrate_set_state(int *state, int old_state, int new_state)
{
    if (atomic_cmpxchg(state, old_state, new_state) == old_state) {
        trace_migrate_set_state(new_state);
        migrate_generate_event(new_state);
    }
}

void migrate_set_block_enabled(bool value, Error **errp)
{
    MigrationCapabilityStatusList *cap;

    cap = g_new0(MigrationCapabilityStatusList, 1);
    cap->value = g_new0(MigrationCapabilityStatus, 1);
    cap->value->capability = MIGRATION_CAPABILITY_BLOCK;
    cap->value->state = value;
    qmp_migrate_set_capabilities(cap, errp);
    qapi_free_MigrationCapabilityStatusList(cap);
}

static void migrate_set_block_incremental(MigrationState *s, bool value)
{
    s->parameters.block_incremental = value;
}

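/* Undo block-migration settings that were only enabled for this migration:
 * when must_remove_block_options is set, switch the 'block' capability and
 * the block_incremental parameter back off again.
 */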
static void block_cleanup_parameters(MigrationState *s)
{
    if (s->must_remove_block_options) {
        /* setting to false can never fail */
        migrate_set_block_enabled(false, &error_abort);
        migrate_set_block_incremental(s, false);
        s->must_remove_block_options = false;
    }
}

static void migrate_fd_cleanup(void *opaque)
{
    MigrationState *s = opaque;

    qemu_bh_delete(s->cleanup_bh);
    s->cleanup_bh = NULL;

    migration_page_queue_free();

    if (s->to_dst_file) {
        trace_migrate_fd_cleanup();
        qemu_mutex_unlock_iothread();
        if (s->migration_thread_running) {
            qemu_thread_join(&s->thread);
            s->migration_thread_running = false;
        }
        qemu_mutex_lock_iothread();

        migrate_compress_threads_join();
        qemu_fclose(s->to_dst_file);
        s->to_dst_file = NULL;
    }

    assert((s->state != MIGRATION_STATUS_ACTIVE) &&
           (s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE));

    if (s->state == MIGRATION_STATUS_CANCELLING) {
        migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING,
                          MIGRATION_STATUS_CANCELLED);
    }

    notifier_list_notify(&migration_state_notifiers, s);
    block_cleanup_parameters(s);
}

void migrate_fd_error(MigrationState *s, const Error *error)
{
    trace_migrate_fd_error(error_get_pretty(error));
    assert(s->to_dst_file == NULL);
    migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                      MIGRATION_STATUS_FAILED);
    if (!s->error) {
        s->error = error_copy(error);
    }
    notifier_list_notify(&migration_state_notifiers, s);
    block_cleanup_parameters(s);
}

static void migrate_fd_cancel(MigrationState *s)
{
    int old_state;
    QEMUFile *f = migrate_get_current()->to_dst_file;
    trace_migrate_fd_cancel();

    if (s->rp_state.from_dst_file) {
        /* shutdown the rp socket, causing the rp thread to exit */
        qemu_file_shutdown(s->rp_state.from_dst_file);
    }
1042
Zhanghaoyu (A)6f2b8112013-11-07 08:21:23 +00001043 do {
1044 old_state = s->state;
Dr. David Alan Gilbertf6844b92015-11-05 18:10:48 +00001045 if (!migration_is_setup_or_active(old_state)) {
Zhanghaoyu (A)6f2b8112013-11-07 08:21:23 +00001046 break;
1047 }
zhanghailiang48781e52015-12-16 11:47:33 +00001048 migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
zhanghailiang31194732015-03-13 16:08:38 +08001049 } while (s->state != MIGRATION_STATUS_CANCELLING);
Dr. David Alan Gilberta26ba262015-01-08 11:11:32 +00001050
1051 /*
1052 * If we're unlucky the migration code might be stuck somewhere in a
1053 * send/write while the network has failed and is waiting to timeout;
1054 * if we've got shutdown(2) available then we can force it to quit.
1055 * The outgoing qemu file gets closed in migrate_fd_cleanup that is
1056 * called in a bh, so there is no race against this cancel.
1057 */
zhanghailiang31194732015-03-13 16:08:38 +08001058 if (s->state == MIGRATION_STATUS_CANCELLING && f) {
Dr. David Alan Gilberta26ba262015-01-08 11:11:32 +00001059 qemu_file_shutdown(f);
1060 }
zhanghailiang1d2acc32017-01-24 15:59:52 +08001061 if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) {
1062 Error *local_err = NULL;
1063
1064 bdrv_invalidate_cache_all(&local_err);
1065 if (local_err) {
1066 error_report_err(local_err);
1067 } else {
1068 s->block_inactive = false;
1069 }
1070 }
Juan Quintela2833c592017-04-05 18:32:37 +02001071 block_cleanup_parameters(s);
aliguori065e2812008-11-11 16:46:33 +00001072}
1073
Gerd Hoffmann99a0db92010-12-13 17:30:12 +01001074void add_migration_state_change_notifier(Notifier *notify)
1075{
1076 notifier_list_add(&migration_state_notifiers, notify);
1077}
1078
1079void remove_migration_state_change_notifier(Notifier *notify)
1080{
Paolo Bonzini31552522012-01-13 17:34:01 +01001081 notifier_remove(notify);
Gerd Hoffmann99a0db92010-12-13 17:30:12 +01001082}
1083
Stefan Hajnoczi02edd2e2013-07-29 15:01:58 +02001084bool migration_in_setup(MigrationState *s)
Gerd Hoffmannafe2df62011-10-25 13:50:11 +02001085{
zhanghailiang31194732015-03-13 16:08:38 +08001086 return s->state == MIGRATION_STATUS_SETUP;
Gerd Hoffmannafe2df62011-10-25 13:50:11 +02001087}
1088
Juan Quintela70736932011-02-23 00:43:59 +01001089bool migration_has_finished(MigrationState *s)
Gerd Hoffmann99a0db92010-12-13 17:30:12 +01001090{
zhanghailiang31194732015-03-13 16:08:38 +08001091 return s->state == MIGRATION_STATUS_COMPLETED;
Gerd Hoffmann99a0db92010-12-13 17:30:12 +01001092}
Juan Quintela0edda1c2010-05-11 16:28:39 +02001093
Gerd Hoffmannafe2df62011-10-25 13:50:11 +02001094bool migration_has_failed(MigrationState *s)
1095{
zhanghailiang31194732015-03-13 16:08:38 +08001096 return (s->state == MIGRATION_STATUS_CANCELLED ||
1097 s->state == MIGRATION_STATUS_FAILED);
Gerd Hoffmannafe2df62011-10-25 13:50:11 +02001098}
1099
Juan Quintela57273092017-03-20 22:25:28 +01001100bool migration_in_postcopy(void)
Dr. David Alan Gilbert9ec055a2015-11-05 18:10:58 +00001101{
Juan Quintela57273092017-03-20 22:25:28 +01001102 MigrationState *s = migrate_get_current();
1103
Dr. David Alan Gilbert9ec055a2015-11-05 18:10:58 +00001104 return (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
1105}
1106
Dr. David Alan Gilbertb82fc322016-02-22 17:17:32 +00001107bool migration_in_postcopy_after_devices(MigrationState *s)
1108{
Juan Quintela57273092017-03-20 22:25:28 +01001109 return migration_in_postcopy() && s->postcopy_after_devices;
Dr. David Alan Gilbertb82fc322016-02-22 17:17:32 +00001110}
1111
Juan Quintelafab35002017-03-22 17:36:57 +01001112bool migration_is_idle(void)
Ashijeet Acharyafe44dc92017-01-16 17:01:53 +05301113{
Juan Quintelafab35002017-03-22 17:36:57 +01001114 MigrationState *s = migrate_get_current();
Ashijeet Acharyafe44dc92017-01-16 17:01:53 +05301115
1116 switch (s->state) {
1117 case MIGRATION_STATUS_NONE:
1118 case MIGRATION_STATUS_CANCELLED:
1119 case MIGRATION_STATUS_COMPLETED:
1120 case MIGRATION_STATUS_FAILED:
1121 return true;
1122 case MIGRATION_STATUS_SETUP:
1123 case MIGRATION_STATUS_CANCELLING:
1124 case MIGRATION_STATUS_ACTIVE:
1125 case MIGRATION_STATUS_POSTCOPY_ACTIVE:
1126 case MIGRATION_STATUS_COLO:
1127 return false;
1128 case MIGRATION_STATUS__MAX:
1129 g_assert_not_reached();
1130 }
1131
1132 return false;
1133}
1134
Juan Quintelaa0762d92017-04-05 21:00:09 +02001135MigrationState *migrate_init(void)
Juan Quintela0edda1c2010-05-11 16:28:39 +02001136{
Juan Quintela17549e82011-10-05 13:50:43 +02001137 MigrationState *s = migrate_get_current();
Orit Wassermanbbf6da32012-08-06 21:42:47 +03001138
Dr. David Alan Gilbert389775d2015-11-12 15:38:27 +00001139 /*
1140 * Reinitialise all migration state, except
1141 * parameters/capabilities that the user set, and
1142 * locks.
1143 */
1144 s->bytes_xfer = 0;
1145 s->xfer_limit = 0;
1146 s->cleanup_bh = 0;
zhanghailiang89a02a92016-01-15 11:37:42 +08001147 s->to_dst_file = NULL;
Dr. David Alan Gilbert389775d2015-11-12 15:38:27 +00001148 s->state = MIGRATION_STATUS_NONE;
Dr. David Alan Gilbert389775d2015-11-12 15:38:27 +00001149 s->rp_state.from_dst_file = NULL;
1150 s->rp_state.error = false;
1151 s->mbps = 0.0;
1152 s->downtime = 0;
1153 s->expected_downtime = 0;
Dr. David Alan Gilbert389775d2015-11-12 15:38:27 +00001154 s->setup_time = 0;
Dr. David Alan Gilbert389775d2015-11-12 15:38:27 +00001155 s->start_postcopy = false;
Dr. David Alan Gilbertb82fc322016-02-22 17:17:32 +00001156 s->postcopy_after_devices = false;
Dr. David Alan Gilbert389775d2015-11-12 15:38:27 +00001157 s->migration_thread_running = false;
Daniel P. Berranged59ce6f2016-04-27 11:05:00 +01001158 error_free(s->error);
1159 s->error = NULL;
Juan Quintela1299c632011-11-09 21:29:01 +01001160
zhanghailiang48781e52015-12-16 11:47:33 +00001161 migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);
Juan Quintela0edda1c2010-05-11 16:28:39 +02001162
Alex Blighbc72ad62013-08-21 16:03:08 +01001163 s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
Juan Quintela0edda1c2010-05-11 16:28:39 +02001164 return s;
1165}
Juan Quintelacab30142011-02-22 23:54:21 +01001166
Anthony Liguorifa2756b2011-11-14 15:09:43 -06001167static GSList *migration_blockers;
1168
Ashijeet Acharyafe44dc92017-01-16 17:01:53 +05301169int migrate_add_blocker(Error *reason, Error **errp)
Anthony Liguorifa2756b2011-11-14 15:09:43 -06001170{
Ashijeet Acharyab67b8c32017-01-16 17:01:54 +05301171 if (only_migratable) {
1172 error_propagate(errp, error_copy(reason));
1173 error_prepend(errp, "disallowing migration blocker "
1174                   "(--only-migratable) for: ");
1175 return -EACCES;
1176 }
1177
Juan Quintelafab35002017-03-22 17:36:57 +01001178 if (migration_is_idle()) {
Ashijeet Acharyafe44dc92017-01-16 17:01:53 +05301179 migration_blockers = g_slist_prepend(migration_blockers, reason);
1180 return 0;
1181 }
1182
1183 error_propagate(errp, error_copy(reason));
1184 error_prepend(errp, "disallowing migration blocker (migration in "
1185 "progress) for: ");
1186 return -EBUSY;
Anthony Liguorifa2756b2011-11-14 15:09:43 -06001187}
1188
1189void migrate_del_blocker(Error *reason)
1190{
1191 migration_blockers = g_slist_remove(migration_blockers, reason);
1192}
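/*
 * Illustrative usage sketch only (not taken from this file; the device name
 * is hypothetical): a caller keeps ownership of the Error it registers and
 * frees it again after deleting the blocker.
 *
 *     Error *blocker = NULL;
 *     Error *local_err = NULL;
 *
 *     error_setg(&blocker, "device 'foo' does not support migration");
 *     if (migrate_add_blocker(blocker, &local_err) < 0) {
 *         error_report_err(local_err);
 *         error_free(blocker);
 *         blocker = NULL;
 *     }
 *     ...
 *     if (blocker) {
 *         migrate_del_blocker(blocker);
 *         error_free(blocker);
 *     }
 */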
1193
Dr. David Alan Gilbertbf1ae1f2015-02-19 11:40:28 +00001194void qmp_migrate_incoming(const char *uri, Error **errp)
1195{
1196 Error *local_err = NULL;
Dr. David Alan Gilbert4debb5f2015-02-26 14:54:41 +00001197 static bool once = true;
Dr. David Alan Gilbertbf1ae1f2015-02-19 11:40:28 +00001198
1199 if (!deferred_incoming) {
Dr. David Alan Gilbert4debb5f2015-02-26 14:54:41 +00001200 error_setg(errp, "For use with '-incoming defer'");
Dr. David Alan Gilbertbf1ae1f2015-02-19 11:40:28 +00001201 return;
1202 }
Dr. David Alan Gilbert4debb5f2015-02-26 14:54:41 +00001203    if (!once) {
1204        error_setg(errp, "The incoming migration has already been started");
        return;
1205    }
Dr. David Alan Gilbertbf1ae1f2015-02-19 11:40:28 +00001206
1207 qemu_start_incoming_migration(uri, &local_err);
1208
1209 if (local_err) {
1210 error_propagate(errp, local_err);
1211 return;
1212 }
1213
Dr. David Alan Gilbert4debb5f2015-02-26 14:54:41 +00001214 once = false;
Dr. David Alan Gilbertbf1ae1f2015-02-19 11:40:28 +00001215}
1216
Greg Kurz24f39022016-05-04 21:44:19 +02001217bool migration_is_blocked(Error **errp)
1218{
1219 if (qemu_savevm_state_blocked(errp)) {
1220 return true;
1221 }
1222
1223 if (migration_blockers) {
1224 *errp = error_copy(migration_blockers->data);
1225 return true;
1226 }
1227
1228 return false;
1229}
1230
Luiz Capitulinoe1c37d02011-12-05 14:48:01 -02001231void qmp_migrate(const char *uri, bool has_blk, bool blk,
1232 bool has_inc, bool inc, bool has_detach, bool detach,
1233 Error **errp)
Juan Quintelacab30142011-02-22 23:54:21 +01001234{
Paolo Bonzinibe7059c2012-10-03 14:34:33 +02001235 Error *local_err = NULL;
Juan Quintela17549e82011-10-05 13:50:43 +02001236 MigrationState *s = migrate_get_current();
Juan Quintelacab30142011-02-22 23:54:21 +01001237 const char *p;
Juan Quintelacab30142011-02-22 23:54:21 +01001238
Dr. David Alan Gilbertf6844b92015-11-05 18:10:48 +00001239 if (migration_is_setup_or_active(s->state) ||
zhanghailiang0b827d52016-10-27 14:42:54 +08001240 s->state == MIGRATION_STATUS_CANCELLING ||
1241 s->state == MIGRATION_STATUS_COLO) {
Markus Armbrusterc6bd8c72015-03-17 11:54:50 +01001242 error_setg(errp, QERR_MIGRATION_ACTIVE);
Luiz Capitulinoe1c37d02011-12-05 14:48:01 -02001243 return;
Juan Quintelacab30142011-02-22 23:54:21 +01001244 }
Dr. David Alan Gilbertca999932014-04-14 17:03:59 +01001245 if (runstate_check(RUN_STATE_INMIGRATE)) {
1246 error_setg(errp, "Guest is waiting for an incoming migration");
1247 return;
1248 }
1249
Greg Kurz24f39022016-05-04 21:44:19 +02001250 if (migration_is_blocked(errp)) {
Luiz Capitulinoe1c37d02011-12-05 14:48:01 -02001251 return;
Anthony Liguorifa2756b2011-11-14 15:09:43 -06001252 }
1253
Juan Quintela2833c592017-04-05 18:32:37 +02001254 if ((has_blk && blk) || (has_inc && inc)) {
1255 if (migrate_use_block() || migrate_use_block_incremental()) {
1256 error_setg(errp, "Command options are incompatible with "
1257 "current migration capabilities");
1258 return;
1259 }
1260 migrate_set_block_enabled(true, &local_err);
1261 if (local_err) {
1262 error_propagate(errp, local_err);
1263 return;
1264 }
1265 s->must_remove_block_options = true;
1266 }
1267
1268 if (has_inc && inc) {
1269 migrate_set_block_incremental(s, true);
1270 }
1271
Juan Quintelaa0762d92017-04-05 21:00:09 +02001272 s = migrate_init();
Juan Quintelacab30142011-02-22 23:54:21 +01001273
1274 if (strstart(uri, "tcp:", &p)) {
Paolo Bonzinif37afb52012-10-02 10:02:46 +02001275 tcp_start_outgoing_migration(s, p, &local_err);
Michael R. Hines2da776d2013-07-22 10:01:54 -04001276#ifdef CONFIG_RDMA
Michael R. Hines41310c62013-12-19 04:52:01 +08001277 } else if (strstart(uri, "rdma:", &p)) {
Michael R. Hines2da776d2013-07-22 10:01:54 -04001278 rdma_start_outgoing_migration(s, p, &local_err);
1279#endif
Juan Quintelacab30142011-02-22 23:54:21 +01001280 } else if (strstart(uri, "exec:", &p)) {
Paolo Bonzinif37afb52012-10-02 10:02:46 +02001281 exec_start_outgoing_migration(s, p, &local_err);
Juan Quintelacab30142011-02-22 23:54:21 +01001282 } else if (strstart(uri, "unix:", &p)) {
Paolo Bonzinif37afb52012-10-02 10:02:46 +02001283 unix_start_outgoing_migration(s, p, &local_err);
Juan Quintelacab30142011-02-22 23:54:21 +01001284 } else if (strstart(uri, "fd:", &p)) {
Paolo Bonzinif37afb52012-10-02 10:02:46 +02001285 fd_start_outgoing_migration(s, p, &local_err);
Juan Quintelacab30142011-02-22 23:54:21 +01001286 } else {
Markus Armbrusterc6bd8c72015-03-17 11:54:50 +01001287 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
1288 "a valid migration protocol");
zhanghailiang48781e52015-12-16 11:47:33 +00001289 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
1290 MIGRATION_STATUS_FAILED);
Luiz Capitulinoe1c37d02011-12-05 14:48:01 -02001291 return;
Juan Quintelacab30142011-02-22 23:54:21 +01001292 }
1293
Paolo Bonzinif37afb52012-10-02 10:02:46 +02001294 if (local_err) {
Daniel P. Berranged59ce6f2016-04-27 11:05:00 +01001295 migrate_fd_error(s, local_err);
Paolo Bonzinif37afb52012-10-02 10:02:46 +02001296 error_propagate(errp, local_err);
Luiz Capitulinoe1c37d02011-12-05 14:48:01 -02001297 return;
Juan Quintela1299c632011-11-09 21:29:01 +01001298 }
Juan Quintelacab30142011-02-22 23:54:21 +01001299}
1300
Luiz Capitulino6cdedb02011-11-27 22:54:09 -02001301void qmp_migrate_cancel(Error **errp)
Juan Quintelacab30142011-02-22 23:54:21 +01001302{
Juan Quintela17549e82011-10-05 13:50:43 +02001303 migrate_fd_cancel(migrate_get_current());
Juan Quintelacab30142011-02-22 23:54:21 +01001304}
1305
Orit Wasserman9e1ba4c2012-08-06 21:42:54 +03001306void qmp_migrate_set_cache_size(int64_t value, Error **errp)
1307{
1308 MigrationState *s = migrate_get_current();
Orit Wassermanc91e6812014-01-30 20:08:34 +02001309 int64_t new_size;
Orit Wasserman9e1ba4c2012-08-06 21:42:54 +03001310
1311 /* Check for truncation */
1312 if (value != (size_t)value) {
Markus Armbrusterc6bd8c72015-03-17 11:54:50 +01001313 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
1314 "exceeding address space");
Orit Wasserman9e1ba4c2012-08-06 21:42:54 +03001315 return;
1316 }
1317
Orit Wassermana5615b12014-01-30 20:08:36 +02001318 /* Cache should not be larger than guest ram size */
1319 if (value > ram_bytes_total()) {
Markus Armbrusterc6bd8c72015-03-17 11:54:50 +01001320 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
1321                   "exceeds guest ram size");
Orit Wassermana5615b12014-01-30 20:08:36 +02001322 return;
1323 }
1324
Orit Wassermanc91e6812014-01-30 20:08:34 +02001325 new_size = xbzrle_cache_resize(value);
1326 if (new_size < 0) {
Markus Armbrusterc6bd8c72015-03-17 11:54:50 +01001327 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
1328 "is smaller than page size");
Orit Wassermanc91e6812014-01-30 20:08:34 +02001329 return;
1330 }
1331
1332 s->xbzrle_cache_size = new_size;
Orit Wasserman9e1ba4c2012-08-06 21:42:54 +03001333}
1334
1335int64_t qmp_query_migrate_cache_size(Error **errp)
1336{
1337 return migrate_xbzrle_cache_size();
1338}
1339
Luiz Capitulino3dc85382011-11-28 11:59:37 -02001340void qmp_migrate_set_speed(int64_t value, Error **errp)
Juan Quintelacab30142011-02-22 23:54:21 +01001341{
Ashijeet Acharya2ff30252016-09-15 21:50:28 +05301342 MigrationParameters p = {
1343 .has_max_bandwidth = true,
1344 .max_bandwidth = value,
1345 };
Juan Quintelacab30142011-02-22 23:54:21 +01001346
Ashijeet Acharya2ff30252016-09-15 21:50:28 +05301347 qmp_migrate_set_parameters(&p, errp);
Juan Quintelacab30142011-02-22 23:54:21 +01001348}
1349
Luiz Capitulino4f0a9932011-11-27 23:18:01 -02001350void qmp_migrate_set_downtime(double value, Error **errp)
Juan Quintelacab30142011-02-22 23:54:21 +01001351{
Daniel Henrique Barboza87c9cc12017-02-22 12:17:29 -03001352 if (value < 0 || value > MAX_MIGRATE_DOWNTIME_SECONDS) {
1353 error_setg(errp, "Parameter 'downtime_limit' expects an integer in "
1354 "the range of 0 to %d seconds",
1355 MAX_MIGRATE_DOWNTIME_SECONDS);
1356 return;
1357 }
1358
Ashijeet Acharya2ff30252016-09-15 21:50:28 +05301359 value *= 1000; /* Convert to milliseconds */
1360 value = MAX(0, MIN(INT64_MAX, value));
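    /* e.g. an argument of 0.5 (seconds) becomes a downtime_limit of 500 ms */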
1361
1362 MigrationParameters p = {
1363 .has_downtime_limit = true,
1364 .downtime_limit = value,
1365 };
1366
1367 qmp_migrate_set_parameters(&p, errp);
aliguori5bb79102008-10-13 03:12:02 +00001368}
Orit Wasserman17ad9b32012-08-06 21:42:53 +03001369
Pavel Butsykin53f09a12017-02-03 18:23:20 +03001370bool migrate_release_ram(void)
1371{
1372 MigrationState *s;
1373
1374 s = migrate_get_current();
1375
1376 return s->enabled_capabilities[MIGRATION_CAPABILITY_RELEASE_RAM];
1377}
1378
Dr. David Alan Gilbert53dd3702015-11-05 18:10:51 +00001379bool migrate_postcopy_ram(void)
1380{
1381 MigrationState *s;
1382
1383 s = migrate_get_current();
1384
Dr. David Alan Gilbert32c3db52016-03-11 09:53:36 +00001385 return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM];
Dr. David Alan Gilbert53dd3702015-11-05 18:10:51 +00001386}
1387
Chegu Vinodbde1e2e2013-06-24 03:49:42 -06001388bool migrate_auto_converge(void)
1389{
1390 MigrationState *s;
1391
1392 s = migrate_get_current();
1393
1394 return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
1395}
1396
Peter Lieven323004a2013-07-18 09:48:50 +02001397bool migrate_zero_blocks(void)
1398{
1399 MigrationState *s;
1400
1401 s = migrate_get_current();
1402
1403 return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
1404}
1405
Liang Li8706d2d2015-03-23 16:32:17 +08001406bool migrate_use_compression(void)
1407{
Liang Lidde4e692015-03-23 16:32:26 +08001408 MigrationState *s;
1409
1410 s = migrate_get_current();
1411
1412 return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS];
Liang Li8706d2d2015-03-23 16:32:17 +08001413}
1414
1415int migrate_compress_level(void)
1416{
1417 MigrationState *s;
1418
1419 s = migrate_get_current();
1420
Daniel P. Berrange2594f562016-04-27 11:05:14 +01001421 return s->parameters.compress_level;
Liang Li8706d2d2015-03-23 16:32:17 +08001422}
1423
1424int migrate_compress_threads(void)
1425{
1426 MigrationState *s;
1427
1428 s = migrate_get_current();
1429
Daniel P. Berrange2594f562016-04-27 11:05:14 +01001430 return s->parameters.compress_threads;
Liang Li8706d2d2015-03-23 16:32:17 +08001431}
1432
Liang Li3fcb38c2015-03-23 16:32:18 +08001433int migrate_decompress_threads(void)
1434{
1435 MigrationState *s;
1436
1437 s = migrate_get_current();
1438
Daniel P. Berrange2594f562016-04-27 11:05:14 +01001439 return s->parameters.decompress_threads;
Liang Li3fcb38c2015-03-23 16:32:18 +08001440}
1441
Juan Quintelab05dc722015-07-07 14:44:05 +02001442bool migrate_use_events(void)
1443{
1444 MigrationState *s;
1445
1446 s = migrate_get_current();
1447
1448 return s->enabled_capabilities[MIGRATION_CAPABILITY_EVENTS];
1449}
1450
Orit Wasserman17ad9b32012-08-06 21:42:53 +03001451int migrate_use_xbzrle(void)
1452{
1453 MigrationState *s;
1454
1455 s = migrate_get_current();
1456
1457 return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
1458}
1459
1460int64_t migrate_xbzrle_cache_size(void)
1461{
1462 MigrationState *s;
1463
1464 s = migrate_get_current();
1465
1466 return s->xbzrle_cache_size;
1467}
Juan Quintela0d82d0e2012-10-03 14:18:33 +02001468
Juan Quintela2833c592017-04-05 18:32:37 +02001469bool migrate_use_block(void)
1470{
1471 MigrationState *s;
1472
1473 s = migrate_get_current();
1474
1475 return s->enabled_capabilities[MIGRATION_CAPABILITY_BLOCK];
1476}
1477
1478bool migrate_use_block_incremental(void)
1479{
1480 MigrationState *s;
1481
1482 s = migrate_get_current();
1483
1484 return s->parameters.block_incremental;
1485}
1486
Dr. David Alan Gilbert70b20472015-11-05 18:10:49 +00001487/* migration thread support */
1488/*
1489 * Something bad happened to the RP stream, mark an error
1490 * The caller shall print or trace something to indicate why
1491 */
1492static void mark_source_rp_bad(MigrationState *s)
1493{
1494 s->rp_state.error = true;
1495}
1496
1497static struct rp_cmd_args {
1498 ssize_t len; /* -1 = variable */
1499 const char *name;
1500} rp_cmd_args[] = {
1501 [MIG_RP_MSG_INVALID] = { .len = -1, .name = "INVALID" },
1502 [MIG_RP_MSG_SHUT] = { .len = 4, .name = "SHUT" },
1503 [MIG_RP_MSG_PONG] = { .len = 4, .name = "PONG" },
Dr. David Alan Gilbert1e2d90e2015-11-05 18:11:07 +00001504 [MIG_RP_MSG_REQ_PAGES] = { .len = 12, .name = "REQ_PAGES" },
1505 [MIG_RP_MSG_REQ_PAGES_ID] = { .len = -1, .name = "REQ_PAGES_ID" },
Dr. David Alan Gilbert70b20472015-11-05 18:10:49 +00001506 [MIG_RP_MSG_MAX] = { .len = -1, .name = "MAX" },
1507};
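/*
 * Rough sketch of the framing read by source_return_path_thread() below
 * (all fields big-endian on the wire):
 *
 *     +-------------+-------------+----------------------+
 *     | type (be16) | len (be16)  | len bytes of payload |
 *     +-------------+-------------+----------------------+
 *
 * e.g. a REQ_PAGES payload is 12 bytes: an 8-byte start address followed by
 * a 4-byte length; REQ_PAGES_ID additionally carries a 1-byte idstr length
 * and the idstr itself.
 */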
1508
1509/*
Dr. David Alan Gilbert1e2d90e2015-11-05 18:11:07 +00001510 * Process a request for pages received on the return path.
1511 * We're allowed to send more than requested (e.g. to round to our page size),
1512 * and we don't need to send pages that have already been sent.
1513 */
1514static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
1515 ram_addr_t start, size_t len)
1516{
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001517 long our_host_ps = getpagesize();
1518
Dr. David Alan Gilbert1e2d90e2015-11-05 18:11:07 +00001519 trace_migrate_handle_rp_req_pages(rbname, start, len);
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001520
1521 /*
1522 * Since we currently insist on matching page sizes, just sanity check
1523 * we're being asked for whole host pages.
1524 */
1525 if (start & (our_host_ps-1) ||
1526 (len & (our_host_ps-1))) {
1527 error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
1528 " len: %zd", __func__, start, len);
1529 mark_source_rp_bad(ms);
1530 return;
1531 }
1532
Juan Quintela96506892017-03-14 18:41:03 +01001533 if (ram_save_queue_pages(rbname, start, len)) {
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001534 mark_source_rp_bad(ms);
1535 }
Dr. David Alan Gilbert1e2d90e2015-11-05 18:11:07 +00001536}
1537
1538/*
Dr. David Alan Gilbert70b20472015-11-05 18:10:49 +00001539 * Handles messages sent on the return path towards the source VM
1540 *
1541 */
1542static void *source_return_path_thread(void *opaque)
1543{
1544 MigrationState *ms = opaque;
1545 QEMUFile *rp = ms->rp_state.from_dst_file;
1546 uint16_t header_len, header_type;
Peter Xu568b01c2016-03-09 14:12:12 +08001547 uint8_t buf[512];
Dr. David Alan Gilbert70b20472015-11-05 18:10:49 +00001548 uint32_t tmp32, sibling_error;
Dr. David Alan Gilbert1e2d90e2015-11-05 18:11:07 +00001549 ram_addr_t start = 0; /* =0 to silence warning */
1550 size_t len = 0, expected_len;
Dr. David Alan Gilbert70b20472015-11-05 18:10:49 +00001551 int res;
1552
1553 trace_source_return_path_thread_entry();
1554 while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
1555 migration_is_setup_or_active(ms->state)) {
1556 trace_source_return_path_thread_loop_top();
1557 header_type = qemu_get_be16(rp);
1558 header_len = qemu_get_be16(rp);
1559
1560 if (header_type >= MIG_RP_MSG_MAX ||
1561 header_type == MIG_RP_MSG_INVALID) {
1562 error_report("RP: Received invalid message 0x%04x length 0x%04x",
1563 header_type, header_len);
1564 mark_source_rp_bad(ms);
1565 goto out;
1566 }
1567
1568 if ((rp_cmd_args[header_type].len != -1 &&
1569 header_len != rp_cmd_args[header_type].len) ||
Peter Xu568b01c2016-03-09 14:12:12 +08001570 header_len > sizeof(buf)) {
Dr. David Alan Gilbert70b20472015-11-05 18:10:49 +00001571            error_report("RP: Received '%s' message (0x%04x) with "
1572                "incorrect length %d expecting %zu",
1573 rp_cmd_args[header_type].name, header_type, header_len,
1574 (size_t)rp_cmd_args[header_type].len);
1575 mark_source_rp_bad(ms);
1576 goto out;
1577 }
1578
1579 /* We know we've got a valid header by this point */
1580 res = qemu_get_buffer(rp, buf, header_len);
1581 if (res != header_len) {
1582 error_report("RP: Failed reading data for message 0x%04x"
1583 " read %d expected %d",
1584 header_type, res, header_len);
1585 mark_source_rp_bad(ms);
1586 goto out;
1587 }
1588
1589 /* OK, we have the message and the data */
1590 switch (header_type) {
1591 case MIG_RP_MSG_SHUT:
Peter Maydell4d885132016-06-10 17:09:22 +01001592 sibling_error = ldl_be_p(buf);
Dr. David Alan Gilbert70b20472015-11-05 18:10:49 +00001593 trace_source_return_path_thread_shut(sibling_error);
1594 if (sibling_error) {
1595 error_report("RP: Sibling indicated error %d", sibling_error);
1596 mark_source_rp_bad(ms);
1597 }
1598 /*
1599             * We'll let the main thread deal with closing the RP;
1600 * we could do a shutdown(2) on it, but we're the only user
1601 * anyway, so there's nothing gained.
1602 */
1603 goto out;
1604
1605 case MIG_RP_MSG_PONG:
Peter Maydell4d885132016-06-10 17:09:22 +01001606 tmp32 = ldl_be_p(buf);
Dr. David Alan Gilbert70b20472015-11-05 18:10:49 +00001607 trace_source_return_path_thread_pong(tmp32);
1608 break;
1609
Dr. David Alan Gilbert1e2d90e2015-11-05 18:11:07 +00001610 case MIG_RP_MSG_REQ_PAGES:
Peter Maydell4d885132016-06-10 17:09:22 +01001611 start = ldq_be_p(buf);
1612 len = ldl_be_p(buf + 8);
Dr. David Alan Gilbert1e2d90e2015-11-05 18:11:07 +00001613 migrate_handle_rp_req_pages(ms, NULL, start, len);
1614 break;
1615
1616 case MIG_RP_MSG_REQ_PAGES_ID:
1617 expected_len = 12 + 1; /* header + termination */
1618
1619 if (header_len >= expected_len) {
Peter Maydell4d885132016-06-10 17:09:22 +01001620 start = ldq_be_p(buf);
1621 len = ldl_be_p(buf + 8);
Dr. David Alan Gilbert1e2d90e2015-11-05 18:11:07 +00001622 /* Now we expect an idstr */
1623 tmp32 = buf[12]; /* Length of the following idstr */
1624 buf[13 + tmp32] = '\0';
1625 expected_len += tmp32;
1626 }
1627 if (header_len != expected_len) {
1628 error_report("RP: Req_Page_id with length %d expecting %zd",
1629 header_len, expected_len);
1630 mark_source_rp_bad(ms);
1631 goto out;
1632 }
1633 migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len);
1634 break;
1635
Dr. David Alan Gilbert70b20472015-11-05 18:10:49 +00001636 default:
1637 break;
1638 }
1639 }
Dr. David Alan Gilbert5df54162015-11-18 11:48:41 +00001640 if (qemu_file_get_error(rp)) {
Dr. David Alan Gilbert70b20472015-11-05 18:10:49 +00001641 trace_source_return_path_thread_bad_end();
1642 mark_source_rp_bad(ms);
1643 }
1644
1645 trace_source_return_path_thread_end();
1646out:
1647 ms->rp_state.from_dst_file = NULL;
1648 qemu_fclose(rp);
1649 return NULL;
1650}
1651
Dr. David Alan Gilbert70b20472015-11-05 18:10:49 +00001652static int open_return_path_on_source(MigrationState *ms)
1653{
1654
zhanghailiang89a02a92016-01-15 11:37:42 +08001655 ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file);
Dr. David Alan Gilbert70b20472015-11-05 18:10:49 +00001656 if (!ms->rp_state.from_dst_file) {
1657 return -1;
1658 }
1659
1660 trace_open_return_path_on_source();
1661 qemu_thread_create(&ms->rp_state.rp_thread, "return path",
1662 source_return_path_thread, ms, QEMU_THREAD_JOINABLE);
1663
1664 trace_open_return_path_on_source_continue();
1665
1666 return 0;
1667}
1668
Dr. David Alan Gilbert70b20472015-11-05 18:10:49 +00001669/* Returns 0 if the RP was ok, otherwise there was an error on the RP */
1670static int await_return_path_close_on_source(MigrationState *ms)
1671{
1672 /*
1673 * If this is a normal exit then the destination will send a SHUT and the
1674     * rp_thread will exit; however, if there's an error we need to cause
1675 * it to exit.
1676 */
zhanghailiang89a02a92016-01-15 11:37:42 +08001677 if (qemu_file_get_error(ms->to_dst_file) && ms->rp_state.from_dst_file) {
Dr. David Alan Gilbert70b20472015-11-05 18:10:49 +00001678 /*
1679 * shutdown(2), if we have it, will cause it to unblock if it's stuck
1680 * waiting for the destination.
1681 */
1682 qemu_file_shutdown(ms->rp_state.from_dst_file);
1683 mark_source_rp_bad(ms);
1684 }
1685 trace_await_return_path_close_on_source_joining();
1686 qemu_thread_join(&ms->rp_state.rp_thread);
1687 trace_await_return_path_close_on_source_close();
1688 return ms->rp_state.error;
1689}
1690
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00001691/*
1692 * Switch from normal iteration to postcopy
1693 * Returns non-0 on error
1694 */
1695static int postcopy_start(MigrationState *ms, bool *old_vm_running)
1696{
1697 int ret;
Daniel P. Berrange61b67d42016-04-27 11:05:01 +01001698 QIOChannelBuffer *bioc;
1699 QEMUFile *fb;
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00001700 int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
Dr. David Alan Gilbertef8d6482017-02-02 15:59:09 +00001701 bool restart_block = false;
zhanghailiang48781e52015-12-16 11:47:33 +00001702 migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE,
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00001703 MIGRATION_STATUS_POSTCOPY_ACTIVE);
1704
1705 trace_postcopy_start();
1706 qemu_mutex_lock_iothread();
1707 trace_postcopy_start_set_run();
1708
1709 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
1710 *old_vm_running = runstate_is_running();
1711 global_state_store();
1712 ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
Kevin Wolf76b1c7f2015-12-22 14:07:08 +01001713 if (ret < 0) {
1714 goto fail;
1715 }
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00001716
Kevin Wolf76b1c7f2015-12-22 14:07:08 +01001717 ret = bdrv_inactivate_all();
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00001718 if (ret < 0) {
1719 goto fail;
1720 }
Dr. David Alan Gilbertef8d6482017-02-02 15:59:09 +00001721 restart_block = true;
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00001722
1723 /*
Dr. David Alan Gilbert1c0d2492015-11-11 14:02:27 +00001724 * Cause any non-postcopiable, but iterative devices to
1725 * send out their final data.
1726 */
zhanghailiang89a02a92016-01-15 11:37:42 +08001727 qemu_savevm_state_complete_precopy(ms->to_dst_file, true);
Dr. David Alan Gilbert1c0d2492015-11-11 14:02:27 +00001728
1729 /*
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00001730     * In the finish-migrate state, and with the io-lock held, everything
1731     * should be quiet, but we've potentially still got dirty pages and we
1732     * need to tell the destination to throw away any pages it has already
1733     * received that are dirty
1734 */
1735 if (ram_postcopy_send_discard_bitmap(ms)) {
1736 error_report("postcopy send discard bitmap failed");
1737 goto fail;
1738 }
1739
1740 /*
1741 * send rest of state - note things that are doing postcopy
1742 * will notice we're in POSTCOPY_ACTIVE and not actually
1743 * wrap their state up here
1744 */
zhanghailiang89a02a92016-01-15 11:37:42 +08001745 qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX);
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00001746 /* Ping just for debugging, helps line traces up */
zhanghailiang89a02a92016-01-15 11:37:42 +08001747 qemu_savevm_send_ping(ms->to_dst_file, 2);
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00001748
1749 /*
1750 * While loading the device state we may trigger page transfer
1751 * requests and the fd must be free to process those, and thus
1752 * the destination must read the whole device state off the fd before
1753 * it starts processing it. Unfortunately the ad-hoc migration format
1754 * doesn't allow the destination to know the size to read without fully
1755     * parsing it through each device's load-state code (especially the
1756     * open-coded devices that use get/put).
1757     * So we wrap the device state up in a package with a length at the start;
1758     * to do this we use a buffer-backed QEMUFile to hold the whole device state.
1759 */
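    /*
     * Roughly, the buffer assembled below ends up containing, in order: the
     * postcopy-listen command, the remaining (non-iterative) device state, a
     * debug ping and the postcopy-run command; it is then sent to the
     * destination as a single length-prefixed blob via
     * qemu_savevm_send_packaged().
     */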
Daniel P. Berrange61b67d42016-04-27 11:05:01 +01001760 bioc = qio_channel_buffer_new(4096);
Daniel P. Berrange6f01f132016-09-30 11:57:14 +01001761 qio_channel_set_name(QIO_CHANNEL(bioc), "migration-postcopy-buffer");
Daniel P. Berrange61b67d42016-04-27 11:05:01 +01001762 fb = qemu_fopen_channel_output(QIO_CHANNEL(bioc));
1763 object_unref(OBJECT(bioc));
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00001764
Dr. David Alan Gilbertc76201a2015-11-05 18:11:18 +00001765 /*
1766 * Make sure the receiver can get incoming pages before we send the rest
1767 * of the state
1768 */
1769 qemu_savevm_send_postcopy_listen(fb);
1770
Dr. David Alan Gilbert1c0d2492015-11-11 14:02:27 +00001771 qemu_savevm_state_complete_precopy(fb, false);
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00001772 qemu_savevm_send_ping(fb, 3);
1773
1774 qemu_savevm_send_postcopy_run(fb);
1775
1776 /* <><> end of stuff going into the package */
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00001777
Dr. David Alan Gilbertef8d6482017-02-02 15:59:09 +00001778 /* Last point of recovery; as soon as we send the package the destination
1779 * can open devices and potentially start running.
1780     * Let's just check again that we've not got any errors.
1781 */
1782 ret = qemu_file_get_error(ms->to_dst_file);
1783 if (ret) {
1784 error_report("postcopy_start: Migration stream errored (pre package)");
1785 goto fail_closefb;
1786 }
1787
1788 restart_block = false;
1789
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00001790 /* Now send that blob */
Daniel P. Berrange61b67d42016-04-27 11:05:01 +01001791 if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) {
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00001792 goto fail_closefb;
1793 }
1794 qemu_fclose(fb);
Dr. David Alan Gilbertb82fc322016-02-22 17:17:32 +00001795
1796    /* Send a notification to give a chance for anything that needs to happen
1797 * at the transition to postcopy and after the device state; in particular
1798 * spice needs to trigger a transition now
1799 */
1800 ms->postcopy_after_devices = true;
1801 notifier_list_notify(&migration_state_notifiers, ms);
1802
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00001803 ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop;
1804
1805 qemu_mutex_unlock_iothread();
1806
1807 /*
1808 * Although this ping is just for debug, it could potentially be
1809 * used for getting a better measurement of downtime at the source.
1810 */
zhanghailiang89a02a92016-01-15 11:37:42 +08001811 qemu_savevm_send_ping(ms->to_dst_file, 4);
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00001812
Pavel Butsykinced1c612017-02-03 18:23:21 +03001813 if (migrate_release_ram()) {
1814 ram_postcopy_migrated_memory_release(ms);
1815 }
1816
zhanghailiang89a02a92016-01-15 11:37:42 +08001817 ret = qemu_file_get_error(ms->to_dst_file);
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00001818 if (ret) {
1819 error_report("postcopy_start: Migration stream errored");
zhanghailiang48781e52015-12-16 11:47:33 +00001820 migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00001821 MIGRATION_STATUS_FAILED);
1822 }
1823
1824 return ret;
1825
1826fail_closefb:
1827 qemu_fclose(fb);
1828fail:
zhanghailiang48781e52015-12-16 11:47:33 +00001829 migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00001830 MIGRATION_STATUS_FAILED);
Dr. David Alan Gilbertef8d6482017-02-02 15:59:09 +00001831 if (restart_block) {
1832 /* A failure happened early enough that we know the destination hasn't
1833 * accessed block devices, so we're safe to recover.
1834 */
1835 Error *local_err = NULL;
1836
1837 bdrv_invalidate_cache_all(&local_err);
1838 if (local_err) {
1839 error_report_err(local_err);
1840 }
1841 }
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00001842 qemu_mutex_unlock_iothread();
1843 return -1;
1844}
1845
Dr. David Alan Gilbert09f6c852015-08-13 11:51:31 +01001846/**
1847 * migration_completion: Used by migration_thread when there's not much left.
1848 * The caller 'breaks' the loop when this returns.
1849 *
1850 * @s: Current migration state
Dr. David Alan Gilbert36f48562015-11-05 18:10:57 +00001851 * @current_active_state: The migration state we expect to be in
Dr. David Alan Gilbert09f6c852015-08-13 11:51:31 +01001852 * @*old_vm_running: Pointer to old_vm_running flag
1853 * @*start_time: Pointer to time to update
1854 */
Dr. David Alan Gilbert36f48562015-11-05 18:10:57 +00001855static void migration_completion(MigrationState *s, int current_active_state,
1856 bool *old_vm_running,
Dr. David Alan Gilbert09f6c852015-08-13 11:51:31 +01001857 int64_t *start_time)
1858{
1859 int ret;
1860
Dr. David Alan Gilbertb10ac0c2015-11-05 18:11:06 +00001861 if (s->state == MIGRATION_STATUS_ACTIVE) {
1862 qemu_mutex_lock_iothread();
1863 *start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1864 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
1865 *old_vm_running = runstate_is_running();
1866 ret = global_state_store();
Dr. David Alan Gilbert09f6c852015-08-13 11:51:31 +01001867
Dr. David Alan Gilbertb10ac0c2015-11-05 18:11:06 +00001868 if (!ret) {
1869 ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
zhanghailiang0b827d52016-10-27 14:42:54 +08001870 /*
1871 * Don't mark the image with BDRV_O_INACTIVE flag if
1872 * we will go into COLO stage later.
1873 */
1874 if (ret >= 0 && !migrate_colo_enabled()) {
Kevin Wolf76b1c7f2015-12-22 14:07:08 +01001875 ret = bdrv_inactivate_all();
1876 }
1877 if (ret >= 0) {
zhanghailiang89a02a92016-01-15 11:37:42 +08001878 qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
1879 qemu_savevm_state_complete_precopy(s->to_dst_file, false);
zhanghailiang1d2acc32017-01-24 15:59:52 +08001880 s->block_inactive = true;
Dr. David Alan Gilbertb10ac0c2015-11-05 18:11:06 +00001881 }
Dr. David Alan Gilbert09f6c852015-08-13 11:51:31 +01001882 }
Dr. David Alan Gilbertb10ac0c2015-11-05 18:11:06 +00001883 qemu_mutex_unlock_iothread();
Dr. David Alan Gilbert09f6c852015-08-13 11:51:31 +01001884
Dr. David Alan Gilbertb10ac0c2015-11-05 18:11:06 +00001885 if (ret < 0) {
1886 goto fail;
1887 }
1888 } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
1889 trace_migration_completion_postcopy_end();
1890
zhanghailiang89a02a92016-01-15 11:37:42 +08001891 qemu_savevm_state_complete_postcopy(s->to_dst_file);
Dr. David Alan Gilbertb10ac0c2015-11-05 18:11:06 +00001892 trace_migration_completion_postcopy_end_after_complete();
1893 }
1894
1895 /*
1896 * If rp was opened we must clean up the thread before
1897 * cleaning everything else up (since if there are no failures
1898     * it will wait for the destination to send its status in
1899     * a SHUT command).
1900     * Postcopy opens rp if enabled (even if it's not activated)
1901 */
1902 if (migrate_postcopy_ram()) {
1903 int rp_error;
1904 trace_migration_completion_postcopy_end_before_rp();
1905 rp_error = await_return_path_close_on_source(s);
1906 trace_migration_completion_postcopy_end_after_rp(rp_error);
1907 if (rp_error) {
Greg Kurzfe904ea2016-05-18 15:44:36 +02001908 goto fail_invalidate;
Dr. David Alan Gilbertb10ac0c2015-11-05 18:11:06 +00001909 }
Dr. David Alan Gilbert09f6c852015-08-13 11:51:31 +01001910 }
1911
zhanghailiang89a02a92016-01-15 11:37:42 +08001912 if (qemu_file_get_error(s->to_dst_file)) {
Dr. David Alan Gilbert09f6c852015-08-13 11:51:31 +01001913 trace_migration_completion_file_err();
Greg Kurzfe904ea2016-05-18 15:44:36 +02001914 goto fail_invalidate;
Dr. David Alan Gilbert09f6c852015-08-13 11:51:31 +01001915 }
1916
zhanghailiang0b827d52016-10-27 14:42:54 +08001917 if (!migrate_colo_enabled()) {
1918 migrate_set_state(&s->state, current_active_state,
1919 MIGRATION_STATUS_COMPLETED);
1920 }
1921
Dr. David Alan Gilbert09f6c852015-08-13 11:51:31 +01001922 return;
1923
Greg Kurzfe904ea2016-05-18 15:44:36 +02001924fail_invalidate:
1925 /* If not doing postcopy, vm_start() will be called: let's regain
1926     * control of the images.
1927 */
1928 if (s->state == MIGRATION_STATUS_ACTIVE) {
1929 Error *local_err = NULL;
1930
zhanghailiang1d2acc32017-01-24 15:59:52 +08001931 qemu_mutex_lock_iothread();
Greg Kurzfe904ea2016-05-18 15:44:36 +02001932 bdrv_invalidate_cache_all(&local_err);
1933 if (local_err) {
1934 error_report_err(local_err);
zhanghailiang1d2acc32017-01-24 15:59:52 +08001935 } else {
1936 s->block_inactive = false;
Greg Kurzfe904ea2016-05-18 15:44:36 +02001937 }
zhanghailiang1d2acc32017-01-24 15:59:52 +08001938 qemu_mutex_unlock_iothread();
Greg Kurzfe904ea2016-05-18 15:44:36 +02001939 }
1940
Dr. David Alan Gilbert09f6c852015-08-13 11:51:31 +01001941fail:
zhanghailiang48781e52015-12-16 11:47:33 +00001942 migrate_set_state(&s->state, current_active_state,
1943 MIGRATION_STATUS_FAILED);
Dr. David Alan Gilbert09f6c852015-08-13 11:51:31 +01001944}
1945
zhanghailiang35a6ed42016-10-27 14:42:52 +08001946bool migrate_colo_enabled(void)
1947{
1948 MigrationState *s = migrate_get_current();
1949 return s->enabled_capabilities[MIGRATION_CAPABILITY_X_COLO];
1950}
1951
Dr. David Alan Gilbert70b20472015-11-05 18:10:49 +00001952/*
1953 * Master migration thread on the source VM.
1954 * It drives the migration and pumps the data down the outgoing channel.
1955 */
Juan Quintela5f496a12013-02-22 17:36:30 +01001956static void *migration_thread(void *opaque)
Juan Quintela0d82d0e2012-10-03 14:18:33 +02001957{
Juan Quintela9848a402012-12-19 09:55:50 +01001958 MigrationState *s = opaque;
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00001959 /* Used by the bandwidth calcs, updated later */
Alex Blighbc72ad62013-08-21 16:03:08 +01001960 int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1961 int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
Paolo Bonzinibe7172e2013-02-22 17:36:43 +01001962 int64_t initial_bytes = 0;
Peter Xufaec0662017-04-01 16:18:43 +08001963 /*
1964 * The final stage happens when the remaining data is smaller than
1965 * this threshold; it's calculated from the requested downtime and
1966 * measured bandwidth
1967 */
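    /*
     * For example (illustrative numbers only): with a 300 ms downtime limit
     * and a measured bandwidth of ~1 GB/s (~1e6 bytes/ms), threshold_size
     * works out to roughly 300 MB of remaining data.
     */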
1968 int64_t threshold_size = 0;
Paolo Bonzinia3fa1d72013-02-22 17:36:18 +01001969 int64_t start_time = initial_time;
Liang Li94f5a432015-11-02 15:37:00 +08001970 int64_t end_time;
Paolo Bonzinia3fa1d72013-02-22 17:36:18 +01001971 bool old_vm_running = false;
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00001972 bool entered_postcopy = false;
1973 /* The active state we expect to be in; ACTIVE or POSTCOPY_ACTIVE */
1974 enum MigrationStatus current_active_state = MIGRATION_STATUS_ACTIVE;
zhanghailiang0b827d52016-10-27 14:42:54 +08001975 bool enable_colo = migrate_colo_enabled();
Juan Quintela76f59332012-10-03 20:16:24 +02001976
Paolo Bonziniab28bd22015-07-09 08:55:38 +02001977 rcu_register_thread();
1978
zhanghailiang89a02a92016-01-15 11:37:42 +08001979 qemu_savevm_state_header(s->to_dst_file);
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00001980
1981 if (migrate_postcopy_ram()) {
1982 /* Now tell the dest that it should open its end so it can reply */
zhanghailiang89a02a92016-01-15 11:37:42 +08001983 qemu_savevm_send_open_return_path(s->to_dst_file);
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00001984
1985 /* And do a ping that will make stuff easier to debug */
zhanghailiang89a02a92016-01-15 11:37:42 +08001986 qemu_savevm_send_ping(s->to_dst_file, 1);
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00001987
1988 /*
1989 * Tell the destination that we *might* want to do postcopy later;
1990 * if the other end can't do postcopy it should fail now, nice and
1991 * early.
1992 */
zhanghailiang89a02a92016-01-15 11:37:42 +08001993 qemu_savevm_send_postcopy_advise(s->to_dst_file);
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00001994 }
1995
Juan Quintelaa0762d92017-04-05 21:00:09 +02001996 qemu_savevm_state_begin(s->to_dst_file);
Juan Quintela0d82d0e2012-10-03 14:18:33 +02001997
Alex Blighbc72ad62013-08-21 16:03:08 +01001998 s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
zhanghailiang48781e52015-12-16 11:47:33 +00001999 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
2000 MIGRATION_STATUS_ACTIVE);
Michael R. Hines29ae8a42013-07-22 10:01:57 -04002001
Dr. David Alan Gilbert9ec055a2015-11-05 18:10:58 +00002002 trace_migration_thread_setup_complete();
2003
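    /*
     * Main transfer loop: while rate-limit budget is available, either do
     * another iteration of precopy, switch to postcopy once the
     * non-postcopiable remainder is small enough and the user requested it,
     * or complete the migration when the remaining data fits in the downtime
     * budget; every BUFFER_DELAY ms the bandwidth estimate and threshold_size
     * are recomputed.
     */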
2004 while (s->state == MIGRATION_STATUS_ACTIVE ||
2005 s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
Juan Quintelaa3e879c2013-02-01 12:39:08 +01002006 int64_t current_time;
Juan Quintelac369f402012-10-03 20:33:34 +02002007 uint64_t pending_size;
Juan Quintela0d82d0e2012-10-03 14:18:33 +02002008
zhanghailiang89a02a92016-01-15 11:37:42 +08002009 if (!qemu_file_rate_limit(s->to_dst_file)) {
Dr. David Alan Gilbertc31b0982015-11-05 18:10:54 +00002010 uint64_t pend_post, pend_nonpost;
2011
Peter Xufaec0662017-04-01 16:18:43 +08002012 qemu_savevm_state_pending(s->to_dst_file, threshold_size,
2013 &pend_nonpost, &pend_post);
Dr. David Alan Gilbertc31b0982015-11-05 18:10:54 +00002014 pending_size = pend_nonpost + pend_post;
Peter Xufaec0662017-04-01 16:18:43 +08002015 trace_migrate_pending(pending_size, threshold_size,
Dr. David Alan Gilbertc31b0982015-11-05 18:10:54 +00002016 pend_post, pend_nonpost);
Peter Xufaec0662017-04-01 16:18:43 +08002017 if (pending_size && pending_size >= threshold_size) {
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00002018 /* Still a significant amount to transfer */
2019
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00002020 if (migrate_postcopy_ram() &&
2021 s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE &&
Peter Xufaec0662017-04-01 16:18:43 +08002022 pend_nonpost <= threshold_size &&
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00002023 atomic_read(&s->start_postcopy)) {
2024
2025 if (!postcopy_start(s, &old_vm_running)) {
2026 current_active_state = MIGRATION_STATUS_POSTCOPY_ACTIVE;
2027 entered_postcopy = true;
2028 }
2029
2030 continue;
2031 }
2032 /* Just another iteration step */
zhanghailiang89a02a92016-01-15 11:37:42 +08002033 qemu_savevm_state_iterate(s->to_dst_file, entered_postcopy);
Juan Quintelac369f402012-10-03 20:33:34 +02002034 } else {
Dr. David Alan Gilbert09f6c852015-08-13 11:51:31 +01002035 trace_migration_thread_low_pending(pending_size);
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00002036 migration_completion(s, current_active_state,
Dr. David Alan Gilbert36f48562015-11-05 18:10:57 +00002037 &old_vm_running, &start_time);
Dr. David Alan Gilbert09f6c852015-08-13 11:51:31 +01002038 break;
Juan Quintelac369f402012-10-03 20:33:34 +02002039 }
2040 }
Paolo Bonzinif4410a52013-02-22 17:36:20 +01002041
zhanghailiang89a02a92016-01-15 11:37:42 +08002042 if (qemu_file_get_error(s->to_dst_file)) {
zhanghailiang48781e52015-12-16 11:47:33 +00002043 migrate_set_state(&s->state, current_active_state,
2044 MIGRATION_STATUS_FAILED);
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00002045 trace_migration_thread_file_err();
Paolo Bonzinifd45ee22013-02-22 17:36:33 +01002046 break;
2047 }
Alex Blighbc72ad62013-08-21 16:03:08 +01002048 current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
Juan Quintela0d82d0e2012-10-03 14:18:33 +02002049 if (current_time >= initial_time + BUFFER_DELAY) {
zhanghailiang89a02a92016-01-15 11:37:42 +08002050 uint64_t transferred_bytes = qemu_ftell(s->to_dst_file) -
2051 initial_bytes;
Michael Roth77417f12013-05-16 16:25:44 -05002052 uint64_t time_spent = current_time - initial_time;
Paolo Bonzinia694ee32015-01-26 12:12:27 +01002053 double bandwidth = (double)transferred_bytes / time_spent;
Peter Xufaec0662017-04-01 16:18:43 +08002054 threshold_size = bandwidth * s->parameters.downtime_limit;
Juan Quintela0d82d0e2012-10-03 14:18:33 +02002055
Wei Yang5b648de2016-01-24 14:09:57 +00002056 s->mbps = (((double) transferred_bytes * 8.0) /
2057 ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;
Michael R. Hines7e114f82013-06-25 21:35:30 -04002058
Alexey Kardashevskiy9013dca2014-03-11 10:42:29 +11002059 trace_migrate_transferred(transferred_bytes, time_spent,
Peter Xufaec0662017-04-01 16:18:43 +08002060 bandwidth, threshold_size);
Juan Quintela90f8ae72013-02-01 13:22:37 +01002061            /* If we haven't sent anything, we don't want to recalculate;
2062               10000 is a small enough number for our purposes */
Juan Quintela47ad8612017-03-14 18:20:30 +01002063 if (ram_dirty_pages_rate() && transferred_bytes > 10000) {
2064 s->expected_downtime = ram_dirty_pages_rate() *
Juan Quintela20afaed2017-03-21 09:09:14 +01002065 qemu_target_page_size() / bandwidth;
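                /*
                 * Illustrative magnitudes only: ~50000 dirty pages/s of
                 * 4 KiB pages against ~1e6 bytes/ms of bandwidth gives an
                 * expected downtime on the order of 200 ms.
                 */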
Juan Quintela90f8ae72013-02-01 13:22:37 +01002066 }
Juan Quintela0d82d0e2012-10-03 14:18:33 +02002067
zhanghailiang89a02a92016-01-15 11:37:42 +08002068 qemu_file_reset_rate_limit(s->to_dst_file);
Juan Quintela0d82d0e2012-10-03 14:18:33 +02002069 initial_time = current_time;
zhanghailiang89a02a92016-01-15 11:37:42 +08002070 initial_bytes = qemu_ftell(s->to_dst_file);
Juan Quintela0d82d0e2012-10-03 14:18:33 +02002071 }
zhanghailiang89a02a92016-01-15 11:37:42 +08002072 if (qemu_file_rate_limit(s->to_dst_file)) {
Juan Quintela0d82d0e2012-10-03 14:18:33 +02002073 /* usleep expects microseconds */
2074 g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
2075 }
Paolo Bonzinia3fa1d72013-02-22 17:36:18 +01002076 }
2077
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00002078 trace_migration_thread_after_loop();
Jason J. Herne070afca2015-09-08 13:12:35 -04002079 /* If we enabled cpu throttling for auto-converge, turn it off. */
2080 cpu_throttle_stop();
Liang Li94f5a432015-11-02 15:37:00 +08002081 end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
Jason J. Herne070afca2015-09-08 13:12:35 -04002082
Paolo Bonzinif4410a52013-02-22 17:36:20 +01002083 qemu_mutex_lock_iothread();
zhanghailiang0b827d52016-10-27 14:42:54 +08002084 /*
2085     * The resources allocated by migration will be reused in the COLO
2086     * process, so don't release them.
2087 */
2088 if (!enable_colo) {
2089 qemu_savevm_state_cleanup();
2090 }
zhanghailiang31194732015-03-13 16:08:38 +08002091 if (s->state == MIGRATION_STATUS_COMPLETED) {
zhanghailiang89a02a92016-01-15 11:37:42 +08002092 uint64_t transferred_bytes = qemu_ftell(s->to_dst_file);
Paolo Bonzinia3fa1d72013-02-22 17:36:18 +01002093 s->total_time = end_time - s->total_time;
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00002094 if (!entered_postcopy) {
2095 s->downtime = end_time - start_time;
2096 }
Peter Lievend6ed7312014-05-12 10:46:00 +02002097 if (s->total_time) {
2098 s->mbps = (((double) transferred_bytes * 8.0) /
2099 ((double) s->total_time)) / 1000;
2100 }
Paolo Bonzinia3fa1d72013-02-22 17:36:18 +01002101 runstate_set(RUN_STATE_POSTMIGRATE);
2102 } else {
zhanghailiang0b827d52016-10-27 14:42:54 +08002103 if (s->state == MIGRATION_STATUS_ACTIVE && enable_colo) {
2104 migrate_start_colo_process(s);
2105 qemu_savevm_state_cleanup();
2106 /*
2107             * FIXME: we will run the VM in COLO no matter what its old running
2108             * state was; after exiting COLO, it will keep running.
2109 */
2110 old_vm_running = true;
2111 }
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00002112 if (old_vm_running && !entered_postcopy) {
Paolo Bonzinia3fa1d72013-02-22 17:36:18 +01002113 vm_start();
Dr. David Alan Gilbert42da5552016-07-15 17:44:46 +01002114 } else {
2115 if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
2116 runstate_set(RUN_STATE_POSTMIGRATE);
2117 }
Paolo Bonzinidba433c2013-02-22 17:36:17 +01002118 }
Juan Quintela0d82d0e2012-10-03 14:18:33 +02002119 }
Paolo Bonzinibb1fadc2013-02-22 17:36:21 +01002120 qemu_bh_schedule(s->cleanup_bh);
Paolo Bonzinidba433c2013-02-22 17:36:17 +01002121 qemu_mutex_unlock_iothread();
Paolo Bonzinif4410a52013-02-22 17:36:20 +01002122
Paolo Bonziniab28bd22015-07-09 08:55:38 +02002123 rcu_unregister_thread();
Juan Quintela0d82d0e2012-10-03 14:18:33 +02002124 return NULL;
2125}
2126
Juan Quintela9848a402012-12-19 09:55:50 +01002127void migrate_fd_connect(MigrationState *s)
Juan Quintela0d82d0e2012-10-03 14:18:33 +02002128{
Ashijeet Acharya2ff30252016-09-15 21:50:28 +05302129 s->expected_downtime = s->parameters.downtime_limit;
Paolo Bonzinibb1fadc2013-02-22 17:36:21 +01002130 s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);
Juan Quintela0d82d0e2012-10-03 14:18:33 +02002131
Daniel P. Berrange9e4d2b92016-04-27 11:04:57 +01002132 qemu_file_set_blocking(s->to_dst_file, true);
zhanghailiang89a02a92016-01-15 11:37:42 +08002133 qemu_file_set_rate_limit(s->to_dst_file,
Ashijeet Acharya2ff30252016-09-15 21:50:28 +05302134 s->parameters.max_bandwidth / XFER_LIMIT_RATIO);
Paolo Bonzini442773c2013-02-22 17:36:44 +01002135
Stefan Hajnoczi9287ac22013-07-29 15:01:57 +02002136 /* Notify before starting migration thread */
2137 notifier_list_notify(&migration_state_notifiers, s);
2138
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00002139 /*
2140 * Open the return path; currently for postcopy but other things might
2141 * also want it.
2142 */
2143 if (migrate_postcopy_ram()) {
2144 if (open_return_path_on_source(s)) {
2145 error_report("Unable to open return-path for postcopy");
zhanghailiang48781e52015-12-16 11:47:33 +00002146 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00002147 MIGRATION_STATUS_FAILED);
2148 migrate_fd_cleanup(s);
2149 return;
2150 }
2151 }
2152
Liang Li8706d2d2015-03-23 16:32:17 +08002153 migrate_compress_threads_create();
Pankaj Gupta009fad72017-01-23 19:12:56 +05302154 qemu_thread_create(&s->thread, "live_migration", migration_thread, s,
Paolo Bonzinibb1fadc2013-02-22 17:36:21 +01002155 QEMU_THREAD_JOINABLE);
Dr. David Alan Gilbert1d34e4b2015-11-05 18:11:05 +00002156 s->migration_thread_running = true;
Juan Quintela0d82d0e2012-10-03 14:18:33 +02002157}
Dr. David Alan Gilbert093e3c42015-11-05 18:10:52 +00002158