blob: 499eccaeee0696703b3d3494920b241c125614ba [file] [log] [blame]
Jeff Cody747ff602012-09-27 13:29:13 -04001/*
2 * Live block commit
3 *
4 * Copyright Red Hat, Inc. 2012
5 *
6 * Authors:
7 * Jeff Cody <jcody@redhat.com>
8 * Based on stream.c by Stefan Hajnoczi
9 *
10 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
11 * See the COPYING.LIB file in the top-level directory.
12 *
13 */
14
Peter Maydell80c71a22016-01-18 18:01:42 +000015#include "qemu/osdep.h"
Jeff Cody747ff602012-09-27 13:29:13 -040016#include "trace.h"
Paolo Bonzini737e1502012-12-17 18:19:44 +010017#include "block/block_int.h"
18#include "block/blockjob.h"
Markus Armbrusterda34e652016-03-14 09:01:28 +010019#include "qapi/error.h"
Markus Armbrustercc7a8ea2015-03-17 17:22:46 +010020#include "qapi/qmp/qerror.h"
Jeff Cody747ff602012-09-27 13:29:13 -040021#include "qemu/ratelimit.h"
Max Reitz373340b2015-10-19 17:53:22 +020022#include "sysemu/block-backend.h"
Jeff Cody747ff602012-09-27 13:29:13 -040023
enum {
    /*
     * Size of data buffer for populating the image file. This should be large
     * enough to process multiple clusters in a single call, so that populating
     * contiguous regions of the image is efficient.
     */
    COMMIT_BUFFER_SIZE = 512 * 1024, /* in bytes */
};

/* Duration of one rate-limit accounting slice, used by ratelimit_set_speed() */
#define SLICE_TIME 100000000ULL /* ns */
34
/*
 * State of a live commit block job: data from 'top' is copied down into
 * 'base', after which 'top' (and any intermediates) are dropped from the
 * backing chain of 'active'.
 */
typedef struct CommitBlockJob {
    BlockJob common;            /* generic block job state; must be first */
    RateLimit limit;            /* throttles copying when a speed is set */
    BlockDriverState *active;   /* top of the chain; used to find the overlay */
    BlockBackend *top;          /* image being committed (read side) */
    BlockBackend *base;         /* image receiving the data (write side) */
    BlockdevOnError on_error;   /* policy applied on read/write errors */
    int base_flags;             /* base's original open flags, restored on completion */
    int orig_overlay_flags;     /* overlay's original open flags, restored on completion */
    char *backing_file_str;     /* backing filename to record when dropping 'top' */
} CommitBlockJob;
46
Kevin Wolf46534562016-04-14 13:09:53 +020047static int coroutine_fn commit_populate(BlockBackend *bs, BlockBackend *base,
Jeff Cody747ff602012-09-27 13:29:13 -040048 int64_t sector_num, int nb_sectors,
49 void *buf)
50{
51 int ret = 0;
Kevin Wolf46534562016-04-14 13:09:53 +020052 QEMUIOVector qiov;
53 struct iovec iov = {
54 .iov_base = buf,
55 .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
56 };
Jeff Cody747ff602012-09-27 13:29:13 -040057
Kevin Wolf46534562016-04-14 13:09:53 +020058 qemu_iovec_init_external(&qiov, &iov, 1);
59
60 ret = blk_co_preadv(bs, sector_num * BDRV_SECTOR_SIZE,
61 qiov.size, &qiov, 0);
62 if (ret < 0) {
Jeff Cody747ff602012-09-27 13:29:13 -040063 return ret;
64 }
65
Kevin Wolf46534562016-04-14 13:09:53 +020066 ret = blk_co_pwritev(base, sector_num * BDRV_SECTOR_SIZE,
67 qiov.size, &qiov, 0);
68 if (ret < 0) {
Jeff Cody747ff602012-09-27 13:29:13 -040069 return ret;
70 }
71
72 return 0;
73}
74
/* Result handed from the commit_run() coroutine to commit_complete(),
 * which runs in the main loop via block_job_defer_to_main_loop(). */
typedef struct {
    int ret;    /* 0 on success, negative errno on failure */
} CommitCompleteData;
78
/*
 * Main-loop completion callback for the commit job.
 *
 * On success (and if not cancelled) drops the committed images from the
 * backing chain, then restores the original read-only flags of base and
 * overlay, releases the job's BlockBackends, and reports the final status.
 */
static void commit_complete(BlockJob *job, void *opaque)
{
    CommitBlockJob *s = container_of(job, CommitBlockJob, common);
    CommitCompleteData *data = opaque;
    BlockDriverState *active = s->active;
    BlockDriverState *top = blk_bs(s->top);
    BlockDriverState *base = blk_bs(s->base);
    BlockDriverState *overlay_bs = bdrv_find_overlay(active, top);
    int ret = data->ret;

    if (!block_job_is_cancelled(&s->common) && ret == 0) {
        /* success */
        ret = bdrv_drop_intermediate(active, top, base, s->backing_file_str);
    }

    /* restore base open flags here if appropriate (e.g., change the base back
     * to r/o). These reopens do not need to be atomic, since we won't abort
     * even on failure here */
    if (s->base_flags != bdrv_get_flags(base)) {
        bdrv_reopen(base, s->base_flags, NULL);
    }
    if (overlay_bs && s->orig_overlay_flags != bdrv_get_flags(overlay_bs)) {
        bdrv_reopen(overlay_bs, s->orig_overlay_flags, NULL);
    }
    g_free(s->backing_file_str);
    blk_unref(s->top);
    blk_unref(s->base);
    /* Reports the job's final return value and frees it */
    block_job_completed(&s->common, ret);
    g_free(data);
}
109
/*
 * Coroutine body of the commit job.
 *
 * Grows the base image if it is shorter than the top image, then walks the
 * top image in COMMIT_BUFFER_SIZE chunks, copying down every region that is
 * allocated above the base. Honors cancellation, the configured error
 * policy, and the optional rate limit. Finishes by deferring to
 * commit_complete() in the main loop with the final return value.
 */
static void coroutine_fn commit_run(void *opaque)
{
    CommitBlockJob *s = opaque;
    CommitCompleteData *data;
    int64_t sector_num, end;
    uint64_t delay_ns = 0;
    int ret = 0;
    int n = 0;                  /* sectors advanced in the current iteration */
    void *buf = NULL;
    int bytes_written = 0;      /* total payload copied; used for tracing/progress */
    int64_t base_len;

    /* Job length (and progress total) is the size of the top image */
    ret = s->common.len = blk_getlength(s->top);


    if (s->common.len < 0) {
        goto out;
    }

    ret = base_len = blk_getlength(s->base);
    if (base_len < 0) {
        goto out;
    }

    /* The base must be at least as large as the top to receive all data */
    if (base_len < s->common.len) {
        ret = blk_truncate(s->base, s->common.len);
        if (ret) {
            goto out;
        }
    }

    end = s->common.len >> BDRV_SECTOR_BITS;
    buf = blk_blockalign(s->top, COMMIT_BUFFER_SIZE);

    for (sector_num = 0; sector_num < end; sector_num += n) {
        bool copy;

        /* Note that even when no rate limit is applied we need to yield
         * with no pending I/O here so that bdrv_drain_all() returns.
         */
        block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        if (block_job_is_cancelled(&s->common)) {
            break;
        }
        /* Copy if allocated above the base */
        ret = bdrv_is_allocated_above(blk_bs(s->top), blk_bs(s->base),
                                      sector_num,
                                      COMMIT_BUFFER_SIZE / BDRV_SECTOR_SIZE,
                                      &n);
        copy = (ret == 1);
        trace_commit_one_iteration(s, sector_num, n, ret);
        if (copy) {
            ret = commit_populate(s->top, s->base, sector_num, n, buf);
            bytes_written += n * BDRV_SECTOR_SIZE;
        }
        if (ret < 0) {
            /* Error policy decides whether to fail the job or retry/ignore */
            BlockErrorAction action =
                block_job_error_action(&s->common, false, s->on_error, -ret);
            if (action == BLOCK_ERROR_ACTION_REPORT) {
                goto out;
            } else {
                /* n = 0 so the failed range is retried on the next pass */
                n = 0;
                continue;
            }
        }
        /* Publish progress */
        s->common.offset += n * BDRV_SECTOR_SIZE;

        if (copy && s->common.speed) {
            delay_ns = ratelimit_calculate_delay(&s->limit, n);
        }
    }

    ret = 0;

out:
    qemu_vfree(buf);

    /* Hand the result to the main loop; commit_complete() frees 'data' */
    data = g_malloc(sizeof(*data));
    data->ret = ret;
    block_job_defer_to_main_loop(&s->common, commit_complete, data);
}
192
193static void commit_set_speed(BlockJob *job, int64_t speed, Error **errp)
194{
195 CommitBlockJob *s = container_of(job, CommitBlockJob, common);
196
197 if (speed < 0) {
Markus Armbrusterc6bd8c72015-03-17 11:54:50 +0100198 error_setg(errp, QERR_INVALID_PARAMETER, "speed");
Jeff Cody747ff602012-09-27 13:29:13 -0400199 return;
200 }
201 ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
202}
203
/* Driver vtable registering the commit job type with the block job core */
static const BlockJobDriver commit_job_driver = {
    .instance_size = sizeof(CommitBlockJob),
    .job_type = BLOCK_JOB_TYPE_COMMIT,
    .set_speed = commit_set_speed,
};
209
/*
 * Start a live commit job copying @top into @base within the chain of @bs.
 *
 * @job_id: identifier for the new job (may be NULL for an autogenerated one
 *          — TODO confirm against block_job_create())
 * @bs: the active (topmost) image of the chain
 * @base: destination image that receives the committed data
 * @top: source image to commit and drop from the chain; must not equal @bs
 * @speed: initial rate limit, passed to block_job_create()
 * @on_error: error policy applied during the copy loop
 * @cb/@opaque: completion callback and its argument
 * @backing_file_str: backing filename to record when dropping @top
 * @errp: set on failure; on success the job coroutine is entered
 *
 * Reopens @base and @top's overlay read-write if needed; their original
 * flags are restored by commit_complete() when the job finishes.
 */
void commit_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *base, BlockDriverState *top, int64_t speed,
                  BlockdevOnError on_error, BlockCompletionFunc *cb,
                  void *opaque, const char *backing_file_str, Error **errp)
{
    CommitBlockJob *s;
    BlockReopenQueue *reopen_queue = NULL;
    int orig_overlay_flags;
    int orig_base_flags;
    BlockDriverState *overlay_bs;
    Error *local_err = NULL;

    assert(top != bs);
    if (top == base) {
        error_setg(errp, "Invalid files for merge: top and base are the same");
        return;
    }

    /* The overlay is the image directly above 'top'; it needs its backing
     * file information rewritten when 'top' goes away */
    overlay_bs = bdrv_find_overlay(bs, top);

    if (overlay_bs == NULL) {
        error_setg(errp, "Could not find overlay image for %s:", top->filename);
        return;
    }

    s = block_job_create(job_id, &commit_job_driver, bs, speed,
                         cb, opaque, errp);
    if (!s) {
        return;
    }

    orig_base_flags = bdrv_get_flags(base);
    orig_overlay_flags = bdrv_get_flags(overlay_bs);

    /* convert base & overlay_bs to r/w, if necessary */
    if (!(orig_base_flags & BDRV_O_RDWR)) {
        reopen_queue = bdrv_reopen_queue(reopen_queue, base, NULL,
                                         orig_base_flags | BDRV_O_RDWR);
    }
    if (!(orig_overlay_flags & BDRV_O_RDWR)) {
        reopen_queue = bdrv_reopen_queue(reopen_queue, overlay_bs, NULL,
                                         orig_overlay_flags | BDRV_O_RDWR);
    }
    if (reopen_queue) {
        bdrv_reopen_multiple(bdrv_get_aio_context(bs), reopen_queue, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            /* Drop the job created above; nothing else to undo yet */
            block_job_unref(&s->common);
            return;
        }
    }


    s->base = blk_new();
    blk_insert_bs(s->base, base);

    s->top = blk_new();
    blk_insert_bs(s->top, top);

    s->active = bs;

    /* Remember original flags so commit_complete() can restore them */
    s->base_flags = orig_base_flags;
    s->orig_overlay_flags = orig_overlay_flags;

    s->backing_file_str = g_strdup(backing_file_str);

    s->on_error = on_error;
    s->common.co = qemu_coroutine_create(commit_run, s);

    trace_commit_start(bs, base, top, s, s->common.co, opaque);
    qemu_coroutine_enter(s->common.co);
}
Kevin Wolf83fd6dd2016-05-30 15:53:15 +0200282
283
/* Sectors copied per iteration of the synchronous commit loop below */
#define COMMIT_BUF_SECTORS 2048

/* commit COW file into the raw image */
/*
 * Synchronously commit all data allocated in @bs down into its backing
 * file, then empty @bs if the driver supports it.
 *
 * Returns 0 on success; -ENOMEDIUM if @bs has no driver, -ENOTSUP if it
 * has no backing file, -EBUSY if commit is op-blocked, -EACCES if a
 * read-only backing file cannot be reopened read-write, or another
 * negative errno from the copy itself. A read-only backing file is
 * reopened r/w for the duration and restored before returning.
 */
int bdrv_commit(BlockDriverState *bs)
{
    BlockBackend *src, *backing;
    BlockDriver *drv = bs->drv;
    int64_t sector, total_sectors, length, backing_length;
    int n, ro, open_flags;
    int ret = 0;
    uint8_t *buf = NULL;

    if (!drv)
        return -ENOMEDIUM;

    if (!bs->backing) {
        return -ENOTSUP;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT_SOURCE, NULL) ||
        bdrv_op_is_blocked(bs->backing->bs, BLOCK_OP_TYPE_COMMIT_TARGET, NULL)) {
        return -EBUSY;
    }

    ro = bs->backing->bs->read_only;
    open_flags = bs->backing->bs->open_flags;

    /* Temporarily make the backing file writable; restored at ro_cleanup */
    if (ro) {
        if (bdrv_reopen(bs->backing->bs, open_flags | BDRV_O_RDWR, NULL)) {
            return -EACCES;
        }
    }

    src = blk_new();
    blk_insert_bs(src, bs);

    backing = blk_new();
    blk_insert_bs(backing, bs->backing->bs);

    length = blk_getlength(src);
    if (length < 0) {
        ret = length;
        goto ro_cleanup;
    }

    backing_length = blk_getlength(backing);
    if (backing_length < 0) {
        ret = backing_length;
        goto ro_cleanup;
    }

    /* If our top snapshot is larger than the backing file image,
     * grow the backing file image if possible. If not possible,
     * we must return an error */
    if (length > backing_length) {
        ret = blk_truncate(backing, length);
        if (ret < 0) {
            goto ro_cleanup;
        }
    }

    total_sectors = length >> BDRV_SECTOR_BITS;

    /* blk_try_blockalign() for src will choose an alignment that works for
     * backing as well, so no need to compare the alignment manually. */
    buf = blk_try_blockalign(src, COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);
    if (buf == NULL) {
        ret = -ENOMEM;
        goto ro_cleanup;
    }

    /* Copy every run of sectors allocated in the top image down into the
     * backing image */
    for (sector = 0; sector < total_sectors; sector += n) {
        ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n);
        if (ret < 0) {
            goto ro_cleanup;
        }
        if (ret) {
            ret = blk_pread(src, sector * BDRV_SECTOR_SIZE, buf,
                            n * BDRV_SECTOR_SIZE);
            if (ret < 0) {
                goto ro_cleanup;
            }

            ret = blk_pwrite(backing, sector * BDRV_SECTOR_SIZE, buf,
                             n * BDRV_SECTOR_SIZE, 0);
            if (ret < 0) {
                goto ro_cleanup;
            }
        }
    }

    if (drv->bdrv_make_empty) {
        ret = drv->bdrv_make_empty(bs);
        if (ret < 0) {
            goto ro_cleanup;
        }
        blk_flush(src);
    }

    /*
     * Make sure all data we wrote to the backing device is actually
     * stable on disk.
     */
    blk_flush(backing);

    ret = 0;
ro_cleanup:
    qemu_vfree(buf);

    blk_unref(src);
    blk_unref(backing);

    if (ro) {
        /* ignoring error return here */
        bdrv_reopen(bs->backing->bs, open_flags & ~BDRV_O_RDWR, NULL);
    }

    return ret;
}