blob: 6c4b736ff807399b24d4cfdd0375cd37d997d558 [file] [log] [blame]
Jeff Cody747ff602012-09-27 13:29:13 -04001/*
2 * Live block commit
3 *
4 * Copyright Red Hat, Inc. 2012
5 *
6 * Authors:
7 * Jeff Cody <jcody@redhat.com>
8 * Based on stream.c by Stefan Hajnoczi
9 *
10 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
11 * See the COPYING.LIB file in the top-level directory.
12 *
13 */
14
Peter Maydell80c71a22016-01-18 18:01:42 +000015#include "qemu/osdep.h"
Kevin Wolfdcbf37c2017-03-09 11:49:16 +010016#include "qemu/cutils.h"
Jeff Cody747ff602012-09-27 13:29:13 -040017#include "trace.h"
Vincent Vanlaer71365ee2024-10-26 18:30:05 +020018#include "block/block-common.h"
19#include "block/coroutines.h"
Paolo Bonzini737e1502012-12-17 18:19:44 +010020#include "block/block_int.h"
John Snowc87621e2016-10-27 12:07:00 -040021#include "block/blockjob_int.h"
Markus Armbrusterda34e652016-03-14 09:01:28 +010022#include "qapi/error.h"
Jeff Cody747ff602012-09-27 13:29:13 -040023#include "qemu/ratelimit.h"
Peter Maydell5df022c2022-02-26 18:07:23 +000024#include "qemu/memalign.h"
Philippe Mathieu-Daudé32cad1f2024-12-03 15:20:13 +010025#include "system/block-backend.h"
Jeff Cody747ff602012-09-27 13:29:13 -040026
enum {
    /*
     * Size of data buffer for populating the image file. This should be large
     * enough to process multiple clusters in a single call, so that populating
     * contiguous regions of the image is efficient.
     *
     * Used as the chunk size by commit_run()/commit_iteration() and as the
     * allocation size for the bounce buffer.
     */
    COMMIT_BUFFER_SIZE = 512 * 1024, /* in bytes */
};
35
/* Per-job state for a live block commit, embedded in the generic BlockJob. */
typedef struct CommitBlockJob {
    BlockJob common;
    /* Filter node inserted above @top for the duration of the job */
    BlockDriverState *commit_top_bs;
    /* BlockBackend used to read from the top image */
    BlockBackend *top;
    /* BlockBackend used to write into the base image */
    BlockBackend *base;
    /* The base node itself (also needed after s->base is released) */
    BlockDriverState *base_bs;
    /* Topmost node whose backing chain continues below @base_bs */
    BlockDriverState *base_overlay;
    /* What to do on I/O errors (report/ignore/stop) */
    BlockdevOnError on_error;
    /* True if base was r/o before the job and must be restored on clean-up */
    bool base_read_only;
    /* True while the backing chain below commit_top_bs is frozen */
    bool chain_frozen;
    /* Backing file string to record when dropping intermediate nodes */
    char *backing_file_str;
    bool backing_mask_protocol;
} CommitBlockJob;
49
/*
 * Job .prepare callback: unfreeze the chain, drop the extra reference on
 * the base BlockBackend and collapse the intermediate nodes into base.
 * Returns the result of bdrv_drop_intermediate().
 */
static int commit_prepare(Job *job)
{
    CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);

    bdrv_graph_rdlock_main_loop();
    bdrv_unfreeze_backing_chain(s->commit_top_bs, s->base_bs);
    s->chain_frozen = false;
    bdrv_graph_rdunlock_main_loop();

    /* Remove base node parent that still uses BLK_PERM_WRITE/RESIZE before
     * the normal backing chain can be restored. */
    blk_unref(s->base);
    s->base = NULL;

    /* FIXME: bdrv_drop_intermediate treats total failures and partial failures
     * identically. Further work is needed to disambiguate these cases. */
    return bdrv_drop_intermediate(s->commit_top_bs, s->base_bs,
                                  s->backing_file_str,
                                  s->backing_mask_protocol);
}
70
/*
 * Job .abort callback: undo the graph manipulation done by commit_start().
 * Unfreezes the chain if still frozen, drops the job's node references and
 * finally replaces the commit_top filter with its backing node again.
 */
static void commit_abort(Job *job)
{
    CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
    BlockDriverState *top_bs = blk_bs(s->top);
    BlockDriverState *commit_top_backing_bs;

    if (s->chain_frozen) {
        bdrv_graph_rdlock_main_loop();
        bdrv_unfreeze_backing_chain(s->commit_top_bs, s->base_bs);
        bdrv_graph_rdunlock_main_loop();
    }

    /* Make sure commit_top_bs and top stay around until bdrv_replace_node() */
    bdrv_ref(top_bs);
    bdrv_ref(s->commit_top_bs);

    if (s->base) {
        blk_unref(s->base);
    }

    /* free the blockers on the intermediate nodes so that bdrv_replace_nodes
     * can succeed */
    block_job_remove_all_bdrv(&s->common);

    /* If bdrv_drop_intermediate() failed (or was not invoked), remove the
     * commit filter driver from the backing chain now. Do this as the final
     * step so that the 'consistent read' permission can be granted.
     *
     * XXX Can (or should) we somehow keep 'consistent read' blocked even
     * after the failed/cancelled commit job is gone? If we already wrote
     * something to base, the intermediate images aren't valid any more. */
    bdrv_graph_rdlock_main_loop();
    commit_top_backing_bs = s->commit_top_bs->backing->bs;
    bdrv_graph_rdunlock_main_loop();

    /* Replacing a node is a graph write operation: drain first, then take
     * the writer lock for the actual replacement. */
    bdrv_drained_begin(commit_top_backing_bs);
    bdrv_graph_wrlock();
    bdrv_replace_node(s->commit_top_bs, commit_top_backing_bs, &error_abort);
    bdrv_graph_wrunlock();
    bdrv_drained_end(commit_top_backing_bs);

    /* Drop the references taken above */
    bdrv_unref(s->commit_top_bs);
    bdrv_unref(top_bs);
}
115
116static void commit_clean(Job *job)
117{
118 CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
119
Stefan Hajnoczi9e85cd52014-10-21 12:03:59 +0100120 /* restore base open flags here if appropriate (e.g., change the base back
121 * to r/o). These reopens do not need to be atomic, since we won't abort
122 * even on failure here */
Alberto Garciae70cdc52018-11-12 16:00:35 +0200123 if (s->base_read_only) {
124 bdrv_reopen_set_read_only(s->base_bs, true, NULL);
Stefan Hajnoczi9e85cd52014-10-21 12:03:59 +0100125 }
John Snow22dffcb2018-09-06 09:02:13 -0400126
Stefan Hajnoczi9e85cd52014-10-21 12:03:59 +0100127 g_free(s->backing_file_str);
Kevin Wolf46534562016-04-14 13:09:53 +0200128 blk_unref(s->top);
Stefan Hajnoczi9e85cd52014-10-21 12:03:59 +0100129}
130
/*
 * Process one chunk of the commit: check allocation status above the base
 * and, if allocated, copy (or zero) the data into the base image.
 *
 * @offset: byte offset into the top image for this chunk
 * @requested_bytes: in: maximum chunk size; out: bytes actually processed
 *                   (set to 0 when an error was ignored/retried per policy)
 * @buf: bounce buffer of at least COMMIT_BUFFER_SIZE bytes
 *
 * Returns 0 on success or on an error the configured error action chose not
 * to report; returns a negative errno when the action is REPORT.
 */
static int commit_iteration(CommitBlockJob *s, int64_t offset,
                            int64_t *requested_bytes, void *buf)
{
    BlockErrorAction action;
    int64_t bytes = *requested_bytes;
    int ret = 0;
    /* Distinguishes read (top) errors from write (base) errors for the
     * error action */
    bool error_in_source = true;

    /* Copy if allocated above the base */
    WITH_GRAPH_RDLOCK_GUARD() {
        ret = bdrv_co_common_block_status_above(blk_bs(s->top),
                s->base_overlay, true, true, offset, COMMIT_BUFFER_SIZE,
                &bytes, NULL, NULL, NULL);
    }

    trace_commit_one_iteration(s, offset, bytes, ret);

    if (ret < 0) {
        goto fail;
    }

    if (ret & BDRV_BLOCK_ALLOCATED) {
        if (ret & BDRV_BLOCK_ZERO) {
            /*
             * If the top (sub)clusters are smaller than the base
             * (sub)clusters, this will not unmap unless the underlying device
             * does some tracking of these requests. Ideally, we would find
             * the maximal extent of the zero clusters.
             */
            ret = blk_co_pwrite_zeroes(s->base, offset, bytes,
                                       BDRV_REQ_MAY_UNMAP);
            if (ret < 0) {
                error_in_source = false;
                goto fail;
            }
        } else {
            assert(bytes < SIZE_MAX);

            /* Read from top, then write the same range into base */
            ret = blk_co_pread(s->top, offset, bytes, buf, 0);
            if (ret < 0) {
                goto fail;
            }

            ret = blk_co_pwrite(s->base, offset, bytes, buf, 0);
            if (ret < 0) {
                error_in_source = false;
                goto fail;
            }
        }

        /*
         * Whether zeroes actually end up on disk depends on the details of
         * the underlying driver. Therefore, this might rate limit more than
         * is necessary.
         */
        block_job_ratelimit_processed_bytes(&s->common, bytes);
    }

    /* Publish progress */

    job_progress_update(&s->common.job, bytes);

    *requested_bytes = bytes;

    return 0;

fail:
    /* Let the configured on_error policy decide whether to report */
    action = block_job_error_action(&s->common, s->on_error,
                                    error_in_source, -ret);
    if (action == BLOCK_ERROR_ACTION_REPORT) {
        return ret;
    }

    /* Error swallowed: report no progress so the caller retries the chunk */
    *requested_bytes = 0;

    return 0;
}
208
/*
 * Job .run callback (coroutine context): main loop of the commit job.
 * Grows the base if it is shorter than the top, then walks the top image
 * chunk by chunk, copying allocated data down into the base.
 */
static int coroutine_fn commit_run(Job *job, Error **errp)
{
    CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
    int64_t offset;
    int ret = 0;
    int64_t n = 0; /* bytes */
    QEMU_AUTO_VFREE void *buf = NULL;
    int64_t len, base_len;

    len = blk_co_getlength(s->top);
    if (len < 0) {
        return len;
    }
    job_progress_set_remaining(&s->common.job, len);

    base_len = blk_co_getlength(s->base);
    if (base_len < 0) {
        return base_len;
    }

    /* The base must be able to hold everything in the top image */
    if (base_len < len) {
        ret = blk_co_truncate(s->base, len, false, PREALLOC_MODE_OFF, 0, NULL);
        if (ret) {
            return ret;
        }
    }

    buf = blk_blockalign(s->top, COMMIT_BUFFER_SIZE);

    for (offset = 0; offset < len; offset += n) {
        /* Note that even when no rate limit is applied we need to yield
         * with no pending I/O here so that bdrv_drain_all() returns.
         */
        block_job_ratelimit_sleep(&s->common);
        if (job_is_cancelled(&s->common.job)) {
            break;
        }

        /* n is updated to the number of bytes actually processed */
        ret = commit_iteration(s, offset, &n, buf);

        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}
256
/* Job driver vtable for the live commit block job */
static const BlockJobDriver commit_job_driver = {
    .job_driver = {
        .instance_size = sizeof(CommitBlockJob),
        .job_type      = JOB_TYPE_COMMIT,
        .free          = block_job_free,
        .user_resume   = block_job_user_resume,
        .run           = commit_run,
        .prepare       = commit_prepare,
        .abort         = commit_abort,
        .clean         = commit_clean
    },
};
269
/*
 * Read callback of the commit_top filter: forward reads unmodified to the
 * backing node, since the filter itself holds no data.
 */
static int coroutine_fn GRAPH_RDLOCK
bdrv_commit_top_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                       QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}
276
/* Present the backing file's filename as this filter node's own filename */
static GRAPH_RDLOCK void bdrv_commit_top_refresh_filename(BlockDriverState *bs)
{
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
}
Kevin Wolf91965652017-03-08 15:07:12 +0100282
Kevin Wolf8dfba272017-01-16 16:22:34 +0100283static void bdrv_commit_top_child_perm(BlockDriverState *bs, BdrvChild *c,
Max Reitzbf8e9252020-05-13 13:05:16 +0200284 BdrvChildRole role,
Kevin Wolfe0995dc2017-09-14 12:47:11 +0200285 BlockReopenQueue *reopen_queue,
Kevin Wolf8dfba272017-01-16 16:22:34 +0100286 uint64_t perm, uint64_t shared,
287 uint64_t *nperm, uint64_t *nshared)
288{
289 *nperm = 0;
290 *nshared = BLK_PERM_ALL;
291}
292
/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_commit_top = {
    .format_name                = "commit_top",
    .bdrv_co_preadv             = bdrv_commit_top_preadv,
    .bdrv_refresh_filename      = bdrv_commit_top_refresh_filename,
    .bdrv_child_perm            = bdrv_commit_top_child_perm,

    /* A transparent filter whose (only) filtered child is the backing file */
    .is_filter                  = true,
    .filtered_child_is_backing  = true,
};
304
/*
 * Create and start a live commit block job that merges the data between
 * @top and @base into @base.
 *
 * @job_id: ID for the new job (may be NULL for an auto-generated one)
 * @bs: the root node the job is attached to
 * @base: node that the committed data is written into
 * @top: topmost node whose data is committed down
 * @creation_flags, @speed, @on_error: standard block job parameters
 * @backing_file_str: value recorded as the new backing file string when the
 *                    intermediate nodes are dropped
 * @filter_node_name: node name for the inserted commit filter, or NULL to
 *                    create an implicit (user-invisible) filter node
 *
 * On failure, sets @errp and rolls back all graph changes made so far.
 */
void commit_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *base, BlockDriverState *top,
                  int creation_flags, int64_t speed,
                  BlockdevOnError on_error, const char *backing_file_str,
                  bool backing_mask_protocol,
                  const char *filter_node_name, Error **errp)
{
    CommitBlockJob *s;
    BlockDriverState *iter;
    BlockDriverState *commit_top_bs = NULL;
    BlockDriverState *filtered_base;
    int64_t base_size, top_size;
    uint64_t base_perms, iter_shared_perms;
    int ret;

    GLOBAL_STATE_CODE();

    assert(top != bs);
    bdrv_graph_rdlock_main_loop();
    if (bdrv_skip_filters(top) == bdrv_skip_filters(base)) {
        error_setg(errp, "Invalid files for merge: top and base are the same");
        bdrv_graph_rdunlock_main_loop();
        return;
    }
    bdrv_graph_rdunlock_main_loop();

    base_size = bdrv_getlength(base);
    if (base_size < 0) {
        error_setg_errno(errp, -base_size, "Could not inquire base image size");
        return;
    }

    top_size = bdrv_getlength(top);
    if (top_size < 0) {
        error_setg_errno(errp, -top_size, "Could not inquire top image size");
        return;
    }

    /* Only request RESIZE on base if we may actually have to grow it */
    base_perms = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE;
    if (base_size < top_size) {
        base_perms |= BLK_PERM_RESIZE;
    }

    s = block_job_create(job_id, &commit_job_driver, NULL, bs, 0, BLK_PERM_ALL,
                         speed, creation_flags, NULL, NULL, errp);
    if (!s) {
        return;
    }

    /* convert base to r/w, if necessary */
    s->base_read_only = bdrv_is_read_only(base);
    if (s->base_read_only) {
        if (bdrv_reopen_set_read_only(base, false, errp) != 0) {
            goto fail;
        }
    }

    /* Insert commit_top block node above top, so we can block consistent read
     * on the backing chain below it */
    commit_top_bs = bdrv_new_open_driver(&bdrv_commit_top, filter_node_name, 0,
                                         errp);
    if (commit_top_bs == NULL) {
        goto fail;
    }
    if (!filter_node_name) {
        commit_top_bs->implicit = true;
    }

    /* So that we can always drop this node */
    commit_top_bs->never_freeze = true;

    commit_top_bs->total_sectors = top->total_sectors;

    ret = bdrv_append(commit_top_bs, top, errp);
    bdrv_unref(commit_top_bs); /* referenced by new parents or failed */
    if (ret < 0) {
        commit_top_bs = NULL;
        goto fail;
    }

    s->commit_top_bs = commit_top_bs;

    /*
     * Block all nodes between top and base, because they will
     * disappear from the chain after this operation.
     * Note that this assumes that the user is fine with removing all
     * nodes (including R/W filters) between top and base. Assuring
     * this is the responsibility of the interface (i.e. whoever calls
     * commit_start()).
     */
    bdrv_drain_all_begin();
    bdrv_graph_wrlock();
    s->base_overlay = bdrv_find_overlay(top, base);
    assert(s->base_overlay);

    /*
     * The topmost node with
     * bdrv_skip_filters(filtered_base) == bdrv_skip_filters(base)
     */
    filtered_base = bdrv_cow_bs(s->base_overlay);
    assert(bdrv_skip_filters(filtered_base) == bdrv_skip_filters(base));

    /*
     * XXX BLK_PERM_WRITE needs to be allowed so we don't block ourselves
     * at s->base (if writes are blocked for a node, they are also blocked
     * for its backing file). The other options would be a second filter
     * driver above s->base.
     */
    iter_shared_perms = BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE;

    for (iter = top; iter != base; iter = bdrv_filter_or_cow_bs(iter)) {
        if (iter == filtered_base) {
            /*
             * From here on, all nodes are filters on the base. This
             * allows us to share BLK_PERM_CONSISTENT_READ.
             */
            iter_shared_perms |= BLK_PERM_CONSISTENT_READ;
        }

        ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                 iter_shared_perms, errp);
        if (ret < 0) {
            /* Release the writer lock and drain before bailing out */
            bdrv_graph_wrunlock();
            bdrv_drain_all_end();
            goto fail;
        }
    }

    if (bdrv_freeze_backing_chain(commit_top_bs, base, errp) < 0) {
        bdrv_graph_wrunlock();
        bdrv_drain_all_end();
        goto fail;
    }
    s->chain_frozen = true;

    ret = block_job_add_bdrv(&s->common, "base", base, 0, BLK_PERM_ALL, errp);
    bdrv_graph_wrunlock();
    bdrv_drain_all_end();

    if (ret < 0) {
        goto fail;
    }

    s->base = blk_new(s->common.job.aio_context,
                      base_perms,
                      BLK_PERM_CONSISTENT_READ
                      | BLK_PERM_WRITE_UNCHANGED);
    ret = blk_insert_bs(s->base, base, errp);
    if (ret < 0) {
        goto fail;
    }
    blk_set_disable_request_queuing(s->base, true);
    s->base_bs = base;

    /* Required permissions are already taken with block_job_add_bdrv() */
    s->top = blk_new(s->common.job.aio_context, 0, BLK_PERM_ALL);
    ret = blk_insert_bs(s->top, top, errp);
    if (ret < 0) {
        goto fail;
    }
    blk_set_disable_request_queuing(s->top, true);

    s->backing_file_str = g_strdup(backing_file_str);
    s->backing_mask_protocol = backing_mask_protocol;
    s->on_error = on_error;

    trace_commit_start(bs, base, top, s);
    job_start(&s->common.job);
    return;

fail:
    if (s->chain_frozen) {
        bdrv_graph_rdlock_main_loop();
        bdrv_unfreeze_backing_chain(commit_top_bs, base);
        bdrv_graph_rdunlock_main_loop();
    }
    if (s->base) {
        blk_unref(s->base);
    }
    if (s->top) {
        blk_unref(s->top);
    }
    if (s->base_read_only) {
        bdrv_reopen_set_read_only(base, true, NULL);
    }
    job_early_fail(&s->common.job);
    /* commit_top_bs has to be replaced after deleting the block job,
     * otherwise this would fail because of lack of permissions. */
    if (commit_top_bs) {
        bdrv_drained_begin(top);
        bdrv_graph_wrlock();
        bdrv_replace_node(commit_top_bs, top, &error_abort);
        bdrv_graph_wrunlock();
        bdrv_drained_end(top);
    }
}
Kevin Wolf83fd6dd2016-05-30 15:53:15 +0200501
502
Eric Blaked6a644b2017-07-07 07:44:57 -0500503#define COMMIT_BUF_SIZE (2048 * BDRV_SECTOR_SIZE)
Kevin Wolf83fd6dd2016-05-30 15:53:15 +0200504
/*
 * commit COW file into the raw image
 *
 * Synchronously copies all allocated data of @bs into its backing file and
 * then empties @bs. Temporarily inserts a commit_top filter above the
 * backing file so that it can be written to, and restores the original
 * graph (and the backing file's read-only state) before returning.
 *
 * Returns 0 on success, a negative errno on failure.
 */
int bdrv_commit(BlockDriverState *bs)
{
    BlockBackend *src, *backing;
    BlockDriverState *backing_file_bs = NULL;
    BlockDriverState *commit_top_bs = NULL;
    BlockDriver *drv = bs->drv;
    AioContext *ctx;
    int64_t offset, length, backing_length;
    int ro;
    int64_t n;
    int ret = 0;
    QEMU_AUTO_VFREE uint8_t *buf = NULL;
    Error *local_err = NULL;

    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    if (!drv)
        return -ENOMEDIUM;

    backing_file_bs = bdrv_cow_bs(bs);

    if (!backing_file_bs) {
        return -ENOTSUP;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT_SOURCE, NULL) ||
        bdrv_op_is_blocked(backing_file_bs, BLOCK_OP_TYPE_COMMIT_TARGET, NULL))
    {
        return -EBUSY;
    }

    /* Remember the r/o state so it can be restored in ro_cleanup */
    ro = bdrv_is_read_only(backing_file_bs);

    if (ro) {
        if (bdrv_reopen_set_read_only(backing_file_bs, false, NULL)) {
            return -EACCES;
        }
    }

    ctx = bdrv_get_aio_context(bs);
    /* WRITE_UNCHANGED is required for bdrv_make_empty() */
    src = blk_new(ctx, BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED,
                  BLK_PERM_ALL);
    backing = blk_new(ctx, BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL);

    ret = blk_insert_bs(src, bs, &local_err);
    if (ret < 0) {
        error_report_err(local_err);
        goto ro_cleanup;
    }

    /* Insert commit_top block node above backing, so we can write to it */
    commit_top_bs = bdrv_new_open_driver(&bdrv_commit_top, NULL, BDRV_O_RDWR,
                                         &local_err);
    if (commit_top_bs == NULL) {
        error_report_err(local_err);
        goto ro_cleanup;
    }

    bdrv_set_backing_hd(commit_top_bs, backing_file_bs, &error_abort);
    bdrv_set_backing_hd(bs, commit_top_bs, &error_abort);

    ret = blk_insert_bs(backing, backing_file_bs, &local_err);
    if (ret < 0) {
        error_report_err(local_err);
        goto ro_cleanup;
    }

    length = blk_getlength(src);
    if (length < 0) {
        ret = length;
        goto ro_cleanup;
    }

    backing_length = blk_getlength(backing);
    if (backing_length < 0) {
        ret = backing_length;
        goto ro_cleanup;
    }

    /* If our top snapshot is larger than the backing file image,
     * grow the backing file image if possible. If not possible,
     * we must return an error */
    if (length > backing_length) {
        ret = blk_truncate(backing, length, false, PREALLOC_MODE_OFF, 0,
                           &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            goto ro_cleanup;
        }
    }

    /* blk_try_blockalign() for src will choose an alignment that works for
     * backing as well, so no need to compare the alignment manually. */
    buf = blk_try_blockalign(src, COMMIT_BUF_SIZE);
    if (buf == NULL) {
        ret = -ENOMEM;
        goto ro_cleanup;
    }

    /* Copy every allocated chunk of the top image into the backing file */
    for (offset = 0; offset < length; offset += n) {
        ret = bdrv_is_allocated(bs, offset, COMMIT_BUF_SIZE, &n);
        if (ret < 0) {
            goto ro_cleanup;
        }
        if (ret) {
            ret = blk_pread(src, offset, n, buf, 0);
            if (ret < 0) {
                goto ro_cleanup;
            }

            ret = blk_pwrite(backing, offset, n, buf, 0);
            if (ret < 0) {
                goto ro_cleanup;
            }
        }
    }

    ret = blk_make_empty(src, NULL);
    /* Ignore -ENOTSUP */
    if (ret < 0 && ret != -ENOTSUP) {
        goto ro_cleanup;
    }

    blk_flush(src);

    /*
     * Make sure all data we wrote to the backing device is actually
     * stable on disk.
     */
    blk_flush(backing);

    ret = 0;
ro_cleanup:
    blk_unref(backing);
    /* Remove the temporary commit_top filter again, if still inserted */
    if (bdrv_cow_bs(bs) != backing_file_bs) {
        bdrv_set_backing_hd(bs, backing_file_bs, &error_abort);
    }
    bdrv_unref(commit_top_bs);
    blk_unref(src);

    if (ro) {
        /* ignoring error return here */
        bdrv_reopen_set_read_only(backing_file_bs, true, NULL);
    }

    return ret;
}