author     John Snow <jsnow@redhat.com>    2019-07-29 16:35:55 -0400
committer  John Snow <jsnow@redhat.com>    2019-08-16 16:28:03 -0400
commit     0fff1f13718a80c12a26dfaee17bdeb45ce51459
tree       ce6e35567e9ee56eccde7b75419bd968a8b6394f /block/backup.c
parent     352092d3828adb67b28d30588e3c4635234e2113
block/backup: improve sync=bitmap work estimates
When making backups based on bitmaps, the work estimate can be more
accurate. Update iotests to reflect the new strategy.

TOP work estimates are broken, but do not get worse with this commit.
That issue is addressed in the following commits instead.

Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20190716000117.25219-7-jsnow@redhat.com
Signed-off-by: John Snow <jsnow@redhat.com>
Diffstat (limited to 'block/backup.c')
-rw-r--r--  block/backup.c  8
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/block/backup.c b/block/backup.c
index f704c83a98..b04ab2d5f0 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -459,9 +459,8 @@ static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
NULL, true);
assert(ret);
- /* TODO job_progress_set_remaining() would make more sense */
- job_progress_update(&job->common.job,
- job->len - bdrv_get_dirty_count(job->copy_bitmap));
+ job_progress_set_remaining(&job->common.job,
+ bdrv_get_dirty_count(job->copy_bitmap));
}
static int coroutine_fn backup_run(Job *job, Error **errp)
@@ -473,12 +472,11 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
QLIST_INIT(&s->inflight_reqs);
qemu_co_rwlock_init(&s->flush_rwlock);
- job_progress_set_remaining(job, s->len);
-
if (s->sync_mode == MIRROR_SYNC_MODE_BITMAP) {
backup_incremental_init_copy_bitmap(s);
} else {
bdrv_set_dirty_bitmap(s->copy_bitmap, 0, s->len);
+ job_progress_set_remaining(job, s->len);
}
s->before_write.notify = backup_before_write_notify;
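
For readers outside the QEMU tree, here is a minimal standalone C sketch of
the two estimation strategies the diff swaps. DISK_LEN, copy_bitmap, and
dirty_count() are hypothetical stand-ins for job->len, job->copy_bitmap, and
bdrv_get_dirty_count(); this is an illustration, not QEMU code.

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

#define DISK_LEN 64   /* total clusters on the disk (hypothetical) */

/* Stand-in for the job's copy bitmap: one bit per dirty cluster. */
static uint64_t copy_bitmap;

/* Count set bits, i.e. clusters that still need copying. */
static int64_t dirty_count(uint64_t bitmap)
{
    int64_t n = 0;
    while (bitmap) {
        n += bitmap & 1;
        bitmap >>= 1;
    }
    return n;
}

int main(void)
{
    /* Pretend the guest dirtied clusters 3, 17, and 40 since the last backup. */
    copy_bitmap = (1ULL << 3) | (1ULL << 17) | (1ULL << 40);

    /*
     * Old strategy: report the whole disk as remaining work, then
     * immediately "advance" progress past all the clean clusters.
     */
    int64_t remaining = DISK_LEN;
    int64_t progress = DISK_LEN - dirty_count(copy_bitmap);
    printf("old: %" PRId64 "/%" PRId64 " done before any copying\n",
           progress, remaining);

    /*
     * New strategy: the remaining work *is* the dirty cluster count,
     * so the estimate starts at 0/3 and tracks real copy work.
     */
    remaining = dirty_count(copy_bitmap);
    printf("new: 0/%" PRId64 " remaining\n", remaining);
    return 0;
}

With the old call, a mostly-clean disk's progress jumps to near 100%
before any data moves, and only the tail end reflects actual copying;
setting the remaining work to the dirty count keeps the reported
progress proportional to real I/O.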