path: root/arch_init.c
author     Pierre Riteau <Pierre.Riteau@irisa.fr>      2010-05-12 15:12:44 +0200
committer  Anthony Liguori <aliguori@us.ibm.com>       2010-06-01 12:53:09 -0500
commit     3fc250b4be58cac2e392d8d01949f9e8471c36f9 (patch)
tree       b5d4474edb117b2cf5a0834869c5cb292fcbdef5 /arch_init.c
parent     a132a679c33ae2f8f6935f92c2a8043015cd917c (diff)
migration: Fix calculation of bytes_transferred
When a page with all identical bytes is transferred, it is counted as a full
page (TARGET_PAGE_SIZE) although only one byte is actually sent. Fix this by
changing ram_save_block() to return the number of bytes sent instead of a
boolean value. This makes bandwidth estimation, and consequently downtime
estimation, more precise.

Signed-off-by: Pierre Riteau <Pierre.Riteau@irisa.fr>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
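For context, the one-byte case in the diff below comes from the duplicate-page check in ram_save_block(): a page whose bytes are all identical is sent as a flag plus a single byte rather than a full page. The following is a minimal sketch of such a check, not the actual is_dup_page() from arch_init.c (which compares the page in larger word- or vector-sized chunks); the TARGET_PAGE_SIZE value here is illustrative only.

#include <stdint.h>

#define TARGET_PAGE_SIZE 4096   /* illustrative only; the real value is per-target */

/* Sketch: return non-zero if every byte of the page equals ch, meaning the
 * page can be transferred as RAM_SAVE_FLAG_COMPRESS plus one byte instead
 * of TARGET_PAGE_SIZE bytes. */
static int is_dup_page_sketch(const uint8_t *page, uint8_t ch)
{
    int i;

    for (i = 0; i < TARGET_PAGE_SIZE; i++) {
        if (page[i] != ch) {
            return 0;
        }
    }
    return 1;
}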
Diffstat (limited to 'arch_init.c')
-rw-r--r--  arch_init.c  |  21 ++++++++++++---------
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/arch_init.c b/arch_init.c
index cfc03ead53..8e849a885e 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -108,7 +108,7 @@ static int ram_save_block(QEMUFile *f)
     static ram_addr_t current_addr = 0;
     ram_addr_t saved_addr = current_addr;
     ram_addr_t addr = 0;
-    int found = 0;
+    int bytes_sent = 0;
 
     while (addr < last_ram_offset) {
         if (cpu_physical_memory_get_dirty(current_addr, MIGRATION_DIRTY_FLAG)) {
@@ -123,19 +123,20 @@ static int ram_save_block(QEMUFile *f)
             if (is_dup_page(p, *p)) {
                 qemu_put_be64(f, current_addr | RAM_SAVE_FLAG_COMPRESS);
                 qemu_put_byte(f, *p);
+                bytes_sent = 1;
             } else {
                 qemu_put_be64(f, current_addr | RAM_SAVE_FLAG_PAGE);
                 qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
+                bytes_sent = TARGET_PAGE_SIZE;
             }
 
-            found = 1;
             break;
         }
         addr += TARGET_PAGE_SIZE;
         current_addr = (saved_addr + addr) % last_ram_offset;
     }
 
-    return found;
+    return bytes_sent;
 }
 
 static uint64_t bytes_transferred;
@@ -206,11 +207,11 @@ int ram_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
     bwidth = qemu_get_clock_ns(rt_clock);
 
     while (!qemu_file_rate_limit(f)) {
-        int ret;
+        int bytes_sent;
 
-        ret = ram_save_block(f);
-        bytes_transferred += ret * TARGET_PAGE_SIZE;
-        if (ret == 0) { /* no more blocks */
+        bytes_sent = ram_save_block(f);
+        bytes_transferred += bytes_sent;
+        if (bytes_sent == 0) { /* no more blocks */
             break;
         }
     }
@@ -226,9 +227,11 @@ int ram_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
 
     /* try transferring iterative blocks of memory */
     if (stage == 3) {
+        int bytes_sent;
+
         /* flush all remaining blocks regardless of rate limiting */
-        while (ram_save_block(f) != 0) {
-            bytes_transferred += TARGET_PAGE_SIZE;
+        while ((bytes_sent = ram_save_block(f)) != 0) {
+            bytes_transferred += bytes_sent;
         }
         cpu_physical_memory_set_dirty_tracking(0);
     }
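Why the accounting precision matters: ram_save_live() turns bytes_transferred into a bandwidth figure and uses it to estimate how long the remaining dirty RAM would take to flush, which gates migration convergence. The helper below is a hypothetical illustration of that reasoning, not the actual arch_init.c code; the names, parameters, and nanosecond units are assumptions. Counting a duplicate page as TARGET_PAGE_SIZE inflates the measured bandwidth and makes the downtime estimate optimistic.

#include <stdint.h>

/* Hypothetical helper (not from arch_init.c): decide whether the remaining
 * dirty memory could be flushed within the allowed downtime, given the
 * bandwidth observed during the last iteration. */
static int migration_converged(uint64_t bytes_sent_this_iter,
                               uint64_t elapsed_ns,
                               uint64_t remaining_dirty_bytes,
                               uint64_t max_downtime_ns)
{
    /* Bytes per nanosecond actually achieved in the last iteration. */
    double bwidth = (double)bytes_sent_this_iter / (double)elapsed_ns;

    /* Predicted stop-and-copy time for the pages that are still dirty. */
    double expected_downtime_ns = (double)remaining_dirty_bytes / bwidth;

    return expected_downtime_ns <= (double)max_downtime_ns;
}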