path: root/arch_init.c
author    Peter Lieven <pl@kamp.de>  2013-03-26 10:58:37 +0100
committer Juan Quintela <quintela@redhat.com>  2013-03-26 13:32:33 +0100
commit    f1c72795af573b24a7da5eb52375c9aba8a37972 (patch)
tree      9475bdb59345550bb248fad75fa25e598811fe51 /arch_init.c
parent    78d07ae7ac74bcc7f79aeefbaff17fb142f44b4d (diff)
migration: do not send zero pages in bulk stage
During the bulk stage of RAM migration, if a page is a zero page, do not send it at all; the memory at the destination reads as zero anyway. Even if there is an madvise with QEMU_MADV_DONTNEED at the target upon receipt of a zero page, I have observed that the target starts swapping if the memory is overcommitted; it seems that the pages are dropped asynchronously. This patch also updates QMP to return the number of skipped pages in MigrationStats.

Signed-off-by: Peter Lieven <pl@kamp.de>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
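The patch relies on is_zero_page(p) to decide whether a page can be skipped. As a minimal, self-contained sketch of such a check, assuming a plain byte-by-byte scan over a hypothetical 4 KiB page (the actual QEMU helper and TARGET_PAGE_SIZE may differ; the real scan may be optimized):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Return true if every byte of the page is zero. */
static bool page_is_zero(const uint8_t *page, size_t len)
{
    for (size_t i = 0; i < len; i++) {
        if (page[i] != 0) {
            return false;
        }
    }
    return true;
}

int main(void)
{
    uint8_t page[PAGE_SIZE];

    memset(page, 0, sizeof(page));
    printf("all-zero page: %d\n", page_is_zero(page, sizeof(page)));  /* prints 1 */

    page[123] = 0xff;                                                 /* dirty one byte */
    printf("dirtied page:  %d\n", page_is_zero(page, sizeof(page)));  /* prints 0 */

    return 0;
}

During the bulk stage every page is being transferred for the first time, so a check like this lets the sender account the page as skipped instead of emitting even the small compressed-page header and marker byte shown in the diff below.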
Diffstat (limited to 'arch_init.c')
-rw-r--r--  arch_init.c  24
1 file changed, 20 insertions, 4 deletions
diff --git a/arch_init.c b/arch_init.c
index 1291bd2b3a..3a0d02eafa 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -183,6 +183,7 @@ int64_t xbzrle_cache_resize(int64_t new_size)
/* accounting for migration statistics */
typedef struct AccountingInfo {
uint64_t dup_pages;
+ uint64_t skipped_pages;
uint64_t norm_pages;
uint64_t iterations;
uint64_t xbzrle_bytes;
@@ -208,6 +209,16 @@ uint64_t dup_mig_pages_transferred(void)
return acct_info.dup_pages;
}
+uint64_t skipped_mig_bytes_transferred(void)
+{
+ return acct_info.skipped_pages * TARGET_PAGE_SIZE;
+}
+
+uint64_t skipped_mig_pages_transferred(void)
+{
+ return acct_info.skipped_pages;
+}
+
uint64_t norm_mig_bytes_transferred(void)
{
return acct_info.norm_pages * TARGET_PAGE_SIZE;
@@ -440,10 +451,15 @@ static int ram_save_block(QEMUFile *f, bool last_stage)
bytes_sent = -1;
if (is_zero_page(p)) {
acct_info.dup_pages++;
- bytes_sent = save_block_hdr(f, block, offset, cont,
- RAM_SAVE_FLAG_COMPRESS);
- qemu_put_byte(f, 0);
- bytes_sent++;
+ if (!ram_bulk_stage) {
+ bytes_sent = save_block_hdr(f, block, offset, cont,
+ RAM_SAVE_FLAG_COMPRESS);
+ qemu_put_byte(f, 0);
+ bytes_sent++;
+ } else {
+ acct_info.skipped_pages++;
+ bytes_sent = 0;
+ }
} else if (migrate_use_xbzrle()) {
current_addr = block->offset + offset;
bytes_sent = save_xbzrle_page(f, p, current_addr, block,
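A note on the last hunk: the skipped branch sets bytes_sent = 0 rather than leaving it at -1 because the caller (not shown in this diff) treats a negative return from ram_save_block() as "no dirty page found" and stops the current pass. The following stand-alone sketch is hypothetical (fake_save_block() and its canned results are illustrative only, not QEMU code); it merely shows that a zero return keeps such a send loop running while contributing nothing to the byte count:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for ram_save_block(): -1 means no dirty page is
 * left, 0 means a zero page was skipped, and a positive value is the
 * number of bytes actually put on the wire. */
static int fake_save_block(int call)
{
    static const int results[] = { 512, 0, 0, 512, -1 };
    return results[call];
}

int main(void)
{
    uint64_t bytes_transferred = 0;

    for (int i = 0; ; i++) {
        int bytes_sent = fake_save_block(i);
        if (bytes_sent < 0) {
            break;                        /* no more dirty pages this pass */
        }
        bytes_transferred += bytes_sent;  /* skipped zero pages add 0 */
    }

    printf("bytes transferred: %llu\n", (unsigned long long)bytes_transferred);
    return 0;
}

This prints 1024: the two skipped pages cost nothing on the wire, yet the loop does not terminate early.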