author     Alex Williamson <alex.williamson@redhat.com>    2010-06-25 11:08:38 -0600
committer  Anthony Liguori <aliguori@us.ibm.com>           2010-07-06 10:36:27 -0500
commit     d17b5288d91c935cc8795fa0620721da0a3865e1 (patch)
tree       23d324b4fd378a8fd2e1f285758a76a11ce19459 /arch_init.c
parent     f292787d9addffd5f0a2df9516c158bfb5792b61 (diff)
Remove uses of ram.last_offset (aka last_ram_offset)
We currently need this either to allocate the next ram_addr_t for a new block, or to report the total memory to be migrated. Both of these can be calculated without it, so there is no need to keep ourselves in a contiguous address space.

Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
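The replacement ram_bytes_total() simply walks the RAM block list and sums each block's length, so no running last_offset has to be maintained and blocks need not live in one contiguous range. A rough standalone sketch of that pattern, with a simplified singly linked list standing in for QEMU's QLIST of RAMBlock structures (names here are illustrative only, not QEMU's):

    #include <stddef.h>
    #include <stdint.h>

    /* Simplified stand-in for QEMU's RAMBlock list; names are hypothetical. */
    struct block {
        uint64_t length;        /* size of this RAM block in bytes */
        struct block *next;     /* next block in the list */
    };

    /* Sum the sizes of all blocks; no contiguous address space is assumed. */
    static uint64_t bytes_total(const struct block *head)
    {
        uint64_t total = 0;

        for (const struct block *b = head; b != NULL; b = b->next) {
            total += b->length;
        }
        return total;
    }

The migration code can then treat this sum purely as a byte count, both for walking the dirty bitmap and for the modulo wrap-around of current_addr, as the hunks below show.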
Diffstat (limited to 'arch_init.c')
-rw-r--r--  arch_init.c | 23
1 file changed, 16 insertions(+), 7 deletions(-)
diff --git a/arch_init.c b/arch_init.c
index eb5b67cd3e..109dcefe32 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -108,9 +108,10 @@ static int ram_save_block(QEMUFile *f)
     static ram_addr_t current_addr = 0;
     ram_addr_t saved_addr = current_addr;
     ram_addr_t addr = 0;
+    uint64_t total_ram = ram_bytes_total();
     int bytes_sent = 0;
 
-    while (addr < ram_list.last_offset) {
+    while (addr < total_ram) {
         if (cpu_physical_memory_get_dirty(current_addr, MIGRATION_DIRTY_FLAG)) {
             uint8_t *p;
 
@@ -133,7 +134,7 @@ static int ram_save_block(QEMUFile *f)
             break;
         }
         addr += TARGET_PAGE_SIZE;
-        current_addr = (saved_addr + addr) % ram_list.last_offset;
+        current_addr = (saved_addr + addr) % total_ram;
     }
 
     return bytes_sent;
@@ -145,8 +146,9 @@ static ram_addr_t ram_save_remaining(void)
 {
     ram_addr_t addr;
     ram_addr_t count = 0;
+    uint64_t total_ram = ram_bytes_total();
 
-    for (addr = 0; addr < ram_list.last_offset; addr += TARGET_PAGE_SIZE) {
+    for (addr = 0; addr < total_ram; addr += TARGET_PAGE_SIZE) {
         if (cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG)) {
             count++;
         }
@@ -167,7 +169,13 @@ uint64_t ram_bytes_transferred(void)
 
 uint64_t ram_bytes_total(void)
 {
-    return ram_list.last_offset;
+    RAMBlock *block;
+    uint64_t total = 0;
+
+    QLIST_FOREACH(block, &ram_list.blocks, next)
+        total += block->length;
+
+    return total;
 }
 
 int ram_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
@@ -188,10 +196,11 @@ int ram_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
     }
 
     if (stage == 1) {
+        uint64_t total_ram = ram_bytes_total();
         bytes_transferred = 0;
 
         /* Make sure all dirty bits are set */
-        for (addr = 0; addr < ram_list.last_offset; addr += TARGET_PAGE_SIZE) {
+        for (addr = 0; addr < total_ram; addr += TARGET_PAGE_SIZE) {
             if (!cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG)) {
                 cpu_physical_memory_set_dirty(addr);
             }
@@ -200,7 +209,7 @@ int ram_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
         /* Enable dirty memory tracking */
         cpu_physical_memory_set_dirty_tracking(1);
 
-        qemu_put_be64(f, ram_list.last_offset | RAM_SAVE_FLAG_MEM_SIZE);
+        qemu_put_be64(f, total_ram | RAM_SAVE_FLAG_MEM_SIZE);
     }
 
     bytes_transferred_last = bytes_transferred;
@@ -259,7 +268,7 @@ int ram_load(QEMUFile *f, void *opaque, int version_id)
         addr &= TARGET_PAGE_MASK;
 
         if (flags & RAM_SAVE_FLAG_MEM_SIZE) {
-            if (addr != ram_list.last_offset) {
+            if (addr != ram_bytes_total()) {
                 return -EINVAL;
             }
         }