path: root/memory.c
author    Peter Xu <peterx@redhat.com>            2019-08-20 22:13:25 +0800
committer Paolo Bonzini <pbonzini@redhat.com>     2019-08-21 16:31:58 +0200
commit    23f1174aac4181f86bb7e13ca8bc2d4a0bdf1e5c (patch)
tree      a5116df79890fe7efda4192dff26bdb1dd5c79bc /memory.c
parent    9c1aa1c235c770d84462d482460a96e957e95b9c (diff)
memory: Split zones when doing coalesced_io_del()
This is a workaround for the current KVM KVM_UNREGISTER_COALESCED_MMIO
interface.  The kernel interface only allows unregistering an MMIO device
with exactly the zone size it was registered with, or with a smaller zone
that is contained within the registered MMIO zone.  It does not let
userspace specify one very large zone to remove all the small MMIO devices
that the zone covers.

Logically it would be nicer to fix this on the KVM side, but in any case we
still need to cope with old kernels, so let's do it here.

Fixes: 3ac7d43a6fbb5d4a3
Signed-off-by: Peter Xu <peterx@redhat.com>
Message-Id: <20190820141328.10009-2-peterx@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
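For context, the kernel interface the message describes can be exercised from
userspace roughly as follows.  This is a minimal sketch, not code from this
patch: vm_fd stands for a VM file descriptor obtained via KVM_CREATE_VM, and
the zone addresses are made-up placeholders.  Because
KVM_UNREGISTER_COALESCED_MMIO only matches a request that fits inside a single
previously registered zone, each small zone has to be dropped with its own
ioctl rather than with one large covering zone:

#include <stddef.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: drop coalesced MMIO zones registered earlier. */
static void drop_coalesced_zones(int vm_fd)
{
    /* Two small zones previously set up with KVM_REGISTER_COALESCED_MMIO. */
    struct kvm_coalesced_mmio_zone zones[] = {
        { .addr = 0xfe000000, .size = 0x1000 },
        { .addr = 0xfe002000, .size = 0x1000 },
    };

    /*
     * Passing one covering zone such as { 0xfe000000, 0x4000 } would remove
     * nothing, since it is not contained in either registered zone.  So
     * unregister each zone with (at most) the range it was registered with.
     */
    for (size_t i = 0; i < sizeof(zones) / sizeof(zones[0]); i++) {
        if (ioctl(vm_fd, KVM_UNREGISTER_COALESCED_MMIO, &zones[i]) < 0) {
            perror("KVM_UNREGISTER_COALESCED_MMIO");
        }
    }
}

The patch below performs the same per-range split on the QEMU side, issuing
one coalesced_io_del notification per registered CoalescedMemoryRange instead
of one notification covering the whole FlatRange.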
Diffstat (limited to 'memory.c')
-rw-r--r--  memory.c  49
1 file changed, 35 insertions(+), 14 deletions(-)
diff --git a/memory.c b/memory.c
index 9a1193a144..7124274f49 100644
--- a/memory.c
+++ b/memory.c
@@ -855,8 +855,39 @@ static void address_space_update_ioeventfds(AddressSpace *as)
     flatview_unref(view);
 }
 
+/*
+ * Notify the memory listeners about the coalesced IO change events of
+ * range `cmr'.  Only the part that has intersection of the specified
+ * FlatRange will be sent.
+ */
+static void flat_range_coalesced_io_notify(FlatRange *fr, AddressSpace *as,
+                                           CoalescedMemoryRange *cmr, bool add)
+{
+    AddrRange tmp;
+
+    tmp = addrrange_shift(cmr->addr,
+                          int128_sub(fr->addr.start,
+                                     int128_make64(fr->offset_in_region)));
+    if (!addrrange_intersects(tmp, fr->addr)) {
+        return;
+    }
+    tmp = addrrange_intersection(tmp, fr->addr);
+
+    if (add) {
+        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
+                                      int128_get64(tmp.start),
+                                      int128_get64(tmp.size));
+    } else {
+        MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
+                                      int128_get64(tmp.start),
+                                      int128_get64(tmp.size));
+    }
+}
+
 static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
 {
+    CoalescedMemoryRange *cmr;
+
     if (!fr->has_coalesced_range) {
         return;
     }
@@ -865,16 +896,15 @@ static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
         return;
     }
 
-    MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
-                                  int128_get64(fr->addr.start),
-                                  int128_get64(fr->addr.size));
+    QTAILQ_FOREACH(cmr, &fr->mr->coalesced, link) {
+        flat_range_coalesced_io_notify(fr, as, cmr, false);
+    }
 }
 
 static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
 {
     MemoryRegion *mr = fr->mr;
     CoalescedMemoryRange *cmr;
-    AddrRange tmp;
 
     if (QTAILQ_EMPTY(&mr->coalesced)) {
         return;
@@ -885,16 +915,7 @@ static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
     }
 
     QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
-        tmp = addrrange_shift(cmr->addr,
-                              int128_sub(fr->addr.start,
-                                         int128_make64(fr->offset_in_region)));
-        if (!addrrange_intersects(tmp, fr->addr)) {
-            continue;
-        }
-        tmp = addrrange_intersection(tmp, fr->addr);
-        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
-                                      int128_get64(tmp.start),
-                                      int128_get64(tmp.size));
+        flat_range_coalesced_io_notify(fr, as, cmr, true);
     }
 }
 