author    Anthony PERARD <anthony.perard@citrix.com>    2012-01-18 12:21:38 +0000
committer Stefano Stabellini <stefano.stabellini@eu.citrix.com>    2012-03-19 18:21:12 +0000
commit    cd1ba7de230b3a85fb4dba53bb681b7ea626b4eb (patch)
tree      34322fda4fe9cf32e9e6db52485b2c69ee2e99b2 /xen-mapcache.c
parent    d1814e08c0409d9f352f9ea7fab377bcee1286b8 (diff)
xen mapcache: check if memory region has moved.
This patch changes the xen_map_cache behavior. Before trying to map a guest address, the mapcache now looks into the list of address ranges that have been moved (physmap/set_memory). There is currently one memory region like this, the vram, "moved" from where it is allocated to where the guest expects to find it. This helps to achieve a successful migration.

Signed-off-by: Anthony PERARD <anthony.perard@citrix.com>
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
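The translation hook registered through xen_map_cache_init() is expected to check whether a physical offset falls inside one of the relocated ranges and, if so, return the guest address the range was moved to. A minimal sketch of such a phys_offset_to_gaddr_t callback, assuming a hypothetical XenPhysmap list that records the relocations (names and fields are illustrative, not the actual QEMU implementation):

    /* Hypothetical record of a relocated memory region (e.g. the vram). */
    typedef struct XenPhysmap {
        target_phys_addr_t start_addr;   /* where the guest looks for the region */
        ram_addr_t size;
        target_phys_addr_t phys_offset;  /* where it was originally allocated */
        QLIST_ENTRY(XenPhysmap) list;
    } XenPhysmap;

    static QLIST_HEAD(, XenPhysmap) physmap = QLIST_HEAD_INITIALIZER(physmap);

    /* Sketch of a phys_offset_to_gaddr_t callback: if phys_addr lies inside a
     * range that has been moved by the physmap/set_memory path, return the
     * corresponding guest address; otherwise return the address unchanged. */
    static target_phys_addr_t xen_phys_offset_to_gaddr(target_phys_addr_t phys_addr,
                                                       target_phys_addr_t size,
                                                       void *opaque)
    {
        XenPhysmap *entry;

        QLIST_FOREACH(entry, &physmap, list) {
            if (entry->phys_offset <= phys_addr &&
                phys_addr + size <= entry->phys_offset + entry->size) {
                return phys_addr - entry->phys_offset + entry->start_addr;
            }
        }
        return phys_addr;
    }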
Diffstat (limited to 'xen-mapcache.c')
-rw-r--r--   xen-mapcache.c | 22
1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/xen-mapcache.c b/xen-mapcache.c
index 585b559c73..a456479363 100644
--- a/xen-mapcache.c
+++ b/xen-mapcache.c
@@ -78,6 +78,9 @@ typedef struct MapCache {
uint8_t *last_address_vaddr;
unsigned long max_mcache_size;
unsigned int mcache_bucket_shift;
+
+ phys_offset_to_gaddr_t phys_offset_to_gaddr;
+ void *opaque;
} MapCache;
static MapCache *mapcache;
@@ -91,13 +94,16 @@ static inline int test_bits(int nr, int size, const unsigned long *addr)
return 0;
}
-void xen_map_cache_init(void)
+void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
{
unsigned long size;
struct rlimit rlimit_as;
mapcache = g_malloc0(sizeof (MapCache));
+ mapcache->phys_offset_to_gaddr = f;
+ mapcache->opaque = opaque;
+
QTAILQ_INIT(&mapcache->locked_entries);
mapcache->last_address_index = -1;
@@ -193,9 +199,14 @@ uint8_t *xen_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size,
uint8_t lock)
{
MapCacheEntry *entry, *pentry = NULL;
- target_phys_addr_t address_index = phys_addr >> MCACHE_BUCKET_SHIFT;
- target_phys_addr_t address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);
+ target_phys_addr_t address_index;
+ target_phys_addr_t address_offset;
target_phys_addr_t __size = size;
+ bool translated = false;
+
+tryagain:
+ address_index = phys_addr >> MCACHE_BUCKET_SHIFT;
+ address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);
trace_xen_map_cache(phys_addr);
@@ -237,6 +248,11 @@ uint8_t *xen_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size,
if(!test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
entry->valid_mapping)) {
mapcache->last_address_index = -1;
+ if (!translated && mapcache->phys_offset_to_gaddr) {
+ phys_addr = mapcache->phys_offset_to_gaddr(phys_addr, size, mapcache->opaque);
+ translated = true;
+ goto tryagain;
+ }
trace_xen_map_cache_return(NULL);
return NULL;
}
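With the new xen_map_cache_init() signature, the caller registers the translation callback (and an opaque pointer handed back on every lookup miss) when setting up the mapcache. A hedged usage sketch; the callback and the state pointer are the illustrative names from the example above:

    /* During Xen HVM setup (sketch): register the translation hook so the
     * mapcache can retry a failed mapping with the relocated guest address. */
    xen_map_cache_init(xen_phys_offset_to_gaddr, state);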