
[Xen-devel] [PATCH V2] qemu-xen-traditional: Fix dirty logging during migration.



This moves the xc_hvm_modified_memory call from cpu_physical_memory_map to
cpu_physical_memory_unmap, because the memory could be migrated before the
device model has written to it.

But because we need to know the guest address, and to avoid writing a new
function for that, the call is moved into qemu_invalidate_entry. The latter
therefore takes two new parameters: the length of the mapping and whether it
was written to.
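
For illustration only, here is a minimal sketch of a hypothetical caller in the
device model (the name dma_write_example and the memcpy are stand-ins, and the
cpu_physical_memory_map signature is assumed to be the usual addr/plen/is_write
one from exec-dm.c); it shows that with this patch the pages are reported to
Xen only in cpu_physical_memory_unmap, i.e. after the write has actually
happened, so a migration pass running in between cannot miss the data:

    /* Hypothetical example; in qemu-xen-traditional this would sit next to
     * the existing users of cpu_physical_memory_map/unmap. */
    static void dma_write_example(target_phys_addr_t guest_addr,
                                  const uint8_t *data,
                                  target_phys_addr_t len)
    {
        target_phys_addr_t plen = len;
        void *host = cpu_physical_memory_map(guest_addr, &plen, 1 /* is_write */);

        if (host == NULL)
            return;

        memcpy(host, data, plen);   /* the device model writes the guest memory */

        /* unmap now calls qemu_invalidate_entry(host, plen, 1), which marks the
         * touched pages dirty via xc_hvm_modified_memory(), so a concurrent
         * live migration resends them after this write rather than before it. */
        cpu_physical_memory_unmap(host, plen, 1 /* is_write */, plen);
    }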

Signed-off-by: Anthony PERARD <anthony.perard@xxxxxxxxxx>
---
Changes since the first version:
  - No more extra function to get the guest address from a pointer;
    the xc_hvm_modified_memory call is moved to qemu_invalidate_entry instead.

 hw/xen_machine_fv.c |   16 ++++++++++++++--
 i386-dm/exec-dm.c   |    6 +-----
 qemu-xen.h          |    2 +-
 3 files changed, 16 insertions(+), 8 deletions(-)

diff --git a/hw/xen_machine_fv.c b/hw/xen_machine_fv.c
index fdad42a..2bb44e0 100644
--- a/hw/xen_machine_fv.c
+++ b/hw/xen_machine_fv.c
@@ -174,7 +174,8 @@ uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, uint8_t lock)
     return last_address_vaddr + address_offset;
 }
 
-void qemu_invalidate_entry(uint8_t *buffer)
+void qemu_invalidate_entry(uint8_t *buffer, target_phys_addr_t access_len,
+                           int was_written)
 {
     struct map_cache *entry = NULL, *pentry = NULL;
     struct map_cache_rev *reventry;
@@ -210,6 +211,17 @@ void qemu_invalidate_entry(uint8_t *buffer)
         fprintf(logfile, "Trying to unmap address %p that is not in the mapcache!\n", buffer);
         return;
     }
+    if (xen_logdirty_enable && was_written) {
+        unsigned long addr = (paddr_index << MCACHE_BUCKET_SHIFT)
+            + ((unsigned long)buffer) - ((unsigned long)entry->vaddr_base);
+        if (access_len == 0)
+            access_len = TARGET_PAGE_SIZE;
+        xc_hvm_modified_memory(xc_handle, domid,
+            addr >> TARGET_PAGE_BITS,
+            ((addr + access_len + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS)
+            - (addr >> TARGET_PAGE_BITS));
+    }
+
     entry->lock--;
     if (entry->lock > 0 || pentry == NULL)
         return;
@@ -265,7 +277,7 @@ uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, uint8_t lock)
 
 void qemu_invalidate_map_cache(void) {};
 
-void qemu_invalidate_entry(uint8_t *buffer) {};
+void qemu_invalidate_entry(uint8_t *buffer, target_phys_addr_t len, int w) {};
 
 #endif /* defined(MAPCACHE) */
 
diff --git a/i386-dm/exec-dm.c b/i386-dm/exec-dm.c
index 96274d9..493146b 100644
--- a/i386-dm/exec-dm.c
+++ b/i386-dm/exec-dm.c
@@ -820,10 +820,6 @@ void *cpu_physical_memory_map(target_phys_addr_t addr,
     if ((*plen) > l)
         *plen = l;
 #endif
-    if (xen_logdirty_enable)
-        xc_hvm_modified_memory(xc_handle, domid, addr >> TARGET_PAGE_BITS,
-                ((addr + l + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS)
-                    - (addr >> TARGET_PAGE_BITS));
 
     return qemu_map_cache(addr, 1);
 }
@@ -835,6 +831,6 @@ void *cpu_physical_memory_map(target_phys_addr_t addr,
 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                                int is_write, target_phys_addr_t access_len)
 {
-    qemu_invalidate_entry(buffer);
+    qemu_invalidate_entry(buffer, access_len, is_write);
     cpu_notify_map_clients();
 }
diff --git a/qemu-xen.h b/qemu-xen.h
index d50c89f..54159bf 100644
--- a/qemu-xen.h
+++ b/qemu-xen.h
@@ -28,7 +28,7 @@ extern int vga_ram_size;
 #endif
 
 uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, uint8_t lock);
-void     qemu_invalidate_entry(uint8_t *buffer);
+void     qemu_invalidate_entry(uint8_t *buffer, target_phys_addr_t len, int w);
 void     qemu_invalidate_map_cache(void);
 
 #define mapcache_lock()   ((void)0)
-- 
Anthony PERARD
