
[Xen-devel] [PATCH v2 2/2] x86/HVM: correct page dirty marking in hvm_map_guest_frame_rw()



Rather than dirtying a page when establishing a (permanent) mapping,
dirty it when the page gets unmapped, or - if still mapped - on the
final iteration of a save operation. Dirtying only at map time is not
enough for long-lived mappings: the dirty bit may be reported and
cleaned by an intermediate migration round, after which later writes
through the mapping would go unnoticed. (Transient mappings continue
to be dirtied at map time, to avoid the overhead of tracking them.)

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
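
To make the before/after behaviour concrete, here is a caller's-eye
sketch (illustrative only: both callers are invented, while the
signatures are the ones declared in the hvm.h hunk below):

static void transient_example(unsigned long gfn)
{
    bool_t writable = 1;
    void *p = hvm_map_guest_frame_rw(gfn, 0, &writable);

    /* Transient (!permanent): the page was already marked dirty at map
     * time, so a prompt unmap needs no further bookkeeping. */
    if ( p )
        hvm_unmap_guest_frame(p, 0);
}

static void permanent_example(unsigned long gfn)
{
    bool_t writable = 1;
    void *p = hvm_map_guest_frame_rw(gfn, 1, &writable);

    /* Permanent: writes may occur at any point while mapped, long after
     * a map-time dirty bit would have been consumed by an earlier
     * migration round.  With this patch the page is tracked instead,
     * and marked dirty at unmap time ... */
    if ( p )
        hvm_unmap_guest_frame(p, 1);
    /* ... or, if still mapped when the guest suspends for the final
     * save iteration, by hvm_mapped_guest_frames_mark_dirty(). */
}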

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1556,6 +1556,8 @@ int hvm_domain_initialise(struct domain 
     INIT_LIST_HEAD(&d->arch.hvm_domain.ioreq_server.list);
     spin_lock_init(&d->arch.hvm_domain.irq_lock);
     spin_lock_init(&d->arch.hvm_domain.uc_lock);
+    spin_lock_init(&d->arch.hvm_domain.write_map.lock);
+    INIT_LIST_HEAD(&d->arch.hvm_domain.write_map.list);
 
     hvm_init_cacheattr_region_list(d);
 
@@ -3667,6 +3669,11 @@ int hvm_virtual_to_linear_addr(
     return 1;
 }
 
+struct hvm_write_map {
+    struct list_head list;
+    struct page_info *page;
+};
+
 /* On non-NULL return, we leave this function holding an additional 
  * ref on the underlying mfn, if any */
 static void *_hvm_map_guest_frame(unsigned long gfn, bool_t permanent,
@@ -3694,15 +3701,30 @@ static void *_hvm_map_guest_frame(unsign
 
     if ( writable )
     {
-        if ( !p2m_is_discard_write(p2mt) )
-            paging_mark_dirty(d, page_to_mfn(page));
-        else
+        if ( unlikely(p2m_is_discard_write(p2mt)) )
             *writable = 0;
+        else if ( !permanent )
+            paging_mark_dirty(d, page_to_mfn(page));
     }
 
     if ( !permanent )
         return __map_domain_page(page);
 
+    if ( writable && *writable )
+    {
+        struct hvm_write_map *track = xmalloc(struct hvm_write_map);
+
+        if ( !track )
+        {
+            put_page(page);
+            return NULL;
+        }
+        track->page = page;
+        spin_lock(&d->arch.hvm_domain.write_map.lock);
+        list_add_tail(&track->list, &d->arch.hvm_domain.write_map.list);
+        spin_unlock(&d->arch.hvm_domain.write_map.lock);
+    }
+
     map = __map_domain_page_global(page);
     if ( !map )
         put_page(page);
@@ -3725,18 +3747,45 @@ void *hvm_map_guest_frame_ro(unsigned lo
 void hvm_unmap_guest_frame(void *p, bool_t permanent)
 {
     unsigned long mfn;
+    struct page_info *page;
 
     if ( !p )
         return;
 
     mfn = domain_page_map_to_mfn(p);
+    page = mfn_to_page(mfn);
 
     if ( !permanent )
         unmap_domain_page(p);
     else
+    {
+        struct domain *d = page_get_owner(page);
+        struct hvm_write_map *track;
+
         unmap_domain_page_global(p);
+        spin_lock(&d->arch.hvm_domain.write_map.lock);
+        list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
+            if ( track->page == page )
+            {
+                paging_mark_dirty(d, mfn);
+                list_del(&track->list);
+                xfree(track);
+                break;
+            }
+        spin_unlock(&d->arch.hvm_domain.write_map.lock);
+    }
+
+    put_page(page);
+}
+
+void hvm_mapped_guest_frames_mark_dirty(struct domain *d)
+{
+    struct hvm_write_map *track;
 
-    put_page(mfn_to_page(mfn));
+    spin_lock(&d->arch.hvm_domain.write_map.lock);
+    list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
+        paging_mark_dirty(d, page_to_mfn(track->page));
+    spin_unlock(&d->arch.hvm_domain.write_map.lock);
 }
 
 static void *hvm_map_entry(unsigned long va, bool_t *writable)
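
The hvm.c changes above amount to a small tracked-resource pattern: each
permanent writable mapping gets a list node under a per-domain lock, so
both the unmap path and the final save pass can later find the pages
concerned.  A self-contained toy model of the pattern (plain C, with a
pthread mutex standing in for Xen's spinlock and a hand-rolled singly
linked list for list_head; all names invented):

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct write_map_entry {
    struct write_map_entry *next;
    unsigned long page;              /* stands in for struct page_info * */
};

static struct write_map_entry *write_map_list;
static pthread_mutex_t write_map_lock = PTHREAD_MUTEX_INITIALIZER;

/* Map time (permanent && writable): record the page. */
static int track_write_mapping(unsigned long page)
{
    struct write_map_entry *e = malloc(sizeof(*e));

    if ( !e )
        return -1;
    e->page = page;
    pthread_mutex_lock(&write_map_lock);
    e->next = write_map_list;
    write_map_list = e;
    pthread_mutex_unlock(&write_map_lock);
    return 0;
}

/* Unmap time: mark dirty once and drop the tracking entry. */
static void untrack_write_mapping(unsigned long page)
{
    struct write_map_entry **pp, *e;

    pthread_mutex_lock(&write_map_lock);
    for ( pp = &write_map_list; (e = *pp) != NULL; pp = &e->next )
        if ( e->page == page )
        {
            *pp = e->next;
            printf("mark dirty %#lx (unmap)\n", page);
            free(e);
            break;
        }
    pthread_mutex_unlock(&write_map_lock);
}

/* Final save iteration: dirty everything still mapped. */
static void mark_all_mapped_dirty(void)
{
    struct write_map_entry *e;

    pthread_mutex_lock(&write_map_lock);
    for ( e = write_map_list; e; e = e->next )
        printf("mark dirty %#lx (still mapped at save)\n", e->page);
    pthread_mutex_unlock(&write_map_lock);
}

int main(void)
{
    track_write_mapping(0x1000);
    track_write_mapping(0x2000);
    untrack_write_mapping(0x1000);   /* dirtied at unmap */
    mark_all_mapped_dirty();         /* 0x2000 dirtied on the final pass */
    return 0;
}

As in the patch, the removal loop stops at the first match, so a page
permanently mapped twice keeps one tracking entry per mapping and gets
marked dirty as each of them goes away.
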
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -29,6 +29,7 @@
 #include <asm/hvm/nestedhvm.h>
 #include <xen/numa.h>
 #include <xsm/xsm.h>
+#include <public/sched.h> /* SHUTDOWN_suspend */
 
 #include "mm-locks.h"
 
@@ -422,6 +423,14 @@ static int paging_log_dirty_op(struct do
 
     if ( !resuming )
     {
+        /*
+         * Mark dirty all currently write-mapped pages on the final iteration
+         * of a HVM save operation.
+         */
+        if ( has_hvm_container_domain(d) && d->is_shut_down &&
+             d->shutdown_code == SHUTDOWN_suspend )
+            hvm_mapped_guest_frames_mark_dirty(d);
+
         domain_pause(d);
 
         /*
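
The hook above sits in the non-resuming entry to paging_log_dirty_op()
and is gated on the guest having suspended itself (d->is_shut_down with
d->shutdown_code == SHUTDOWN_suspend), which is what distinguishes the
final save iteration from the earlier live rounds.  A compilable toy
model of that gating (the domain_state struct and the helper are
stand-ins, not Xen types; SHUTDOWN_suspend's value is the one from
public/sched.h):

#include <stdio.h>

#define SHUTDOWN_suspend 2   /* from xen/include/public/sched.h */

/* Stand-in for the relevant bits of struct domain. */
struct domain_state {
    int is_shut_down;
    int shutdown_code;
    int still_write_mapped;  /* pages on the write_map list */
};

/* Model of the check added to paging_log_dirty_op(). */
static void log_dirty_op(struct domain_state *d, int resuming)
{
    if ( !resuming && d->is_shut_down &&
         d->shutdown_code == SHUTDOWN_suspend )
        printf("final pass: flushing %d still-mapped page(s)\n",
               d->still_write_mapped);
    /* ... pause domain, copy out and clean the bitmap, unpause ... */
}

int main(void)
{
    struct domain_state d = { .is_shut_down = 0, .still_write_mapped = 1 };

    log_dirty_op(&d, 0);             /* live pre-copy round: hook idle */
    d.is_shut_down = 1;
    d.shutdown_code = SHUTDOWN_suspend;
    log_dirty_op(&d, 0);             /* final round: hook fires */
    return 0;
}

During the live rounds the hook stays idle, so the write_map walk is
paid only once per save operation.
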
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -145,6 +145,12 @@ struct hvm_domain {
 
     unsigned long *io_bitmap;
 
+    /* List of permanently write-mapped pages. */
+    struct {
+        spinlock_t lock;
+        struct list_head list;
+    } write_map;
+
     union {
         struct vmx_domain vmx;
         struct svm_domain svm;
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -444,6 +444,7 @@ void *hvm_map_guest_frame_rw(unsigned lo
                              bool_t *writable);
 void *hvm_map_guest_frame_ro(unsigned long gfn, bool_t permanent);
 void hvm_unmap_guest_frame(void *p, bool_t permanent);
+void hvm_mapped_guest_frames_mark_dirty(struct domain *);
 
 static inline void hvm_set_info_guest(struct vcpu *v)
 {
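
For readers not steeped in the log-dirty machinery, the reason deferred
marking matters at all: paging_mark_dirty() sets a bit in a bitmap that
the save logic reads and clears round by round, and a page whose bit is
never set again is simply not re-sent.  A toy model of that
consume-and-clear cycle (bitmap size and helper names invented):

#include <stdio.h>
#include <string.h>

#define NR_PFNS 64
static unsigned char dirty_bitmap[NR_PFNS / 8];

/* What paging_mark_dirty() conceptually does. */
static void mark_dirty(unsigned long pfn)
{
    dirty_bitmap[pfn / 8] |= 1u << (pfn % 8);
}

/* One save round: copy the bitmap out and clear it. */
static void read_and_clean(unsigned char *out)
{
    memcpy(out, dirty_bitmap, sizeof(dirty_bitmap));
    memset(dirty_bitmap, 0, sizeof(dirty_bitmap));
}

int main(void)
{
    unsigned char round[NR_PFNS / 8];

    mark_dirty(5);       /* e.g. the deferred unmap-time dirtying */
    read_and_clean(round);
    printf("pfn 5 re-sent this round: %d\n", !!(round[0] & (1 << 5)));
    return 0;
}

A page dirtied only once, at map time, has its bit consumed by whichever
round runs next; writes made through the still-live mapping afterwards
would go unreported, which is exactly the case this patch closes.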

