[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 4/4] xen/arm: clean and invalidate all guest caches by VMID after domain build.



Guests are initially started with caches disabled and so we need to make sure
they see consistent data in RAM (requiring a cache clean) but also that they
do not have old stale data suddenly appear in the caches when they enable
their caches (requiring the invalidate).

We need to flush all of the guest's pages in order to catch both pages dirtied
by the domain builder and those which have been scrubbed but not yet flushed.
Separating the two cases -- flushing scrubbed pages at scrub time and only
builder-dirtied pages here -- would require tracking the dirtiness state in the
guest's p2m, perhaps via a new p2m type.

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Cc: jbeulich@xxxxxxxx
Cc: keir@xxxxxxx
Cc: ian.jackson@xxxxxxxxxxxxx
---
 tools/libxc/xc_domain.c     |    8 ++++++
 tools/libxc/xenctrl.h       |    1 +
 tools/libxl/libxl_create.c  |    3 ++
 xen/arch/arm/domctl.c       |   12 ++++++++
 xen/arch/arm/p2m.c          |   64 +++++++++++++++++++++++++++++++++++++------
 xen/include/public/domctl.h |    9 ++++++
 6 files changed, 89 insertions(+), 8 deletions(-)

diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index c2fdd74..e6fa4ff 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -48,6 +48,23 @@ int xc_domain_create(xc_interface *xch,
     return 0;
 }
 
+/*
+ * Clean and invalidate the guest's caches over the whole of its RAM.
+ * The hypervisor preempts itself, returning EAGAIN with start_mfn
+ * updated to record progress, so keep re-issuing the domctl until it
+ * completes.
+ */
+int xc_domain_cacheflush(xc_interface *xch, uint32_t domid)
+{
+    int rc;
+    DECLARE_DOMCTL;
+    domctl.cmd = XEN_DOMCTL_cacheflush;
+    domctl.domain = (domid_t)domid;
+    domctl.u.cacheflush.start_mfn = 0;
+    do {
+        rc = do_domctl(xch, &domctl);
+    } while ( rc < 0 && errno == EAGAIN );
+    return rc;
+}
 
 int xc_domain_pause(xc_interface *xch,
                     uint32_t domid)
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index 13f816b..43dae5c 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -453,6 +453,7 @@ int xc_domain_create(xc_interface *xch,
                      xen_domain_handle_t handle,
                      uint32_t flags,
                      uint32_t *pdomid);
+int xc_domain_cacheflush(xc_interface *xch, uint32_t domid);
 
 
 /* Functions to produce a dump of a given domain
diff --git a/tools/libxl/libxl_create.c b/tools/libxl/libxl_create.c
index a604cd8..55c86f0 100644
--- a/tools/libxl/libxl_create.c
+++ b/tools/libxl/libxl_create.c
@@ -1364,7 +1364,14 @@ static void domain_create_cb(libxl__egc *egc,
     STATE_AO_GC(cdcs->dcs.ao);
 
-    if (!rc)
+    if (!rc) {
         *cdcs->domid_out = domid;
+        /* Guests start with caches disabled: make sure the RAM the
+         * domain builder dirtied reaches memory before the guest
+         * runs.  Best effort: failure here is not fatal to domain
+         * creation (and the domctl may be unimplemented on some
+         * architectures), so the return value is deliberately
+         * ignored. */
+        xc_domain_cacheflush(CTX->xch, domid);
+    }
 
     libxl__ao_complete(egc, ao, rc);
 }
diff --git a/xen/arch/arm/domctl.c b/xen/arch/arm/domctl.c
index 546e86b..9e3b37d 100644
--- a/xen/arch/arm/domctl.c
+++ b/xen/arch/arm/domctl.c
@@ -11,12 +11,28 @@
 #include <xen/sched.h>
 #include <xen/hypercall.h>
 #include <public/domctl.h>
+#include <xen/guest_access.h>
+
+/* Defined in p2m.c.  TODO(review): this prototype belongs in a
+ * header (e.g. <asm/p2m.h>) rather than a local extern. */
+extern long p2m_cache_flush(struct domain *d, xen_pfn_t *start_mfn);
 
 long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
                     XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
 {
     switch ( domctl->cmd )
     {
+    case XEN_DOMCTL_cacheflush:
+    {
+        long rc = p2m_cache_flush(d, &domctl->u.cacheflush.start_mfn);
+        /* Copy the (possibly updated) start_mfn back even on -EAGAIN
+         * so the caller can restart from where we stopped. */
+        if ( __copy_to_guest(u_domctl, domctl, 1) )
+            rc = -EFAULT;
+
+        return rc;
+    }
+
     default:
         return subarch_do_domctl(domctl, d, u_domctl);
     }
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index a61edeb..18bd500 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -228,15 +228,34 @@ enum p2m_operation {
     ALLOCATE,
     REMOVE,
     RELINQUISH,
+    CACHEFLUSH,
 };
 
+/* Flush the data cache for one guest page, given by machine frame
+ * number, via a transient mapping.
+ * NOTE(review): confirm flush_xen_dcache_va_range invalidates as
+ * well as cleans -- stale lines must not survive this flush. */
+static void do_one_cacheflush(paddr_t mfn)
+{
+    void *v = map_domain_page(mfn);
+
+    flush_xen_dcache_va_range(v, PAGE_SIZE);
+
+    unmap_domain_page(v);
+}
+
+/*
+ * last_mfn (may be NULL): updated with the frame most recently
+ * processed so a preempted CACHEFLUSH can be restarted there.
+ * NOTE(review): despite the name it records a *guest* frame number.
+ */
 static int apply_p2m_changes(struct domain *d,
                      enum p2m_operation op,
                      paddr_t start_gpaddr,
                      paddr_t end_gpaddr,
                      paddr_t maddr,
                      int mattr,
-                     p2m_type_t t)
+                     p2m_type_t t,
+                     xen_pfn_t *last_mfn)
 {
     int rc;
     struct p2m_domain *p2m = &d->arch.p2m;
@@ -381,18 +392,49 @@ static int apply_p2m_changes(struct domain *d,
                     count++;
                 }
                 break;
+            case CACHEFLUSH:
+                {
+                    if ( !pte.p2m.valid || !p2m_is_ram(pte.p2m.type) )
+                    {
+                        count++;
+                        break;
+                    }
+
+                    /* Cache flushing is expensive; weight flushed
+                     * pages by 16 so preemption is checked roughly
+                     * every 512 flushed pages (2MiB). */
+                    count += 0x10;
+
+                    do_one_cacheflush(pte.p2m.base);
+                }
+                break;
         }
 
+        /* Record progress for restartable (CACHEFLUSH) operations. */
+        if ( last_mfn )
+            *last_mfn = addr >> PAGE_SHIFT;
+
         /* Preempt every 2MiB (mapped) or 32 MiB (unmapped) - arbitrary */
-        if ( op == RELINQUISH && count >= 0x2000 )
+        switch ( op )
         {
-            if ( hypercall_preempt_check() )
+        case RELINQUISH:
+        case CACHEFLUSH:
+            if ( count >= 0x2000 && hypercall_preempt_check() )
             {
-                p2m->lowest_mapped_gfn = addr >> PAGE_SHIFT;
+                /* CACHEFLUSH resumes via *last_mfn; do not clobber
+                 * the relinquish progress marker. */
+                if ( op == RELINQUISH )
+                    p2m->lowest_mapped_gfn = addr >> PAGE_SHIFT;
                 rc = -EAGAIN;
                 goto out;
             }
             count = 0;
+            break;
+        case INSERT:
+        case ALLOCATE:
+        case REMOVE:
+            /* No preemption */
+            break;
         }
 
         /* Got the next page */
@@ -438,7 +473,7 @@ int p2m_populate_ram(struct domain *d,
                      paddr_t end)
 {
     return apply_p2m_changes(d, ALLOCATE, start, end,
-                             0, MATTR_MEM, p2m_ram_rw);
+                             0, MATTR_MEM, p2m_ram_rw, NULL);
 }
 
 int map_mmio_regions(struct domain *d,
@@ -447,7 +482,7 @@ int map_mmio_regions(struct domain *d,
                      paddr_t maddr)
 {
     return apply_p2m_changes(d, INSERT, start_gaddr, end_gaddr,
-                             maddr, MATTR_DEV, p2m_mmio_direct);
+                             maddr, MATTR_DEV, p2m_mmio_direct, NULL);
 }
 
 int guest_physmap_add_entry(struct domain *d,
@@ -459,7 +494,7 @@ int guest_physmap_add_entry(struct domain *d,
     return apply_p2m_changes(d, INSERT,
                              pfn_to_paddr(gpfn),
                              pfn_to_paddr(gpfn + (1 << page_order)),
-                             pfn_to_paddr(mfn), MATTR_MEM, t);
+                             pfn_to_paddr(mfn), MATTR_MEM, t, NULL);
 }
 
 void guest_physmap_remove_page(struct domain *d,
@@ -469,7 +504,7 @@ void guest_physmap_remove_page(struct domain *d,
     apply_p2m_changes(d, REMOVE,
                       pfn_to_paddr(gpfn),
                       pfn_to_paddr(gpfn + (1<<page_order)),
-                      pfn_to_paddr(mfn), MATTR_MEM, p2m_invalid);
+                      pfn_to_paddr(mfn), MATTR_MEM, p2m_invalid, NULL);
 }
 
 int p2m_alloc_table(struct domain *d)
@@ -621,7 +656,27 @@ int relinquish_p2m_mapping(struct domain *d)
                               pfn_to_paddr(p2m->lowest_mapped_gfn),
                               pfn_to_paddr(p2m->max_mapped_gfn),
                               pfn_to_paddr(INVALID_MFN),
-                              MATTR_MEM, p2m_invalid);
+                              MATTR_MEM, p2m_invalid, NULL);
+}
+
+/*
+ * Flush the data cache over every RAM page mapped in the p2m,
+ * starting at *start_mfn (a guest frame number, despite the name).
+ * Returns -EAGAIN when preempted, with *start_mfn updated so the
+ * caller can restart where this call left off.
+ */
+long p2m_cache_flush(struct domain *d, xen_pfn_t *start_mfn)
+{
+    struct p2m_domain *p2m = &d->arch.p2m;
+
+    /* Nothing can be mapped below lowest_mapped_gfn; skip ahead. */
+    *start_mfn = MAX(*start_mfn, p2m->lowest_mapped_gfn);
+
+    return apply_p2m_changes(d, CACHEFLUSH,
+                             pfn_to_paddr(*start_mfn),
+                             pfn_to_paddr(p2m->max_mapped_gfn),
+                             pfn_to_paddr(INVALID_MFN),
+                             MATTR_MEM, p2m_invalid, start_mfn);
 }
 
 unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn)
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 91f01fa..d7b22c3 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -885,6 +885,19 @@ struct xen_domctl_set_max_evtchn {
 typedef struct xen_domctl_set_max_evtchn xen_domctl_set_max_evtchn_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_max_evtchn_t);
 
+/*
+ * XEN_DOMCTL_cacheflush: flush the data caches covering a domain's
+ * RAM (implemented by ARM only in this series).
+ */
+struct xen_domctl_cacheflush {
+    /* IN/OUT: guest frame number to start from; updated to record
+     * progress when the operation is preempted (EAGAIN), so the
+     * caller can simply reissue the domctl to continue. */
+    xen_pfn_t start_mfn;
+};
+typedef struct xen_domctl_cacheflush xen_domctl_cacheflush_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_cacheflush_t);
+
 struct xen_domctl {
     uint32_t cmd;
 #define XEN_DOMCTL_createdomain                   1
@@ -954,6 +961,7 @@ struct xen_domctl {
 #define XEN_DOMCTL_setnodeaffinity               68
 #define XEN_DOMCTL_getnodeaffinity               69
 #define XEN_DOMCTL_set_max_evtchn                70
+#define XEN_DOMCTL_cacheflush                    71
 #define XEN_DOMCTL_gdbsx_guestmemio            1000
 #define XEN_DOMCTL_gdbsx_pausevcpu             1001
 #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
@@ -1012,6 +1020,7 @@ struct xen_domctl {
         struct xen_domctl_set_max_evtchn    set_max_evtchn;
         struct xen_domctl_gdbsx_memio       gdbsx_guest_memio;
         struct xen_domctl_set_broken_page_p2m set_broken_page_p2m;
+        struct xen_domctl_cacheflush        cacheflush;
         struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
         struct xen_domctl_gdbsx_domstatus   gdbsx_domstatus;
         uint8_t                             pad[128];
-- 
1.7.10.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.