[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH RFC 3/7] xen/arm: Enable the compilation of mem_access and mem_event on ARM.



This patch sets up the infrastructure to support mem_access and mem_event
 on ARM and turns on compilation. We define the required XSM functions,
handling of domctl copyback, and the required p2m types and stub-functions
in this patch.

Signed-off-by: Tamas K Lengyel <tamas.lengyel@xxxxxxxxxxxx>
---
 xen/arch/arm/domctl.c        |  36 ++++++++++++++--
 xen/arch/arm/mm.c            |  18 ++++++--
 xen/arch/arm/p2m.c           |   5 +++
 xen/common/mem_access.c      |   6 +--
 xen/common/mem_event.c       |  15 +++++--
 xen/include/asm-arm/p2m.h    | 100 ++++++++++++++++++++++++++++++++++---------
 xen/include/xen/mem_access.h |  19 --------
 xen/include/xen/mem_event.h  |  53 +++--------------------
 xen/include/xen/sched.h      |   1 -
 xen/include/xsm/dummy.h      |  24 +++++------
 xen/include/xsm/xsm.h        |  25 +++++------
 xen/xsm/dummy.c              |   4 +-
 12 files changed, 178 insertions(+), 128 deletions(-)

diff --git a/xen/arch/arm/domctl.c b/xen/arch/arm/domctl.c
index 45974e7..bb0b8d3 100644
--- a/xen/arch/arm/domctl.c
+++ b/xen/arch/arm/domctl.c
@@ -11,10 +11,17 @@
 #include <xen/sched.h>
 #include <xen/hypercall.h>
 #include <public/domctl.h>
+#include <asm/guest_access.h>
+#include <xen/mem_event.h>
+#include <public/mem_event.h>
 
 long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
                     XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
 {
+
+    long ret;
+    bool_t copyback = 0;
+
     switch ( domctl->cmd )
     {
     case XEN_DOMCTL_cacheflush:
@@ -23,17 +30,38 @@ long arch_do_domctl(struct xen_domctl *domctl, struct 
domain *d,
         unsigned long e = s + domctl->u.cacheflush.nr_pfns;
 
         if ( domctl->u.cacheflush.nr_pfns > (1U<<MAX_ORDER) )
-            return -EINVAL;
+        {
+            ret = -EINVAL;
+            break;
+        }
 
         if ( e < s )
-            return -EINVAL;
+        {
+            ret = -EINVAL;
+            break;
+        }
 
-        return p2m_cache_flush(d, s, e);
+        ret = p2m_cache_flush(d, s, e);
     }
+    break;
+
+    case XEN_DOMCTL_mem_event_op:
+    {
+        ret = mem_event_domctl(d, &domctl->u.mem_event_op,
+                              guest_handle_cast(u_domctl, void));
+        copyback = 1;
+    }
+    break;
 
     default:
-        return subarch_do_domctl(domctl, d, u_domctl);
+        ret = subarch_do_domctl(domctl, d, u_domctl);
+        break;
     }
+
+    if ( copyback && __copy_to_guest(u_domctl, domctl, 1) )
+        ret = -EFAULT;
+
+    return ret;
 }
 
 void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 0a243b0..cd04dec 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -35,6 +35,9 @@
 #include <asm/current.h>
 #include <asm/flushtlb.h>
 #include <public/memory.h>
+#include <xen/mem_event.h>
+#include <xen/mem_access.h>
+#include <xen/hypercall.h>
 #include <xen/sched.h>
 #include <xen/vmap.h>
 #include <xsm/xsm.h>
@@ -1111,18 +1114,27 @@ int xenmem_add_to_physmap_one(
 
 long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
-    switch ( op )
+
+    long rc;
+
+    switch ( op & MEMOP_CMD_MASK )
     {
     /* XXX: memsharing not working yet */
     case XENMEM_get_sharing_shared_pages:
     case XENMEM_get_sharing_freed_pages:
         return 0;
+    case XENMEM_access_op:
+    {
+        rc = mem_access_memop(op, guest_handle_cast(arg, xen_mem_access_op_t));
+        break;
+    }
 
     default:
-        return -ENOSYS;
+        rc = -ENOSYS;
+        break;
     }
 
-    return 0;
+    return rc;
 }
 
 struct domain *page_get_owner_and_reference(struct page_info *page)
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 143199b..0ca0d2f 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -10,6 +10,9 @@
 #include <asm/event.h>
 #include <asm/hardirq.h>
 #include <asm/page.h>
+#include <xen/mem_event.h>
+#include <public/mem_event.h>
+#include <xen/mem_access.h>
 
 /* First level P2M is 2 consecutive pages */
 #define P2M_FIRST_ORDER 1
@@ -999,6 +1002,8 @@ int p2m_init(struct domain *d)
     p2m->max_mapped_gfn = 0;
     p2m->lowest_mapped_gfn = ULONG_MAX;
 
+    p2m->default_access = p2m_access_rwx;
+
 err:
     spin_unlock(&p2m->lock);
 
diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c
index 84acdf9..6bb9cf4 100644
--- a/xen/common/mem_access.c
+++ b/xen/common/mem_access.c
@@ -29,8 +29,6 @@
 #include <xen/mem_event.h>
 #include <xsm/xsm.h>
 
-#ifdef CONFIG_X86
-
 int mem_access_memop(unsigned long cmd,
                      XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg)
 {
@@ -45,9 +43,11 @@ int mem_access_memop(unsigned long cmd,
     if ( rc )
         return rc;
 
+#ifdef CONFIG_X86
     rc = -EINVAL;
     if ( !is_hvm_domain(d) )
         goto out;
+#endif
 
     rc = xsm_mem_event_op(XSM_DM_PRIV, d, XENMEM_access_op);
     if ( rc )
@@ -125,8 +125,6 @@ int mem_access_send_req(struct domain *d, 
mem_event_request_t *req)
     return 0;
 }
 
-#endif /* CONFIG_X86 */
-
 /*
  * Local variables:
  * mode: C
diff --git a/xen/common/mem_event.c b/xen/common/mem_event.c
index a94ddf6..2a91928 100644
--- a/xen/common/mem_event.c
+++ b/xen/common/mem_event.c
@@ -20,16 +20,19 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
-#ifdef CONFIG_X86
-
+#include <xen/sched.h>
 #include <asm/domain.h>
 #include <xen/event.h>
 #include <xen/wait.h>
 #include <asm/p2m.h>
 #include <xen/mem_event.h>
 #include <xen/mem_access.h>
+
+#ifdef CONFIG_X86
 #include <asm/mem_paging.h>
 #include <asm/mem_sharing.h>
+#endif
+
 #include <xsm/xsm.h>
 
 /* for public/io/ring.h macros */
@@ -427,6 +430,7 @@ static void mem_access_notification(struct vcpu *v, 
unsigned int port)
         p2m_mem_access_resume(v->domain);
 }
 
+#ifdef CONFIG_X86
 /* Registered with Xen-bound event channel for incoming notifications. */
 static void mem_paging_notification(struct vcpu *v, unsigned int port)
 {
@@ -470,6 +474,7 @@ int do_mem_event_op(int op, uint32_t domain, void *arg)
     rcu_unlock_domain(d);
     return ret;
 }
+#endif
 
 /* Clean up on domain destruction */
 void mem_event_cleanup(struct domain *d)
@@ -538,6 +543,8 @@ int mem_event_domctl(struct domain *d, 
xen_domctl_mem_event_op_t *mec,
         {
         case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE:
         {
+
+#ifdef CONFIG_X86
             rc = -ENODEV;
             /* Only HAP is supported */
             if ( !hap_enabled(d) )
@@ -546,6 +553,7 @@ int mem_event_domctl(struct domain *d, 
xen_domctl_mem_event_op_t *mec,
             /* Currently only EPT is supported */
             if ( !cpu_has_vmx )
                 break;
+#endif
 
             rc = mem_event_enable(d, mec, med, _VPF_mem_access,
                                     HVM_PARAM_ACCESS_RING_PFN,
@@ -567,6 +575,7 @@ int mem_event_domctl(struct domain *d, 
xen_domctl_mem_event_op_t *mec,
     }
     break;
 
+#ifdef CONFIG_X86
     case XEN_DOMCTL_MEM_EVENT_OP_PAGING:
     {
         struct mem_event_domain *med = &d->mem_event->paging;
@@ -656,6 +665,7 @@ int mem_event_domctl(struct domain *d, 
xen_domctl_mem_event_op_t *mec,
         }
     }
     break;
+#endif
 
     default:
         rc = -ENOSYS;
@@ -695,7 +705,6 @@ void mem_event_vcpu_unpause(struct vcpu *v)
 
     vcpu_unpause(v);
 }
-#endif
 
 /*
  * Local variables:
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index 06c93a0..f3d1f33 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -2,9 +2,55 @@
 #define _XEN_P2M_H
 
 #include <xen/mm.h>
+#include <public/memory.h>
+#include <public/mem_event.h>
 
 struct domain;
 
+/* List of possible type for each page in the p2m entry.
+ * The number of available bit per page in the pte for this purpose is 4 bits.
+ * So it's possible to only have 16 fields. If we run out of value in the
+ * future, it's possible to use higher value for pseudo-type and don't store
+ * them in the p2m entry.
+ */
+typedef enum {
+    p2m_invalid = 0,    /* Nothing mapped here */
+    p2m_ram_rw,         /* Normal read/write guest RAM */
+    p2m_ram_ro,         /* Read-only; writes are silently dropped */
+    p2m_mmio_direct,    /* Read/write mapping of genuine MMIO area */
+    p2m_map_foreign,    /* Ram pages from foreign domain */
+    p2m_grant_map_rw,   /* Read/write grant mapping */
+    p2m_grant_map_ro,   /* Read-only grant mapping */
+    /* The types below are only used to decide the page attribute in the P2M */
+    p2m_iommu_map_rw,   /* Read/write iommu mapping */
+    p2m_iommu_map_ro,   /* Read-only iommu mapping */
+    p2m_max_real_type,  /* Types after this won't be store in the p2m */
+} p2m_type_t;
+
+/*
+ * Additional access types, which are used to further restrict
+ * the permissions given by the p2m_type_t memory type.  Violations
+ * caused by p2m_access_t restrictions are sent to the mem_event
+ * interface.
+ *
+ * The access permissions are soft state: when any ambiguous change of page
+ * type or use occurs, or when pages are flushed, swapped, or at any other
+ * convenient type, the access permissions can get reset to the p2m_domain
+ * default.
+ */
+typedef enum {
+    p2m_access_n     = 0, /* No access permissions allowed */
+    p2m_access_r     = 1,
+    p2m_access_w     = 2, 
+    p2m_access_rw    = 3,
+    p2m_access_x     = 4, 
+    p2m_access_rx    = 5,
+    p2m_access_wx    = 6, 
+    p2m_access_rwx   = 7
+
+    /* NOTE: Assumed to be only 4 bits right now */
+} p2m_access_t;
+
 /* Per-p2m-table state */
 struct p2m_domain {
     /* Lock that protects updates to the p2m */
@@ -38,27 +84,17 @@ struct p2m_domain {
          * at each p2m tree level. */
         unsigned long shattered[4];
     } stats;
-};
 
-/* List of possible type for each page in the p2m entry.
- * The number of available bit per page in the pte for this purpose is 4 bits.
- * So it's possible to only have 16 fields. If we run out of value in the
- * future, it's possible to use higher value for pseudo-type and don't store
- * them in the p2m entry.
- */
-typedef enum {
-    p2m_invalid = 0,    /* Nothing mapped here */
-    p2m_ram_rw,         /* Normal read/write guest RAM */
-    p2m_ram_ro,         /* Read-only; writes are silently dropped */
-    p2m_mmio_direct,    /* Read/write mapping of genuine MMIO area */
-    p2m_map_foreign,    /* Ram pages from foreign domain */
-    p2m_grant_map_rw,   /* Read/write grant mapping */
-    p2m_grant_map_ro,   /* Read-only grant mapping */
-    /* The types below are only used to decide the page attribute in the P2M */
-    p2m_iommu_map_rw,   /* Read/write iommu mapping */
-    p2m_iommu_map_ro,   /* Read-only iommu mapping */
-    p2m_max_real_type,  /* Types after this won't be store in the p2m */
-} p2m_type_t;
+    /* Default P2M access type for each page in the domain: new pages,
+     * swapped in pages, cleared pages, and pages that are ambiguously
+     * retyped get this access type.  See definition of p2m_access_t. */
+    p2m_access_t default_access;
+
+    /* If true, and an access fault comes in and there is no mem_event listener,
+     * pause domain.  Otherwise, remove access restrictions. */
+    bool_t       access_required;
+
+};
 
 #define p2m_is_foreign(_t)  ((_t) == p2m_map_foreign)
 #define p2m_is_ram(_t)      ((_t) == p2m_ram_rw || (_t) == p2m_ram_ro)
@@ -195,6 +231,30 @@ static inline int get_page_and_type(struct page_info *page,
     return rc;
 }
 
+/* get host p2m table */
+#define p2m_get_hostp2m(d)      (&((d)->arch.p2m))
+
+/* Resumes the running of the VCPU, restarting the last instruction */
+static inline void p2m_mem_access_resume(struct domain *d) {}
+
+/* Set access type for a region of pfns.
+ * If start_pfn == -1ul, sets the default access type */
+static inline
+long p2m_set_mem_access(struct domain *d, unsigned long start_pfn, uint32_t nr,
+                        uint32_t start, uint32_t mask, xenmem_access_t access)
+{
+    return -ENOSYS;
+}
+
+/* Get access type for a pfn
+ * If pfn == -1ul, gets the default access type */
+static inline
+int p2m_get_mem_access(struct domain *d, unsigned long pfn,
+                       xenmem_access_t *access)
+{
+    return -ENOSYS;
+}
+
 #endif /* _XEN_P2M_H */
 
 /*
diff --git a/xen/include/xen/mem_access.h b/xen/include/xen/mem_access.h
index ded5441..5c7c5fd 100644
--- a/xen/include/xen/mem_access.h
+++ b/xen/include/xen/mem_access.h
@@ -23,29 +23,10 @@
 #ifndef _XEN_ASM_MEM_ACCESS_H
 #define _XEN_ASM_MEM_ACCESS_H
 
-#ifdef CONFIG_X86
-
 int mem_access_memop(unsigned long cmd,
                      XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg);
 int mem_access_send_req(struct domain *d, mem_event_request_t *req);
 
-#else
-
-static inline
-int mem_access_memop(unsigned long cmd,
-                     XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg)
-{
-    return -ENOSYS;
-}
-
-static inline
-int mem_access_send_req(struct domain *d, mem_event_request_t *req)
-{
-    return -ENOSYS;
-}
-
-#endif /* CONFIG_X86 */
-
 #endif /* _XEN_ASM_MEM_ACCESS_H */
 
 /*
diff --git a/xen/include/xen/mem_event.h b/xen/include/xen/mem_event.h
index a28d453..e2a9d4d 100644
--- a/xen/include/xen/mem_event.h
+++ b/xen/include/xen/mem_event.h
@@ -24,8 +24,6 @@
 #ifndef __MEM_EVENT_H__
 #define __MEM_EVENT_H__
 
-#ifdef CONFIG_X86
-
 /* Clean up on domain destruction */
 void mem_event_cleanup(struct domain *d);
 
@@ -67,66 +65,25 @@ void mem_event_put_request(struct domain *d, struct 
mem_event_domain *med,
 int mem_event_get_response(struct domain *d, struct mem_event_domain *med,
                            mem_event_response_t *rsp);
 
-int do_mem_event_op(int op, uint32_t domain, void *arg);
 int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
                      XEN_GUEST_HANDLE_PARAM(void) u_domctl);
 
 void mem_event_vcpu_pause(struct vcpu *v);
 void mem_event_vcpu_unpause(struct vcpu *v);
 
-#else
-
-static inline void mem_event_cleanup(struct domain *d) {}
-
-static inline bool_t mem_event_check_ring(struct mem_event_domain *med)
-{
-    return 0;
-}
-
-static inline int mem_event_claim_slot(struct domain *d,
-                                        struct mem_event_domain *med)
-{
-    return -ENOSYS;
-}
-
-static inline int mem_event_claim_slot_nosleep(struct domain *d,
-                                        struct mem_event_domain *med)
-{
-    return -ENOSYS;
-}
-
-static inline
-void mem_event_cancel_slot(struct domain *d, struct mem_event_domain *med)
-{}
-
-static inline
-void mem_event_put_request(struct domain *d, struct mem_event_domain *med,
-                            mem_event_request_t *req)
-{}
+#ifdef CONFIG_X86
 
-static inline
-int mem_event_get_response(struct domain *d, struct mem_event_domain *med,
-                           mem_event_response_t *rsp)
-{
-    return -ENOSYS;
-}
+int do_mem_event_op(int op, uint32_t domain, void *arg);
 
-static inline int do_mem_event_op(int op, uint32_t domain, void *arg)
-{
-    return -ENOSYS;
-}
+#else
 
 static inline
-int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
-                     XEN_GUEST_HANDLE_PARAM(void) u_domctl)
+int do_mem_event_op(int op, uint32_t domain, void *arg)
 {
     return -ENOSYS;
 }
 
-static inline void mem_event_vcpu_pause(struct vcpu *v) {}
-static inline void mem_event_vcpu_unpause(struct vcpu *v) {}
-
-#endif /* CONFIG_X86 */
+#endif
 
 #endif /* __MEM_EVENT_H__ */
 
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 4575dda..2365fad 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -1,4 +1,3 @@
-
 #ifndef __SCHED_H__
 #define __SCHED_H__
 
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index c5aa316..61677ea 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -507,6 +507,18 @@ static XSM_INLINE int xsm_hvm_param_nested(XSM_DEFAULT_ARG 
struct domain *d)
     return xsm_default_action(action, current->domain, d);
 }
 
+static XSM_INLINE int xsm_mem_event_control(XSM_DEFAULT_ARG struct domain *d, int mode, int op)
+{
+    XSM_ASSERT_ACTION(XSM_PRIV);
+    return xsm_default_action(action, current->domain, d);
+}
+
+static XSM_INLINE int xsm_mem_event_op(XSM_DEFAULT_ARG struct domain *d, int op)
+{
+    XSM_ASSERT_ACTION(XSM_DM_PRIV);
+    return xsm_default_action(action, current->domain, d);
+}
+
 #ifdef CONFIG_X86
 static XSM_INLINE int xsm_do_mca(XSM_DEFAULT_VOID)
 {
@@ -550,18 +562,6 @@ static XSM_INLINE int xsm_hvm_ioreq_server(XSM_DEFAULT_ARG 
struct domain *d, int
     return xsm_default_action(action, current->domain, d);
 }
 
-static XSM_INLINE int xsm_mem_event_control(XSM_DEFAULT_ARG struct domain *d, 
int mode, int op)
-{
-    XSM_ASSERT_ACTION(XSM_PRIV);
-    return xsm_default_action(action, current->domain, d);
-}
-
-static XSM_INLINE int xsm_mem_event_op(XSM_DEFAULT_ARG struct domain *d, int 
op)
-{
-    XSM_ASSERT_ACTION(XSM_DM_PRIV);
-    return xsm_default_action(action, current->domain, d);
-}
-
 static XSM_INLINE int xsm_mem_sharing_op(XSM_DEFAULT_ARG struct domain *d, 
struct domain *cd, int op)
 {
     XSM_ASSERT_ACTION(XSM_DM_PRIV);
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index a85045d..64289cd 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -140,6 +140,9 @@ struct xsm_operations {
     int (*hvm_control) (struct domain *d, unsigned long op);
     int (*hvm_param_nested) (struct domain *d);
 
+    int (*mem_event_control) (struct domain *d, int mode, int op);
+    int (*mem_event_op) (struct domain *d, int op);
+
 #ifdef CONFIG_X86
     int (*do_mca) (void);
     int (*shadow_control) (struct domain *d, uint32_t op);
@@ -148,8 +151,6 @@ struct xsm_operations {
     int (*hvm_set_pci_link_route) (struct domain *d);
     int (*hvm_inject_msi) (struct domain *d);
     int (*hvm_ioreq_server) (struct domain *d, int op);
-    int (*mem_event_control) (struct domain *d, int mode, int op);
-    int (*mem_event_op) (struct domain *d, int op);
     int (*mem_sharing_op) (struct domain *d, struct domain *cd, int op);
     int (*apic) (struct domain *d, int cmd);
     int (*memtype) (uint32_t access);
@@ -534,6 +535,16 @@ static inline int xsm_hvm_param_nested (xsm_default_t def, 
struct domain *d)
     return xsm_ops->hvm_param_nested(d);
 }
 
+static inline int xsm_mem_event_control (xsm_default_t def, struct domain *d, int mode, int op)
+{
+    return xsm_ops->mem_event_control(d, mode, op);
+}
+
+static inline int xsm_mem_event_op (xsm_default_t def, struct domain *d, int op)
+{
+    return xsm_ops->mem_event_op(d, op);
+}
+
 #ifdef CONFIG_X86
 static inline int xsm_do_mca(xsm_default_t def)
 {
@@ -570,16 +581,6 @@ static inline int xsm_hvm_ioreq_server (xsm_default_t def, 
struct domain *d, int
     return xsm_ops->hvm_ioreq_server(d, op);
 }
 
-static inline int xsm_mem_event_control (xsm_default_t def, struct domain *d, 
int mode, int op)
-{
-    return xsm_ops->mem_event_control(d, mode, op);
-}
-
-static inline int xsm_mem_event_op (xsm_default_t def, struct domain *d, int 
op)
-{
-    return xsm_ops->mem_event_op(d, op);
-}
-
 static inline int xsm_mem_sharing_op (xsm_default_t def, struct domain *d, 
struct domain *cd, int op)
 {
     return xsm_ops->mem_sharing_op(d, cd, op);
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index c95c803..9df9d81 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -116,6 +116,8 @@ void xsm_fixup_ops (struct xsm_operations *ops)
     set_to_dummy_if_null(ops, add_to_physmap);
     set_to_dummy_if_null(ops, remove_from_physmap);
     set_to_dummy_if_null(ops, map_gmfn_foreign);
+    set_to_dummy_if_null(ops, mem_event_control);
+    set_to_dummy_if_null(ops, mem_event_op);
 
 #ifdef CONFIG_X86
     set_to_dummy_if_null(ops, do_mca);
@@ -125,8 +127,6 @@ void xsm_fixup_ops (struct xsm_operations *ops)
     set_to_dummy_if_null(ops, hvm_set_pci_link_route);
     set_to_dummy_if_null(ops, hvm_inject_msi);
     set_to_dummy_if_null(ops, hvm_ioreq_server);
-    set_to_dummy_if_null(ops, mem_event_control);
-    set_to_dummy_if_null(ops, mem_event_op);
     set_to_dummy_if_null(ops, mem_sharing_op);
     set_to_dummy_if_null(ops, apic);
     set_to_dummy_if_null(ops, platform_op);
-- 
2.0.1


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.