
[Xen-devel] [PATCH V2] xen/x86: Clean up vm_event-related code in asm-x86/domain.h


  • To: xen-devel@xxxxxxxxxxxxxxxxxxxx
  • From: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
  • Date: Sat, 15 Aug 2015 09:23:30 +0300
  • Cc: tamas@xxxxxxxxxxxxx, keir@xxxxxxx, Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>, george.dunlap@xxxxxxxxxxxxx, andrew.cooper3@xxxxxxxxxx, jbeulich@xxxxxxxx
  • Delivery-date: Sat, 15 Aug 2015 06:24:17 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xen.org>

As suggested by Jan Beulich, move struct monitor_write_data from
struct arch_domain into the per-vCPU vm_event state, gather the rest of
the vm_event-related data previously kept in asm-x86/domain.h into a new
struct vm_event (defined in asm-x86/vm_event.h), and allocate that
structure dynamically, only when vm_event is actually in use.

Signed-off-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>

---
Changes since V1:
 - Left trailing whitespace in unrelated code alone.
 - Dropped the redundant != NULL in ASSERT()s.
 - Removed the intermediate local variable currad from functions where
   it was used only once.
 - Moved struct vm_event to asm-x86/vm_event.h.
---
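Note for reviewers (not part of the commit message): after this patch all
per-vCPU vm_event state hangs off a single, dynamically allocated pointer.
Below is a minimal sketch of the resulting layout and of the
guard-then-dereference pattern the hunks convert callers to; everything
shown is taken from the asm-x86/vm_event.h and vm_event.c hunks at the end
of the patch:

    /* New per-vCPU container, defined in asm-x86/vm_event.h. */
    struct vm_event {
        uint32_t emulate_flags;            /* emulate next matching insn? */
        unsigned long gpa;
        unsigned long eip;
        struct vm_event_emul_read_data emul_read_data;
        struct monitor_write_data write_data;  /* deferred CR/MSR writes */
    };

    /* Allocated per vCPU in vm_event_init_domain(), freed again in
     * vm_event_cleanup_domain() and vcpu_destroy(): */
    v->arch.vm_event = xzalloc(struct vm_event);

    /* Consumers must test the pointer before dereferencing, since it
     * only exists while vm_event is active for the domain: */
    if ( v->arch.vm_event )
        v->arch.vm_event->write_data.do_write.cr0 = 1;
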
 xen/arch/x86/domain.c          |   10 ++--------
 xen/arch/x86/hvm/emulate.c     |    7 ++++---
 xen/arch/x86/hvm/hvm.c         |   40 +++++++++++++++++++---------------------
 xen/arch/x86/mm/p2m.c          |   25 +++++++++++++------------
 xen/arch/x86/vm_event.c        |   26 +++++++-------------------
 xen/include/asm-x86/domain.h   |   13 +------------
 xen/include/asm-x86/vm_event.h |   12 ++++++++++++
 7 files changed, 58 insertions(+), 75 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 045f6ff..58e173e 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -424,9 +424,6 @@ int vcpu_initialise(struct vcpu *v)
 
     v->arch.flags = TF_kernel_mode;
 
-    /* By default, do not emulate */
-    v->arch.vm_event.emulate_flags = 0;
-
     rc = mapcache_vcpu_init(v);
     if ( rc )
         return rc;
@@ -511,8 +508,8 @@ int vcpu_initialise(struct vcpu *v)
 
 void vcpu_destroy(struct vcpu *v)
 {
-    xfree(v->arch.vm_event.emul_read_data);
-    v->arch.vm_event.emul_read_data = NULL;
+    xfree(v->arch.vm_event);
+    v->arch.vm_event = NULL;
 
     if ( is_pv_32bit_vcpu(v) )
     {
@@ -668,9 +665,6 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
 
 void arch_domain_destroy(struct domain *d)
 {
-    vfree(d->arch.event_write_data);
-    d->arch.event_write_data = NULL;
-
     if ( has_hvm_container_domain(d) )
         hvm_domain_destroy(d);
 
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 30acb78..5934c72 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -22,6 +22,7 @@
 #include <asm/hvm/trace.h>
 #include <asm/hvm/support.h>
 #include <asm/hvm/svm/svm.h>
+#include <asm/vm_event.h>
 
 static void hvmtrace_io_assist(const ioreq_t *p)
 {
@@ -71,12 +72,12 @@ static int set_context_data(void *buffer, unsigned int size)
 {
     struct vcpu *curr = current;
 
-    if ( curr->arch.vm_event.emul_read_data )
+    if ( curr->arch.vm_event )
     {
         unsigned int safe_size =
-            min(size, curr->arch.vm_event.emul_read_data->size);
+            min(size, curr->arch.vm_event->emul_read_data.size);
 
-        memcpy(buffer, curr->arch.vm_event.emul_read_data->data, safe_size);
+        memcpy(buffer, curr->arch.vm_event->emul_read_data.data, safe_size);
         memset(buffer + safe_size, 0, size - safe_size);
         return X86EMUL_OKAY;
     }
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index c957610..6f92871 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -63,6 +63,7 @@
 #include <asm/altp2m.h>
 #include <asm/mtrr.h>
 #include <asm/apic.h>
+#include <asm/vm_event.h>
 #include <public/sched.h>
 #include <public/hvm/ioreq.h>
 #include <public/version.h>
@@ -541,9 +542,9 @@ void hvm_do_resume(struct vcpu *v)
         break;
     }
 
-    if ( unlikely(d->arch.event_write_data) )
+    if ( unlikely(v->arch.vm_event) )
     {
-        struct monitor_write_data *w = &d->arch.event_write_data[v->vcpu_id];
+        struct monitor_write_data *w = &v->arch.vm_event->write_data;
 
         if ( w->do_write.msr )
         {
@@ -3337,7 +3338,6 @@ int hvm_set_cr0(unsigned long value, bool_t may_defer)
     struct domain *d = v->domain;
     unsigned long gfn, old_value = v->arch.hvm_vcpu.guest_cr[0];
     struct page_info *page;
-    struct arch_domain *currad = &v->domain->arch;
 
     HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
 
@@ -3367,17 +3367,17 @@ int hvm_set_cr0(unsigned long value, bool_t may_defer)
         goto gpf;
     }
 
-    if ( may_defer && unlikely(currad->monitor.write_ctrlreg_enabled &
+    if ( may_defer && unlikely(v->domain->arch.monitor.write_ctrlreg_enabled &
                                monitor_ctrlreg_bitmask(VM_EVENT_X86_CR0)) &&
          value != old_value )
     {
-        ASSERT(currad->event_write_data != NULL);
+        ASSERT(v->arch.vm_event);
 
         if ( hvm_event_crX(CR0, value, old_value) )
         {
             /* The actual write will occur in hvm_do_resume(), if permitted. */
-            currad->event_write_data[v->vcpu_id].do_write.cr0 = 1;
-            currad->event_write_data[v->vcpu_id].cr0 = value;
+            v->arch.vm_event->write_data.do_write.cr0 = 1;
+            v->arch.vm_event->write_data.cr0 = value;
 
             return X86EMUL_OKAY;
         }
@@ -3469,19 +3469,18 @@ int hvm_set_cr3(unsigned long value, bool_t may_defer)
     struct vcpu *v = current;
     struct page_info *page;
     unsigned long old = v->arch.hvm_vcpu.guest_cr[3];
-    struct arch_domain *currad = &v->domain->arch;
 
-    if ( may_defer && unlikely(currad->monitor.write_ctrlreg_enabled &
+    if ( may_defer && unlikely(v->domain->arch.monitor.write_ctrlreg_enabled &
                                monitor_ctrlreg_bitmask(VM_EVENT_X86_CR3)) &&
          value != old )
     {
-        ASSERT(currad->event_write_data != NULL);
+        ASSERT(v->arch.vm_event);
 
         if ( hvm_event_crX(CR3, value, old) )
         {
             /* The actual write will occur in hvm_do_resume(), if permitted. */
-            currad->event_write_data[v->vcpu_id].do_write.cr3 = 1;
-            currad->event_write_data[v->vcpu_id].cr3 = value;
+            v->arch.vm_event->write_data.do_write.cr3 = 1;
+            v->arch.vm_event->write_data.cr3 = value;
 
             return X86EMUL_OKAY;
         }
@@ -3517,7 +3516,6 @@ int hvm_set_cr4(unsigned long value, bool_t may_defer)
 {
     struct vcpu *v = current;
     unsigned long old_cr;
-    struct arch_domain *currad = &v->domain->arch;
 
     if ( value & hvm_cr4_guest_reserved_bits(v, 0) )
     {
@@ -3545,17 +3543,17 @@ int hvm_set_cr4(unsigned long value, bool_t may_defer)
         goto gpf;
     }
 
-    if ( may_defer && unlikely(currad->monitor.write_ctrlreg_enabled &
+    if ( may_defer && unlikely(v->domain->arch.monitor.write_ctrlreg_enabled &
                                monitor_ctrlreg_bitmask(VM_EVENT_X86_CR4)) &&
          value != old_cr )
     {
-        ASSERT(currad->event_write_data != NULL);
+        ASSERT(v->arch.vm_event);
 
         if ( hvm_event_crX(CR4, value, old_cr) )
         {
             /* The actual write will occur in hvm_do_resume(), if permitted. */
-            currad->event_write_data[v->vcpu_id].do_write.cr4 = 1;
-            currad->event_write_data[v->vcpu_id].cr4 = value;
+            v->arch.vm_event->write_data.do_write.cr4 = 1;
+            v->arch.vm_event->write_data.cr4 = value;
 
             return X86EMUL_OKAY;
         }
@@ -4744,12 +4742,12 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
 
     if ( may_defer && unlikely(currad->monitor.mov_to_msr_enabled) )
     {
-        ASSERT(currad->event_write_data != NULL);
+        ASSERT(v->arch.vm_event);
 
         /* The actual write will occur in hvm_do_resume() (if permitted). */
-        currad->event_write_data[v->vcpu_id].do_write.msr = 1;
-        currad->event_write_data[v->vcpu_id].msr = msr;
-        currad->event_write_data[v->vcpu_id].value = msr_content;
+        v->arch.vm_event->write_data.do_write.msr = 1;
+        v->arch.vm_event->write_data.msr = msr;
+        v->arch.vm_event->write_data.value = msr_content;
 
         hvm_event_msr(msr, msr_content);
         return X86EMUL_OKAY;
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 7aafacc..0ac56ce 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -36,6 +36,7 @@
 #include <asm/hvm/nestedhvm.h>
 #include <asm/altp2m.h>
 #include <asm/hvm/svm/amd-iommu-proto.h>
+#include <asm/vm_event.h>
 #include <xsm/xsm.h>
 
 #include "mm-locks.h"
@@ -1551,11 +1552,10 @@ void p2m_mem_access_emulate_check(struct vcpu *v,
             }
         }
 
-        v->arch.vm_event.emulate_flags = violation ? rsp->flags : 0;
+        v->arch.vm_event->emulate_flags = violation ? rsp->flags : 0;
 
-        if ( (rsp->flags & VM_EVENT_FLAG_SET_EMUL_READ_DATA) &&
-             v->arch.vm_event.emul_read_data )
-            *v->arch.vm_event.emul_read_data = rsp->data.emul_read_data;
+        if ( rsp->flags & VM_EVENT_FLAG_SET_EMUL_READ_DATA )
+            v->arch.vm_event->emul_read_data = rsp->data.emul_read_data;
     }
 }
 
@@ -1638,34 +1638,35 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
     }
 
     /* The previous vm_event reply does not match the current state. */
-    if ( v->arch.vm_event.gpa != gpa || v->arch.vm_event.eip != eip )
+    if ( unlikely(v->arch.vm_event) &&
+         (v->arch.vm_event->gpa != gpa || v->arch.vm_event->eip != eip) )
     {
         /* Don't emulate the current instruction, send a new vm_event. */
-        v->arch.vm_event.emulate_flags = 0;
+        v->arch.vm_event->emulate_flags = 0;
 
         /*
          * Make sure to mark the current state to match it again against
          * the new vm_event about to be sent.
          */
-        v->arch.vm_event.gpa = gpa;
-        v->arch.vm_event.eip = eip;
+        v->arch.vm_event->gpa = gpa;
+        v->arch.vm_event->eip = eip;
     }
 
-    if ( v->arch.vm_event.emulate_flags )
+    if ( unlikely(v->arch.vm_event) && v->arch.vm_event->emulate_flags )
     {
         enum emul_kind kind = EMUL_KIND_NORMAL;
 
-        if ( v->arch.vm_event.emulate_flags &
+        if ( v->arch.vm_event->emulate_flags &
              VM_EVENT_FLAG_SET_EMUL_READ_DATA )
             kind = EMUL_KIND_SET_CONTEXT;
-        else if ( v->arch.vm_event.emulate_flags &
+        else if ( v->arch.vm_event->emulate_flags &
                   VM_EVENT_FLAG_EMULATE_NOWRITE )
             kind = EMUL_KIND_NOWRITE;
 
         hvm_mem_access_emulate_one(kind, TRAP_invalid_op,
                                    HVM_DELIVER_NO_ERROR_CODE);
 
-        v->arch.vm_event.emulate_flags = 0;
+        v->arch.vm_event->emulate_flags = 0;
         return 1;
     }
 
diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
index b32a839..ecbcdfa 100644
--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ -27,22 +27,14 @@ int vm_event_init_domain(struct domain *d)
 {
     struct vcpu *v;
 
-    if ( !d->arch.event_write_data )
-        d->arch.event_write_data =
-            vzalloc(sizeof(struct monitor_write_data) * d->max_vcpus);
-
-    if ( !d->arch.event_write_data )
-        return -ENOMEM;
-
     for_each_vcpu ( d, v )
     {
-        if ( v->arch.vm_event.emul_read_data )
+        if ( v->arch.vm_event )
             continue;
 
-        v->arch.vm_event.emul_read_data =
-            xzalloc(struct vm_event_emul_read_data);
+        v->arch.vm_event = xzalloc(struct vm_event);
 
-        if ( !v->arch.vm_event.emul_read_data )
+        if ( !v->arch.vm_event )
             return -ENOMEM;
     }
 
@@ -57,13 +49,10 @@ void vm_event_cleanup_domain(struct domain *d)
 {
     struct vcpu *v;
 
-    vfree(d->arch.event_write_data);
-    d->arch.event_write_data = NULL;
-
     for_each_vcpu ( d, v )
     {
-        xfree(v->arch.vm_event.emul_read_data);
-        v->arch.vm_event.emul_read_data = NULL;
+        xfree(v->arch.vm_event);
+        v->arch.vm_event = NULL;
     }
 }
 
@@ -79,10 +68,9 @@ void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp)
 {
     if ( rsp->flags & VM_EVENT_FLAG_DENY )
     {
-        struct monitor_write_data *w =
-            &v->domain->arch.event_write_data[v->vcpu_id];
+        struct monitor_write_data *w = &v->arch.vm_event->write_data;
 
-        ASSERT(v->domain->arch.event_write_data != NULL);
+        ASSERT(v->arch.vm_event);
 
         switch ( rsp->reason )
         {
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 0fce09e..9f62463 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -385,8 +385,6 @@ struct arch_domain
 
     /* Mem_access emulation control */
     bool_t mem_access_emulate_enabled;
-
-    struct monitor_write_data *event_write_data;
 } __cacheline_aligned;
 
 #define has_arch_pdevs(d)    (!list_empty(&(d)->arch.pdev_list))
@@ -533,16 +531,7 @@ struct arch_vcpu
     /* A secondary copy of the vcpu time info. */
     XEN_GUEST_HANDLE(vcpu_time_info_t) time_info_guest;
 
-    /*
-     * Should we emulate the next matching instruction on VCPU resume
-     * after a vm_event?
-     */
-    struct {
-        uint32_t emulate_flags;
-        unsigned long gpa;
-        unsigned long eip;
-        struct vm_event_emul_read_data *emul_read_data;
-    } vm_event;
+    struct vm_event *vm_event;
 };
 
 smap_check_policy_t smap_policy_change(struct vcpu *v,
diff --git a/xen/include/asm-x86/vm_event.h b/xen/include/asm-x86/vm_event.h
index 0ae5952..17ba4eb 100644
--- a/xen/include/asm-x86/vm_event.h
+++ b/xen/include/asm-x86/vm_event.h
@@ -22,6 +22,18 @@
 #include <xen/sched.h>
 #include <xen/vm_event.h>
 
+/*
+ * Should we emulate the next matching instruction on VCPU resume
+ * after a vm_event?
+ */
+struct vm_event {
+    uint32_t emulate_flags;
+    unsigned long gpa;
+    unsigned long eip;
+    struct vm_event_emul_read_data emul_read_data;
+    struct monitor_write_data write_data;
+};
+
 int vm_event_init_domain(struct domain *d);
 
 void vm_event_cleanup_domain(struct domain *d);
-- 
1.7.9.5


