
[Xen-devel] [RFC PATCH V2 4/8] x86/hvm: rename hvm_memory_event_* functions to hvm_event_*



The function names currently imply that these events are delivered via the
memory_event subsystem. However, the naming is confusing, as these events
have nothing to do with actual memory events. Naming these functions
hvm_event_* describes their purpose more accurately.

Signed-off-by: Tamas K Lengyel <tamas.lengyel@xxxxxxxxxxxx>
---
 docs/misc/pvh-readme.txt      |  2 +-
 xen/arch/x86/hvm/hvm.c        | 50 +++++++++++++++++++++----------------------
 xen/arch/x86/hvm/vmx/vmx.c    |  6 +++---
 xen/include/asm-x86/hvm/hvm.h | 12 +++++------
 4 files changed, 35 insertions(+), 35 deletions(-)
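
Note (illustration only, not part of the patch): callers keep the existing
calling convention; only the names change. Below is a minimal sketch of how
the renamed int3 hook is meant to be consumed, mirroring the vmx.c hunk
further down; reflect_breakpoint() is a hypothetical stand-in for the exit
handler's existing fallback path:

    /* Sketch of a caller of the renamed API (illustrative only). */
    static void example_int3_exit(struct cpu_user_regs *regs)
    {
        /* hvm_event_int3() returns -1 when no mem_event listener is present. */
        int handled = hvm_event_int3(regs->eip);

        if ( handled < 0 )
            reflect_breakpoint(regs); /* hypothetical: the usual #BP handling */
        /* Otherwise the event has been placed on the monitor ring. */
    }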

diff --git a/docs/misc/pvh-readme.txt b/docs/misc/pvh-readme.txt
index c5b3de4..bbd9dbe 100644
--- a/docs/misc/pvh-readme.txt
+++ b/docs/misc/pvh-readme.txt
@@ -49,7 +49,7 @@ Following remain to be done for PVH:
    - AMD port.
    - 32bit PVH guest support in both linux and xen. Xen changes are tagged
      "32bitfixme".
-   - Add support for monitoring guest behavior. See hvm_memory_event* functions
+   - Add support for monitoring guest behavior. See hvm_event* functions
      in hvm.c
    - vcpu hotplug support
    - Live migration of PVH guests.
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index f936d51..1968865 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3229,7 +3229,7 @@ int hvm_set_cr0(unsigned long value)
         hvm_funcs.handle_cd(v, value);
 
     hvm_update_cr(v, 0, value);
-    hvm_memory_event_cr0(value, old_value);
+    hvm_event_cr0(value, old_value);
 
     if ( (value ^ old_value) & X86_CR0_PG ) {
         if ( !nestedhvm_vmswitch_in_progress(v) && nestedhvm_vcpu_in_guestmode(v) )
@@ -3270,7 +3270,7 @@ int hvm_set_cr3(unsigned long value)
     old=v->arch.hvm_vcpu.guest_cr[3];
     v->arch.hvm_vcpu.guest_cr[3] = value;
     paging_update_cr3(v);
-    hvm_memory_event_cr3(value, old);
+    hvm_event_cr3(value, old);
     return X86EMUL_OKAY;
 
  bad_cr3:
@@ -3311,7 +3311,7 @@ int hvm_set_cr4(unsigned long value)
     }
 
     hvm_update_cr(v, 4, value);
-    hvm_memory_event_cr4(value, old_cr);
+    hvm_event_cr4(value, old_cr);
 
     /*
      * Modifying CR4.{PSE,PAE,PGE,SMEP}, or clearing CR4.PCIDE
@@ -4457,7 +4457,7 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
     hvm_cpuid(1, NULL, NULL, NULL, &edx);
     mtrr = !!(edx & cpufeat_mask(X86_FEATURE_MTRR));
 
-    hvm_memory_event_msr(msr, msr_content);
+    hvm_event_msr(msr, msr_content);
 
     switch ( msr )
     {
@@ -6171,7 +6171,7 @@ int hvm_debug_op(struct vcpu *v, int32_t op)
     return rc;
 }
 
-static void hvm_mem_event_fill_regs(mem_event_request_t *req)
+static void hvm_event_fill_regs(mem_event_request_t *req)
 {
     const struct cpu_user_regs *regs = guest_cpu_user_regs();
     const struct vcpu *curr = current;
@@ -6203,7 +6203,7 @@ static void hvm_mem_event_fill_regs(mem_event_request_t *req)
     req->regs.x86.cr4 = curr->arch.hvm_vcpu.guest_cr[4];
 }
 
-static int hvm_memory_event_traps(long parameters, mem_event_request_t *req)
+static int hvm_event_traps(long parameters, mem_event_request_t *req)
 {
     int rc;
     struct vcpu *v = current;
@@ -6228,13 +6228,13 @@ static int hvm_memory_event_traps(long parameters, mem_event_request_t *req)
         mem_event_vcpu_pause(v);
     }
 
-    hvm_mem_event_fill_regs(req);
+    hvm_event_fill_regs(req);
     mem_event_put_request(d, &d->mem_event->monitor, req);
 
     return 1;
 }
 
-void hvm_memory_event_cr0(unsigned long value, unsigned long old) 
+void hvm_event_cr0(unsigned long value, unsigned long old)
 {
     mem_event_request_t req = {
         .reason = MEM_EVENT_REASON_CR0,
@@ -6249,10 +6249,10 @@ void hvm_memory_event_cr0(unsigned long value, unsigned long old)
     if ( (parameters & HVMPME_onchangeonly) && (value == old) )
         return;
 
-    hvm_memory_event_traps(parameters, &req);
+    hvm_event_traps(parameters, &req);
 }
 
-void hvm_memory_event_cr3(unsigned long value, unsigned long old) 
+void hvm_event_cr3(unsigned long value, unsigned long old)
 {
     mem_event_request_t req = {
         .reason = MEM_EVENT_REASON_CR3,
@@ -6267,10 +6267,10 @@ void hvm_memory_event_cr3(unsigned long value, unsigned long old)
     if ( (parameters & HVMPME_onchangeonly) && (value == old) )
         return;
 
-    hvm_memory_event_traps(parameters, &req);
+    hvm_event_traps(parameters, &req);
 }
 
-void hvm_memory_event_cr4(unsigned long value, unsigned long old) 
+void hvm_event_cr4(unsigned long value, unsigned long old)
 {
     mem_event_request_t req = {
         .reason = MEM_EVENT_REASON_CR4,
@@ -6285,10 +6285,10 @@ void hvm_memory_event_cr4(unsigned long value, unsigned long old)
     if ( (parameters & HVMPME_onchangeonly) && (value == old) )
         return;
 
-    hvm_memory_event_traps(parameters, &req);
+    hvm_event_traps(parameters, &req);
 }
 
-void hvm_memory_event_msr(unsigned long msr, unsigned long value)
+void hvm_event_msr(unsigned long msr, unsigned long value)
 {
     mem_event_request_t req = {
         .reason = MEM_EVENT_REASON_MSR,
@@ -6297,12 +6297,12 @@ void hvm_memory_event_msr(unsigned long msr, unsigned long value)
         .msr_event.new_value = value,
     };
 
-    hvm_memory_event_traps(current->domain->arch.hvm_domain
-                            .params[HVM_PARAM_MEMORY_EVENT_MSR],
-                           &req);
+    hvm_event_traps(current->domain->arch.hvm_domain
+                        .params[HVM_PARAM_MEMORY_EVENT_MSR],
+                    &req);
 }
 
-int hvm_memory_event_int3(unsigned long gla) 
+int hvm_event_int3(unsigned long gla)
 {
     uint32_t pfec = PFEC_page_present;
     mem_event_request_t req = {
@@ -6312,12 +6312,12 @@ int hvm_memory_event_int3(unsigned long gla)
         .int3_event.gfn = paging_gva_to_gfn(current, gla, &pfec)
     };
 
-    return hvm_memory_event_traps(current->domain->arch.hvm_domain
-                                   .params[HVM_PARAM_MEMORY_EVENT_INT3],
-                                  &req);
+    return hvm_event_traps(current->domain->arch.hvm_domain
+                            .params[HVM_PARAM_MEMORY_EVENT_INT3],
+                           &req);
 }
 
-int hvm_memory_event_single_step(unsigned long gla)
+int hvm_event_single_step(unsigned long gla)
 {
     uint32_t pfec = PFEC_page_present;
     mem_event_request_t req = {
@@ -6327,9 +6327,9 @@ int hvm_memory_event_single_step(unsigned long gla)
         .singlestep_event.gfn = paging_gva_to_gfn(current, gla, &pfec)
     };
 
-    return hvm_memory_event_traps(current->domain->arch.hvm_domain
-                                   .params[HVM_PARAM_MEMORY_EVENT_SINGLE_STEP],
-                                  &req);
+    return hvm_event_traps(current->domain->arch.hvm_domain
+                            .params[HVM_PARAM_MEMORY_EVENT_SINGLE_STEP],
+                           &req);
 }
 
 int nhvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index f2554d6..d2c39f4 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1967,7 +1967,7 @@ static int vmx_cr_access(unsigned long exit_qualification)
         unsigned long old = curr->arch.hvm_vcpu.guest_cr[0];
         curr->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS;
         vmx_update_guest_cr(curr, 0);
-        hvm_memory_event_cr0(curr->arch.hvm_vcpu.guest_cr[0], old);
+        hvm_event_cr0(curr->arch.hvm_vcpu.guest_cr[0], old);
         HVMTRACE_0D(CLTS);
         break;
     }
@@ -2816,7 +2816,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
                 break;
             }
             else {
-                int handled = hvm_memory_event_int3(regs->eip);
+                int handled = hvm_event_int3(regs->eip);
                 
                 if ( handled < 0 ) 
                 {
@@ -3133,7 +3133,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
         v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MONITOR_TRAP_FLAG;
         vmx_update_cpu_exec_control(v);
         if ( v->arch.hvm_vcpu.single_step ) {
-          hvm_memory_event_single_step(regs->eip);
+          hvm_event_single_step(regs->eip);
           if ( v->domain->debugger_attached )
               domain_pause_for_debugger();
         }
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index e3d2d9a..5ac390b 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -474,15 +474,15 @@ int hvm_x2apic_msr_read(struct vcpu *v, unsigned int msr, uint64_t *msr_content)
 int hvm_x2apic_msr_write(struct vcpu *v, unsigned int msr, uint64_t msr_content);
 
 /* Called for current VCPU on crX changes by guest */
-void hvm_memory_event_cr0(unsigned long value, unsigned long old);
-void hvm_memory_event_cr3(unsigned long value, unsigned long old);
-void hvm_memory_event_cr4(unsigned long value, unsigned long old);
-void hvm_memory_event_msr(unsigned long msr, unsigned long value);
+void hvm_event_cr0(unsigned long value, unsigned long old);
+void hvm_event_cr3(unsigned long value, unsigned long old);
+void hvm_event_cr4(unsigned long value, unsigned long old);
+void hvm_event_msr(unsigned long msr, unsigned long value);
 /* Called for current VCPU on int3: returns -1 if no listener */
-int hvm_memory_event_int3(unsigned long gla);
+int hvm_event_int3(unsigned long gla);
 
 /* Called for current VCPU on single step: returns -1 if no listener */
-int hvm_memory_event_single_step(unsigned long gla);
+int hvm_event_single_step(unsigned long gla);
 
 /*
  * Nested HVM
-- 
2.1.4

