|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH RFC 4/7] x86/hvm: rename hvm_memory_event_* functions to hvm_event_*
The function names currently imply that these events are to be delivered via
the memory_event subsystem. However, the naming is confusing as these events
have nothing to do with actual memory events. Simply naming these functions
hvm_event_* more accurately describes their usage.
Signed-off-by: Tamas K Lengyel <tamas.lengyel@xxxxxxxxxxxx>
---
docs/misc/pvh-readme.txt | 2 +-
xen/arch/x86/hvm/hvm.c | 50 +++++++++++++++++++++----------------------
xen/arch/x86/hvm/vmx/vmx.c | 6 +++---
xen/include/asm-x86/hvm/hvm.h | 12 +++++------
4 files changed, 35 insertions(+), 35 deletions(-)
diff --git a/docs/misc/pvh-readme.txt b/docs/misc/pvh-readme.txt
index c5b3de4..bbd9dbe 100644
--- a/docs/misc/pvh-readme.txt
+++ b/docs/misc/pvh-readme.txt
@@ -49,7 +49,7 @@ Following remain to be done for PVH:
- AMD port.
- 32bit PVH guest support in both linux and xen. Xen changes are tagged
"32bitfixme".
- - Add support for monitoring guest behavior. See hvm_memory_event* functions
+ - Add support for monitoring guest behavior. See hvm_event* functions
in hvm.c
- vcpu hotplug support
- Live migration of PVH guests.
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index b34cdbd..9140a2a 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3230,7 +3230,7 @@ int hvm_set_cr0(unsigned long value)
hvm_funcs.handle_cd(v, value);
hvm_update_cr(v, 0, value);
- hvm_memory_event_cr0(value, old_value);
+ hvm_event_cr0(value, old_value);
if ( (value ^ old_value) & X86_CR0_PG ) {
if ( !nestedhvm_vmswitch_in_progress(v) &&
nestedhvm_vcpu_in_guestmode(v) )
@@ -3271,7 +3271,7 @@ int hvm_set_cr3(unsigned long value)
old=v->arch.hvm_vcpu.guest_cr[3];
v->arch.hvm_vcpu.guest_cr[3] = value;
paging_update_cr3(v);
- hvm_memory_event_cr3(value, old);
+ hvm_event_cr3(value, old);
return X86EMUL_OKAY;
bad_cr3:
@@ -3312,7 +3312,7 @@ int hvm_set_cr4(unsigned long value)
}
hvm_update_cr(v, 4, value);
- hvm_memory_event_cr4(value, old_cr);
+ hvm_event_cr4(value, old_cr);
/*
* Modifying CR4.{PSE,PAE,PGE,SMEP}, or clearing CR4.PCIDE
@@ -4458,7 +4458,7 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t
msr_content)
hvm_cpuid(1, NULL, NULL, NULL, &edx);
mtrr = !!(edx & cpufeat_mask(X86_FEATURE_MTRR));
- hvm_memory_event_msr(msr, msr_content);
+ hvm_event_msr(msr, msr_content);
switch ( msr )
{
@@ -6153,7 +6153,7 @@ int hvm_debug_op(struct vcpu *v, int32_t op)
return rc;
}
-static void hvm_mem_event_fill_regs(mem_event_request_t *req)
+static void hvm_event_fill_regs(mem_event_request_t *req)
{
const struct cpu_user_regs *regs = guest_cpu_user_regs();
const struct vcpu *curr = current;
@@ -6185,7 +6185,7 @@ static void hvm_mem_event_fill_regs(mem_event_request_t
*req)
req->x86_regs.cr4 = curr->arch.hvm_vcpu.guest_cr[4];
}
-static int hvm_memory_event_traps(long parameters, mem_event_request_t *req)
+static int hvm_event_traps(long parameters, mem_event_request_t *req)
{
int rc;
struct vcpu *v = current;
@@ -6210,13 +6210,13 @@ static int hvm_memory_event_traps(long parameters,
mem_event_request_t *req)
mem_event_vcpu_pause(v);
}
- hvm_mem_event_fill_regs(req);
+ hvm_event_fill_regs(req);
mem_event_put_request(d, &d->mem_event->monitor, req);
return 1;
}
-void hvm_memory_event_cr0(unsigned long value, unsigned long old)
+void hvm_event_cr0(unsigned long value, unsigned long old)
{
mem_event_request_t req = {
.reason = MEM_EVENT_REASON_CR0,
@@ -6231,10 +6231,10 @@ void hvm_memory_event_cr0(unsigned long value, unsigned
long old)
if ( (parameters & HVMPME_onchangeonly) && (value == old) )
return;
- hvm_memory_event_traps(parameters, &req);
+ hvm_event_traps(parameters, &req);
}
-void hvm_memory_event_cr3(unsigned long value, unsigned long old)
+void hvm_event_cr3(unsigned long value, unsigned long old)
{
mem_event_request_t req = {
.reason = MEM_EVENT_REASON_CR3,
@@ -6249,10 +6249,10 @@ void hvm_memory_event_cr3(unsigned long value, unsigned
long old)
if ( (parameters & HVMPME_onchangeonly) && (value == old) )
return;
- hvm_memory_event_traps(parameters, &req);
+ hvm_event_traps(parameters, &req);
}
-void hvm_memory_event_cr4(unsigned long value, unsigned long old)
+void hvm_event_cr4(unsigned long value, unsigned long old)
{
mem_event_request_t req = {
.reason = MEM_EVENT_REASON_CR4,
@@ -6267,10 +6267,10 @@ void hvm_memory_event_cr4(unsigned long value, unsigned
long old)
if ( (parameters & HVMPME_onchangeonly) && (value == old) )
return;
- hvm_memory_event_traps(parameters, &req);
+ hvm_event_traps(parameters, &req);
}
-void hvm_memory_event_msr(unsigned long msr, unsigned long value)
+void hvm_event_msr(unsigned long msr, unsigned long value)
{
mem_event_request_t req = {
.reason = MEM_EVENT_REASON_MSR,
@@ -6279,12 +6279,12 @@ void hvm_memory_event_msr(unsigned long msr, unsigned
long value)
.msr_event.new_value = value,
};
- hvm_memory_event_traps(current->domain->arch.hvm_domain
- .params[HVM_PARAM_MEMORY_EVENT_MSR],
- &req);
+ hvm_event_traps(current->domain->arch.hvm_domain
+ .params[HVM_PARAM_MEMORY_EVENT_MSR],
+ &req);
}
-int hvm_memory_event_int3(unsigned long gla)
+int hvm_event_int3(unsigned long gla)
{
uint32_t pfec = PFEC_page_present;
mem_event_request_t req = {
@@ -6294,12 +6294,12 @@ int hvm_memory_event_int3(unsigned long gla)
.int3_event.gfn = paging_gva_to_gfn(current, gla, &pfec)
};
- return hvm_memory_event_traps(current->domain->arch.hvm_domain
- .params[HVM_PARAM_MEMORY_EVENT_INT3],
- &req);
+ return hvm_event_traps(current->domain->arch.hvm_domain
+ .params[HVM_PARAM_MEMORY_EVENT_INT3],
+ &req);
}
-int hvm_memory_event_single_step(unsigned long gla)
+int hvm_event_single_step(unsigned long gla)
{
uint32_t pfec = PFEC_page_present;
mem_event_request_t req = {
@@ -6309,9 +6309,9 @@ int hvm_memory_event_single_step(unsigned long gla)
.singlestep_event.gfn = paging_gva_to_gfn(current, gla, &pfec)
};
- return hvm_memory_event_traps(current->domain->arch.hvm_domain
- .params[HVM_PARAM_MEMORY_EVENT_SINGLE_STEP],
- &req);
+ return hvm_event_traps(current->domain->arch.hvm_domain
+ .params[HVM_PARAM_MEMORY_EVENT_SINGLE_STEP],
+ &req);
}
int nhvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 0bf92b2..0bb8b38 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1967,7 +1967,7 @@ static int vmx_cr_access(unsigned long exit_qualification)
unsigned long old = curr->arch.hvm_vcpu.guest_cr[0];
curr->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS;
vmx_update_guest_cr(curr, 0);
- hvm_memory_event_cr0(curr->arch.hvm_vcpu.guest_cr[0], old);
+ hvm_event_cr0(curr->arch.hvm_vcpu.guest_cr[0], old);
HVMTRACE_0D(CLTS);
break;
}
@@ -2816,7 +2816,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
break;
}
else {
- int handled = hvm_memory_event_int3(regs->eip);
+ int handled = hvm_event_int3(regs->eip);
if ( handled < 0 )
{
@@ -3132,7 +3132,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MONITOR_TRAP_FLAG;
vmx_update_cpu_exec_control(v);
if ( v->arch.hvm_vcpu.single_step ) {
- hvm_memory_event_single_step(regs->eip);
+ hvm_event_single_step(regs->eip);
if ( v->domain->debugger_attached )
domain_pause_for_debugger();
}
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index e3d2d9a..5ac390b 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -474,15 +474,15 @@ int hvm_x2apic_msr_read(struct vcpu *v, unsigned int msr,
uint64_t *msr_content)
int hvm_x2apic_msr_write(struct vcpu *v, unsigned int msr, uint64_t
msr_content);
/* Called for current VCPU on crX changes by guest */
-void hvm_memory_event_cr0(unsigned long value, unsigned long old);
-void hvm_memory_event_cr3(unsigned long value, unsigned long old);
-void hvm_memory_event_cr4(unsigned long value, unsigned long old);
-void hvm_memory_event_msr(unsigned long msr, unsigned long value);
+void hvm_event_cr0(unsigned long value, unsigned long old);
+void hvm_event_cr3(unsigned long value, unsigned long old);
+void hvm_event_cr4(unsigned long value, unsigned long old);
+void hvm_event_msr(unsigned long msr, unsigned long value);
/* Called for current VCPU on int3: returns -1 if no listener */
-int hvm_memory_event_int3(unsigned long gla);
+int hvm_event_int3(unsigned long gla);
/* Called for current VCPU on single step: returns -1 if no listener */
-int hvm_memory_event_single_step(unsigned long gla);
+int hvm_event_single_step(unsigned long gla);
/*
* Nested HVM
--
2.1.1
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |