[Xen-devel] [PATCH] xen/x86: Clean up vm_event-related code in asm-x86/domain.h
As suggested by Jan Beulich, this patch moves struct monitor_write_data from
struct arch_domain to struct arch_vcpu, gathers the per-vCPU vm_event-related
data in asm-x86/domain.h into a new struct vm_event, and allocates that
structure dynamically only when needed.
Signed-off-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
---
xen/arch/x86/domain.c | 10 ++--------
xen/arch/x86/hvm/emulate.c | 6 +++---
xen/arch/x86/hvm/hvm.c | 32 ++++++++++++++++----------------
xen/arch/x86/mm/p2m.c | 24 ++++++++++++------------
xen/arch/x86/vm_event.c | 26 +++++++-------------------
xen/include/asm-x86/domain.h | 26 ++++++++++++++------------
6 files changed, 54 insertions(+), 70 deletions(-)
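In short, the per-domain event_write_data array and the per-vCPU
emul_read_data pointer are replaced by a single per-vCPU structure, allocated
only in vm_event_init_domain() and freed in vm_event_cleanup_domain() and
vcpu_destroy(), so domains that never enable vm_event pay no extra memory
cost. A condensed sketch of what the hunks below introduce:

    /* New per-vCPU container for vm_event state (asm-x86/domain.h). */
    struct vm_event {
        uint32_t emulate_flags;
        unsigned long gpa;
        unsigned long eip;
        struct vm_event_emul_read_data emul_read_data;
        struct monitor_write_data write_data;
    };

    /* vm_event_init_domain(): allocate lazily, once per vCPU. */
    for_each_vcpu ( d, v )
    {
        if ( v->arch.vm_event )
            continue;

        v->arch.vm_event = xzalloc(struct vm_event);
        if ( !v->arch.vm_event )
            return -ENOMEM;
    }

    /* vm_event_cleanup_domain() / vcpu_destroy(): a single xfree(). */
    xfree(v->arch.vm_event);
    v->arch.vm_event = NULL;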
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 045f6ff..58e173e 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -424,9 +424,6 @@ int vcpu_initialise(struct vcpu *v)
v->arch.flags = TF_kernel_mode;
- /* By default, do not emulate */
- v->arch.vm_event.emulate_flags = 0;
-
rc = mapcache_vcpu_init(v);
if ( rc )
return rc;
@@ -511,8 +508,8 @@ int vcpu_initialise(struct vcpu *v)
void vcpu_destroy(struct vcpu *v)
{
- xfree(v->arch.vm_event.emul_read_data);
- v->arch.vm_event.emul_read_data = NULL;
+ xfree(v->arch.vm_event);
+ v->arch.vm_event = NULL;
if ( is_pv_32bit_vcpu(v) )
{
@@ -668,9 +665,6 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
void arch_domain_destroy(struct domain *d)
{
- vfree(d->arch.event_write_data);
- d->arch.event_write_data = NULL;
-
if ( has_hvm_container_domain(d) )
hvm_domain_destroy(d);
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 30acb78..d0f98c8 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -71,12 +71,12 @@ static int set_context_data(void *buffer, unsigned int size)
{
struct vcpu *curr = current;
- if ( curr->arch.vm_event.emul_read_data )
+ if ( curr->arch.vm_event )
{
unsigned int safe_size =
- min(size, curr->arch.vm_event.emul_read_data->size);
+ min(size, curr->arch.vm_event->emul_read_data.size);
- memcpy(buffer, curr->arch.vm_event.emul_read_data->data, safe_size);
+ memcpy(buffer, curr->arch.vm_event->emul_read_data.data, safe_size);
memset(buffer + safe_size, 0, size - safe_size);
return X86EMUL_OKAY;
}
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index c957610..9bc698c 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -541,9 +541,9 @@ void hvm_do_resume(struct vcpu *v)
break;
}
- if ( unlikely(d->arch.event_write_data) )
+ if ( unlikely(v->arch.vm_event) )
{
- struct monitor_write_data *w = &d->arch.event_write_data[v->vcpu_id];
+ struct monitor_write_data *w = &v->arch.vm_event->write_data;
if ( w->do_write.msr )
{
@@ -571,7 +571,7 @@ void hvm_do_resume(struct vcpu *v)
}
/* Inject pending hw/sw trap */
- if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
+ if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
{
hvm_inject_trap(&v->arch.hvm_vcpu.inject_trap);
v->arch.hvm_vcpu.inject_trap.vector = -1;
@@ -3371,13 +3371,13 @@ int hvm_set_cr0(unsigned long value, bool_t may_defer)
monitor_ctrlreg_bitmask(VM_EVENT_X86_CR0)) &&
value != old_value )
{
- ASSERT(currad->event_write_data != NULL);
+ ASSERT(v->arch.vm_event != NULL);
if ( hvm_event_crX(CR0, value, old_value) )
{
/* The actual write will occur in hvm_do_resume(), if permitted. */
- currad->event_write_data[v->vcpu_id].do_write.cr0 = 1;
- currad->event_write_data[v->vcpu_id].cr0 = value;
+ v->arch.vm_event->write_data.do_write.cr0 = 1;
+ v->arch.vm_event->write_data.cr0 = value;
return X86EMUL_OKAY;
}
@@ -3475,13 +3475,13 @@ int hvm_set_cr3(unsigned long value, bool_t may_defer)
monitor_ctrlreg_bitmask(VM_EVENT_X86_CR3)) &&
value != old )
{
- ASSERT(currad->event_write_data != NULL);
+ ASSERT(v->arch.vm_event != NULL);
if ( hvm_event_crX(CR3, value, old) )
{
/* The actual write will occur in hvm_do_resume(), if permitted. */
- currad->event_write_data[v->vcpu_id].do_write.cr3 = 1;
- currad->event_write_data[v->vcpu_id].cr3 = value;
+ v->arch.vm_event->write_data.do_write.cr3 = 1;
+ v->arch.vm_event->write_data.cr3 = value;
return X86EMUL_OKAY;
}
@@ -3549,13 +3549,13 @@ int hvm_set_cr4(unsigned long value, bool_t may_defer)
monitor_ctrlreg_bitmask(VM_EVENT_X86_CR4)) &&
value != old_cr )
{
- ASSERT(currad->event_write_data != NULL);
+ ASSERT(v->arch.vm_event != NULL);
if ( hvm_event_crX(CR4, value, old_cr) )
{
/* The actual write will occur in hvm_do_resume(), if permitted. */
- currad->event_write_data[v->vcpu_id].do_write.cr4 = 1;
- currad->event_write_data[v->vcpu_id].cr4 = value;
+ v->arch.vm_event->write_data.do_write.cr4 = 1;
+ v->arch.vm_event->write_data.cr4 = value;
return X86EMUL_OKAY;
}
@@ -4744,12 +4744,12 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
if ( may_defer && unlikely(currad->monitor.mov_to_msr_enabled) )
{
- ASSERT(currad->event_write_data != NULL);
+ ASSERT(v->arch.vm_event != NULL);
/* The actual write will occur in hvm_do_resume() (if permitted). */
- currad->event_write_data[v->vcpu_id].do_write.msr = 1;
- currad->event_write_data[v->vcpu_id].msr = msr;
- currad->event_write_data[v->vcpu_id].value = msr_content;
+ v->arch.vm_event->write_data.do_write.msr = 1;
+ v->arch.vm_event->write_data.msr = msr;
+ v->arch.vm_event->write_data.value = msr_content;
hvm_event_msr(msr, msr_content);
return X86EMUL_OKAY;
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 7aafacc..530c47b 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1551,11 +1551,10 @@ void p2m_mem_access_emulate_check(struct vcpu *v,
}
}
- v->arch.vm_event.emulate_flags = violation ? rsp->flags : 0;
+ v->arch.vm_event->emulate_flags = violation ? rsp->flags : 0;
- if ( (rsp->flags & VM_EVENT_FLAG_SET_EMUL_READ_DATA) &&
- v->arch.vm_event.emul_read_data )
- *v->arch.vm_event.emul_read_data = rsp->data.emul_read_data;
+ if ( (rsp->flags & VM_EVENT_FLAG_SET_EMUL_READ_DATA) )
+ v->arch.vm_event->emul_read_data = rsp->data.emul_read_data;
}
}
@@ -1638,34 +1637,35 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
}
/* The previous vm_event reply does not match the current state. */
- if ( v->arch.vm_event.gpa != gpa || v->arch.vm_event.eip != eip )
+ if ( unlikely(v->arch.vm_event) &&
+ (v->arch.vm_event->gpa != gpa || v->arch.vm_event->eip != eip) )
{
/* Don't emulate the current instruction, send a new vm_event. */
- v->arch.vm_event.emulate_flags = 0;
+ v->arch.vm_event->emulate_flags = 0;
/*
* Make sure to mark the current state to match it again against
* the new vm_event about to be sent.
*/
- v->arch.vm_event.gpa = gpa;
- v->arch.vm_event.eip = eip;
+ v->arch.vm_event->gpa = gpa;
+ v->arch.vm_event->eip = eip;
}
- if ( v->arch.vm_event.emulate_flags )
+ if ( unlikely(v->arch.vm_event) && v->arch.vm_event->emulate_flags )
{
enum emul_kind kind = EMUL_KIND_NORMAL;
- if ( v->arch.vm_event.emulate_flags &
+ if ( v->arch.vm_event->emulate_flags &
VM_EVENT_FLAG_SET_EMUL_READ_DATA )
kind = EMUL_KIND_SET_CONTEXT;
- else if ( v->arch.vm_event.emulate_flags &
+ else if ( v->arch.vm_event->emulate_flags &
VM_EVENT_FLAG_EMULATE_NOWRITE )
kind = EMUL_KIND_NOWRITE;
hvm_mem_access_emulate_one(kind, TRAP_invalid_op,
HVM_DELIVER_NO_ERROR_CODE);
- v->arch.vm_event.emulate_flags = 0;
+ v->arch.vm_event->emulate_flags = 0;
return 1;
}
diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
index b32a839..27b875c 100644
--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ -27,22 +27,14 @@ int vm_event_init_domain(struct domain *d)
{
struct vcpu *v;
- if ( !d->arch.event_write_data )
- d->arch.event_write_data =
- vzalloc(sizeof(struct monitor_write_data) * d->max_vcpus);
-
- if ( !d->arch.event_write_data )
- return -ENOMEM;
-
for_each_vcpu ( d, v )
{
- if ( v->arch.vm_event.emul_read_data )
+ if ( v->arch.vm_event )
continue;
- v->arch.vm_event.emul_read_data =
- xzalloc(struct vm_event_emul_read_data);
+ v->arch.vm_event = xzalloc(struct vm_event);
- if ( !v->arch.vm_event.emul_read_data )
+ if ( !v->arch.vm_event )
return -ENOMEM;
}
@@ -57,13 +49,10 @@ void vm_event_cleanup_domain(struct domain *d)
{
struct vcpu *v;
- vfree(d->arch.event_write_data);
- d->arch.event_write_data = NULL;
-
for_each_vcpu ( d, v )
{
- xfree(v->arch.vm_event.emul_read_data);
- v->arch.vm_event.emul_read_data = NULL;
+ xfree(v->arch.vm_event);
+ v->arch.vm_event = NULL;
}
}
@@ -79,10 +68,9 @@ void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp)
{
if ( rsp->flags & VM_EVENT_FLAG_DENY )
{
- struct monitor_write_data *w =
- &v->domain->arch.event_write_data[v->vcpu_id];
+ struct monitor_write_data *w = &v->arch.vm_event->write_data;
- ASSERT(v->domain->arch.event_write_data != NULL);
+ ASSERT(w != NULL);
switch ( rsp->reason )
{
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 0fce09e..3500dd8 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -8,6 +8,7 @@
#include <asm/hvm/domain.h>
#include <asm/e820.h>
#include <asm/mce.h>
+#include <public/vm_event.h>
#include <public/vcpu.h>
#include <public/hvm/hvm_info_table.h>
@@ -385,8 +386,6 @@ struct arch_domain
/* Mem_access emulation control */
bool_t mem_access_emulate_enabled;
-
- struct monitor_write_data *event_write_data;
} __cacheline_aligned;
#define has_arch_pdevs(d) (!list_empty(&(d)->arch.pdev_list))
@@ -460,6 +459,18 @@ typedef enum __packed {
SMAP_CHECK_DISABLED, /* disable the check */
} smap_check_policy_t;
+/*
+ * Should we emulate the next matching instruction on VCPU resume
+ * after a vm_event?
+ */
+struct vm_event {
+ uint32_t emulate_flags;
+ unsigned long gpa;
+ unsigned long eip;
+ struct vm_event_emul_read_data emul_read_data;
+ struct monitor_write_data write_data;
+};
+
struct arch_vcpu
{
/*
@@ -533,16 +544,7 @@ struct arch_vcpu
/* A secondary copy of the vcpu time info. */
XEN_GUEST_HANDLE(vcpu_time_info_t) time_info_guest;
- /*
- * Should we emulate the next matching instruction on VCPU resume
- * after a vm_event?
- */
- struct {
- uint32_t emulate_flags;
- unsigned long gpa;
- unsigned long eip;
- struct vm_event_emul_read_data *emul_read_data;
- } vm_event;
+ struct vm_event *vm_event;
};
smap_check_policy_t smap_policy_change(struct vcpu *v,
--
1.7.9.5