[Xen-devel] [PATCH V3 3/3] xen/vm_event: Deny register writes if refused by vm_event reply
Deny register writes if a vm_event client subscribed to mov_to_msr or
control register write events forbids them in its event reply. Currently
supported for MSR, CR0, CR3 and CR4 events.
Signed-off-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
---
Changes since V2:
- Added a comment to explain why hvm_event_cr() returns bool_t where
the other event functions return void.
- Moved an ASSERT() to the beginning of hvm_do_resume() and wrapped
the d->arch.event_write_data test in an unlikely().
- Renamed 'event_only' to 'may_defer' in the hvm_set_<register>()
functions.
- Added a comment to, and fixed, the VMX_CONTROL_REG_ACCESS_TYPE_CLTS code.
- Now initializing CR4 before CR3 in hvm_do_resume().
- Moved an ASSERT() in p2m_mem_access_emulate_check().
- Changed uint64_t msr to uint32_t msr in struct monitor_write_data.
- Changed uint8_t to unsigned int in the do_write bitfield.
- Fixed MEM_ACCESS_DENY stale comment and stale patch description.
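
For reviewers unfamiliar with the consumer side, here is a minimal sketch (not
part of the patch) of how a vm_event client could use the new MEM_ACCESS_DENY
flag to veto a CR4 write. The handle_write_ctrlreg() name and the surrounding
ring plumbing are hypothetical (e.g. modelled on tools/tests/xen-access); only
the request/response fields and constants used below come from the public
vm_event.h as extended by this series:

/*
 * Hypothetical consumer-side handler: 'req' was just taken off the
 * vm_event ring, 'rsp' will be placed back on it.
 */
static void handle_write_ctrlreg(const vm_event_request_t *req,
                                 vm_event_response_t *rsp)
{
    memset(rsp, 0, sizeof(*rsp));
    rsp->version = VM_EVENT_INTERFACE_VERSION;
    rsp->vcpu_id = req->vcpu_id;
    rsp->reason = req->reason;        /* VM_EVENT_REASON_WRITE_CTRLREG */
    rsp->u.write_ctrlreg.index = req->u.write_ctrlreg.index;

    /*
     * Denying the event makes p2m_mem_access_emulate_check() clear the
     * corresponding do_write bit, so hvm_do_resume() never commits the write.
     */
    if ( req->u.write_ctrlreg.index == VM_EVENT_X86_CR4 )
        rsp->flags |= MEM_ACCESS_DENY;
}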
---
xen/arch/x86/domain.c | 2 +
xen/arch/x86/hvm/emulate.c | 8 +--
xen/arch/x86/hvm/event.c | 5 +-
xen/arch/x86/hvm/hvm.c | 120 ++++++++++++++++++++++++++++++++-----
xen/arch/x86/hvm/svm/nestedsvm.c | 14 ++---
xen/arch/x86/hvm/svm/svm.c | 2 +-
xen/arch/x86/hvm/vmx/vmx.c | 14 +++--
xen/arch/x86/hvm/vmx/vvmx.c | 18 +++---
xen/arch/x86/mm/p2m.c | 29 +++++++++
xen/common/vm_event.c | 6 ++
xen/include/asm-x86/domain.h | 18 +++++-
xen/include/asm-x86/hvm/event.h | 9 ++-
xen/include/asm-x86/hvm/support.h | 9 +--
xen/include/public/vm_event.h | 5 ++
14 files changed, 213 insertions(+), 46 deletions(-)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index cbbc354..8914aff 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -679,6 +679,8 @@ void arch_domain_destroy(struct domain *d)
cleanup_domain_irq_mapping(d);
psr_free_rmid(d);
+
+ xfree(d->arch.event_write_data);
}
void arch_domain_shutdown(struct domain *d)
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 3678e29..f0d05fa 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1377,14 +1377,14 @@ static int hvmemul_write_cr(
switch ( reg )
{
case 0:
- return hvm_set_cr0(val);
+ return hvm_set_cr0(val, 1);
case 2:
current->arch.hvm_vcpu.guest_cr[2] = val;
return X86EMUL_OKAY;
case 3:
- return hvm_set_cr3(val);
+ return hvm_set_cr3(val, 1);
case 4:
- return hvm_set_cr4(val);
+ return hvm_set_cr4(val, 1);
default:
break;
}
@@ -1405,7 +1405,7 @@ static int hvmemul_write_msr(
uint64_t val,
struct x86_emulate_ctxt *ctxt)
{
- return hvm_msr_write_intercept(reg, val);
+ return hvm_msr_write_intercept(reg, val, 1);
}
static int hvmemul_wbinvd(
diff --git a/xen/arch/x86/hvm/event.c b/xen/arch/x86/hvm/event.c
index 17638ea..042e583 100644
--- a/xen/arch/x86/hvm/event.c
+++ b/xen/arch/x86/hvm/event.c
@@ -90,7 +90,7 @@ static int hvm_event_traps(uint8_t sync, vm_event_request_t *req)
return 1;
}
-void hvm_event_cr(unsigned int index, unsigned long value, unsigned long old)
+bool_t hvm_event_cr(unsigned int index, unsigned long value, unsigned long old)
{
struct arch_domain *currad = &current->domain->arch;
unsigned int ctrlreg_bitmask = monitor_ctrlreg_bitmask(index);
@@ -109,7 +109,10 @@ void hvm_event_cr(unsigned int index, unsigned long value, unsigned long old)
hvm_event_traps(currad->monitor.write_ctrlreg_sync & ctrlreg_bitmask,
&req);
+ return 1;
}
+
+ return 0;
}
void hvm_event_msr(unsigned int msr, uint64_t value)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 536d1c8..6508aa1 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -52,6 +52,7 @@
#include <asm/traps.h>
#include <asm/mc146818rtc.h>
#include <asm/mce.h>
+#include <asm/monitor.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/vpt.h>
#include <asm/hvm/support.h>
@@ -443,6 +444,8 @@ void hvm_do_resume(struct vcpu *v)
struct domain *d = v->domain;
struct hvm_ioreq_server *s;
+ ASSERT(v == current);
+
check_wakeup_from_wait();
if ( is_hvm_domain(d) )
@@ -468,6 +471,35 @@ void hvm_do_resume(struct vcpu *v)
}
}
+ if ( unlikely(d->arch.event_write_data) )
+ {
+ struct monitor_write_data *w = &d->arch.event_write_data[v->vcpu_id];
+
+ if ( w->do_write.msr )
+ {
+ hvm_msr_write_intercept(w->msr, w->value, 0);
+ w->do_write.msr = 0;
+ }
+
+ if ( w->do_write.cr0 )
+ {
+ hvm_set_cr0(w->cr0, 0);
+ w->do_write.cr0 = 0;
+ }
+
+ if ( w->do_write.cr4 )
+ {
+ hvm_set_cr4(w->cr4, 0);
+ w->do_write.cr4 = 0;
+ }
+
+ if ( w->do_write.cr3 )
+ {
+ hvm_set_cr3(w->cr3, 0);
+ w->do_write.cr3 = 0;
+ }
+ }
+
/* Inject pending hw/sw trap */
if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
{
@@ -3099,13 +3131,13 @@ int hvm_mov_to_cr(unsigned int cr, unsigned int gpr)
switch ( cr )
{
case 0:
- return hvm_set_cr0(val);
+ return hvm_set_cr0(val, 1);
case 3:
- return hvm_set_cr3(val);
+ return hvm_set_cr3(val, 1);
case 4:
- return hvm_set_cr4(val);
+ return hvm_set_cr4(val, 1);
case 8:
vlapic_set_reg(vcpu_vlapic(curr), APIC_TASKPRI, ((val & 0x0f) << 4));
@@ -3202,12 +3234,13 @@ static void hvm_update_cr(struct vcpu *v, unsigned int cr, unsigned long value)
hvm_update_guest_cr(v, cr);
}
-int hvm_set_cr0(unsigned long value)
+int hvm_set_cr0(unsigned long value, bool_t may_defer)
{
struct vcpu *v = current;
struct domain *d = v->domain;
unsigned long gfn, old_value = v->arch.hvm_vcpu.guest_cr[0];
struct page_info *page;
+ struct arch_domain *currad = &v->domain->arch;
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
@@ -3237,6 +3270,22 @@ int hvm_set_cr0(unsigned long value)
goto gpf;
}
+ if ( may_defer && unlikely(currad->monitor.write_ctrlreg_enabled &
+ monitor_ctrlreg_bitmask(VM_EVENT_X86_CR0)) &&
+ value != old_value )
+ {
+ ASSERT(currad->event_write_data != NULL);
+
+ if ( hvm_event_crX(CR0, value, old_value) )
+ {
+ /* The actual write will occur in hvm_do_resume(), if permitted. */
+ currad->event_write_data[v->vcpu_id].do_write.cr0 = 1;
+ currad->event_write_data[v->vcpu_id].cr0 = value;
+
+ return X86EMUL_OKAY;
+ }
+ }
+
if ( (value & X86_CR0_PG) && !(old_value & X86_CR0_PG) )
{
if ( v->arch.hvm_vcpu.guest_efer & EFER_LME )
@@ -3303,7 +3352,6 @@ int hvm_set_cr0(unsigned long value)
hvm_funcs.handle_cd(v, value);
hvm_update_cr(v, 0, value);
- hvm_event_crX(CR0, value, old_value);
if ( (value ^ old_value) & X86_CR0_PG ) {
if ( !nestedhvm_vmswitch_in_progress(v) &&
nestedhvm_vcpu_in_guestmode(v) )
@@ -3319,11 +3367,28 @@ int hvm_set_cr0(unsigned long value)
return X86EMUL_EXCEPTION;
}
-int hvm_set_cr3(unsigned long value)
+int hvm_set_cr3(unsigned long value, bool_t may_defer)
{
struct vcpu *v = current;
struct page_info *page;
- unsigned long old;
+ unsigned long old = v->arch.hvm_vcpu.guest_cr[3];
+ struct arch_domain *currad = &v->domain->arch;
+
+ if ( may_defer && unlikely(currad->monitor.write_ctrlreg_enabled &
+ monitor_ctrlreg_bitmask(VM_EVENT_X86_CR3)) &&
+ value != old )
+ {
+ ASSERT(currad->event_write_data != NULL);
+
+ if ( hvm_event_crX(CR3, value, old) )
+ {
+ /* The actual write will occur in hvm_do_resume(), if permitted. */
+ currad->event_write_data[v->vcpu_id].do_write.cr3 = 1;
+ currad->event_write_data[v->vcpu_id].cr3 = value;
+
+ return X86EMUL_OKAY;
+ }
+ }
if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) &&
(value != v->arch.hvm_vcpu.guest_cr[3]) )
@@ -3341,10 +3406,8 @@ int hvm_set_cr3(unsigned long value)
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
}
- old=v->arch.hvm_vcpu.guest_cr[3];
v->arch.hvm_vcpu.guest_cr[3] = value;
paging_update_cr3(v);
- hvm_event_crX(CR3, value, old);
return X86EMUL_OKAY;
bad_cr3:
@@ -3353,10 +3416,11 @@ int hvm_set_cr3(unsigned long value)
return X86EMUL_UNHANDLEABLE;
}
-int hvm_set_cr4(unsigned long value)
+int hvm_set_cr4(unsigned long value, bool_t may_defer)
{
struct vcpu *v = current;
unsigned long old_cr;
+ struct arch_domain *currad = &v->domain->arch;
if ( value & hvm_cr4_guest_reserved_bits(v, 0) )
{
@@ -3384,8 +3448,23 @@ int hvm_set_cr4(unsigned long value)
goto gpf;
}
+ if ( may_defer && unlikely(currad->monitor.write_ctrlreg_enabled &
+ monitor_ctrlreg_bitmask(VM_EVENT_X86_CR4)) &&
+ value != old_cr )
+ {
+ ASSERT(currad->event_write_data != NULL);
+
+ if ( hvm_event_crX(CR4, value, old_cr) )
+ {
+ /* The actual write will occur in hvm_do_resume(), if permitted. */
+ currad->event_write_data[v->vcpu_id].do_write.cr4 = 1;
+ currad->event_write_data[v->vcpu_id].cr4 = value;
+
+ return X86EMUL_OKAY;
+ }
+ }
+
hvm_update_cr(v, 4, value);
- hvm_event_crX(CR4, value, old_cr);
/*
* Modifying CR4.{PSE,PAE,PGE,SMEP}, or clearing CR4.PCIDE
@@ -3849,7 +3928,7 @@ void hvm_task_switch(
goto out;
- if ( hvm_set_cr3(tss.cr3) )
+ if ( hvm_set_cr3(tss.cr3, 1) )
goto out;
regs->eip = tss.eip;
@@ -4551,12 +4630,14 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
goto out;
}
-int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
+int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
+ bool_t may_defer)
{
struct vcpu *v = current;
bool_t mtrr;
unsigned int edx, index;
int ret = X86EMUL_OKAY;
struct arch_domain *currad = &current->domain->arch;
HVMTRACE_3D(MSR_WRITE, msr,
(uint32_t)msr_content, (uint32_t)(msr_content >> 32));
@@ -4564,7 +4645,18 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
hvm_cpuid(1, NULL, NULL, NULL, &edx);
mtrr = !!(edx & cpufeat_mask(X86_FEATURE_MTRR));
- hvm_event_msr(msr, msr_content);
+ if ( may_defer && unlikely(currad->monitor.mov_to_msr_enabled) )
+ {
+ ASSERT(currad->event_write_data != NULL);
+
+ /* The actual write will occur in hvm_do_resume() (if permitted). */
+ currad->event_write_data[v->vcpu_id].do_write.msr = 1;
+ currad->event_write_data[v->vcpu_id].msr = msr;
+ currad->event_write_data[v->vcpu_id].value = msr_content;
+
+ hvm_event_msr(msr, msr_content);
+ return X86EMUL_OKAY;
+ }
switch ( msr )
{
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index be5797a..07e3cad 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -274,7 +274,7 @@ int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
/* CR4 */
v->arch.hvm_vcpu.guest_cr[4] = n1vmcb->_cr4;
- rc = hvm_set_cr4(n1vmcb->_cr4);
+ rc = hvm_set_cr4(n1vmcb->_cr4, 1);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr4 failed, rc: %u\n", rc);
@@ -283,7 +283,7 @@ int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
svm->ns_cr0, v->arch.hvm_vcpu.guest_cr[0]);
v->arch.hvm_vcpu.guest_cr[0] = n1vmcb->_cr0 | X86_CR0_PE;
n1vmcb->rflags &= ~X86_EFLAGS_VM;
- rc = hvm_set_cr0(n1vmcb->_cr0 | X86_CR0_PE);
+ rc = hvm_set_cr0(n1vmcb->_cr0 | X86_CR0_PE, 1);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr0 failed, rc: %u\n", rc);
svm->ns_cr0 = v->arch.hvm_vcpu.guest_cr[0];
@@ -309,7 +309,7 @@ int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
v->arch.guest_table = pagetable_null();
/* hvm_set_cr3() below sets v->arch.hvm_vcpu.guest_cr[3] for us. */
}
- rc = hvm_set_cr3(n1vmcb->_cr3);
+ rc = hvm_set_cr3(n1vmcb->_cr3, 1);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr3 failed, rc: %u\n", rc);
@@ -534,7 +534,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
/* CR4 */
v->arch.hvm_vcpu.guest_cr[4] = ns_vmcb->_cr4;
- rc = hvm_set_cr4(ns_vmcb->_cr4);
+ rc = hvm_set_cr4(ns_vmcb->_cr4, 1);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr4 failed, rc: %u\n", rc);
@@ -542,7 +542,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
svm->ns_cr0 = v->arch.hvm_vcpu.guest_cr[0];
cr0 = nestedsvm_fpu_vmentry(svm->ns_cr0, ns_vmcb, n1vmcb, n2vmcb);
v->arch.hvm_vcpu.guest_cr[0] = ns_vmcb->_cr0;
- rc = hvm_set_cr0(cr0);
+ rc = hvm_set_cr0(cr0, 1);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr0 failed, rc: %u\n", rc);
@@ -558,7 +558,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
nestedsvm_vmcb_set_nestedp2m(v, ns_vmcb, n2vmcb);
/* hvm_set_cr3() below sets v->arch.hvm_vcpu.guest_cr[3] for us. */
- rc = hvm_set_cr3(ns_vmcb->_cr3);
+ rc = hvm_set_cr3(ns_vmcb->_cr3, 1);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr3 failed, rc: %u\n", rc);
} else if (paging_mode_hap(v->domain)) {
@@ -570,7 +570,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
* we assume it intercepts page faults.
*/
/* hvm_set_cr3() below sets v->arch.hvm_vcpu.guest_cr[3] for us. */
- rc = hvm_set_cr3(ns_vmcb->_cr3);
+ rc = hvm_set_cr3(ns_vmcb->_cr3, 1);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr3 failed, rc: %u\n", rc);
} else {
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index a02f983..5e39b88 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1945,7 +1945,7 @@ static void svm_do_msr_access(struct cpu_user_regs *regs)
if ( (inst_len = __get_instruction_length(v, INSTR_WRMSR)) == 0 )
return;
msr_content = ((uint64_t)regs->edx << 32) | (uint32_t)regs->eax;
- rc = hvm_msr_write_intercept(regs->ecx, msr_content);
+ rc = hvm_msr_write_intercept(regs->ecx, msr_content, 1);
}
if ( rc == X86EMUL_OKAY )
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index fc29b89..394feca 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2010,9 +2010,15 @@ static int vmx_cr_access(unsigned long exit_qualification)
}
case VMX_CONTROL_REG_ACCESS_TYPE_CLTS: {
unsigned long old = curr->arch.hvm_vcpu.guest_cr[0];
- curr->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS;
+ unsigned long value = old & ~X86_CR0_TS;
+
+ /*
+ * Special case unlikely to be interesting to a MEM_ACCESS_DENY-capable
+ * application, so the hvm_event_crX() return value is ignored for now.
+ */
+ hvm_event_crX(CR0, value, old);
+ curr->arch.hvm_vcpu.guest_cr[0] = value;
vmx_update_guest_cr(curr, 0);
- hvm_event_crX(CR0, curr->arch.hvm_vcpu.guest_cr[0], old);
HVMTRACE_0D(CLTS);
break;
}
@@ -2024,7 +2030,7 @@ static int vmx_cr_access(unsigned long exit_qualification)
(VMX_CONTROL_REG_ACCESS_DATA(exit_qualification) &
(X86_CR0_PE|X86_CR0_MP|X86_CR0_EM|X86_CR0_TS));
HVMTRACE_LONG_1D(LMSW, value);
- return hvm_set_cr0(value);
+ return hvm_set_cr0(value, 1);
}
default:
BUG();
@@ -3035,7 +3041,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
{
uint64_t msr_content;
msr_content = ((uint64_t)regs->edx << 32) | (uint32_t)regs->eax;
- if ( hvm_msr_write_intercept(regs->ecx, msr_content) == X86EMUL_OKAY )
+ if ( hvm_msr_write_intercept(regs->ecx, msr_content, 1) == X86EMUL_OKAY )
update_guest_eip(); /* Safe: WRMSR */
break;
}
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index ac6e3b3..52bf39c 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1048,15 +1048,16 @@ static void load_shadow_guest_state(struct vcpu *v)
nvcpu->guest_cr[0] = __get_vvmcs(vvmcs, CR0_READ_SHADOW);
nvcpu->guest_cr[4] = __get_vvmcs(vvmcs, CR4_READ_SHADOW);
- hvm_set_cr0(__get_vvmcs(vvmcs, GUEST_CR0));
- hvm_set_cr4(__get_vvmcs(vvmcs, GUEST_CR4));
- hvm_set_cr3(__get_vvmcs(vvmcs, GUEST_CR3));
+ hvm_set_cr0(__get_vvmcs(vvmcs, GUEST_CR0), 1);
+ hvm_set_cr4(__get_vvmcs(vvmcs, GUEST_CR4), 1);
+ hvm_set_cr3(__get_vvmcs(vvmcs, GUEST_CR3), 1);
control = __get_vvmcs(vvmcs, VM_ENTRY_CONTROLS);
if ( control & VM_ENTRY_LOAD_GUEST_PAT )
hvm_set_guest_pat(v, __get_vvmcs(vvmcs, GUEST_PAT));
if ( control & VM_ENTRY_LOAD_PERF_GLOBAL_CTRL )
- hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL, __get_vvmcs(vvmcs, GUEST_PERF_GLOBAL_CTRL));
+ hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
+ __get_vvmcs(vvmcs, GUEST_PERF_GLOBAL_CTRL), 0);
hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
@@ -1249,15 +1250,16 @@ static void load_vvmcs_host_state(struct vcpu *v)
__vmwrite(vmcs_h2g_field[i].guest_field, r);
}
- hvm_set_cr0(__get_vvmcs(vvmcs, HOST_CR0));
- hvm_set_cr4(__get_vvmcs(vvmcs, HOST_CR4));
- hvm_set_cr3(__get_vvmcs(vvmcs, HOST_CR3));
+ hvm_set_cr0(__get_vvmcs(vvmcs, HOST_CR0), 1);
+ hvm_set_cr4(__get_vvmcs(vvmcs, HOST_CR4), 1);
+ hvm_set_cr3(__get_vvmcs(vvmcs, HOST_CR3), 1);
control = __get_vvmcs(vvmcs, VM_EXIT_CONTROLS);
if ( control & VM_EXIT_LOAD_HOST_PAT )
hvm_set_guest_pat(v, __get_vvmcs(vvmcs, HOST_PAT));
if ( control & VM_EXIT_LOAD_PERF_GLOBAL_CTRL )
- hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL, __get_vvmcs(vvmcs, HOST_PERF_GLOBAL_CTRL));
+ hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
+ __get_vvmcs(vvmcs, HOST_PERF_GLOBAL_CTRL), 1);
hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 5d40d2c..bfa2430 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1417,6 +1417,35 @@ static void p2m_vm_event_fill_regs(vm_event_request_t *req)
void p2m_mem_access_emulate_check(struct vcpu *v,
const vm_event_response_t *rsp)
{
+ if ( rsp->flags & MEM_ACCESS_DENY )
+ {
+ struct monitor_write_data *w =
+ &v->domain->arch.event_write_data[v->vcpu_id];
+
+ ASSERT(v->domain->arch.event_write_data != NULL);
+
+ switch ( rsp->reason )
+ {
+ case VM_EVENT_REASON_MOV_TO_MSR:
+ w->do_write.msr = 0;
+ break;
+ case VM_EVENT_REASON_WRITE_CTRLREG:
+ switch ( rsp->u.write_ctrlreg.index )
+ {
+ case VM_EVENT_X86_CR0:
+ w->do_write.cr0 = 0;
+ break;
+ case VM_EVENT_X86_CR3:
+ w->do_write.cr3 = 0;
+ break;
+ case VM_EVENT_X86_CR4:
+ w->do_write.cr4 = 0;
+ break;
+ }
+ break;
+ }
+ }
+
/* Mark vcpu for skipping one instruction upon rescheduling. */
if ( rsp->flags & MEM_ACCESS_EMULATE )
{
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 11438da..e9ac776 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -79,6 +79,10 @@ static int vm_event_enable(
}
}
+ if ( !d->arch.event_write_data )
+ d->arch.event_write_data = xzalloc_array(struct monitor_write_data,
+ d->max_vcpus);
+
rc = prepare_ring_for_helper(d, ring_gfn, &ved->ring_pg_struct,
&ved->ring_page);
if ( rc < 0 )
@@ -410,6 +414,8 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
#ifdef HAS_MEM_ACCESS
case VM_EVENT_REASON_MEM_ACCESS:
+ case VM_EVENT_REASON_MOV_TO_MSR:
+ case VM_EVENT_REASON_WRITE_CTRLREG:
mem_access_resume(v, &rsp);
break;
#endif
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index f712caa..8990277 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -249,6 +249,21 @@ struct pv_domain
struct mapcache_domain mapcache;
};
+struct monitor_write_data {
+ struct {
+ unsigned int msr : 1;
+ unsigned int cr0 : 1;
+ unsigned int cr3 : 1;
+ unsigned int cr4 : 1;
+ } do_write;
+
+ uint32_t msr;
+ uint64_t value;
+ uint64_t cr0;
+ uint64_t cr3;
+ uint64_t cr4;
+};
+
struct arch_domain
{
struct page_info *perdomain_l3_pg;
@@ -359,6 +374,8 @@ struct arch_domain
/* Mem_access emulation control */
bool_t mem_access_emulate_enabled;
+
+ struct monitor_write_data *event_write_data;
} __cacheline_aligned;
#define has_arch_pdevs(d) (!list_empty(&(d)->arch.pdev_list))
@@ -513,7 +530,6 @@ struct arch_vcpu
unsigned long eip;
struct vm_event_emul_read_data *emul_read_data;
} vm_event;
-
};
smap_check_policy_t smap_policy_change(struct vcpu *v,
diff --git a/xen/include/asm-x86/hvm/event.h b/xen/include/asm-x86/hvm/event.h
index ab5abd0..c082c20 100644
--- a/xen/include/asm-x86/hvm/event.h
+++ b/xen/include/asm-x86/hvm/event.h
@@ -18,8 +18,13 @@
#ifndef __ASM_X86_HVM_EVENT_H__
#define __ASM_X86_HVM_EVENT_H__
-/* Called for current VCPU on crX/MSR changes by guest */
-void hvm_event_cr(unsigned int index, unsigned long value, unsigned long old);
+/*
+ * Called for current VCPU on crX/MSR changes by guest.
+ * The event might not fire if the client has subscribed to it in onchangeonly
+ * mode, hence the bool_t return type for control register write events.
+ */
+bool_t hvm_event_cr(unsigned int index, unsigned long value,
+ unsigned long old);
#define hvm_event_crX(what, new, old) \
hvm_event_cr(VM_EVENT_X86_##what, new, old)
void hvm_event_msr(unsigned int msr, uint64_t value);
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index 05ef5c5..95d3bb2 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -124,11 +124,12 @@ void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value);
/* These functions all return X86EMUL return codes. */
int hvm_set_efer(uint64_t value);
-int hvm_set_cr0(unsigned long value);
-int hvm_set_cr3(unsigned long value);
-int hvm_set_cr4(unsigned long value);
+int hvm_set_cr0(unsigned long value, bool_t may_defer);
+int hvm_set_cr3(unsigned long value, bool_t may_defer);
+int hvm_set_cr4(unsigned long value, bool_t may_defer);
int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
-int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content);
+int hvm_msr_write_intercept(
+ unsigned int msr, uint64_t msr_content, bool_t may_defer);
int hvm_mov_to_cr(unsigned int cr, unsigned int gpr);
int hvm_mov_from_cr(unsigned int cr, unsigned int gpr);
diff --git a/xen/include/public/vm_event.h b/xen/include/public/vm_event.h
index f0da008..bc97334 100644
--- a/xen/include/public/vm_event.h
+++ b/xen/include/public/vm_event.h
@@ -158,6 +158,11 @@ struct vm_event_regs_x86 {
* MEM_ACCESS_EMULATE_NOWRITE.
*/
#define MEM_ACCESS_SET_EMUL_READ_DATA (1 << 8)
+ /*
+ * Deny completion of the operation that triggered the event.
+ * Currently only useful for MSR, CR0, CR3 and CR4 write events.
+ */
+#define MEM_ACCESS_DENY (1 << 9)
struct vm_event_mem_access {
uint64_t gfn;
--
1.7.9.5
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel