[Xen-devel] [PATCH V2 3/3] xen/vm_event: Deny register writes if refused by vm_event reply
Deny register writes if a subscriber to the corresponding vm_event write
events forbids them. Currently supported for MSR, CR0, CR3 and CR4 writes.
Signed-off-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
---
Changes since V1:
- Renamed MEM_ACCESS_SKIP_MSR_WRITE to MEM_ACCESS_DENY, to be used
as a generic deny flag for whatever write operation triggered the
event.
- Write events for CR0, CR3 and CR4 are now pre-write.
---
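Not part of the patch, reviewers only: a minimal sketch of how a monitoring
application could ask for a write to be dropped. The ring handling is elided,
build_deny_response() is a hypothetical helper, and the public header install
path is an assumption; only the vm_event fields this series actually consumes
(flags, reason, u.write_ctrlreg.index and the new MEM_ACCESS_DENY bit) are
relied upon.

/* Reviewer sketch, not part of the patch. */
#include <string.h>
#include <xen/vm_event.h>  /* assumed install path of the public header */

/*
 * Answer a mov_to_msr or write_ctrlreg request; setting MEM_ACCESS_DENY
 * makes p2m_mem_access_emulate_check() clear the pending do_write bit,
 * so hvm_do_resume() drops the write instead of committing it.
 */
static void build_deny_response(const vm_event_request_t *req,
                                vm_event_response_t *rsp, int forbid_write)
{
    memset(rsp, 0, sizeof(*rsp));
    rsp->vcpu_id = req->vcpu_id;
    rsp->flags   = req->flags;    /* echo e.g. the vcpu-paused flag */
    rsp->reason  = req->reason;   /* switched on in p2m_mem_access_emulate_check() */

    if ( req->reason == VM_EVENT_REASON_WRITE_CTRLREG )
        /* Tells the hypervisor which do_write bit the deny refers to. */
        rsp->u.write_ctrlreg.index = req->u.write_ctrlreg.index;

    if ( forbid_write )
        rsp->flags |= MEM_ACCESS_DENY;
}

For MSR events only the reason and the flag matter, since w->do_write.msr is
cleared unconditionally on VM_EVENT_REASON_MOV_TO_MSR.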
xen/arch/x86/domain.c | 2 +
xen/arch/x86/hvm/emulate.c | 8 +--
xen/arch/x86/hvm/event.c | 5 +-
xen/arch/x86/hvm/hvm.c | 120 ++++++++++++++++++++++++++++++++-----
xen/arch/x86/hvm/svm/nestedsvm.c | 14 ++---
xen/arch/x86/hvm/svm/svm.c | 2 +-
xen/arch/x86/hvm/vmx/vmx.c | 6 +-
xen/arch/x86/hvm/vmx/vvmx.c | 18 +++---
xen/arch/x86/mm/p2m.c | 29 +++++++++
xen/common/vm_event.c | 6 ++
xen/include/asm-x86/domain.h | 18 +++++-
xen/include/asm-x86/hvm/event.h | 3 +-
xen/include/asm-x86/hvm/support.h | 9 +--
xen/include/public/vm_event.h | 5 ++
14 files changed, 201 insertions(+), 44 deletions(-)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 95eb190..3b706b0 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -659,6 +659,8 @@ void arch_domain_destroy(struct domain *d)
cleanup_domain_irq_mapping(d);
psr_free_rmid(d);
+
+ xfree(d->arch.event_write_data);
}
void arch_domain_shutdown(struct domain *d)
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 2f7081d..f59bc7f 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1346,14 +1346,14 @@ static int hvmemul_write_cr(
switch ( reg )
{
case 0:
- return hvm_set_cr0(val);
+ return hvm_set_cr0(val, 1);
case 2:
current->arch.hvm_vcpu.guest_cr[2] = val;
return X86EMUL_OKAY;
case 3:
- return hvm_set_cr3(val);
+ return hvm_set_cr3(val, 1);
case 4:
- return hvm_set_cr4(val);
+ return hvm_set_cr4(val, 1);
default:
break;
}
@@ -1374,7 +1374,7 @@ static int hvmemul_write_msr(
uint64_t val,
struct x86_emulate_ctxt *ctxt)
{
- return hvm_msr_write_intercept(reg, val);
+ return hvm_msr_write_intercept(reg, val, 1);
}
static int hvmemul_wbinvd(
diff --git a/xen/arch/x86/hvm/event.c b/xen/arch/x86/hvm/event.c
index 5df0e31..9ac8583 100644
--- a/xen/arch/x86/hvm/event.c
+++ b/xen/arch/x86/hvm/event.c
@@ -90,7 +90,7 @@ static int hvm_event_traps(uint8_t sync, vm_event_request_t *req)
return 1;
}
-void hvm_event_cr(unsigned int index, unsigned long value, unsigned long old)
+bool_t hvm_event_cr(unsigned int index, unsigned long value, unsigned long old)
{
struct arch_domain *currad = &current->domain->arch;
unsigned int ctrlreg_bitmask = monitor_ctrlreg_bitmask(index);
@@ -109,7 +109,10 @@ void hvm_event_cr(unsigned int index, unsigned long value, unsigned long old)
hvm_event_traps(currad->monitor.write_ctrlreg_sync & ctrlreg_bitmask,
&req);
+ return 1;
}
+
+ return 0;
}
void hvm_event_msr(unsigned int msr, uint64_t value)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 02a3df8..32ebfe1 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -52,6 +52,7 @@
#include <asm/traps.h>
#include <asm/mc146818rtc.h>
#include <asm/mce.h>
+#include <asm/monitor.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/vpt.h>
#include <asm/hvm/support.h>
@@ -468,6 +469,37 @@ void hvm_do_resume(struct vcpu *v)
}
}
+ ASSERT(v == current);
+
+ if ( d->arch.event_write_data )
+ {
+ struct monitor_write_data *w = &d->arch.event_write_data[v->vcpu_id];
+
+ if ( w->do_write.msr )
+ {
+ hvm_msr_write_intercept(w->msr, w->value, 0);
+ w->do_write.msr = 0;
+ }
+
+ if ( w->do_write.cr0 )
+ {
+ hvm_set_cr0(w->cr0, 0);
+ w->do_write.cr0 = 0;
+ }
+
+ if ( w->do_write.cr3 )
+ {
+ hvm_set_cr3(w->cr3, 0);
+ w->do_write.cr3 = 0;
+ }
+
+ if ( w->do_write.cr4 )
+ {
+ hvm_set_cr4(w->cr4, 0);
+ w->do_write.cr4 = 0;
+ }
+ }
+
/* Inject pending hw/sw trap */
if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
{
@@ -3086,13 +3118,13 @@ int hvm_mov_to_cr(unsigned int cr, unsigned int gpr)
switch ( cr )
{
case 0:
- return hvm_set_cr0(val);
+ return hvm_set_cr0(val, 1);
case 3:
- return hvm_set_cr3(val);
+ return hvm_set_cr3(val, 1);
case 4:
- return hvm_set_cr4(val);
+ return hvm_set_cr4(val, 1);
case 8:
vlapic_set_reg(vcpu_vlapic(curr), APIC_TASKPRI, ((val & 0x0f) << 4));
@@ -3189,12 +3221,13 @@ static void hvm_update_cr(struct vcpu *v, unsigned int cr, unsigned long value)
hvm_update_guest_cr(v, cr);
}
-int hvm_set_cr0(unsigned long value)
+int hvm_set_cr0(unsigned long value, bool_t event_only)
{
struct vcpu *v = current;
struct domain *d = v->domain;
unsigned long gfn, old_value = v->arch.hvm_vcpu.guest_cr[0];
struct page_info *page;
+ struct arch_domain *currad = &v->domain->arch;
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
@@ -3224,6 +3257,22 @@ int hvm_set_cr0(unsigned long value)
goto gpf;
}
+ if ( event_only && unlikely(currad->monitor.write_ctrlreg_enabled &
+ monitor_ctrlreg_bitmask(VM_EVENT_X86_CR0)) &&
+ value != old_value )
+ {
+ ASSERT(currad->event_write_data != NULL);
+
+ if ( hvm_event_crX(CR0, value, old_value) )
+ {
+ /* The actual write will occur in hvm_do_resume(), if permitted. */
+ currad->event_write_data[v->vcpu_id].do_write.cr0 = 1;
+ currad->event_write_data[v->vcpu_id].cr0 = value;
+
+ return X86EMUL_OKAY;
+ }
+ }
+
if ( (value & X86_CR0_PG) && !(old_value & X86_CR0_PG) )
{
if ( v->arch.hvm_vcpu.guest_efer & EFER_LME )
@@ -3290,7 +3339,6 @@ int hvm_set_cr0(unsigned long value)
hvm_funcs.handle_cd(v, value);
hvm_update_cr(v, 0, value);
- hvm_event_crX(CR0, value, old_value);
if ( (value ^ old_value) & X86_CR0_PG ) {
if ( !nestedhvm_vmswitch_in_progress(v) &&
nestedhvm_vcpu_in_guestmode(v) )
@@ -3306,11 +3354,28 @@ int hvm_set_cr0(unsigned long value)
return X86EMUL_EXCEPTION;
}
-int hvm_set_cr3(unsigned long value)
+int hvm_set_cr3(unsigned long value, bool_t event_only)
{
struct vcpu *v = current;
struct page_info *page;
- unsigned long old;
+ unsigned long old = v->arch.hvm_vcpu.guest_cr[3];
+ struct arch_domain *currad = &v->domain->arch;
+
+ if ( event_only && unlikely(currad->monitor.write_ctrlreg_enabled &
+ monitor_ctrlreg_bitmask(VM_EVENT_X86_CR3)) &&
+ value != old )
+ {
+ ASSERT(currad->event_write_data != NULL);
+
+ if ( hvm_event_crX(CR3, value, old) )
+ {
+ /* The actual write will occur in hvm_do_resume(), if permitted. */
+ currad->event_write_data[v->vcpu_id].do_write.cr3 = 1;
+ currad->event_write_data[v->vcpu_id].cr3 = value;
+
+ return X86EMUL_OKAY;
+ }
+ }
if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) &&
(value != v->arch.hvm_vcpu.guest_cr[3]) )
@@ -3328,10 +3393,8 @@ int hvm_set_cr3(unsigned long value)
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
}
- old=v->arch.hvm_vcpu.guest_cr[3];
v->arch.hvm_vcpu.guest_cr[3] = value;
paging_update_cr3(v);
- hvm_event_crX(CR3, value, old);
return X86EMUL_OKAY;
bad_cr3:
@@ -3340,10 +3403,11 @@ int hvm_set_cr3(unsigned long value)
return X86EMUL_UNHANDLEABLE;
}
-int hvm_set_cr4(unsigned long value)
+int hvm_set_cr4(unsigned long value, bool_t event_only)
{
struct vcpu *v = current;
unsigned long old_cr;
+ struct arch_domain *currad = &v->domain->arch;
if ( value & hvm_cr4_guest_reserved_bits(v, 0) )
{
@@ -3371,8 +3435,23 @@ int hvm_set_cr4(unsigned long value)
goto gpf;
}
+ if ( event_only && unlikely(currad->monitor.write_ctrlreg_enabled &
+ monitor_ctrlreg_bitmask(VM_EVENT_X86_CR4)) &&
+ value != old_cr )
+ {
+ ASSERT(currad->event_write_data != NULL);
+
+ if ( hvm_event_crX(CR4, value, old_cr) )
+ {
+ /* The actual write will occur in hvm_do_resume(), if permitted. */
+ currad->event_write_data[v->vcpu_id].do_write.cr4 = 1;
+ currad->event_write_data[v->vcpu_id].cr4 = value;
+
+ return X86EMUL_OKAY;
+ }
+ }
+
hvm_update_cr(v, 4, value);
- hvm_event_crX(CR4, value, old_cr);
/*
* Modifying CR4.{PSE,PAE,PGE,SMEP}, or clearing CR4.PCIDE
@@ -3836,7 +3915,7 @@ void hvm_task_switch(
goto out;
- if ( hvm_set_cr3(tss.cr3) )
+ if ( hvm_set_cr3(tss.cr3, 1) )
goto out;
regs->eip = tss.eip;
@@ -4538,12 +4617,14 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
goto out;
}
-int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
+int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
+ bool_t event_only)
{
struct vcpu *v = current;
bool_t mtrr;
unsigned int edx, index;
int ret = X86EMUL_OKAY;
+    struct arch_domain *currad = &current->domain->arch;
HVMTRACE_3D(MSR_WRITE, msr,
(uint32_t)msr_content, (uint32_t)(msr_content >> 32));
@@ -4551,7 +4632,18 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
hvm_cpuid(1, NULL, NULL, NULL, &edx);
mtrr = !!(edx & cpufeat_mask(X86_FEATURE_MTRR));
- hvm_event_msr(msr, msr_content);
+ if ( event_only && unlikely(currad->monitor.mov_to_msr_enabled) )
+ {
+ ASSERT(currad->event_write_data != NULL);
+
+ /* The actual write will occur in hvm_do_resume() (if permitted). */
+ currad->event_write_data[v->vcpu_id].do_write.msr = 1;
+ currad->event_write_data[v->vcpu_id].msr = msr;
+ currad->event_write_data[v->vcpu_id].value = msr_content;
+
+ hvm_event_msr(msr, msr_content);
+ return X86EMUL_OKAY;
+ }
switch ( msr )
{
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index be5797a..07e3cad 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -274,7 +274,7 @@ int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
/* CR4 */
v->arch.hvm_vcpu.guest_cr[4] = n1vmcb->_cr4;
- rc = hvm_set_cr4(n1vmcb->_cr4);
+ rc = hvm_set_cr4(n1vmcb->_cr4, 1);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr4 failed, rc: %u\n", rc);
@@ -283,7 +283,7 @@ int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
svm->ns_cr0, v->arch.hvm_vcpu.guest_cr[0]);
v->arch.hvm_vcpu.guest_cr[0] = n1vmcb->_cr0 | X86_CR0_PE;
n1vmcb->rflags &= ~X86_EFLAGS_VM;
- rc = hvm_set_cr0(n1vmcb->_cr0 | X86_CR0_PE);
+ rc = hvm_set_cr0(n1vmcb->_cr0 | X86_CR0_PE, 1);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr0 failed, rc: %u\n", rc);
svm->ns_cr0 = v->arch.hvm_vcpu.guest_cr[0];
@@ -309,7 +309,7 @@ int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
v->arch.guest_table = pagetable_null();
/* hvm_set_cr3() below sets v->arch.hvm_vcpu.guest_cr[3] for us. */
}
- rc = hvm_set_cr3(n1vmcb->_cr3);
+ rc = hvm_set_cr3(n1vmcb->_cr3, 1);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr3 failed, rc: %u\n", rc);
@@ -534,7 +534,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
/* CR4 */
v->arch.hvm_vcpu.guest_cr[4] = ns_vmcb->_cr4;
- rc = hvm_set_cr4(ns_vmcb->_cr4);
+ rc = hvm_set_cr4(ns_vmcb->_cr4, 1);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr4 failed, rc: %u\n", rc);
@@ -542,7 +542,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
svm->ns_cr0 = v->arch.hvm_vcpu.guest_cr[0];
cr0 = nestedsvm_fpu_vmentry(svm->ns_cr0, ns_vmcb, n1vmcb, n2vmcb);
v->arch.hvm_vcpu.guest_cr[0] = ns_vmcb->_cr0;
- rc = hvm_set_cr0(cr0);
+ rc = hvm_set_cr0(cr0, 1);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr0 failed, rc: %u\n", rc);
@@ -558,7 +558,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
nestedsvm_vmcb_set_nestedp2m(v, ns_vmcb, n2vmcb);
/* hvm_set_cr3() below sets v->arch.hvm_vcpu.guest_cr[3] for us. */
- rc = hvm_set_cr3(ns_vmcb->_cr3);
+ rc = hvm_set_cr3(ns_vmcb->_cr3, 1);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr3 failed, rc: %u\n", rc);
} else if (paging_mode_hap(v->domain)) {
@@ -570,7 +570,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
* we assume it intercepts page faults.
*/
/* hvm_set_cr3() below sets v->arch.hvm_vcpu.guest_cr[3] for us. */
- rc = hvm_set_cr3(ns_vmcb->_cr3);
+ rc = hvm_set_cr3(ns_vmcb->_cr3, 1);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr3 failed, rc: %u\n", rc);
} else {
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 6734fb6..b9b4791 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1945,7 +1945,7 @@ static void svm_do_msr_access(struct cpu_user_regs *regs)
if ( (inst_len = __get_instruction_length(v, INSTR_WRMSR)) == 0 )
return;
msr_content = ((uint64_t)regs->edx << 32) | (uint32_t)regs->eax;
- rc = hvm_msr_write_intercept(regs->ecx, msr_content);
+ rc = hvm_msr_write_intercept(regs->ecx, msr_content, 1);
}
if ( rc == X86EMUL_OKAY )
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index af257db..a8757da 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2010,9 +2010,9 @@ static int vmx_cr_access(unsigned long exit_qualification)
}
case VMX_CONTROL_REG_ACCESS_TYPE_CLTS: {
unsigned long old = curr->arch.hvm_vcpu.guest_cr[0];
+ hvm_event_crX(CR0, curr->arch.hvm_vcpu.guest_cr[0], old);
curr->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS;
vmx_update_guest_cr(curr, 0);
- hvm_event_crX(CR0, curr->arch.hvm_vcpu.guest_cr[0], old);
HVMTRACE_0D(CLTS);
break;
}
@@ -2024,7 +2024,7 @@ static int vmx_cr_access(unsigned long exit_qualification)
(VMX_CONTROL_REG_ACCESS_DATA(exit_qualification) &
(X86_CR0_PE|X86_CR0_MP|X86_CR0_EM|X86_CR0_TS));
HVMTRACE_LONG_1D(LMSW, value);
- return hvm_set_cr0(value);
+ return hvm_set_cr0(value, 1);
}
default:
BUG();
@@ -3046,7 +3046,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
{
uint64_t msr_content;
msr_content = ((uint64_t)regs->edx << 32) | (uint32_t)regs->eax;
- if ( hvm_msr_write_intercept(regs->ecx, msr_content) == X86EMUL_OKAY )
+        if ( hvm_msr_write_intercept(regs->ecx, msr_content, 1) == X86EMUL_OKAY )
update_guest_eip(); /* Safe: WRMSR */
break;
}
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index ac6e3b3..3184af2 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1048,15 +1048,16 @@ static void load_shadow_guest_state(struct vcpu *v)
nvcpu->guest_cr[0] = __get_vvmcs(vvmcs, CR0_READ_SHADOW);
nvcpu->guest_cr[4] = __get_vvmcs(vvmcs, CR4_READ_SHADOW);
- hvm_set_cr0(__get_vvmcs(vvmcs, GUEST_CR0));
- hvm_set_cr4(__get_vvmcs(vvmcs, GUEST_CR4));
- hvm_set_cr3(__get_vvmcs(vvmcs, GUEST_CR3));
+ hvm_set_cr0(__get_vvmcs(vvmcs, GUEST_CR0), 1);
+ hvm_set_cr4(__get_vvmcs(vvmcs, GUEST_CR4), 1);
+ hvm_set_cr3(__get_vvmcs(vvmcs, GUEST_CR3), 1);
control = __get_vvmcs(vvmcs, VM_ENTRY_CONTROLS);
if ( control & VM_ENTRY_LOAD_GUEST_PAT )
hvm_set_guest_pat(v, __get_vvmcs(vvmcs, GUEST_PAT));
if ( control & VM_ENTRY_LOAD_PERF_GLOBAL_CTRL )
-        hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL, __get_vvmcs(vvmcs, GUEST_PERF_GLOBAL_CTRL));
+ hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
+ __get_vvmcs(vvmcs, GUEST_PERF_GLOBAL_CTRL), 1);
hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
@@ -1249,15 +1250,16 @@ static void load_vvmcs_host_state(struct vcpu *v)
__vmwrite(vmcs_h2g_field[i].guest_field, r);
}
- hvm_set_cr0(__get_vvmcs(vvmcs, HOST_CR0));
- hvm_set_cr4(__get_vvmcs(vvmcs, HOST_CR4));
- hvm_set_cr3(__get_vvmcs(vvmcs, HOST_CR3));
+ hvm_set_cr0(__get_vvmcs(vvmcs, HOST_CR0), 1);
+ hvm_set_cr4(__get_vvmcs(vvmcs, HOST_CR4), 1);
+ hvm_set_cr3(__get_vvmcs(vvmcs, HOST_CR3), 1);
control = __get_vvmcs(vvmcs, VM_EXIT_CONTROLS);
if ( control & VM_EXIT_LOAD_HOST_PAT )
hvm_set_guest_pat(v, __get_vvmcs(vvmcs, HOST_PAT));
if ( control & VM_EXIT_LOAD_PERF_GLOBAL_CTRL )
-        hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL, __get_vvmcs(vvmcs, HOST_PERF_GLOBAL_CTRL));
+ hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
+ __get_vvmcs(vvmcs, HOST_PERF_GLOBAL_CTRL), 1);
hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 0ab74bc..451b674 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1417,6 +1417,35 @@ static void p2m_vm_event_fill_regs(vm_event_request_t *req)
void p2m_mem_access_emulate_check(struct vcpu *v,
const vm_event_response_t *rsp)
{
+ ASSERT(v->domain->arch.event_write_data != NULL);
+
+ if ( rsp->flags & MEM_ACCESS_DENY )
+ {
+ struct monitor_write_data *w =
+ &v->domain->arch.event_write_data[v->vcpu_id];
+
+ switch ( rsp->reason )
+ {
+ case VM_EVENT_REASON_MOV_TO_MSR:
+ w->do_write.msr = 0;
+ break;
+ case VM_EVENT_REASON_WRITE_CTRLREG:
+ switch ( rsp->u.write_ctrlreg.index )
+ {
+ case VM_EVENT_X86_CR0:
+ w->do_write.cr0 = 0;
+ break;
+ case VM_EVENT_X86_CR3:
+ w->do_write.cr3 = 0;
+ break;
+ case VM_EVENT_X86_CR4:
+ w->do_write.cr4 = 0;
+ break;
+ }
+ break;
+ }
+ }
+
/* Mark vcpu for skipping one instruction upon rescheduling. */
if ( rsp->flags & MEM_ACCESS_EMULATE )
{
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index d635b36..c1a1573 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -79,6 +79,10 @@ static int vm_event_enable(
}
}
+ if ( !d->arch.event_write_data )
+ d->arch.event_write_data = xzalloc_array(struct monitor_write_data,
+ d->max_vcpus);
+
rc = prepare_ring_for_helper(d, ring_gfn, &ved->ring_pg_struct,
&ved->ring_page);
if ( rc < 0 )
@@ -410,6 +414,8 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
#ifdef HAS_MEM_ACCESS
case VM_EVENT_REASON_MEM_ACCESS:
+ case VM_EVENT_REASON_MOV_TO_MSR:
+ case VM_EVENT_REASON_WRITE_CTRLREG:
mem_access_resume(v, &rsp);
break;
#endif
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index a9af5f4..fbfc5c4 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -250,6 +250,21 @@ struct pv_domain
struct mapcache_domain mapcache;
};
+struct monitor_write_data {
+ struct {
+ uint8_t msr : 1;
+ uint8_t cr0 : 1;
+ uint8_t cr3 : 1;
+ uint8_t cr4 : 1;
+ } do_write;
+
+ uint64_t msr;
+ uint64_t value;
+ uint64_t cr0;
+ uint64_t cr3;
+ uint64_t cr4;
+};
+
struct arch_domain
{
struct page_info *perdomain_l3_pg;
@@ -355,6 +370,8 @@ struct arch_domain
/* Mem_access emulation control */
bool_t mem_access_emulate_enabled;
+
+ struct monitor_write_data *event_write_data;
} __cacheline_aligned;
#define has_arch_pdevs(d) (!list_empty(&(d)->arch.pdev_list))
@@ -509,7 +526,6 @@ struct arch_vcpu
unsigned long eip;
struct vm_event_emul_read_data *emul_read_data;
} vm_event;
-
};
smap_check_policy_t smap_policy_change(struct vcpu *v,
diff --git a/xen/include/asm-x86/hvm/event.h b/xen/include/asm-x86/hvm/event.h
index 8d93f6a..d9b3581 100644
--- a/xen/include/asm-x86/hvm/event.h
+++ b/xen/include/asm-x86/hvm/event.h
@@ -19,7 +19,8 @@
#define __ASM_X86_HVM_EVENT_H__
/* Called for current VCPU on crX/MSR changes by guest */
-void hvm_event_cr(unsigned int index, unsigned long value, unsigned long old);
+bool_t hvm_event_cr(unsigned int index, unsigned long value,
+ unsigned long old);
#define hvm_event_crX(what, new, old) \
hvm_event_cr(VM_EVENT_X86_##what, new, old)
void hvm_event_msr(unsigned int msr, uint64_t value);
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index 05ef5c5..9c06fd3 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -124,11 +124,12 @@ void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value);
/* These functions all return X86EMUL return codes. */
int hvm_set_efer(uint64_t value);
-int hvm_set_cr0(unsigned long value);
-int hvm_set_cr3(unsigned long value);
-int hvm_set_cr4(unsigned long value);
+int hvm_set_cr0(unsigned long value, bool_t event_only);
+int hvm_set_cr3(unsigned long value, bool_t event_only);
+int hvm_set_cr4(unsigned long value, bool_t event_only);
int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
-int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content);
+int hvm_msr_write_intercept(
+ unsigned int msr, uint64_t msr_content, bool_t event_only);
int hvm_mov_to_cr(unsigned int cr, unsigned int gpr);
int hvm_mov_from_cr(unsigned int cr, unsigned int gpr);
diff --git a/xen/include/public/vm_event.h b/xen/include/public/vm_event.h
index 8c39549..e96491d 100644
--- a/xen/include/public/vm_event.h
+++ b/xen/include/public/vm_event.h
@@ -158,6 +158,11 @@ struct vm_event_regs_x86 {
* MEM_ACCESS_EMULATE_NOWRITE.
*/
#define MEM_ACCESS_SET_EMUL_READ_DATA (1 << 8)
+ /*
+ * Deny completion of the operation that triggered the event.
+ * Currently only useful for MSR, CR0, CR3 and CR4 write events.
+ */
+#define MEM_ACCESS_DENY (1 << 9)
struct vm_event_mem_access {
uint64_t gfn;
--
1.7.9.5
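One more reviewer aid, not part of the patch: a standalone model (hypothetical
user-space C; every name except monitor_write_data is made up) of the lifecycle
this series introduces: the intercept defers the write and sends the event, a
MEM_ACCESS_DENY reply clears the corresponding do_write bit, and hvm_do_resume()
commits only what is still marked.

/* Toy model, not hypervisor code. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct monitor_write_data {          /* copied from the asm-x86/domain.h hunk */
    struct {
        uint8_t msr : 1;
        uint8_t cr0 : 1;
        uint8_t cr3 : 1;
        uint8_t cr4 : 1;
    } do_write;
    uint64_t msr, value, cr0, cr3, cr4;
};

/* Stands in for hvm_set_cr0(value, 1): stash the value, raise the event. */
static void intercept_cr0_write(struct monitor_write_data *w, uint64_t val)
{
    w->do_write.cr0 = 1;
    w->cr0 = val;
    /* vm_event request would be sent here, vcpu paused until the reply. */
}

/* Stands in for p2m_mem_access_emulate_check() seeing MEM_ACCESS_DENY. */
static void deny_cr0_write(struct monitor_write_data *w)
{
    w->do_write.cr0 = 0;
}

/* Stands in for the new hvm_do_resume() block: commit what was not denied. */
static void resume(struct monitor_write_data *w)
{
    if ( w->do_write.cr0 )
    {
        printf("commit CR0 <- 0x%" PRIx64 "\n", w->cr0);
        w->do_write.cr0 = 0;
    }
    else
        printf("CR0 write dropped\n");
}

int main(void)
{
    struct monitor_write_data w = { { 0 } };

    intercept_cr0_write(&w, 0x80050033);
    deny_cr0_write(&w);   /* monitor replied with MEM_ACCESS_DENY */
    resume(&w);           /* prints "CR0 write dropped" */
    return 0;
}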