[Xen-devel] [PATCH 4/5] xen/vm_event: Deny MSR writes if refused by vm_event reply
Deny MSR writes if a vm_event client subscribed to mov_to_msr events
forbids them. The write is deferred to hvm_do_resume(), and dropped
there if the client's reply sets the new MEM_ACCESS_SKIP_MSR_WRITE
flag.
Signed-off-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
---
xen/arch/x86/hvm/emulate.c | 2 +-
xen/arch/x86/hvm/hvm.c | 24 ++++++++++++++++++++++--
xen/arch/x86/hvm/svm/svm.c | 2 +-
xen/arch/x86/hvm/vmx/vmx.c | 2 +-
xen/arch/x86/hvm/vmx/vvmx.c | 6 ++++--
xen/arch/x86/mm/p2m.c | 3 +++
xen/common/vm_event.c | 1 +
xen/include/asm-x86/domain.h | 5 +++++
xen/include/asm-x86/hvm/support.h | 3 ++-
xen/include/public/vm_event.h | 5 +++++
10 files changed, 45 insertions(+), 8 deletions(-)
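For context, a minimal sketch of the consumer side (not part of this
patch): it assumes an xen-access-style event loop with mov_to_msr
monitoring already enabled, and elides the ring-buffer and
interface-version handling; handle_msr_event() and the example MSR
number are illustrative only.

    #include <string.h>
    #include <xen/vm_event.h>

    /* Build a reply that vetoes writes to one MSR of interest
     * (here MSR_LSTAR, 0xc0000082, as an example policy). */
    static void handle_msr_event(const vm_event_request_t *req,
                                 vm_event_response_t *rsp)
    {
        memset(rsp, 0, sizeof(*rsp));

        /* Echo the identifying fields so the hypervisor can match
         * the reply to the paused vcpu. */
        rsp->vcpu_id = req->vcpu_id;
        rsp->reason  = req->reason;
        rsp->flags   = req->flags;

        if ( req->reason == VM_EVENT_REASON_MOV_TO_MSR &&
             req->u.mov_to_msr.msr == 0xc0000082 )
            rsp->flags |= MEM_ACCESS_SKIP_MSR_WRITE; /* deny the write */
    }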
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 0058b15..566eee7 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1293,7 +1293,7 @@ static int hvmemul_write_msr(
uint64_t val,
struct x86_emulate_ctxt *ctxt)
{
- return hvm_msr_write_intercept(reg, val);
+ return hvm_msr_write_intercept(reg, val, 1);
}
static int hvmemul_wbinvd(
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 8ad03c6..ed0ec9a 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -464,6 +464,15 @@ void hvm_do_resume(struct vcpu *v)
}
}
+ ASSERT(v == current);
+
+ if ( v->arch.msr_write.do_write )
+ {
+ hvm_msr_write_intercept(v->arch.msr_write.msr,
+ v->arch.msr_write.value, 0);
+ v->arch.msr_write.do_write = 0;
+ }
+
/* Inject pending hw/sw trap */
if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
{
@@ -4517,12 +4526,14 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
goto out;
}
-int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
+int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
+ bool_t event_only)
{
struct vcpu *v = current;
bool_t mtrr;
unsigned int edx, index;
int ret = X86EMUL_OKAY;
+ struct arch_domain *currad = &current->domain->arch;
HVMTRACE_3D(MSR_WRITE, msr,
(uint32_t)msr_content, (uint32_t)(msr_content >> 32));
@@ -4530,7 +4541,16 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
hvm_cpuid(1, NULL, NULL, NULL, &edx);
mtrr = !!(edx & cpufeat_mask(X86_FEATURE_MTRR));
- hvm_event_msr(msr, msr_content);
+ if ( event_only && unlikely(currad->monitor.mov_to_msr_enabled) )
+ {
+ /* The actual write will occur in hvm_do_resume() (if permitted). */
+ v->arch.msr_write.do_write = 1;
+ v->arch.msr_write.msr = msr;
+ v->arch.msr_write.value = msr_content;
+
+ hvm_event_msr(msr, msr_content);
+ return X86EMUL_OKAY;
+ }
switch ( msr )
{
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 6734fb6..b9b4791 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1945,7 +1945,7 @@ static void svm_do_msr_access(struct cpu_user_regs *regs)
if ( (inst_len = __get_instruction_length(v, INSTR_WRMSR)) == 0 )
return;
msr_content = ((uint64_t)regs->edx << 32) | (uint32_t)regs->eax;
- rc = hvm_msr_write_intercept(regs->ecx, msr_content);
+ rc = hvm_msr_write_intercept(regs->ecx, msr_content, 1);
}
if ( rc == X86EMUL_OKAY )
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 74f563f..562507a 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -3044,7 +3044,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
{
uint64_t msr_content;
msr_content = ((uint64_t)regs->edx << 32) | (uint32_t)regs->eax;
- if ( hvm_msr_write_intercept(regs->ecx, msr_content) == X86EMUL_OKAY )
+ if ( hvm_msr_write_intercept(regs->ecx, msr_content, 1) == X86EMUL_OKAY )
update_guest_eip(); /* Safe: WRMSR */
break;
}
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index ac6e3b3..01726d5 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1056,7 +1056,8 @@ static void load_shadow_guest_state(struct vcpu *v)
if ( control & VM_ENTRY_LOAD_GUEST_PAT )
hvm_set_guest_pat(v, __get_vvmcs(vvmcs, GUEST_PAT));
if ( control & VM_ENTRY_LOAD_PERF_GLOBAL_CTRL )
- hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL, __get_vvmcs(vvmcs, GUEST_PERF_GLOBAL_CTRL));
+ hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
+ __get_vvmcs(vvmcs, GUEST_PERF_GLOBAL_CTRL), 1);
hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
@@ -1257,7 +1258,8 @@ static void load_vvmcs_host_state(struct vcpu *v)
if ( control & VM_EXIT_LOAD_HOST_PAT )
hvm_set_guest_pat(v, __get_vvmcs(vvmcs, HOST_PAT));
if ( control & VM_EXIT_LOAD_PERF_GLOBAL_CTRL )
- hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL, __get_vvmcs(vvmcs, HOST_PERF_GLOBAL_CTRL));
+ hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
+ __get_vvmcs(vvmcs, HOST_PERF_GLOBAL_CTRL), 1);
hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 0b2f9a6..1939d27 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1417,6 +1417,9 @@ static void p2m_vm_event_fill_regs(vm_event_request_t *req)
void p2m_mem_access_emulate_check(struct vcpu *v,
const vm_event_response_t *rsp)
{
+ if ( rsp->flags & MEM_ACCESS_SKIP_MSR_WRITE )
+ v->arch.msr_write.do_write = 0;
+
/* Mark vcpu for skipping one instruction upon rescheduling. */
if ( rsp->flags & MEM_ACCESS_EMULATE )
{
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 120a78a..844e892 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -387,6 +387,7 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
#ifdef HAS_MEM_ACCESS
case VM_EVENT_REASON_MEM_ACCESS:
+ case VM_EVENT_REASON_MOV_TO_MSR:
mem_access_resume(v, &rsp);
break;
#endif
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 682ccc5..e70e2bc 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -518,6 +518,11 @@ struct arch_vcpu
struct vm_event_emul_read_data emul_read_data;
} vm_event;
+ struct {
+ bool_t do_write;
+ uint64_t msr;
+ uint64_t value;
+ } msr_write;
};
smap_check_policy_t smap_policy_change(struct vcpu *v,
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index 05ef5c5..f55373e 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -128,7 +128,8 @@ int hvm_set_cr0(unsigned long value);
int hvm_set_cr3(unsigned long value);
int hvm_set_cr4(unsigned long value);
int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
-int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content);
+int hvm_msr_write_intercept(
+ unsigned int msr, uint64_t msr_content, bool_t event_only);
int hvm_mov_to_cr(unsigned int cr, unsigned int gpr);
int hvm_mov_from_cr(unsigned int cr, unsigned int gpr);
diff --git a/xen/include/public/vm_event.h b/xen/include/public/vm_event.h
index 2913a85..36190cc 100644
--- a/xen/include/public/vm_event.h
+++ b/xen/include/public/vm_event.h
@@ -158,6 +158,11 @@ struct vm_event_regs_x86 {
* MEM_ACCESS_EMULATE_NOWRITE.
*/
#define MEM_ACCESS_SET_EMUL_READ_DATA (1 << 8)
+ /*
+ * If mov_to_msr events are enabled, setting this flag in the vm_event
+ * response denies the MSR write that triggered the event.
+ */
+#define MEM_ACCESS_SKIP_MSR_WRITE (1 << 9)
struct vm_event_mem_access {
uint64_t gfn;
--
1.7.9.5
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel