[Xen-devel] [PATCH V4 3/3] xen/vm_event: Deny register writes if refused by vm_event reply
Deny register writes if a vm_event client subscribed to mov_to_msr or
control register write events forbids them. Currently supported for
MSR, CR0, CR3 and CR4 events.

Signed-off-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
Acked-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>

---
Changes since V3:
 - Renamed MEM_ACCESS_FLAG_DENY to VM_EVENT_FLAG_DENY (and fixed the
   bit shift appropriately).
 - Moved the DENY vm_event response logic from p2m.c to newly added
   dedicated files for vm_event handling, as suggested by Tamas Lengyel.
---
 MAINTAINERS                       |    1 +
 xen/arch/x86/Makefile             |    1 +
 xen/arch/x86/domain.c             |    2 +
 xen/arch/x86/hvm/emulate.c        |    8 +--
 xen/arch/x86/hvm/event.c          |    5 +-
 xen/arch/x86/hvm/hvm.c            |  118 ++++++++++++++++++++++++++++++++-----
 xen/arch/x86/hvm/svm/nestedsvm.c  |   14 ++---
 xen/arch/x86/hvm/svm/svm.c        |    2 +-
 xen/arch/x86/hvm/vmx/vmx.c        |   15 +++--
 xen/arch/x86/hvm/vmx/vvmx.c       |   18 +++---
 xen/arch/x86/vm_event.c           |   33 +++++++++++
 xen/common/vm_event.c             |    9 +++
 xen/include/asm-arm/vm_event.h    |   12 ++++
 xen/include/asm-x86/domain.h      |   18 +++++-
 xen/include/asm-x86/hvm/event.h   |    9 ++-
 xen/include/asm-x86/hvm/support.h |    9 +--
 xen/include/asm-x86/vm_event.h    |    8 +++
 xen/include/public/vm_event.h     |    6 ++
 18 files changed, 242 insertions(+), 46 deletions(-)
 create mode 100644 xen/arch/x86/vm_event.c
 create mode 100644 xen/include/asm-arm/vm_event.h
 create mode 100644 xen/include/asm-x86/vm_event.h

diff --git a/MAINTAINERS b/MAINTAINERS
index 6b1068e..59c0822 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -383,6 +383,7 @@ F: xen/common/vm_event.c
 F: xen/common/mem_access.c
 F: xen/arch/x86/hvm/event.c
 F: xen/arch/x86/monitor.c
+F: xen/arch/x86/vm_event.c
 
 XENTRACE
 M: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
diff --git a/xen/arch/x86/Makefile b/xen/arch/x86/Makefile
index 37e547c..5f24951 100644
--- a/xen/arch/x86/Makefile
+++ b/xen/arch/x86/Makefile
@@ -60,6 +60,7 @@ obj-y += machine_kexec.o
 obj-y += crash.o
 obj-y += tboot.o
 obj-y += hpet.o
+obj-y += vm_event.o
 obj-y += xstate.o
 
 obj-$(crash_debug) += gdbstub.o
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index a8fe046..c688ab9 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -678,6 +678,8 @@ void arch_domain_destroy(struct domain *d)
     cleanup_domain_irq_mapping(d);
 
     psr_free_rmid(d);
+
+    xfree(d->arch.event_write_data);
 }
 
 void arch_domain_shutdown(struct domain *d)
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index c6ccb1f..780adb4 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1389,14 +1389,14 @@ static int hvmemul_write_cr(
     switch ( reg )
     {
     case 0:
-        return hvm_set_cr0(val);
+        return hvm_set_cr0(val, 1);
     case 2:
         current->arch.hvm_vcpu.guest_cr[2] = val;
         return X86EMUL_OKAY;
     case 3:
-        return hvm_set_cr3(val);
+        return hvm_set_cr3(val, 1);
     case 4:
-        return hvm_set_cr4(val);
+        return hvm_set_cr4(val, 1);
     default:
         break;
     }
@@ -1417,7 +1417,7 @@ static int hvmemul_write_msr(
     uint64_t val,
     struct x86_emulate_ctxt *ctxt)
 {
-    return hvm_msr_write_intercept(reg, val);
+    return hvm_msr_write_intercept(reg, val, 1);
 }
 
 static int hvmemul_wbinvd(
diff --git a/xen/arch/x86/hvm/event.c b/xen/arch/x86/hvm/event.c
index 17638ea..042e583 100644
--- a/xen/arch/x86/hvm/event.c
+++ b/xen/arch/x86/hvm/event.c
@@ -90,7 +90,7 @@ static int hvm_event_traps(uint8_t sync, vm_event_request_t *req)
     return 1;
 }
 
-void hvm_event_cr(unsigned int index, unsigned long value, unsigned long old)
+bool_t hvm_event_cr(unsigned int index, unsigned long value, unsigned long old)
 {
     struct arch_domain *currad = &current->domain->arch;
     unsigned int ctrlreg_bitmask = monitor_ctrlreg_bitmask(index);
@@ -109,7 +109,10 @@ void hvm_event_cr(unsigned int index, unsigned long value, unsigned long old)
 
         hvm_event_traps(currad->monitor.write_ctrlreg_sync & ctrlreg_bitmask,
                         &req);
+        return 1;
     }
+
+    return 0;
 }
 
 void hvm_event_msr(unsigned int msr, uint64_t value)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 536d1c8..abfca33 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -52,6 +52,7 @@
 #include <asm/traps.h>
 #include <asm/mc146818rtc.h>
 #include <asm/mce.h>
+#include <asm/monitor.h>
 #include <asm/hvm/hvm.h>
 #include <asm/hvm/vpt.h>
 #include <asm/hvm/support.h>
@@ -468,6 +469,35 @@ void hvm_do_resume(struct vcpu *v)
         }
     }
 
+    if ( unlikely(d->arch.event_write_data) )
+    {
+        struct monitor_write_data *w = &d->arch.event_write_data[v->vcpu_id];
+
+        if ( w->do_write.msr )
+        {
+            hvm_msr_write_intercept(w->msr, w->value, 0);
+            w->do_write.msr = 0;
+        }
+
+        if ( w->do_write.cr0 )
+        {
+            hvm_set_cr0(w->cr0, 0);
+            w->do_write.cr0 = 0;
+        }
+
+        if ( w->do_write.cr4 )
+        {
+            hvm_set_cr4(w->cr4, 0);
+            w->do_write.cr4 = 0;
+        }
+
+        if ( w->do_write.cr3 )
+        {
+            hvm_set_cr3(w->cr3, 0);
+            w->do_write.cr3 = 0;
+        }
+    }
+
     /* Inject pending hw/sw trap */
     if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
     {
@@ -3099,13 +3129,13 @@ int hvm_mov_to_cr(unsigned int cr, unsigned int gpr)
     switch ( cr )
     {
     case 0:
-        return hvm_set_cr0(val);
+        return hvm_set_cr0(val, 1);
 
     case 3:
-        return hvm_set_cr3(val);
+        return hvm_set_cr3(val, 1);
 
     case 4:
-        return hvm_set_cr4(val);
+        return hvm_set_cr4(val, 1);
 
     case 8:
         vlapic_set_reg(vcpu_vlapic(curr), APIC_TASKPRI, ((val & 0x0f) << 4));
@@ -3202,12 +3232,13 @@ static void hvm_update_cr(struct vcpu *v, unsigned int cr, unsigned long value)
     hvm_update_guest_cr(v, cr);
 }
 
-int hvm_set_cr0(unsigned long value)
+int hvm_set_cr0(unsigned long value, bool_t may_defer)
 {
     struct vcpu *v = current;
     struct domain *d = v->domain;
     unsigned long gfn, old_value = v->arch.hvm_vcpu.guest_cr[0];
     struct page_info *page;
+    struct arch_domain *currad = &v->domain->arch;
 
     HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
 
@@ -3237,6 +3268,22 @@ int hvm_set_cr0(unsigned long value)
         goto gpf;
     }
 
+    if ( may_defer && unlikely(currad->monitor.write_ctrlreg_enabled &
+                               monitor_ctrlreg_bitmask(VM_EVENT_X86_CR0)) &&
+         value != old_value )
+    {
+        ASSERT(currad->event_write_data != NULL);
+
+        if ( hvm_event_crX(CR0, value, old_value) )
+        {
+            /* The actual write will occur in hvm_do_resume(), if permitted. */
+            currad->event_write_data[v->vcpu_id].do_write.cr0 = 1;
+            currad->event_write_data[v->vcpu_id].cr0 = value;
+
+            return X86EMUL_OKAY;
+        }
+    }
+
     if ( (value & X86_CR0_PG) && !(old_value & X86_CR0_PG) )
     {
         if ( v->arch.hvm_vcpu.guest_efer & EFER_LME )
@@ -3303,7 +3350,6 @@ int hvm_set_cr0(unsigned long value)
         hvm_funcs.handle_cd(v, value);
 
     hvm_update_cr(v, 0, value);
-    hvm_event_crX(CR0, value, old_value);
 
     if ( (value ^ old_value) & X86_CR0_PG ) {
         if ( !nestedhvm_vmswitch_in_progress(v) && nestedhvm_vcpu_in_guestmode(v) )
@@ -3319,11 +3365,28 @@ int hvm_set_cr0(unsigned long value)
     return X86EMUL_EXCEPTION;
 }
 
-int hvm_set_cr3(unsigned long value)
+int hvm_set_cr3(unsigned long value, bool_t may_defer)
 {
     struct vcpu *v = current;
     struct page_info *page;
-    unsigned long old;
+    unsigned long old = v->arch.hvm_vcpu.guest_cr[3];
+    struct arch_domain *currad = &v->domain->arch;
+
+    if ( may_defer && unlikely(currad->monitor.write_ctrlreg_enabled &
+                               monitor_ctrlreg_bitmask(VM_EVENT_X86_CR3)) &&
+         value != old )
+    {
+        ASSERT(currad->event_write_data != NULL);
+
+        if ( hvm_event_crX(CR3, value, old) )
+        {
+            /* The actual write will occur in hvm_do_resume(), if permitted. */
+            currad->event_write_data[v->vcpu_id].do_write.cr3 = 1;
+            currad->event_write_data[v->vcpu_id].cr3 = value;
+
+            return X86EMUL_OKAY;
+        }
+    }
 
     if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) &&
          (value != v->arch.hvm_vcpu.guest_cr[3]) )
@@ -3341,10 +3404,8 @@ int hvm_set_cr3(unsigned long value)
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
     }
 
-    old=v->arch.hvm_vcpu.guest_cr[3];
     v->arch.hvm_vcpu.guest_cr[3] = value;
     paging_update_cr3(v);
-    hvm_event_crX(CR3, value, old);
     return X86EMUL_OKAY;
 
  bad_cr3:
@@ -3353,10 +3414,11 @@ int hvm_set_cr3(unsigned long value)
     return X86EMUL_UNHANDLEABLE;
 }
 
-int hvm_set_cr4(unsigned long value)
+int hvm_set_cr4(unsigned long value, bool_t may_defer)
 {
     struct vcpu *v = current;
     unsigned long old_cr;
+    struct arch_domain *currad = &v->domain->arch;
 
     if ( value & hvm_cr4_guest_reserved_bits(v, 0) )
     {
@@ -3384,8 +3446,23 @@ int hvm_set_cr4(unsigned long value)
         goto gpf;
     }
 
+    if ( may_defer && unlikely(currad->monitor.write_ctrlreg_enabled &
+                               monitor_ctrlreg_bitmask(VM_EVENT_X86_CR4)) &&
+         value != old_cr )
+    {
+        ASSERT(currad->event_write_data != NULL);
+
+        if ( hvm_event_crX(CR4, value, old_cr) )
+        {
+            /* The actual write will occur in hvm_do_resume(), if permitted. */
+            currad->event_write_data[v->vcpu_id].do_write.cr4 = 1;
+            currad->event_write_data[v->vcpu_id].cr4 = value;
+
+            return X86EMUL_OKAY;
+        }
+    }
+
     hvm_update_cr(v, 4, value);
-    hvm_event_crX(CR4, value, old_cr);
 
     /*
      * Modifying CR4.{PSE,PAE,PGE,SMEP}, or clearing CR4.PCIDE
@@ -3849,7 +3926,7 @@ void hvm_task_switch(
 
         goto out;
 
-    if ( hvm_set_cr3(tss.cr3) )
+    if ( hvm_set_cr3(tss.cr3, 1) )
         goto out;
 
     regs->eip = tss.eip;
@@ -4551,12 +4628,14 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
     goto out;
 }
 
-int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
+int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
+                            bool_t may_defer)
 {
     struct vcpu *v = current;
     bool_t mtrr;
    unsigned int edx, index;
     int ret = X86EMUL_OKAY;
+    struct arch_domain *currad = &current->domain->arch;
 
     HVMTRACE_3D(MSR_WRITE, msr,
                 (uint32_t)msr_content, (uint32_t)(msr_content >> 32));
@@ -4564,7 +4643,18 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
     hvm_cpuid(1, NULL, NULL, NULL, &edx);
     mtrr = !!(edx & cpufeat_mask(X86_FEATURE_MTRR));
 
-    hvm_event_msr(msr, msr_content);
+    if ( may_defer && unlikely(currad->monitor.mov_to_msr_enabled) )
+    {
+        ASSERT(currad->event_write_data != NULL);
+
+        /* The actual write will occur in hvm_do_resume() (if permitted). */
+        currad->event_write_data[v->vcpu_id].do_write.msr = 1;
+        currad->event_write_data[v->vcpu_id].msr = msr;
+        currad->event_write_data[v->vcpu_id].value = msr_content;
+
+        hvm_event_msr(msr, msr_content);
+        return X86EMUL_OKAY;
+    }
 
     switch ( msr )
     {
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index be5797a..07e3cad 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -274,7 +274,7 @@ int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
 
     /* CR4 */
     v->arch.hvm_vcpu.guest_cr[4] = n1vmcb->_cr4;
-    rc = hvm_set_cr4(n1vmcb->_cr4);
+    rc = hvm_set_cr4(n1vmcb->_cr4, 1);
     if (rc != X86EMUL_OKAY)
         gdprintk(XENLOG_ERR, "hvm_set_cr4 failed, rc: %u\n", rc);
 
@@ -283,7 +283,7 @@ int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
         svm->ns_cr0, v->arch.hvm_vcpu.guest_cr[0]);
     v->arch.hvm_vcpu.guest_cr[0] = n1vmcb->_cr0 | X86_CR0_PE;
     n1vmcb->rflags &= ~X86_EFLAGS_VM;
-    rc = hvm_set_cr0(n1vmcb->_cr0 | X86_CR0_PE);
+    rc = hvm_set_cr0(n1vmcb->_cr0 | X86_CR0_PE, 1);
     if (rc != X86EMUL_OKAY)
         gdprintk(XENLOG_ERR, "hvm_set_cr0 failed, rc: %u\n", rc);
     svm->ns_cr0 = v->arch.hvm_vcpu.guest_cr[0];
@@ -309,7 +309,7 @@ int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
         v->arch.guest_table = pagetable_null();
         /* hvm_set_cr3() below sets v->arch.hvm_vcpu.guest_cr[3] for us. */
     }
-    rc = hvm_set_cr3(n1vmcb->_cr3);
+    rc = hvm_set_cr3(n1vmcb->_cr3, 1);
     if (rc != X86EMUL_OKAY)
         gdprintk(XENLOG_ERR, "hvm_set_cr3 failed, rc: %u\n", rc);
 
@@ -534,7 +534,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
 
     /* CR4 */
     v->arch.hvm_vcpu.guest_cr[4] = ns_vmcb->_cr4;
-    rc = hvm_set_cr4(ns_vmcb->_cr4);
+    rc = hvm_set_cr4(ns_vmcb->_cr4, 1);
     if (rc != X86EMUL_OKAY)
         gdprintk(XENLOG_ERR, "hvm_set_cr4 failed, rc: %u\n", rc);
 
@@ -542,7 +542,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
     svm->ns_cr0 = v->arch.hvm_vcpu.guest_cr[0];
     cr0 = nestedsvm_fpu_vmentry(svm->ns_cr0, ns_vmcb, n1vmcb, n2vmcb);
     v->arch.hvm_vcpu.guest_cr[0] = ns_vmcb->_cr0;
-    rc = hvm_set_cr0(cr0);
+    rc = hvm_set_cr0(cr0, 1);
     if (rc != X86EMUL_OKAY)
         gdprintk(XENLOG_ERR, "hvm_set_cr0 failed, rc: %u\n", rc);
 
@@ -558,7 +558,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
         nestedsvm_vmcb_set_nestedp2m(v, ns_vmcb, n2vmcb);
 
         /* hvm_set_cr3() below sets v->arch.hvm_vcpu.guest_cr[3] for us. */
-        rc = hvm_set_cr3(ns_vmcb->_cr3);
+        rc = hvm_set_cr3(ns_vmcb->_cr3, 1);
         if (rc != X86EMUL_OKAY)
             gdprintk(XENLOG_ERR, "hvm_set_cr3 failed, rc: %u\n", rc);
     } else if (paging_mode_hap(v->domain)) {
@@ -570,7 +570,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
          * we assume it intercepts page faults.
          */
         /* hvm_set_cr3() below sets v->arch.hvm_vcpu.guest_cr[3] for us. */
-        rc = hvm_set_cr3(ns_vmcb->_cr3);
+        rc = hvm_set_cr3(ns_vmcb->_cr3, 1);
         if (rc != X86EMUL_OKAY)
             gdprintk(XENLOG_ERR, "hvm_set_cr3 failed, rc: %u\n", rc);
     } else {
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index a02f983..5e39b88 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1945,7 +1945,7 @@ static void svm_do_msr_access(struct cpu_user_regs *regs)
         if ( (inst_len = __get_instruction_length(v, INSTR_WRMSR)) == 0 )
             return;
         msr_content = ((uint64_t)regs->edx << 32) | (uint32_t)regs->eax;
-        rc = hvm_msr_write_intercept(regs->ecx, msr_content);
+        rc = hvm_msr_write_intercept(regs->ecx, msr_content, 1);
     }
 
     if ( rc == X86EMUL_OKAY )
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index fc29b89..96c55ec 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2010,9 +2010,16 @@ static int vmx_cr_access(unsigned long exit_qualification)
     }
     case VMX_CONTROL_REG_ACCESS_TYPE_CLTS: {
         unsigned long old = curr->arch.hvm_vcpu.guest_cr[0];
-        curr->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS;
+        unsigned long value = old & ~X86_CR0_TS;
+
+        /*
+         * Special case unlikely to be interesting to a
+         * VM_EVENT_FLAG_DENY-capable application, so the hvm_event_crX()
+         * return value is ignored for now.
+         */
+        hvm_event_crX(CR0, value, old);
+        curr->arch.hvm_vcpu.guest_cr[0] = value;
         vmx_update_guest_cr(curr, 0);
-        hvm_event_crX(CR0, curr->arch.hvm_vcpu.guest_cr[0], old);
         HVMTRACE_0D(CLTS);
         break;
     }
@@ -2024,7 +2031,7 @@ static int vmx_cr_access(unsigned long exit_qualification)
               (VMX_CONTROL_REG_ACCESS_DATA(exit_qualification) &
                (X86_CR0_PE|X86_CR0_MP|X86_CR0_EM|X86_CR0_TS));
         HVMTRACE_LONG_1D(LMSW, value);
-        return hvm_set_cr0(value);
+        return hvm_set_cr0(value, 1);
     }
     default:
         BUG();
@@ -3035,7 +3042,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
     {
         uint64_t msr_content;
         msr_content = ((uint64_t)regs->edx << 32) | (uint32_t)regs->eax;
-        if ( hvm_msr_write_intercept(regs->ecx, msr_content) == X86EMUL_OKAY )
+        if ( hvm_msr_write_intercept(regs->ecx, msr_content, 1) == X86EMUL_OKAY )
             update_guest_eip(); /* Safe: WRMSR */
         break;
     }
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index ac6e3b3..52bf39c 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1048,15 +1048,16 @@ static void load_shadow_guest_state(struct vcpu *v)
 
     nvcpu->guest_cr[0] = __get_vvmcs(vvmcs, CR0_READ_SHADOW);
     nvcpu->guest_cr[4] = __get_vvmcs(vvmcs, CR4_READ_SHADOW);
-    hvm_set_cr0(__get_vvmcs(vvmcs, GUEST_CR0));
-    hvm_set_cr4(__get_vvmcs(vvmcs, GUEST_CR4));
-    hvm_set_cr3(__get_vvmcs(vvmcs, GUEST_CR3));
+    hvm_set_cr0(__get_vvmcs(vvmcs, GUEST_CR0), 1);
+    hvm_set_cr4(__get_vvmcs(vvmcs, GUEST_CR4), 1);
+    hvm_set_cr3(__get_vvmcs(vvmcs, GUEST_CR3), 1);
 
     control = __get_vvmcs(vvmcs, VM_ENTRY_CONTROLS);
     if ( control & VM_ENTRY_LOAD_GUEST_PAT )
         hvm_set_guest_pat(v, __get_vvmcs(vvmcs, GUEST_PAT));
     if ( control & VM_ENTRY_LOAD_PERF_GLOBAL_CTRL )
-        hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL, __get_vvmcs(vvmcs, GUEST_PERF_GLOBAL_CTRL));
+        hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
+                                __get_vvmcs(vvmcs, GUEST_PERF_GLOBAL_CTRL), 0);
 
     hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
 
@@ -1249,15 +1250,16 @@ static void load_vvmcs_host_state(struct vcpu *v)
         __vmwrite(vmcs_h2g_field[i].guest_field, r);
     }
 
-    hvm_set_cr0(__get_vvmcs(vvmcs, HOST_CR0));
-    hvm_set_cr4(__get_vvmcs(vvmcs, HOST_CR4));
-    hvm_set_cr3(__get_vvmcs(vvmcs, HOST_CR3));
+    hvm_set_cr0(__get_vvmcs(vvmcs, HOST_CR0), 1);
+    hvm_set_cr4(__get_vvmcs(vvmcs, HOST_CR4), 1);
+    hvm_set_cr3(__get_vvmcs(vvmcs, HOST_CR3), 1);
 
     control = __get_vvmcs(vvmcs, VM_EXIT_CONTROLS);
     if ( control & VM_EXIT_LOAD_HOST_PAT )
         hvm_set_guest_pat(v, __get_vvmcs(vvmcs, HOST_PAT));
     if ( control & VM_EXIT_LOAD_PERF_GLOBAL_CTRL )
-        hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL, __get_vvmcs(vvmcs, HOST_PERF_GLOBAL_CTRL));
+        hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
+                                __get_vvmcs(vvmcs, HOST_PERF_GLOBAL_CTRL), 1);
 
     hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
 
diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
new file mode 100644
index 0000000..aca731d
--- /dev/null
+++ b/xen/arch/x86/vm_event.c
@@ -0,0 +1,33 @@
+#include <asm/vm_event.h>
+
+void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp)
+{
+    if ( rsp->flags & VM_EVENT_FLAG_DENY )
+    {
+        struct monitor_write_data *w =
+            &v->domain->arch.event_write_data[v->vcpu_id];
+
+        ASSERT(v->domain->arch.event_write_data != NULL);
+
+        switch ( rsp->reason )
+        {
+        case VM_EVENT_REASON_MOV_TO_MSR:
+            w->do_write.msr = 0;
+            break;
+        case VM_EVENT_REASON_WRITE_CTRLREG:
+            switch ( rsp->u.write_ctrlreg.index )
+            {
+            case VM_EVENT_X86_CR0:
+                w->do_write.cr0 = 0;
+                break;
+            case VM_EVENT_X86_CR3:
+                w->do_write.cr3 = 0;
+                break;
+            case VM_EVENT_X86_CR4:
+                w->do_write.cr4 = 0;
+                break;
+            }
+            break;
+        }
+    }
+}
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 11438da..eea035c 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -26,6 +26,7 @@
 #include <xen/wait.h>
 #include <xen/vm_event.h>
 #include <xen/mem_access.h>
+#include <asm/vm_event.h>
 #include <asm/p2m.h>
 #include <xsm/xsm.h>
 
@@ -79,6 +80,10 @@ static int vm_event_enable(
         }
     }
 
+    if ( !d->arch.event_write_data )
+        d->arch.event_write_data = xzalloc_array(struct monitor_write_data,
+                                                 d->max_vcpus);
+
     rc = prepare_ring_for_helper(d, ring_gfn, &ved->ring_pg_struct,
                                  &ved->ring_page);
     if ( rc < 0 )
@@ -407,6 +412,10 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
          */
         switch ( rsp.reason )
         {
+        case VM_EVENT_REASON_MOV_TO_MSR:
+        case VM_EVENT_REASON_WRITE_CTRLREG:
+            vm_event_register_write_resume(v, &rsp);
+            break;
 
 #ifdef HAS_MEM_ACCESS
         case VM_EVENT_REASON_MEM_ACCESS:
diff --git a/xen/include/asm-arm/vm_event.h b/xen/include/asm-arm/vm_event.h
new file mode 100644
index 0000000..5205ee8
--- /dev/null
+++ b/xen/include/asm-arm/vm_event.h
@@ -0,0 +1,12 @@
+#ifndef __ASM_ARM_VM_EVENT_H__
+#define __ASM_ARM_VM_EVENT_H__
+
+#include <xen/vm_event.h>
+
+static inline
+void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp)
+{
+    /* Not supported on ARM. */
+}
+
+#endif /* __ASM_ARM_VM_EVENT_H__ */
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index f712caa..8990277 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -249,6 +249,21 @@ struct pv_domain
     struct mapcache_domain mapcache;
 };
 
+struct monitor_write_data {
+    struct {
+        unsigned int msr : 1;
+        unsigned int cr0 : 1;
+        unsigned int cr3 : 1;
+        unsigned int cr4 : 1;
+    } do_write;
+
+    uint32_t msr;
+    uint64_t value;
+    uint64_t cr0;
+    uint64_t cr3;
+    uint64_t cr4;
+};
+
 struct arch_domain
 {
     struct page_info *perdomain_l3_pg;
@@ -359,6 +374,8 @@ struct arch_domain
 
     /* Mem_access emulation control */
     bool_t mem_access_emulate_enabled;
+
+    struct monitor_write_data *event_write_data;
 } __cacheline_aligned;
 
 #define has_arch_pdevs(d)    (!list_empty(&(d)->arch.pdev_list))
@@ -513,7 +530,6 @@ struct arch_vcpu
         unsigned long eip;
         struct vm_event_emul_read_data *emul_read_data;
     } vm_event;
-
 };
 
 smap_check_policy_t smap_policy_change(struct vcpu *v,
diff --git a/xen/include/asm-x86/hvm/event.h b/xen/include/asm-x86/hvm/event.h
index ab5abd0..c082c20 100644
--- a/xen/include/asm-x86/hvm/event.h
+++ b/xen/include/asm-x86/hvm/event.h
@@ -18,8 +18,13 @@
 #ifndef __ASM_X86_HVM_EVENT_H__
 #define __ASM_X86_HVM_EVENT_H__
 
-/* Called for current VCPU on crX/MSR changes by guest */
-void hvm_event_cr(unsigned int index, unsigned long value, unsigned long old);
+/*
+ * Called for current VCPU on crX/MSR changes by guest.
+ * The event might not fire if the client has subscribed to it in onchangeonly
+ * mode, hence the bool_t return type for control register write events.
+ */
+bool_t hvm_event_cr(unsigned int index, unsigned long value,
+                    unsigned long old);
 #define hvm_event_crX(what, new, old) \
     hvm_event_cr(VM_EVENT_X86_##what, new, old)
 void hvm_event_msr(unsigned int msr, uint64_t value);
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index 05ef5c5..95d3bb2 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -124,11 +124,12 @@ void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value);
 
 /* These functions all return X86EMUL return codes. */
 int hvm_set_efer(uint64_t value);
-int hvm_set_cr0(unsigned long value);
-int hvm_set_cr3(unsigned long value);
-int hvm_set_cr4(unsigned long value);
+int hvm_set_cr0(unsigned long value, bool_t may_defer);
+int hvm_set_cr3(unsigned long value, bool_t may_defer);
+int hvm_set_cr4(unsigned long value, bool_t may_defer);
 int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
-int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content);
+int hvm_msr_write_intercept(
+    unsigned int msr, uint64_t msr_content, bool_t may_defer);
 int hvm_mov_to_cr(unsigned int cr, unsigned int gpr);
 int hvm_mov_from_cr(unsigned int cr, unsigned int gpr);
 
diff --git a/xen/include/asm-x86/vm_event.h b/xen/include/asm-x86/vm_event.h
new file mode 100644
index 0000000..bc6e9f7
--- /dev/null
+++ b/xen/include/asm-x86/vm_event.h
@@ -0,0 +1,8 @@
+#ifndef __ASM_X86_VM_EVENT_H__
+#define __ASM_X86_VM_EVENT_H__
+
+#include <xen/vm_event.h>
+
+void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp);
+
+#endif /* __ASM_X86_VM_EVENT_H__ */
diff --git a/xen/include/public/vm_event.h b/xen/include/public/vm_event.h
index 11e65c4..0e175cc 100644
--- a/xen/include/public/vm_event.h
+++ b/xen/include/public/vm_event.h
@@ -69,6 +69,12 @@
  * VM_EVENT_FLAG_SET_EMUL_READ_DATA are set, only the latter will be honored).
  */
 #define VM_EVENT_FLAG_SET_EMUL_READ_DATA (1 << 4)
+ /*
+  * Deny completion of the operation that triggered the event.
+  * Currently only useful for MSR, CR0, CR3 and CR4 write events.
+  */
+#define VM_EVENT_FLAG_DENY               (1 << 5)
+
 
 /*
  * Reasons for the vm event request

-- 
1.7.9.5
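For illustration, a monitoring application consuming these events could refuse
a write roughly as follows. This is only a minimal sketch, not part of the
patch: the ring handling and event loop (see tools/tests/xen-access for the
real thing) are omitted, and the include path plus the u.mov_to_msr field names
are assumptions based on the public vm_event header rather than on the hunks
above.

/*
 * Hypothetical consumer-side response builder. EXAMPLE_PROTECTED_MSR and
 * build_response() are made-up names used only for this sketch.
 */
#include <string.h>
#include <xen/vm_event.h>

#define EXAMPLE_PROTECTED_MSR 0x1d9 /* e.g. IA32_DEBUGCTL, chosen arbitrarily */

static void build_response(const vm_event_request_t *req,
                           vm_event_response_t *rsp)
{
    memset(rsp, 0, sizeof(*rsp));
    rsp->vcpu_id = req->vcpu_id;
    rsp->reason  = req->reason;
    /* Echo the pause flag so the hypervisor unpauses the vCPU on resume. */
    rsp->flags   = req->flags & VM_EVENT_FLAG_VCPU_PAUSED;

    switch ( req->reason )
    {
    case VM_EVENT_REASON_MOV_TO_MSR:
        if ( req->u.mov_to_msr.msr == EXAMPLE_PROTECTED_MSR )
            rsp->flags |= VM_EVENT_FLAG_DENY; /* hvm_do_resume() skips the write */
        break;
    case VM_EVENT_REASON_WRITE_CTRLREG:
        /* Echo the index so the right deferred write gets cancelled. */
        rsp->u.write_ctrlreg.index = req->u.write_ctrlreg.index;
        if ( req->u.write_ctrlreg.index == VM_EVENT_X86_CR4 )
            rsp->flags |= VM_EVENT_FLAG_DENY; /* e.g. pin CR4 */
        break;
    }
}

The response is placed on the ring as usual; if VM_EVENT_FLAG_DENY is set,
vm_event_register_write_resume() clears the corresponding do_write bit, so
hvm_do_resume() never commits the deferred register write.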