# HG changeset patch
# User Wei Huang
# Date 1333136756 18000
# Node ID b3d45e0ffd4b7e2ea409e6d376951f33ca7feadf
# Parent  6765a2510ee7ad899dcb87eefdf206f8c8ae34ae
AMD_LWP: add interrupt support for AMD LWP

This patch adds interrupt support for AMD lightweight profiling (LWP). It
registers an interrupt handler using alloc_direct_apic_vector(). When
notified, SVM re-injects the virtual interrupt into the guest VM using the
guest's virtual local APIC.

Signed-off-by: Wei Huang

diff -r 6765a2510ee7 -r b3d45e0ffd4b xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c	Fri Mar 30 10:01:15 2012 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c	Fri Mar 30 14:45:56 2012 -0500
@@ -87,6 +87,9 @@
 static uint64_t osvw_length, osvw_status;
 static DEFINE_SPINLOCK(osvw_lock);
 
+/* LWP Vector */
+static uint8_t lwp_intr_vector;
+
 void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len)
 {
     struct vcpu *curr = current;
@@ -745,6 +748,26 @@
     *(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f;  /* ud2 */
 }
 
+/* LWP interrupt handler */
+static void svm_lwp_interrupt(struct cpu_user_regs *regs)
+{
+    struct vcpu *v = current;
+
+    ack_APIC_irq();
+    vlapic_set_irq(vcpu_vlapic(v),
+                   (v->arch.hvm_svm.guest_lwp_cfg >> 40) & 0xff, 0);
+}
+
+/* Init LWP interrupt handler */
+static void svm_init_lwp_intr(void)
+{
+    /* return if already allocated */
+    if ( lwp_intr_vector > 0 )
+        return;
+
+    alloc_direct_apic_vector(&lwp_intr_vector, svm_lwp_interrupt);
+}
+
 static inline void svm_lwp_save(struct vcpu *v)
 {
     /* Don't mess up with other guests. Disable LWP for next VCPU. */
@@ -759,7 +782,7 @@
 {
     /* Only LWP_CFG is reloaded. LWP_CBADDR will be reloaded via xrstor. */
     if ( v->arch.hvm_svm.guest_lwp_cfg )
-        wrmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm_svm.guest_lwp_cfg);
+        wrmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm_svm.cpu_lwp_cfg);
 }
 
 /* Update LWP_CFG MSR (0xc0000105). Return -1 if error; otherwise returns 0. */
@@ -776,11 +799,23 @@
         /* generate #GP if guest tries to turn on unsupported features. */
         if ( msr_low & ~edx)
             return -1;
+
+        v->arch.hvm_svm.guest_lwp_cfg = msr_content;
+
+        /* setup interrupt handler if needed */
+        if ( (msr_content & 0x80000000) && ((msr_content >> 40) & 0xff) )
+        {
+            svm_init_lwp_intr();
+            v->arch.hvm_svm.cpu_lwp_cfg = (msr_content & 0xffff00ffffffffffULL)
+                | ((uint64_t)lwp_intr_vector << 40);
+        }
+        else
+        {
+            /* otherwise disable it */
+            v->arch.hvm_svm.cpu_lwp_cfg = msr_content & 0xffff00ff7fffffffULL;
+        }
 
-        wrmsrl(MSR_AMD64_LWP_CFG, msr_content);
-        /* CPU might automatically correct reserved bits. So read it back. */
-        rdmsrl(MSR_AMD64_LWP_CFG, msr_content);
-        v->arch.hvm_svm.guest_lwp_cfg = msr_content;
+        wrmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm_svm.cpu_lwp_cfg);
 
         /* track nonalzy state if LWP_CFG is non-zero. */
         v->arch.nonlazy_xstate_used = !!(msr_content);
diff -r 6765a2510ee7 -r b3d45e0ffd4b xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h	Fri Mar 30 10:01:15 2012 +0100
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h	Fri Mar 30 14:45:56 2012 -0500
@@ -515,7 +515,8 @@
     uint64_t guest_sysenter_eip;
 
     /* AMD lightweight profiling MSR */
-    uint64_t guest_lwp_cfg;
+    uint64_t guest_lwp_cfg;      /* guest version */
+    uint64_t cpu_lwp_cfg;        /* CPU version */
 
     /* OSVW MSRs */
     struct {
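
A short, hedged illustration of the scheme the commit message describes: LWP_CFG
carries the interrupt vector in bits 47:40 and the threshold-interrupt enable in
bit 31. Xen records the value the guest wrote (guest_lwp_cfg) but programs the
hardware with its own allocated vector (cpu_lwp_cfg), so the physical interrupt
arrives in the hypervisor and is then re-injected into the guest's vlapic with
the guest's chosen vector. The stand-alone, user-space C sketch below mirrors
the masks used in svm_update_lwp_cfg(); it is not part of the patch, and the
helper names and example values are illustrative only.

/*
 * Stand-alone sketch (NOT Xen code) of the LWP_CFG split implemented by the
 * patch.  Constants mirror the masks in svm_update_lwp_cfg(); function names
 * and the example values in main() are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define LWP_CFG_VECTOR_MASK  0xffff00ffffffffffULL  /* clears vector, bits 47:40 */
#define LWP_CFG_INTR_ENABLE  (1ULL << 31)           /* threshold-interrupt enable */

/* Value the hypervisor would program into the real MSR (cpu_lwp_cfg). */
static uint64_t host_lwp_cfg(uint64_t guest_cfg, uint8_t host_vector)
{
    if ( (guest_cfg & LWP_CFG_INTR_ENABLE) && ((guest_cfg >> 40) & 0xff) )
        /* Keep the guest's settings but substitute the host's vector. */
        return (guest_cfg & LWP_CFG_VECTOR_MASK) |
               ((uint64_t)host_vector << 40);

    /* No usable guest vector: clear the vector field and the enable bit. */
    return guest_cfg & (LWP_CFG_VECTOR_MASK & ~LWP_CFG_INTR_ENABLE);
}

/* Vector the interrupt handler would inject through the guest's vlapic. */
static uint8_t guest_vector(uint64_t guest_cfg)
{
    return (guest_cfg >> 40) & 0xff;
}

int main(void)
{
    /* Hypothetical guest value: vector 0xd1, interrupt enabled, low bits set. */
    uint64_t guest_cfg = (0xd1ULL << 40) | LWP_CFG_INTR_ENABLE | 0x7f;
    uint8_t allocated_vector = 0xf3;   /* stands in for lwp_intr_vector */

    printf("guest LWP_CFG : %#018llx\n", (unsigned long long)guest_cfg);
    printf("host  LWP_CFG : %#018llx\n",
           (unsigned long long)host_lwp_cfg(guest_cfg, allocated_vector));
    printf("inject vector : %#04x\n", guest_vector(guest_cfg));
    return 0;
}

Keeping the two copies separate is what lets the hypervisor own the physical
vector while the guest continues to see, and read back, exactly the vector it
configured.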