Re: [Xen-devel] [PATCH v3 1/3] SVM: support data breakpoint extension registers
On Mon, Apr 7, 2014 at 10:38 AM, Jan Beulich <JBeulich@xxxxxxxx> wrote:
> Leveraging the generic MSR save/restore logic introduced a little while
> ago.
>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
> Tested-by: Aravind Gopalakrishnan <aravind.gopalakrishnan@xxxxxxx>
>
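A bit of context for anyone new to the feature: AMD's data breakpoint
extension (CPUID 0x80000001 ECX bit 26) adds one address-mask MSR per
debug register, so a breakpoint armed in DR0-DR3 can match a naturally
aligned range of addresses instead of a single one. A minimal guest-side
sketch of the intended use, assuming the usual write_debugreg() and
wrmsrl() wrappers (illustrative only, not part of this patch):

    /* Watch a 64-byte region with breakpoint 0 (illustrative sketch). */
    static void watch_64_byte_region(unsigned long base)
    {
        write_debugreg(0, base);               /* DR0: base address */
        /*
         * Address bits that are set in the mask are ignored in the
         * DR0 comparison, so a mask of 0x3f widens the match from a
         * single address to the whole 64-byte region.
         */
        wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, 0x3f);
        /* DR7 still has to enable and configure breakpoint 0 as usual. */
    }
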
> --- a/tools/libxc/xc_cpufeature.h
> +++ b/tools/libxc/xc_cpufeature.h
> @@ -125,6 +125,7 @@
> #define X86_FEATURE_NODEID_MSR 19 /* NodeId MSR */
> #define X86_FEATURE_TBM 21 /* trailing bit manipulations */
> #define X86_FEATURE_TOPOEXT 22 /* topology extensions CPUID leafs */
> +#define X86_FEATURE_DBEXT 26 /* data breakpoint extension */
>
> /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx) */
> #define X86_FEATURE_FSGSBASE 0 /* {RD,WR}{FS,GS}BASE instructions */
> --- a/tools/libxc/xc_cpuid_x86.c
> +++ b/tools/libxc/xc_cpuid_x86.c
> @@ -110,9 +110,10 @@ static void amd_xc_cpuid_policy(
> bitmaskof(X86_FEATURE_3DNOWPREFETCH) |
> bitmaskof(X86_FEATURE_OSVW) |
> bitmaskof(X86_FEATURE_XOP) |
> + bitmaskof(X86_FEATURE_LWP) |
> bitmaskof(X86_FEATURE_FMA4) |
> bitmaskof(X86_FEATURE_TBM) |
> - bitmaskof(X86_FEATURE_LWP));
> + bitmaskof(X86_FEATURE_DBEXT));
> regs[3] &= (0x0183f3ff | /* features shared with 0x00000001:EDX */
> (is_pae ? bitmaskof(X86_FEATURE_NX) : 0) |
> (is_64bit ? bitmaskof(X86_FEATURE_LM) : 0) |
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -3082,6 +3082,9 @@ void hvm_cpuid(unsigned int input, unsig
> /* Only provide PSE36 when guest runs in 32bit PAE or in long mode */
> if ( !(hvm_pae_enabled(v) || hvm_long_mode_enabled(v)) )
> *edx &= ~cpufeat_mask(X86_FEATURE_PSE36);
> + /* Hide data breakpoint extensions if the hardware lacks support. */
> + if ( !boot_cpu_has(X86_FEATURE_DBEXT) )
> + *ecx &= ~cpufeat_mask(X86_FEATURE_DBEXT);
> break;
>
> case 0x80000008:
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -160,14 +160,28 @@ void svm_intercept_msr(struct vcpu *v, u
> static void svm_save_dr(struct vcpu *v)
> {
> struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
> + unsigned int flag_dr_dirty = v->arch.hvm_vcpu.flag_dr_dirty;
>
> - if ( !v->arch.hvm_vcpu.flag_dr_dirty )
> + if ( !flag_dr_dirty )
> return;
>
> /* Clear the DR dirty flag and re-enable intercepts for DR accesses. */
> v->arch.hvm_vcpu.flag_dr_dirty = 0;
> vmcb_set_dr_intercepts(vmcb, ~0u);
>
> + if ( flag_dr_dirty & 2 )
> + {
> + svm_intercept_msr(v, MSR_AMD64_DR0_ADDRESS_MASK, MSR_INTERCEPT_RW);
> + svm_intercept_msr(v, MSR_AMD64_DR1_ADDRESS_MASK, MSR_INTERCEPT_RW);
> + svm_intercept_msr(v, MSR_AMD64_DR2_ADDRESS_MASK, MSR_INTERCEPT_RW);
> + svm_intercept_msr(v, MSR_AMD64_DR3_ADDRESS_MASK, MSR_INTERCEPT_RW);
> +
> + rdmsrl(MSR_AMD64_DR0_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[0]);
> + rdmsrl(MSR_AMD64_DR1_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[1]);
> + rdmsrl(MSR_AMD64_DR2_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[2]);
> + rdmsrl(MSR_AMD64_DR3_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[3]);
> + }
> +
> v->arch.debugreg[0] = read_debugreg(0);
> v->arch.debugreg[1] = read_debugreg(1);
> v->arch.debugreg[2] = read_debugreg(2);
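
A note on the encoding used here: flag_dr_dirty is now a two-bit field
rather than a boolean (hence the bool_t to u8 change at the end of the
patch). Bit 0 keeps its old meaning, namely that the architectural
DR0-DR7 state is live in hardware; bit 1 records that the address-mask
MSRs were loaded as well and therefore need saving on this path.
Hypothetical names, purely to spell the bits out:

    #define DR_DIRTY_ARCH  1u /* bit 0: DR0-DR7 live in hardware */
    #define DR_DIRTY_MASKS 2u /* bit 1: DRn address-mask MSRs live too */
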
> @@ -178,12 +192,32 @@ static void svm_save_dr(struct vcpu *v)
>
> static void __restore_debug_registers(struct vmcb_struct *vmcb, struct vcpu *v)
> {
> + unsigned int ecx;
> +
> if ( v->arch.hvm_vcpu.flag_dr_dirty )
> return;
>
> v->arch.hvm_vcpu.flag_dr_dirty = 1;
> vmcb_set_dr_intercepts(vmcb, 0);
>
> + ASSERT(v == current);
> + hvm_cpuid(0x80000001, NULL, NULL, &ecx, NULL);
> + if ( test_bit(X86_FEATURE_DBEXT & 31, &ecx) )
> + {
> + svm_intercept_msr(v, MSR_AMD64_DR0_ADDRESS_MASK, MSR_INTERCEPT_NONE);
> + svm_intercept_msr(v, MSR_AMD64_DR1_ADDRESS_MASK, MSR_INTERCEPT_NONE);
> + svm_intercept_msr(v, MSR_AMD64_DR2_ADDRESS_MASK, MSR_INTERCEPT_NONE);
> + svm_intercept_msr(v, MSR_AMD64_DR3_ADDRESS_MASK, MSR_INTERCEPT_NONE);
> +
> + wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[0]);
> + wrmsrl(MSR_AMD64_DR1_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[1]);
> + wrmsrl(MSR_AMD64_DR2_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[2]);
> + wrmsrl(MSR_AMD64_DR3_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[3]);
> +
> + /* Can't use hvm_cpuid() in svm_save_dr(): v != current. */
> + v->arch.hvm_vcpu.flag_dr_dirty |= 2;
> + }
> +
> write_debugreg(0, v->arch.debugreg[0]);
> write_debugreg(1, v->arch.debugreg[1]);
> write_debugreg(2, v->arch.debugreg[2]);
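
Two things worth noting in this hunk. First, hvm_cpuid() implicitly
operates on current, which is why the feature check lives here, where
v == current is asserted, and why its result is latched into bit 1 of
flag_dr_dirty for svm_save_dr() to consume later, as the comment above
says. Second, the X86_FEATURE_DBEXT & 31 idiom works because the
feature constants encode word and bit together:

    /* X86_FEATURE_DBEXT is (6*32 + 26): word 6 of Xen's feature
     * bitmap, which mirrors CPUID 0x80000001 ECX. Masking with 31
     * recovers the raw bit position within that one register. */
    unsigned int bit = X86_FEATURE_DBEXT & 31;  /* == 26 */
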
> @@ -355,6 +389,72 @@ static int svm_load_vmcb_ctxt(struct vcp
> return 0;
> }
>
> +static unsigned int __init svm_init_msr(void)
> +{
> + return boot_cpu_has(X86_FEATURE_DBEXT) ? 4 : 0;
> +}
> +
> +static void svm_save_msr(struct vcpu *v, struct hvm_msr *ctxt)
> +{
> + if ( boot_cpu_has(X86_FEATURE_DBEXT) )
> + {
> + ctxt->msr[ctxt->count].val = v->arch.hvm_svm.dr_mask[0];
> + if ( ctxt->msr[ctxt->count].val )
> + ctxt->msr[ctxt->count++].index = MSR_AMD64_DR0_ADDRESS_MASK;
> +
> + ctxt->msr[ctxt->count].val = v->arch.hvm_svm.dr_mask[1];
> + if ( ctxt->msr[ctxt->count].val )
> + ctxt->msr[ctxt->count++].index = MSR_AMD64_DR1_ADDRESS_MASK;
> +
> + ctxt->msr[ctxt->count].val = v->arch.hvm_svm.dr_mask[2];
> + if ( ctxt->msr[ctxt->count].val )
> + ctxt->msr[ctxt->count++].index = MSR_AMD64_DR2_ADDRESS_MASK;
> +
> + ctxt->msr[ctxt->count].val = v->arch.hvm_svm.dr_mask[3];
> + if ( ctxt->msr[ctxt->count].val )
> + ctxt->msr[ctxt->count++].index = MSR_AMD64_DR3_ADDRESS_MASK;
> + }
> +}
> +
> +static int svm_load_msr(struct vcpu *v, struct hvm_msr *ctxt)
> +{
> + unsigned int i, idx;
> + int err = 0;
> +
> + for ( i = 0; i < ctxt->count; ++i )
> + {
> + switch ( idx = ctxt->msr[i].index )
> + {
> + case MSR_AMD64_DR0_ADDRESS_MASK:
> + if ( !boot_cpu_has(X86_FEATURE_DBEXT) )
> + err = -ENXIO;
> + else if ( ctxt->msr[i].val >> 32 )
> + err = -EDOM;
> + else
> + v->arch.hvm_svm.dr_mask[0] = ctxt->msr[i].val;
> + break;
> +
> + case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
> + if ( !boot_cpu_has(X86_FEATURE_DBEXT) )
> + err = -ENXIO;
> + else if ( ctxt->msr[i].val >> 32 )
> + err = -EDOM;
> + else
> + v->arch.hvm_svm.dr_mask[idx - MSR_AMD64_DR1_ADDRESS_MASK + 1] =
> + ctxt->msr[i].val;
> + break;
> +
> + default:
> + continue;
> + }
> + if ( err )
> + break;
> + ctxt->msr[i]._rsvd = 1;
> + }
> +
> + return err;
> +}
> +
> static void svm_fpu_enter(struct vcpu *v)
> {
> struct vmcb_struct *n1vmcb = vcpu_nestedhvm(v).nv_n1vmcx;
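
On the three new hooks as a group: svm_init_msr() tells the generic
save/restore logic the maximum number of entries SVM may contribute
(the four mask MSRs), svm_save_msr() appends only non-zero masks so
the record stays empty, and hence migratable to older hosts, when the
guest never touched the feature, and svm_load_msr() marks each entry
it consumed via _rsvd so the generic loader can reject records
containing MSRs nobody claimed. From the fields used above, one record
entry looks roughly like this (a reconstruction for illustration, not
a quote of the canonical public header):

    struct hvm_one_msr {
        uint32_t index;  /* MSR number, e.g. MSR_AMD64_DR0_ADDRESS_MASK */
        uint32_t _rsvd;  /* set by load_msr to mark the entry consumed */
        uint64_t val;    /* must fit in 32 bits here, else -EDOM */
    };
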
> @@ -1451,6 +1551,8 @@ static int svm_msr_read_intercept(unsign
>
> switch ( msr )
> {
> + unsigned int ecx;
> +
> case MSR_IA32_SYSENTER_CS:
> *msr_content = v->arch.hvm_svm.guest_sysenter_cs;
> break;
> @@ -1526,6 +1628,21 @@ static int svm_msr_read_intercept(unsign
> vpmu_do_rdmsr(msr, msr_content);
> break;
>
> + case MSR_AMD64_DR0_ADDRESS_MASK:
> + hvm_cpuid(0x80000001, NULL, NULL, &ecx, NULL);
> + if ( !test_bit(X86_FEATURE_DBEXT & 31, &ecx) )
> + goto gpf;
> + *msr_content = v->arch.hvm_svm.dr_mask[0];
> + break;
> +
> + case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
> + hvm_cpuid(0x80000001, NULL, NULL, &ecx, NULL);
> + if ( !test_bit(X86_FEATURE_DBEXT & 31, &ecx) )
> + goto gpf;
> + *msr_content =
> + v->arch.hvm_svm.dr_mask[msr - MSR_AMD64_DR1_ADDRESS_MASK + 1];
> + break;
> +
> case MSR_AMD_OSVW_ID_LENGTH:
> case MSR_AMD_OSVW_STATUS:
> ret = svm_handle_osvw(v, msr, msr_content, 1);
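
Note that the intercept paths gate on the guest's CPUID view via
hvm_cpuid(), not on boot_cpu_has(), so a toolstack that masks DBEXT
out of the guest's policy yields #GP even on capable hardware. Both
the read path above and the write path below repeat the same test; a
hypothetical helper, shown only to make the shared pattern explicit:

    static bool_t dbext_visible_to_guest(void)
    {
        unsigned int ecx;

        hvm_cpuid(0x80000001, NULL, NULL, &ecx, NULL); /* acts on current */
        return !!test_bit(X86_FEATURE_DBEXT & 31, &ecx);
    }
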
> @@ -1594,6 +1711,8 @@ static int svm_msr_write_intercept(unsig
>
> switch ( msr )
> {
> + unsigned int ecx;
> +
> case MSR_IA32_SYSENTER_CS:
> vmcb->sysenter_cs = v->arch.hvm_svm.guest_sysenter_cs = msr_content;
> break;
> @@ -1669,6 +1788,21 @@ static int svm_msr_write_intercept(unsig
> */
> break;
>
> + case MSR_AMD64_DR0_ADDRESS_MASK:
> + hvm_cpuid(0x80000001, NULL, NULL, &ecx, NULL);
> + if ( !test_bit(X86_FEATURE_DBEXT & 31, &ecx) || (msr_content >> 32) )
> + goto gpf;
> + v->arch.hvm_svm.dr_mask[0] = msr_content;
> + break;
> +
> + case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
> + hvm_cpuid(0x80000001, NULL, NULL, &ecx, NULL);
> + if ( !test_bit(X86_FEATURE_DBEXT & 31, &ecx) || (msr_content >> 32) )
> + goto gpf;
> + v->arch.hvm_svm.dr_mask[msr - MSR_AMD64_DR1_ADDRESS_MASK + 1] =
> + msr_content;
> + break;
> +
> case MSR_AMD_OSVW_ID_LENGTH:
> case MSR_AMD_OSVW_STATUS:
> ret = svm_handle_osvw(v, msr, &msr_content, 0);
> @@ -2022,6 +2156,9 @@ static struct hvm_function_table __initd
> .vcpu_destroy = svm_vcpu_destroy,
> .save_cpu_ctxt = svm_save_vmcb_ctxt,
> .load_cpu_ctxt = svm_load_vmcb_ctxt,
> + .init_msr = svm_init_msr,
> + .save_msr = svm_save_msr,
> + .load_msr = svm_load_msr,
> .get_interrupt_shadow = svm_get_interrupt_shadow,
> .set_interrupt_shadow = svm_set_interrupt_shadow,
> .guest_x86_mode = svm_guest_x86_mode,
> --- a/xen/include/asm-x86/cpufeature.h
> +++ b/xen/include/asm-x86/cpufeature.h
> @@ -134,6 +134,7 @@
> #define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */
> #define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */
> #define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */
> +#define X86_FEATURE_DBEXT (6*32+26) /* data breakpoint extension */
>
> /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 7 */
> #define X86_FEATURE_FSGSBASE (7*32+ 0) /* {RD,WR}{FS,GS}BASE instructions */
> --- a/xen/include/asm-x86/hvm/svm/vmcb.h
> +++ b/xen/include/asm-x86/hvm/svm/vmcb.h
> @@ -515,6 +515,9 @@ struct arch_svm_struct {
> uint64_t guest_lwp_cfg; /* guest version */
> uint64_t cpu_lwp_cfg; /* CPU version */
>
> + /* data breakpoint extension MSRs */
> + uint32_t dr_mask[4];
> +
> /* OSVW MSRs */
> struct {
> u64 length;
> --- a/xen/include/asm-x86/hvm/vcpu.h
> +++ b/xen/include/asm-x86/hvm/vcpu.h
> @@ -140,7 +140,7 @@ struct hvm_vcpu {
>
> int xen_port;
>
> - bool_t flag_dr_dirty;
> + u8 flag_dr_dirty;
> bool_t debug_state_latch;
> bool_t single_step;
>
> --- a/xen/include/asm-x86/msr-index.h
> +++ b/xen/include/asm-x86/msr-index.h
> @@ -206,6 +206,11 @@
> #define MSR_AMD64_DC_CFG 0xc0011022
> #define AMD64_NB_CFG_CF8_EXT_ENABLE_BIT 46
>
> +#define MSR_AMD64_DR0_ADDRESS_MASK 0xc0011027
> +#define MSR_AMD64_DR1_ADDRESS_MASK 0xc0011019
> +#define MSR_AMD64_DR2_ADDRESS_MASK 0xc001101a
> +#define MSR_AMD64_DR3_ADDRESS_MASK 0xc001101b
> +
> /* AMD Family10h machine check MSRs */
> #define MSR_F10_MC4_MISC1 0xc0000408
> #define MSR_F10_MC4_MISC2 0xc0000409
>
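
One wrinkle worth calling out in the msr-index.h addition: the mask
MSRs are not contiguous. DR0's mask lives at 0xc0011027 while DR1-DR3
sit consecutively at 0xc0011019-0xc001101b, which is why every switch
in the patch needs a separate DR0 case plus a DR1...DR3 range, and why
the array index is computed as msr - MSR_AMD64_DR1_ADDRESS_MASK + 1.
A hypothetical helper, just to spell the mapping out:

    /* Map an address-mask MSR to its dr_mask[] slot (illustrative). */
    static unsigned int dr_mask_slot(unsigned int msr)
    {
        return msr == MSR_AMD64_DR0_ADDRESS_MASK
               ? 0
               : msr - MSR_AMD64_DR1_ADDRESS_MASK + 1;
    }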
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel