[PATCH v7 05/10] x86/vmx: Add Intel Processor Trace support
From: Michał Leszczyński <michal.leszczynski@xxxxxxx>

Add CPUID/MSR enumeration details for Processor Trace.  For now, we will only
support its use inside VMX operation.  Fill in the vmtrace_available boolean
to activate the newly introduced common infrastructure for allocating trace
buffers.

For now, Processor Trace is going to be operated in Single Output mode behind
the guest's back.

Add the MSRs to struct vcpu_msrs, and set up the buffer limit in vmx_init_pt()
as it is fixed for the lifetime of the domain.  Context switch most of the
MSRs in and out on vCPU context switch, but the main control register needs to
reside in the MSR load/save lists.

Explicitly pull the msrs pointer out into a local variable, because the
optimiser cannot keep it live across the memory clobbers in the MSR accesses.

Signed-off-by: Michał Leszczyński <michal.leszczynski@xxxxxxx>
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Wei Liu <wl@xxxxxxx>
CC: Jun Nakajima <jun.nakajima@xxxxxxxxx>
CC: Kevin Tian <kevin.tian@xxxxxxxxx>
CC: Michał Leszczyński <michal.leszczynski@xxxxxxx>
CC: Tamas K Lengyel <tamas@xxxxxxxxxxxxx>

v7:
 * Major chop&change within the series.
 * Move MSRs to vcpu_msrs, which is where they'll definitely want to live
   when we offer PT to VMs for their own use.
---
 tools/libs/light/libxl_cpuid.c              |  1 +
 tools/misc/xen-cpuid.c                      |  2 +-
 xen/arch/x86/hvm/vmx/vmcs.c                 | 15 ++++++++++++-
 xen/arch/x86/hvm/vmx/vmx.c                  | 34 ++++++++++++++++++++++++++++-
 xen/include/asm-x86/cpufeature.h            |  1 +
 xen/include/asm-x86/hvm/vmx/vmcs.h          |  4 ++++
 xen/include/asm-x86/msr.h                   | 32 +++++++++++++++++++++++++++
 xen/include/public/arch-x86/cpufeatureset.h |  1 +
 8 files changed, 87 insertions(+), 3 deletions(-)

diff --git a/tools/libs/light/libxl_cpuid.c b/tools/libs/light/libxl_cpuid.c
index 259612834e..289c59c742 100644
--- a/tools/libs/light/libxl_cpuid.c
+++ b/tools/libs/light/libxl_cpuid.c
@@ -188,6 +188,7 @@ int libxl_cpuid_parse_config(libxl_cpuid_policy_list *cpuid, const char* str)
         {"avx512-ifma",  0x00000007,  0, CPUID_REG_EBX, 21,  1},
         {"clflushopt",   0x00000007,  0, CPUID_REG_EBX, 23,  1},
         {"clwb",         0x00000007,  0, CPUID_REG_EBX, 24,  1},
+        {"proc-trace",   0x00000007,  0, CPUID_REG_EBX, 25,  1},
         {"avx512pf",     0x00000007,  0, CPUID_REG_EBX, 26,  1},
         {"avx512er",     0x00000007,  0, CPUID_REG_EBX, 27,  1},
         {"avx512cd",     0x00000007,  0, CPUID_REG_EBX, 28,  1},
diff --git a/tools/misc/xen-cpuid.c b/tools/misc/xen-cpuid.c
index c81aa93055..2d04162d8d 100644
--- a/tools/misc/xen-cpuid.c
+++ b/tools/misc/xen-cpuid.c
@@ -106,7 +106,7 @@ static const char *const str_7b0[32] =
     [18] = "rdseed",        [19] = "adx",
     [20] = "smap",          [21] = "avx512-ifma",
     [22] = "pcommit",       [23] = "clflushopt",
-    [24] = "clwb",          [25] = "pt",
+    [24] = "clwb",          [25] = "proc-trace",
     [26] = "avx512pf",      [27] = "avx512er",
     [28] = "avx512cd",      [29] = "sha",
     [30] = "avx512bw",      [31] = "avx512vl",
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 164535f8f0..5576caad8e 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -291,6 +291,20 @@ static int vmx_init_vmcs_config(void)
         _vmx_cpu_based_exec_control &=
             ~(CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING);
 
+    rdmsrl(MSR_IA32_VMX_MISC, _vmx_misc_cap);
+
+    /* Check whether IPT is supported in VMX operation. */
+    if ( !smp_processor_id() )
+        vmtrace_available = cpu_has_proc_trace &&
+                            (_vmx_misc_cap & VMX_MISC_PROC_TRACE);
+    else if ( vmtrace_available &&
+              !(_vmx_misc_cap & VMX_MISC_PROC_TRACE) )
+    {
+        printk("VMX: IPT capabilities differ between CPU%u and CPU0\n",
+               smp_processor_id());
+        return -EINVAL;
+    }
+
     if ( _vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS )
     {
         min = 0;
@@ -305,7 +319,6 @@ static int vmx_init_vmcs_config(void)
                SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS |
                SECONDARY_EXEC_XSAVES |
                SECONDARY_EXEC_TSC_SCALING);
-        rdmsrl(MSR_IA32_VMX_MISC, _vmx_misc_cap);
         if ( _vmx_misc_cap & VMX_MISC_VMWRITE_ALL )
             opt |= SECONDARY_EXEC_ENABLE_VMCS_SHADOWING;
         if ( opt_vpid_enabled )
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 4120234c15..93121fbf27 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -428,6 +428,20 @@ static void vmx_domain_relinquish_resources(struct domain *d)
     vmx_free_vlapic_mapping(d);
 }
 
+static void vmx_init_pt(struct vcpu *v)
+{
+    unsigned int frames = v->domain->vmtrace_frames;
+
+    if ( !frames )
+        return;
+
+    /* Checked by domain creation logic. */
+    ASSERT(v->vmtrace.buf);
+    ASSERT(frames <= (GB(4) >> PAGE_SHIFT) && (frames & (frames - 1)) == 0);
+
+    v->arch.msrs->rtit.output_limit = (frames << PAGE_SHIFT) - 1;
+}
+
 static int vmx_vcpu_initialise(struct vcpu *v)
 {
     int rc;
@@ -470,6 +484,7 @@ static int vmx_vcpu_initialise(struct vcpu *v)
     }
 
     vmx_install_vlapic_mapping(v);
+    vmx_init_pt(v);
 
     return 0;
 }
@@ -508,22 +523,39 @@ static void vmx_restore_host_msrs(void)
 
 static void vmx_save_guest_msrs(struct vcpu *v)
 {
+    struct vcpu_msrs *msrs = v->arch.msrs;
+
     /*
      * We cannot cache SHADOW_GS_BASE while the VCPU runs, as it can
      * be updated at any time via SWAPGS, which we cannot trap.
      */
     v->arch.hvm.vmx.shadow_gs = read_gs_shadow();
+
+    if ( v->arch.hvm.vmx.ipt_active )
+    {
+        rdmsrl(MSR_RTIT_OUTPUT_MASK, msrs->rtit.output_mask);
+        rdmsrl(MSR_RTIT_STATUS, msrs->rtit.status);
+    }
 }
 
 static void vmx_restore_guest_msrs(struct vcpu *v)
 {
+    const struct vcpu_msrs *msrs = v->arch.msrs;
+
     write_gs_shadow(v->arch.hvm.vmx.shadow_gs);
     wrmsrl(MSR_STAR, v->arch.hvm.vmx.star);
     wrmsrl(MSR_LSTAR, v->arch.hvm.vmx.lstar);
     wrmsrl(MSR_SYSCALL_MASK, v->arch.hvm.vmx.sfmask);
 
     if ( cpu_has_msr_tsc_aux )
-        wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
+        wrmsr_tsc_aux(msrs->tsc_aux);
+
+    if ( v->arch.hvm.vmx.ipt_active )
+    {
+        wrmsrl(MSR_RTIT_OUTPUT_BASE, page_to_maddr(v->vmtrace.buf));
+        wrmsrl(MSR_RTIT_OUTPUT_MASK, msrs->rtit.output_mask);
+        wrmsrl(MSR_RTIT_STATUS, msrs->rtit.status);
+    }
 }
 
 void vmx_update_cpu_exec_control(struct vcpu *v)
diff --git a/xen/include/asm-x86/cpufeature.h b/xen/include/asm-x86/cpufeature.h
index ad3d84bdde..8ad658be1b 100644
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -104,6 +104,7 @@
 #define cpu_has_clwb            boot_cpu_has(X86_FEATURE_CLWB)
 #define cpu_has_avx512er        boot_cpu_has(X86_FEATURE_AVX512ER)
 #define cpu_has_avx512cd        boot_cpu_has(X86_FEATURE_AVX512CD)
+#define cpu_has_proc_trace      boot_cpu_has(X86_FEATURE_PROC_TRACE)
 #define cpu_has_sha             boot_cpu_has(X86_FEATURE_SHA)
 #define cpu_has_avx512bw        boot_cpu_has(X86_FEATURE_AVX512BW)
 #define cpu_has_avx512vl        boot_cpu_has(X86_FEATURE_AVX512VL)
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 906810592f..8073af323b 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -156,6 +156,9 @@ struct vmx_vcpu {
     /* Do we need to tolerate a spurious EPT_MISCONFIG VM exit? */
     bool_t               ept_spurious_misconfig;
 
+    /* Processor Trace configured and enabled for the vcpu. */
+    bool                 ipt_active;
+
     /* Is the guest in real mode? */
     uint8_t              vmx_realmode;
     /* Are we emulating rather than VMENTERing? */
@@ -283,6 +286,7 @@ extern u32 vmx_secondary_exec_control;
 #define VMX_VPID_INVVPID_SINGLE_CONTEXT_RETAINING_GLOBAL 0x80000000000ULL
 extern u64 vmx_ept_vpid_cap;
 
+#define VMX_MISC_PROC_TRACE                     0x00004000
 #define VMX_MISC_CR3_TARGET                     0x01ff0000
 #define VMX_MISC_VMWRITE_ALL                    0x20000000
 
diff --git a/xen/include/asm-x86/msr.h b/xen/include/asm-x86/msr.h
index 16f95e7344..1d3eca9063 100644
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -306,6 +306,38 @@ struct vcpu_msrs
         };
     } misc_features_enables;
 
+    /*
+     * 0x00000560 ... 57x - MSR_RTIT_*
+     *
+     * "Real Time Instruction Trace", now called Processor Trace.
+     *
+     * These MSRs are not exposed to guests.  They are controlled by Xen
+     * behind the scenes, when vmtrace is enabled for the domain.
+     *
+     * MSR_RTIT_OUTPUT_BASE not stored here.  It is fixed per vcpu, and
+     * derived from v->vmtrace.buf.
+     */
+    struct {
+        /*
+         * Placed in the MSR load/save lists.  Only modified by hypercall in
+         * the common case.
+         */
+        uint64_t ctl;
+
+        /*
+         * Updated by hardware in non-root mode.  Synchronised here on vcpu
+         * context switch.
+         */
+        uint64_t status;
+        union {
+            uint64_t output_mask;
+            struct {
+                uint32_t output_limit;
+                uint32_t output_offset;
+            };
+        };
+    } rtit;
+
     /* 0x00000da0 - MSR_IA32_XSS */
     struct {
         uint64_t raw;
diff --git a/xen/include/public/arch-x86/cpufeatureset.h b/xen/include/public/arch-x86/cpufeatureset.h
index 6f7efaad6d..a501479820 100644
--- a/xen/include/public/arch-x86/cpufeatureset.h
+++ b/xen/include/public/arch-x86/cpufeatureset.h
@@ -217,6 +217,7 @@ XEN_CPUFEATURE(SMAP,          5*32+20) /*S  Supervisor Mode Access Prevention */
 XEN_CPUFEATURE(AVX512_IFMA,   5*32+21) /*A  AVX-512 Integer Fused Multiply Add */
 XEN_CPUFEATURE(CLFLUSHOPT,    5*32+23) /*A  CLFLUSHOPT instruction */
 XEN_CPUFEATURE(CLWB,          5*32+24) /*A  CLWB instruction */
+XEN_CPUFEATURE(PROC_TRACE,    5*32+25) /*   Processor Trace */
 XEN_CPUFEATURE(AVX512PF,      5*32+26) /*A  AVX-512 Prefetch Instructions */
 XEN_CPUFEATURE(AVX512ER,      5*32+27) /*A  AVX-512 Exponent & Reciprocal Instrs */
 XEN_CPUFEATURE(AVX512CD,      5*32+28) /*A  AVX-512 Conflict Detection Instrs */
-- 
2.11.0
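
[Editor's note, not part of the patch: a minimal, self-contained sketch of the
Single Output geometry that vmx_init_pt() programs above.  Per the patch, the
low 32 bits of MSR_RTIT_OUTPUT_MASK hold the buffer limit, computed as
(frames << PAGE_SHIFT) - 1, and the high 32 bits hold the write offset that
hardware advances, matching the output_limit/output_offset union added to
struct vcpu_msrs.  The helper name and example values below are illustrative
only.]

/* Illustrative sketch only -- not Xen code. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Compose an RTIT_OUTPUT_MASK value: limit in the low half, offset high. */
static uint64_t rtit_output_mask(unsigned int frames, uint32_t offset)
{
    /* Same constraints vmx_init_pt() asserts: power of two, at most 4GiB. */
    assert(frames && (frames & (frames - 1)) == 0);
    assert(frames <= (1u << (32 - PAGE_SHIFT)));

    uint32_t limit = (frames << PAGE_SHIFT) - 1;

    return ((uint64_t)offset << 32) | limit;
}

int main(void)
{
    /* A 64-frame (256KiB) buffer with 0x1200 bytes of trace already written. */
    printf("RTIT_OUTPUT_MASK = %#018llx\n",
           (unsigned long long)rtit_output_mask(64, 0x1200));
    return 0;
}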