[Xen-devel] [PATCH v2 4/5] x86/msr: Handle MSR_TSC_AUX consistently for PV and HVM guests
With PVRDTSCP mode removed, handling of MSR_TSC_AUX can move into the common
code. Move its storage into struct vcpu_msrs (dropping the HVM-specific
msr_tsc_aux), and add an RDPID feature check as this bit also enumerates the
presence of the MSR.

Introduce cpu_has_rdpid along with the synthesized cpu_has_msr_tsc_aux to
correct the context switch paths, as MSR_TSC_AUX is enumerated by either
RDTSCP or RDPID.

Drop hvm_msr_tsc_aux() entirely, and use v->arch.msrs->tsc_aux directly.

Update hvm_load_cpu_ctxt() to check that the incoming ctxt.msr_tsc_aux isn't
out of range. In practice, no previous version of Xen ever wrote an
out-of-range value. Add MSR_TSC_AUX to the list of MSRs migrated for PV
guests, but leave the HVM path using the existing space in hvm_hw_cpu.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
CC: Jun Nakajima <jun.nakajima@xxxxxxxxx>
CC: Kevin Tian <kevin.tian@xxxxxxxxx>
CC: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
CC: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
CC: Brian Woods <brian.woods@xxxxxxx>
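
Not part of the patch - purely an illustrative sketch of the behaviour the new
common MSR_TSC_AUX handlers (the msr.c hunks below) are intended to have,
expressed against the guest_rdmsr()/guest_wrmsr() interfaces. The helper name
is made up, and X86EMUL_EXCEPTION is assumed to be the usual result of the
gp_fault paths:

  /* Hypothetical self-check, for a vcpu whose policy has RDTSCP or RDPID. */
  static void __maybe_unused check_tsc_aux_behaviour(struct vcpu *v)
  {
      uint64_t val;

      /* In-range (32-bit) writes are stored, and read straight back. */
      if ( guest_wrmsr(v, MSR_TSC_AUX, 0x1234) == X86EMUL_OKAY &&
           guest_rdmsr(v, MSR_TSC_AUX, &val) == X86EMUL_OKAY )
          ASSERT(val == 0x1234);

      /* Values with bits set above bit 31 fault, matching the new
       * hvm_load_cpu_ctxt() range check on the migration path. */
      ASSERT(guest_wrmsr(v, MSR_TSC_AUX, 1ull << 32) == X86EMUL_EXCEPTION);
  }
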
v2:
* Rebase over "x86/msr: Handle MSR_AMD64_DR{0-3}_ADDRESS_MASK in the new MSR
infrastructure"
* Move the HVM msr_tsc_aux check earlier in hvm_load_cpu_ctxt()
* Introduce cpu_has_msr_tsc_aux

RFC: I'm not overly happy with cpu_has_msr_tsc_aux because in practice all
hardware with rdpid has rdtscp, making this an effectively dead conditional in
the context switch path. I'm tempted to go with

  #define cpu_has_msr_tsc_aux (cpu_has_rdtscp /* || cpu_has_rdpid */)

to get the point across, but without the extra jump.
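
For comparison only (not part of the patch), the two forms diverge solely on a
hypothetical part advertising RDPID but not RDTSCP; there, the commented-out
variant would skip restoring the guest's value on context switch (the snippet
mirrors the paravirt_ctxt_switch_to() hunk below):

  /* As implemented: covers both enumeration bits. */
  if ( cpu_has_msr_tsc_aux )                    /* rdtscp || rdpid */
      wrmsr_tsc_aux(v->arch.msrs->tsc_aux);

  /* Alternative above: identical on all current hardware, one test
   * fewer, but a RDPID-only part would be missed. */
  if ( cpu_has_rdtscp /* || cpu_has_rdpid */ )
      wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
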
---
 xen/arch/x86/domain.c            |  5 ++---
 xen/arch/x86/domctl.c            |  2 ++
 xen/arch/x86/hvm/hvm.c           | 22 +++++++++-------------
 xen/arch/x86/hvm/svm/svm.c       |  6 +++---
 xen/arch/x86/hvm/vmx/vmx.c       |  6 +++---
 xen/arch/x86/msr.c               | 18 ++++++++++++++++++
 xen/arch/x86/pv/emul-priv-op.c   |  4 ----
 xen/include/asm-x86/cpufeature.h |  5 +++++
 xen/include/asm-x86/hvm/hvm.h    |  6 ------
 xen/include/asm-x86/hvm/vcpu.h   |  1 -
 xen/include/asm-x86/msr.h        |  9 +++++++++
 11 files changed, 51 insertions(+), 33 deletions(-)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 245300b..38c233e 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1592,9 +1592,8 @@ void paravirt_ctxt_switch_to(struct vcpu *v)
if ( unlikely(v->arch.dr7 & DR7_ACTIVE_MASK) )
activate_debugregs(v);
- if ( cpu_has_rdtscp )
- wrmsr_tsc_aux(v->domain->arch.tsc_mode == TSC_MODE_PVRDTSCP
- ? v->domain->arch.incarnation : 0);
+ if ( cpu_has_msr_tsc_aux )
+ wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
}
/* Update per-VCPU guest runstate shared memory area (if registered). */
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index ed46df8..9bf2d08 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1274,6 +1274,7 @@ long arch_do_domctl(
static const uint32_t msrs_to_send[] = {
MSR_SPEC_CTRL,
MSR_INTEL_MISC_FEATURES_ENABLES,
+ MSR_TSC_AUX,
MSR_AMD64_DR0_ADDRESS_MASK,
MSR_AMD64_DR1_ADDRESS_MASK,
MSR_AMD64_DR2_ADDRESS_MASK,
@@ -1373,6 +1374,7 @@ long arch_do_domctl(
{
case MSR_SPEC_CTRL:
case MSR_INTEL_MISC_FEATURES_ENABLES:
+ case MSR_TSC_AUX:
case MSR_AMD64_DR0_ADDRESS_MASK:
case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
if ( guest_wrmsr(v, msr.index, msr.value) != X86EMUL_OKAY )
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index e2e4204..1f9bafc 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -774,7 +774,7 @@ static int hvm_save_cpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
struct segment_register seg;
struct hvm_hw_cpu ctxt = {
.tsc = hvm_get_guest_tsc_fixed(v, v->domain->arch.hvm.sync_tsc),
- .msr_tsc_aux = hvm_msr_tsc_aux(v),
+ .msr_tsc_aux = v->arch.msrs->tsc_aux,
.rax = v->arch.user_regs.rax,
.rbx = v->arch.user_regs.rbx,
.rcx = v->arch.user_regs.rcx,
@@ -1014,6 +1014,13 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
return -EINVAL;
}
+ if ( ctxt.msr_tsc_aux != (uint32_t)ctxt.msr_tsc_aux )
+ {
+ printk(XENLOG_G_ERR "%pv: HVM restore: bad MSR_TSC_AUX %#"PRIx64"\n",
+ v, ctxt.msr_tsc_aux);
+ return -EINVAL;
+ }
+
/* Older Xen versions used to save the segment arbytes directly
* from the VMCS on Intel hosts. Detect this and rearrange them
* into the struct segment_register format. */
@@ -1040,7 +1047,7 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
if ( hvm_funcs.tsc_scaling.setup )
hvm_funcs.tsc_scaling.setup(v);
- v->arch.hvm.msr_tsc_aux = ctxt.msr_tsc_aux;
+ v->arch.msrs->tsc_aux = ctxt.msr_tsc_aux;
hvm_set_guest_tsc_fixed(v, ctxt.tsc, d->arch.hvm.sync_tsc);
@@ -3406,10 +3413,6 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
*msr_content = v->arch.hvm.msr_tsc_adjust;
break;
- case MSR_TSC_AUX:
- *msr_content = hvm_msr_tsc_aux(v);
- break;
-
case MSR_IA32_APICBASE:
*msr_content = vcpu_vlapic(v)->hw.apic_base_msr;
break;
@@ -3562,13 +3565,6 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
hvm_set_guest_tsc_adjust(v, msr_content);
break;
- case MSR_TSC_AUX:
- v->arch.hvm.msr_tsc_aux = (uint32_t)msr_content;
- if ( cpu_has_rdtscp
- && (v->domain->arch.tsc_mode != TSC_MODE_PVRDTSCP) )
- wrmsr_tsc_aux(msr_content);
- break;
-
case MSR_IA32_APICBASE:
if ( !vlapic_msr_set(vcpu_vlapic(v), msr_content) )
goto gp_fault;
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index b9a8900..df6f262 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1069,8 +1069,8 @@ static void svm_ctxt_switch_to(struct vcpu *v)
svm_lwp_load(v);
svm_tsc_ratio_load(v);
- if ( cpu_has_rdtscp )
- wrmsr_tsc_aux(hvm_msr_tsc_aux(v));
+ if ( cpu_has_msr_tsc_aux )
+ wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
}
static void noreturn svm_do_resume(struct vcpu *v)
@@ -2968,7 +2968,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
break;
case VMEXIT_RDTSCP:
- regs->rcx = hvm_msr_tsc_aux(v);
+ regs->rcx = v->arch.msrs->tsc_aux;
/* fall through */
case VMEXIT_RDTSC:
svm_vmexit_do_rdtsc(regs);
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 365eeb2..9b691d9 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -511,8 +511,8 @@ static void vmx_restore_guest_msrs(struct vcpu *v)
wrmsrl(MSR_LSTAR, v->arch.hvm.vmx.lstar);
wrmsrl(MSR_SYSCALL_MASK, v->arch.hvm.vmx.sfmask);
- if ( cpu_has_rdtscp )
- wrmsr_tsc_aux(hvm_msr_tsc_aux(v));
+ if ( cpu_has_msr_tsc_aux )
+ wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
}
void vmx_update_cpu_exec_control(struct vcpu *v)
@@ -3956,7 +3956,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
vmx_invlpg_intercept(exit_qualification);
break;
case EXIT_REASON_RDTSCP:
- regs->rcx = hvm_msr_tsc_aux(v);
+ regs->rcx = v->arch.msrs->tsc_aux;
/* fall through */
case EXIT_REASON_RDTSC:
update_guest_eip(); /* Safe: RDTSC, RDTSCP */
diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index 76cb6ef..f86da8f 100644
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -162,6 +162,13 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
ret = guest_rdmsr_xen(v, msr, val);
break;
+ case MSR_TSC_AUX:
+ if ( !cp->extd.rdtscp && !cp->feat.rdpid )
+ goto gp_fault;
+
+ *val = msrs->tsc_aux;
+ break;
+
case MSR_AMD64_DR0_ADDRESS_MASK:
case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
if ( !cp->extd.dbext )
@@ -309,6 +316,17 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
ret = guest_wrmsr_xen(v, msr, val);
break;
+ case MSR_TSC_AUX:
+ if ( !cp->extd.rdtscp && !cp->feat.rdpid )
+ goto gp_fault;
+ if ( val != (uint32_t)val )
+ goto gp_fault;
+
+ msrs->tsc_aux = val;
+ if ( v == curr )
+ wrmsr_tsc_aux(val);
+ break;
+
case MSR_AMD64_DR0_ADDRESS_MASK:
case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
if ( !cp->extd.dbext || val != (uint32_t)val )
diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
index 5133c35..942ece2 100644
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -847,10 +847,6 @@ static int read_msr(unsigned int reg, uint64_t *val,
*val = currd->arch.vtsc ? pv_soft_rdtsc(curr, ctxt->regs) : rdtsc();
return X86EMUL_OKAY;
- case MSR_TSC_AUX:
- *val = 0;
- return X86EMUL_OKAY;
-
case MSR_EFER:
/* Hide unknown bits, and unconditionally hide SVME from guests. */
*val = read_efer() & EFER_KNOWN_MASK & ~EFER_SVME;
diff --git a/xen/include/asm-x86/cpufeature.h b/xen/include/asm-x86/cpufeature.h
index c2b0f6a..5592e17 100644
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -107,6 +107,9 @@
#define cpu_has_avx512bw boot_cpu_has(X86_FEATURE_AVX512BW)
#define cpu_has_avx512vl boot_cpu_has(X86_FEATURE_AVX512VL)
+/* CPUID level 0x00000007:0.ecx */
+#define cpu_has_rdpid boot_cpu_has(X86_FEATURE_RDPID)
+
/* CPUID level 0x80000007.edx */
#define cpu_has_itsc boot_cpu_has(X86_FEATURE_ITSC)
@@ -117,6 +120,8 @@
#define cpu_has_lfence_dispatch boot_cpu_has(X86_FEATURE_LFENCE_DISPATCH)
#define cpu_has_xen_lbr boot_cpu_has(X86_FEATURE_XEN_LBR)
+#define cpu_has_msr_tsc_aux (cpu_has_rdtscp || cpu_has_rdpid)
+
enum _cache_type {
CACHE_TYPE_NULL = 0,
CACHE_TYPE_DATA = 1,
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 3d3250d..3a92bb3 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -563,12 +563,6 @@ static inline void hvm_invalidate_regs_fields(struct cpu_user_regs *regs)
#endif
}
-#define hvm_msr_tsc_aux(v) ({ \
- struct domain *__d = (v)->domain; \
- (__d->arch.tsc_mode == TSC_MODE_PVRDTSCP) \
- ? (u32)__d->arch.incarnation : (u32)(v)->arch.hvm.msr_tsc_aux; \
-})
-
/*
* Nested HVM
*/
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 39778f9..c8a40f6 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -170,7 +170,6 @@ struct hvm_vcpu {
struct hvm_vcpu_asid n1asid;
- u32 msr_tsc_aux;
u64 msr_tsc_adjust;
u64 msr_xss;
diff --git a/xen/include/asm-x86/msr.h b/xen/include/asm-x86/msr.h
index 05d905b..adfa2fa 100644
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -289,6 +289,15 @@ struct vcpu_msrs
} misc_features_enables;
/*
+ * 0xc0000103 - MSR_TSC_AUX
+ *
+ * Value is guest chosen, and always loaded in vcpu context. Guests have
+ * no direct MSR access, and the value is accessible to userspace with the
+ * RDTSCP and RDPID instructions.
+ */
+ uint32_t tsc_aux;
+
+ /*
* 0xc00110{27,19-1b} MSR_AMD64_DR{0-3}_ADDRESS_MASK
*
* Loaded into hardware for guests which have active %dr7 settings.
--
2.1.4