|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen master] x86/msr: Handle MSR_TSC_AUX consistently for PV and HVM guests
commit 5f5a7a6d3669fb94fa15daf563e5dc54cc9dbece
Author: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Thu Nov 15 21:04:37 2018 +0000
Commit: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Tue Dec 18 17:13:51 2018 +0000
x86/msr: Handle MSR_TSC_AUX consistently for PV and HVM guests
With PVRDTSCP mode removed, handling of MSR_TSC_AUX can move into the common
code. Move its storage into struct vcpu_msrs (dropping the HVM-specific
msr_tsc_aux), and add an RDPID feature check as this bit also enumerates the
presence of the MSR.
Introduce cpu_has_rdpid along with the synthesized cpu_has_msr_tsc_aux to
correct the context switch paths, as MSR_TSC_AUX is enumerated by either
RDTSCP or RDPID.
Drop hvm_msr_tsc_aux() entirely, and use v->arch.msrs->tsc_aux directly.
Update hvm_load_cpu_ctxt() to check that the incoming ctxt.msr_tsc_aux isn't
out of range. In practice, no previous version of Xen ever wrote an
out-of-range value. Add MSR_TSC_AUX to the list of MSRs migrated for PV
guests, but leave the HVM path using the existing space in hvm_hw_cpu.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Acked-by: Brian Woods <brian.woods@xxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>
---
xen/arch/x86/domain.c | 5 ++---
xen/arch/x86/domctl.c | 2 ++
xen/arch/x86/hvm/hvm.c | 22 +++++++++-------------
xen/arch/x86/hvm/svm/svm.c | 9 ++++-----
xen/arch/x86/hvm/vmx/vmx.c | 9 ++++-----
xen/arch/x86/msr.c | 18 ++++++++++++++++++
xen/arch/x86/pv/emul-priv-op.c | 4 ----
xen/include/asm-x86/cpufeature.h | 5 +++++
xen/include/asm-x86/hvm/hvm.h | 6 ------
xen/include/asm-x86/hvm/vcpu.h | 1 -
xen/include/asm-x86/msr.h | 9 +++++++++
11 files changed, 53 insertions(+), 37 deletions(-)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index ae9f24e457..f5a1f325aa 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1595,9 +1595,8 @@ void paravirt_ctxt_switch_to(struct vcpu *v)
if ( unlikely(v->arch.dr7 & DR7_ACTIVE_MASK) )
activate_debugregs(v);
- if ( cpu_has_rdtscp )
- wrmsr_tsc_aux(v->domain->arch.tsc_mode == TSC_MODE_PVRDTSCP
- ? v->domain->arch.incarnation : 0);
+ if ( cpu_has_msr_tsc_aux )
+ wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
}
/* Update per-VCPU guest runstate shared memory area (if registered). */
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index ed46df8c5d..9bf2d0820f 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1274,6 +1274,7 @@ long arch_do_domctl(
static const uint32_t msrs_to_send[] = {
MSR_SPEC_CTRL,
MSR_INTEL_MISC_FEATURES_ENABLES,
+ MSR_TSC_AUX,
MSR_AMD64_DR0_ADDRESS_MASK,
MSR_AMD64_DR1_ADDRESS_MASK,
MSR_AMD64_DR2_ADDRESS_MASK,
@@ -1373,6 +1374,7 @@ long arch_do_domctl(
{
case MSR_SPEC_CTRL:
case MSR_INTEL_MISC_FEATURES_ENABLES:
+ case MSR_TSC_AUX:
case MSR_AMD64_DR0_ADDRESS_MASK:
case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
if ( guest_wrmsr(v, msr.index, msr.value) != X86EMUL_OKAY )
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 97fcaadb0b..d14ddcb527 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -774,7 +774,7 @@ static int hvm_save_cpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
struct segment_register seg;
struct hvm_hw_cpu ctxt = {
.tsc = hvm_get_guest_tsc_fixed(v, v->domain->arch.hvm.sync_tsc),
- .msr_tsc_aux = hvm_msr_tsc_aux(v),
+ .msr_tsc_aux = v->arch.msrs->tsc_aux,
.rax = v->arch.user_regs.rax,
.rbx = v->arch.user_regs.rbx,
.rcx = v->arch.user_regs.rcx,
@@ -1014,6 +1014,13 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
return -EINVAL;
}
+ if ( ctxt.msr_tsc_aux != (uint32_t)ctxt.msr_tsc_aux )
+ {
+ printk(XENLOG_G_ERR "%pv: HVM restore: bad MSR_TSC_AUX %#"PRIx64"\n",
+ v, ctxt.msr_tsc_aux);
+ return -EINVAL;
+ }
+
/* Older Xen versions used to save the segment arbytes directly
* from the VMCS on Intel hosts. Detect this and rearrange them
* into the struct segment_register format. */
@@ -1040,7 +1047,7 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
if ( hvm_funcs.tsc_scaling.setup )
hvm_funcs.tsc_scaling.setup(v);
- v->arch.hvm.msr_tsc_aux = ctxt.msr_tsc_aux;
+ v->arch.msrs->tsc_aux = ctxt.msr_tsc_aux;
hvm_set_guest_tsc_fixed(v, ctxt.tsc, d->arch.hvm.sync_tsc);
@@ -3406,10 +3413,6 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
*msr_content = v->arch.hvm.msr_tsc_adjust;
break;
- case MSR_TSC_AUX:
- *msr_content = hvm_msr_tsc_aux(v);
- break;
-
case MSR_APIC_BASE:
*msr_content = vcpu_vlapic(v)->hw.apic_base_msr;
break;
@@ -3557,13 +3560,6 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
hvm_set_guest_tsc_adjust(v, msr_content);
break;
- case MSR_TSC_AUX:
- v->arch.hvm.msr_tsc_aux = (uint32_t)msr_content;
- if ( cpu_has_rdtscp
- && (v->domain->arch.tsc_mode != TSC_MODE_PVRDTSCP) )
- wrmsr_tsc_aux(msr_content);
- break;
-
case MSR_APIC_BASE:
return guest_wrmsr_apic_base(v, msr_content);
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 9464394b0d..954822c960 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1069,8 +1069,8 @@ static void svm_ctxt_switch_to(struct vcpu *v)
svm_lwp_load(v);
svm_tsc_ratio_load(v);
- if ( cpu_has_rdtscp )
- wrmsr_tsc_aux(hvm_msr_tsc_aux(v));
+ if ( cpu_has_msr_tsc_aux )
+ wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
}
static void noreturn svm_do_resume(struct vcpu *v)
@@ -2286,8 +2286,7 @@ static void svm_vmexit_do_rdtsc(struct cpu_user_regs *regs, bool rdtscp)
enum instruction_index insn = rdtscp ? INSTR_RDTSCP : INSTR_RDTSC;
unsigned int inst_len;
- if ( rdtscp && !currd->arch.cpuid->extd.rdtscp &&
- currd->arch.tsc_mode != TSC_MODE_PVRDTSCP )
+ if ( rdtscp && !currd->arch.cpuid->extd.rdtscp )
{
hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
return;
@@ -2299,7 +2298,7 @@ static void svm_vmexit_do_rdtsc(struct cpu_user_regs *regs, bool rdtscp)
__update_guest_eip(regs, inst_len);
if ( rdtscp )
- regs->rcx = hvm_msr_tsc_aux(curr);
+ regs->rcx = curr->arch.msrs->tsc_aux;
hvm_rdtsc_intercept(regs);
}
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 2166b0dfe4..64af8bf943 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -511,8 +511,8 @@ static void vmx_restore_guest_msrs(struct vcpu *v)
wrmsrl(MSR_LSTAR, v->arch.hvm.vmx.lstar);
wrmsrl(MSR_SYSCALL_MASK, v->arch.hvm.vmx.sfmask);
- if ( cpu_has_rdtscp )
- wrmsr_tsc_aux(hvm_msr_tsc_aux(v));
+ if ( cpu_has_msr_tsc_aux )
+ wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
}
void vmx_update_cpu_exec_control(struct vcpu *v)
@@ -3957,14 +3957,13 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
vmx_invlpg_intercept(exit_qualification);
break;
case EXIT_REASON_RDTSCP:
- if ( !currd->arch.cpuid->extd.rdtscp &&
- currd->arch.tsc_mode != TSC_MODE_PVRDTSCP )
+ if ( !currd->arch.cpuid->extd.rdtscp )
{
hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
break;
}
- regs->rcx = hvm_msr_tsc_aux(v);
+ regs->rcx = v->arch.msrs->tsc_aux;
/* fall through */
case EXIT_REASON_RDTSC:
update_guest_eip(); /* Safe: RDTSC, RDTSCP */
diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index 85a58c0b58..9bb38b6d66 100644
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -170,6 +170,13 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
ret = guest_rdmsr_xen(v, msr, val);
break;
+ case MSR_TSC_AUX:
+ if ( !cp->extd.rdtscp && !cp->feat.rdpid )
+ goto gp_fault;
+
+ *val = msrs->tsc_aux;
+ break;
+
case MSR_AMD64_DR0_ADDRESS_MASK:
case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
if ( !cp->extd.dbext )
@@ -324,6 +331,17 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
ret = guest_wrmsr_xen(v, msr, val);
break;
+ case MSR_TSC_AUX:
+ if ( !cp->extd.rdtscp && !cp->feat.rdpid )
+ goto gp_fault;
+ if ( val != (uint32_t)val )
+ goto gp_fault;
+
+ msrs->tsc_aux = val;
+ if ( v == curr )
+ wrmsr_tsc_aux(val);
+ break;
+
case MSR_AMD64_DR0_ADDRESS_MASK:
case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
if ( !cp->extd.dbext || val != (uint32_t)val )
diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
index 5133c3561e..942ece2ca0 100644
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -847,10 +847,6 @@ static int read_msr(unsigned int reg, uint64_t *val,
*val = currd->arch.vtsc ? pv_soft_rdtsc(curr, ctxt->regs) : rdtsc();
return X86EMUL_OKAY;
- case MSR_TSC_AUX:
- *val = 0;
- return X86EMUL_OKAY;
-
case MSR_EFER:
/* Hide unknown bits, and unconditionally hide SVME from guests. */
*val = read_efer() & EFER_KNOWN_MASK & ~EFER_SVME;
diff --git a/xen/include/asm-x86/cpufeature.h b/xen/include/asm-x86/cpufeature.h
index c2b0f6ae4e..5592e1749d 100644
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -107,6 +107,9 @@
#define cpu_has_avx512bw boot_cpu_has(X86_FEATURE_AVX512BW)
#define cpu_has_avx512vl boot_cpu_has(X86_FEATURE_AVX512VL)
+/* CPUID level 0x00000007:0.ecx */
+#define cpu_has_rdpid boot_cpu_has(X86_FEATURE_RDPID)
+
/* CPUID level 0x80000007.edx */
#define cpu_has_itsc boot_cpu_has(X86_FEATURE_ITSC)
@@ -117,6 +120,8 @@
#define cpu_has_lfence_dispatch boot_cpu_has(X86_FEATURE_LFENCE_DISPATCH)
#define cpu_has_xen_lbr boot_cpu_has(X86_FEATURE_XEN_LBR)
+#define cpu_has_msr_tsc_aux (cpu_has_rdtscp || cpu_has_rdpid)
+
enum _cache_type {
CACHE_TYPE_NULL = 0,
CACHE_TYPE_DATA = 1,
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 95581ce6cb..0a10b51554 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -560,12 +560,6 @@ static inline void hvm_invalidate_regs_fields(struct cpu_user_regs *regs)
#endif
}
-#define hvm_msr_tsc_aux(v) ({ \
- struct domain *__d = (v)->domain; \
- (__d->arch.tsc_mode == TSC_MODE_PVRDTSCP) \
- ? (u32)__d->arch.incarnation : (u32)(v)->arch.hvm.msr_tsc_aux; \
-})
-
/*
* Nested HVM
*/
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 39778f9e4e..c8a40f6d55 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -170,7 +170,6 @@ struct hvm_vcpu {
struct hvm_vcpu_asid n1asid;
- u32 msr_tsc_aux;
u64 msr_tsc_adjust;
u64 msr_xss;
diff --git a/xen/include/asm-x86/msr.h b/xen/include/asm-x86/msr.h
index 05d905bafd..adfa2fa05b 100644
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -289,6 +289,15 @@ struct vcpu_msrs
} misc_features_enables;
/*
+ * 0xc0000103 - MSR_TSC_AUX
+ *
+ * Value is guest chosen, and always loaded in vcpu context. Guests have
+ * no direct MSR access, and the value is accessible to userspace with the
+ * RDTSCP and RDPID instructions.
+ */
+ uint32_t tsc_aux;
+
+ /*
* 0xc00110{27,19-1b} MSR_AMD64_DR{0-3}_ADDRESS_MASK
*
* Loaded into hardware for guests which have active %dr7 settings.
--
generated by git-patchbot for /home/xen/git/xen.git#master
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |