|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 5/9] x86/amd: Probe for legacy SSBD interfaces on boot
Introduce a new synthetic LEGACY_SSBD feature and set it if we find
VIRT_SPEC_CTRL offered by our hypervisor, or if we find a working bit in an
LS_CFG register.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Brian Woods <brian.woods@xxxxxxx>
---
xen/arch/x86/cpu/amd.c | 59 +++++++++++++++++++++++++++++++++++++++
xen/arch/x86/spec_ctrl.c | 3 +-
xen/include/asm-x86/cpufeature.h | 6 ++++
xen/include/asm-x86/cpufeatures.h | 1 +
4 files changed, 68 insertions(+), 1 deletion(-)
diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index c790416..897c060 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -362,6 +362,62 @@ static void __init noinline amd_init_levelling(void)
ctxt_switch_masking = amd_ctxt_switch_masking;
}
+/*
+ * Cached once on boot.  ls_cfg_base holds the boot-time value of
+ * MSR_AMD64_LS_CFG with the SSBD bit forced clear (Xen's overall default);
+ * ls_cfg_ssbd_mask is the family-specific SSBD control bit, or 0 if no
+ * usable LS_CFG mechanism was found.
+ */
+static uint64_t __read_mostly ls_cfg_base, __read_mostly ls_cfg_ssbd_mask;
+
+/*
+ * Decide whether a legacy (pre-architectural) SSBD mechanism is available,
+ * and set the synthetic LEGACY_SSBD feature if so.  Called once, on the
+ * boot CPU only.
+ */
+static void __init noinline amd_probe_legacy_ssbd(void)
+{
+ uint64_t new;
+
+ /*
+ * Search for mechanisms of controlling Memory Disambiguation.
+ *
+ * If the CPU reports that it is fixed, there is nothing to do. If we
+ * have an architectural MSR_SPEC_CTRL.SSBD control, leave everything
+ * to the common code.
+ */
+ if (cpu_has_amd_ssb_no || cpu_has_amd_ssbd)
+ return;
+
+ /* Use MSR_VIRT_SPEC_CTRL if our hypervisor offers it. */
+ if (cpu_has_virt_sc_ssbd) {
+ setup_force_cpu_cap(X86_FEATURE_LEGACY_SSBD);
+ return;
+ }
+
+ /* Probe for LS_CFG settings.  The bit position is family-specific. */
+ switch (boot_cpu_data.x86) {
+ default: return; /* No known LS_CFG settings. */
+ case 0x15: ls_cfg_ssbd_mask = 1ull << 54; break;
+ case 0x16: ls_cfg_ssbd_mask = 1ull << 33; break;
+ case 0x17: ls_cfg_ssbd_mask = 1ull << 10; break;
+ }
+
+ /*
+ * MSR_AMD64_LS_CFG isn't architectural, and may not be virtualised
+ * fully. Check that we can actually flip the bit before concluding
+ * that LS_CFG is available for use.
+ */
+ if (rdmsr_safe(MSR_AMD64_LS_CFG, ls_cfg_base) ||
+ wrmsr_safe(MSR_AMD64_LS_CFG, ls_cfg_base ^ ls_cfg_ssbd_mask))
+ return;
+
+ /* Read back, to confirm the write actually took effect. */
+ rdmsrl(MSR_AMD64_LS_CFG, new);
+ if (new != (ls_cfg_base ^ ls_cfg_ssbd_mask))
+ return;
+
+ /*
+ * Leave ls_cfg_base with the bit clear. This is Xen's overall
+ * default, and it simplifies the context switch logic.
+ */
+ ls_cfg_base &= ~ls_cfg_ssbd_mask;
+ if ((new != ls_cfg_base) && wrmsr_safe(MSR_AMD64_LS_CFG, ls_cfg_base))
+ return;
+
+ /* LS_CFG appears to work fully. Let's choose to use it. */
+ setup_force_cpu_cap(X86_FEATURE_LEGACY_SSBD);
+}
+
/*
* Check for the presence of an AMD erratum. Arguments are defined in amd.h
* for each known erratum. Return 1 if erratum is found.
@@ -603,6 +659,9 @@ static void init_amd(struct cpuinfo_x86 *c)
c->x86_capability);
}
+ if (c == &boot_cpu_data)
+ amd_probe_legacy_ssbd();
+
/*
* If the user has explicitly chosen to disable Memory Disambiguation
* to mitigiate Speculative Store Bypass, poke the appropriate MSR.
diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
index af92866..40a71e2 100644
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -260,7 +260,8 @@ static void __init print_details(enum ind_thunk thunk,
uint64_t caps)
thunk == THUNK_JMP ? "JMP" : "?",
!boot_cpu_has(X86_FEATURE_IBRSB) ? "No" :
(default_xen_spec_ctrl & SPEC_CTRL_IBRS) ? "IBRS+" : "IBRS-",
- !boot_cpu_has(X86_FEATURE_SSBD) ? "" :
+ !boot_cpu_has(X86_FEATURE_SSBD) ?
+ cpu_has_legacy_ssbd ? " LEGACY_SSBD" : "" :
(default_xen_spec_ctrl & SPEC_CTRL_SSBD) ? " SSBD+" : " SSBD-",
opt_ibpb ? " IBPB" : "",
opt_l1d_flush ? " L1D_FLUSH" : "");
diff --git a/xen/include/asm-x86/cpufeature.h b/xen/include/asm-x86/cpufeature.h
index c2b0f6a..2923003 100644
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -110,11 +110,17 @@
/* CPUID level 0x80000007.edx */
#define cpu_has_itsc boot_cpu_has(X86_FEATURE_ITSC)
+/* CPUID level 0x80000008.ebx */
+#define cpu_has_amd_ssbd boot_cpu_has(X86_FEATURE_AMD_SSBD)
+#define cpu_has_virt_sc_ssbd boot_cpu_has(X86_FEATURE_VIRT_SC_SSBD)
+#define cpu_has_amd_ssb_no boot_cpu_has(X86_FEATURE_AMD_SSB_NO)
+
/* Synthesized. */
#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
#define cpu_has_cpuid_faulting boot_cpu_has(X86_FEATURE_CPUID_FAULTING)
#define cpu_has_aperfmperf boot_cpu_has(X86_FEATURE_APERFMPERF)
#define cpu_has_lfence_dispatch boot_cpu_has(X86_FEATURE_LFENCE_DISPATCH)
+#define cpu_has_legacy_ssbd boot_cpu_has(X86_FEATURE_LEGACY_SSBD)
#define cpu_has_xen_lbr boot_cpu_has(X86_FEATURE_XEN_LBR)
enum _cache_type {
diff --git a/xen/include/asm-x86/cpufeatures.h
b/xen/include/asm-x86/cpufeatures.h
index 0c06274..2090613 100644
--- a/xen/include/asm-x86/cpufeatures.h
+++ b/xen/include/asm-x86/cpufeatures.h
@@ -25,6 +25,7 @@ XEN_CPUFEATURE(XEN_SMAP, (FSCAPINTS+0)*32+11) /* SMAP
gets used by Xen it
XEN_CPUFEATURE(LFENCE_DISPATCH, (FSCAPINTS+0)*32+12) /* lfence set as Dispatch
Serialising */
XEN_CPUFEATURE(IND_THUNK_LFENCE,(FSCAPINTS+0)*32+13) /* Use IND_THUNK_LFENCE */
XEN_CPUFEATURE(IND_THUNK_JMP, (FSCAPINTS+0)*32+14) /* Use IND_THUNK_JMP */
+XEN_CPUFEATURE(LEGACY_SSBD, (FSCAPINTS+0)*32+15) /* LS_CFG or
VIRT_SPEC_CTRL available for SSBD */
XEN_CPUFEATURE(SC_MSR_PV, (FSCAPINTS+0)*32+16) /* MSR_SPEC_CTRL used by
Xen for PV */
XEN_CPUFEATURE(SC_MSR_HVM, (FSCAPINTS+0)*32+17) /* MSR_SPEC_CTRL used by
Xen for HVM */
XEN_CPUFEATURE(SC_RSB_PV, (FSCAPINTS+0)*32+18) /* RSB overwrite needed
for PV */
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.