[PATCH 3/3] amd/msr: implement VIRT_SPEC_CTRL for HVM guests using legacy SSBD
Expose VIRT_SPEC_CTRL to guests if the hardware supports setting SSBD in the
LS_CFG MSR.  Different AMD CPU families use different bits in LS_CFG, so
exposing VIRT_SPEC_CTRL.SSBD allows for a unified way of setting SSBD on AMD
hardware that's compatible migration-wise, regardless of which underlying
mechanism is used to set SSBD.

Note that on AMD Family 17h (Zen 1) the value of SSBD in LS_CFG is shared
between threads on the same core, so there's extra logic to synchronize the
value and keep SSBD set as long as one of the threads in the core requires it
to be set.  Such logic also requires extra storage for the per-thread state,
which is allocated at initialization time.

Do the context switching of the SSBD selection in LS_CFG between hypervisor
and guest in the same handler that's already used to switch the value of
VIRT_SPEC_CTRL in the hardware when supported.

Suggested-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
 xen/arch/x86/cpu/amd.c                 | 113 +++++++++++++++++++++----
 xen/arch/x86/hvm/svm/svm.c             |  14 ++-
 xen/arch/x86/include/asm/amd.h         |   3 +
 xen/arch/x86/include/asm/cpufeatures.h |   1 +
 xen/arch/x86/spec_ctrl.c               |   4 +-
 5 files changed, 115 insertions(+), 20 deletions(-)

diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index c3fcc0e558..7318623874 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -685,24 +685,10 @@ void amd_init_lfence(struct cpuinfo_x86 *c)
  * Refer to the AMD Speculative Store Bypass whitepaper:
  * https://developer.amd.com/wp-content/resources/124441_AMD64_SpeculativeStoreBypassDisable_Whitepaper_final.pdf
  */
-void amd_init_ssbd(const struct cpuinfo_x86 *c)
+static bool set_legacy_ssbd(const struct cpuinfo_x86 *c, bool enable)
 {
-	struct cpu_info *info = get_cpu_info();
 	int bit = -1;
 
-	if (cpu_has_ssb_no)
-		return;
-
-	if (cpu_has_amd_ssbd) {
-		/* Handled by common MSR_SPEC_CTRL logic */
-		return;
-	}
-
-	if (cpu_has_virt_ssbd) {
-		wrmsrl(MSR_VIRT_SPEC_CTRL, opt_ssbd ? SPEC_CTRL_SSBD : 0);
-		goto out;
-	}
-
 	switch (c->x86) {
 	case 0x15: bit = 54; break;
 	case 0x16: bit = 33; break;
@@ -716,26 +702,117 @@ void amd_init_ssbd(const struct cpuinfo_x86 *c)
 		if (rdmsr_safe(MSR_AMD64_LS_CFG, val) ||
 		    ({
 			    val &= ~mask;
-			    if (opt_ssbd)
+			    if (enable)
 				    val |= mask;
 			    false;
 		    }) ||
 		    wrmsr_safe(MSR_AMD64_LS_CFG, val) ||
 		    ({
 			    rdmsrl(MSR_AMD64_LS_CFG, val);
-			    (val & mask) != (opt_ssbd * mask);
+			    (val & mask) != (enable * mask);
 		    }))
 			bit = -1;
 	}
 
-	if (bit < 0)
+	return bit >= 0;
+}
+
+void amd_init_ssbd(const struct cpuinfo_x86 *c)
+{
+	struct cpu_info *info = get_cpu_info();
+
+	if (cpu_has_ssb_no)
+		return;
+
+	if (cpu_has_amd_ssbd) {
+		/* Handled by common MSR_SPEC_CTRL logic */
+		return;
+	}
+
+	if (cpu_has_virt_ssbd) {
+		wrmsrl(MSR_VIRT_SPEC_CTRL, opt_ssbd ? SPEC_CTRL_SSBD : 0);
+		goto out;
+	}
+
+	if (!set_legacy_ssbd(c, opt_ssbd)) {
 		printk_once(XENLOG_ERR "No SSBD controls available\n");
+		return;
+	}
+
+	if (!smp_processor_id())
+		setup_force_cpu_cap(X86_FEATURE_LEGACY_SSBD);
 
 out:
 	info->last_spec_ctrl = info->xen_spec_ctrl = opt_ssbd ? SPEC_CTRL_SSBD : 0;
 }
 
+static struct ssbd_core {
+	spinlock_t lock;
+	unsigned int count;
+} *ssbd_core;
+static unsigned int __read_mostly ssbd_max_cores;
+
+bool __init amd_setup_legacy_ssbd(void)
+{
+	unsigned int i;
+
+	if (boot_cpu_data.x86 != 0x17 || boot_cpu_data.x86_num_siblings == 1)
+		return true;
+
+	/*
+	 * One could be forgiven for thinking that c->x86_max_cores is the
+	 * correct value to use here.
+	 *
+	 * However, that value is derived from the current configuration, and
+	 * c->cpu_core_id is sparse on all but the top end CPUs.  Derive
+	 * max_cpus from ApicIdCoreIdSize which will cover any sparseness.
+	 */
+	if (boot_cpu_data.extended_cpuid_level >= 0x80000008) {
+		ssbd_max_cores = 1u << MASK_EXTR(cpuid_ecx(0x80000008), 0xf000);
+		ssbd_max_cores /= boot_cpu_data.x86_num_siblings;
+	}
+	if (!ssbd_max_cores)
+		return false;
+
+	/* Max is two sockets for Fam17h hardware. */
+	ssbd_core = xzalloc_array(struct ssbd_core, ssbd_max_cores * 2);
+	if (!ssbd_core)
+		return false;
+
+	for (i = 0; i < ssbd_max_cores * 2; i++) {
+		spin_lock_init(&ssbd_core[i].lock);
+		/* Record the current state. */
+		ssbd_core[i].count = opt_ssbd ?
+				     boot_cpu_data.x86_num_siblings : 0;
+	}
+
+	return true;
+}
+
+void amd_set_legacy_ssbd(bool enable)
+{
+	const struct cpuinfo_x86 *c = &current_cpu_data;
+	struct ssbd_core *core;
+	unsigned long flags;
+
+	if (c->x86 != 0x17 || c->x86_num_siblings == 1) {
+		set_legacy_ssbd(c, enable);
+		return;
+	}
+
+	ASSERT(c->phys_proc_id < 2);
+	ASSERT(c->cpu_core_id < ssbd_max_cores);
+	core = &ssbd_core[c->phys_proc_id * ssbd_max_cores + c->cpu_core_id];
+	spin_lock_irqsave(&core->lock, flags);
+	core->count += enable ? 1 : -1;
+	ASSERT(core->count <= c->x86_num_siblings);
+	if ((enable && core->count == 1) ||
+	    (!enable && core->count == 0))
+		BUG_ON(!set_legacy_ssbd(c, enable));
+	spin_unlock_irqrestore(&core->lock, flags);
+}
+
 void __init detect_zen2_null_seg_behaviour(void)
 {
 	uint64_t base;
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 56c7b30b32..10a5a77ad7 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -3134,6 +3134,15 @@ void vmexit_virt_spec_ctrl(void)
         wrmsr(MSR_VIRT_SPEC_CTRL, val, 0);
         curr->arch.msrs->spec_ctrl.raw = lo;
         info->last_spec_ctrl = val;
+
+        return;
+    }
+
+    ASSERT(boot_cpu_has(X86_FEATURE_LEGACY_SSBD));
+    if ( val != info->last_spec_ctrl )
+    {
+        amd_set_legacy_ssbd(val & SPEC_CTRL_SSBD);
+        info->last_spec_ctrl = val;
     }
 }
@@ -3147,7 +3156,10 @@ void vmentry_virt_spec_ctrl(void)
     ASSERT(!boot_cpu_has(X86_FEATURE_SC_MSR_HVM));
     if ( val != info->last_spec_ctrl )
     {
-        wrmsr(MSR_VIRT_SPEC_CTRL, val, 0);
+        if ( boot_cpu_has(X86_FEATURE_LEGACY_SSBD) )
+            amd_set_legacy_ssbd(val & SPEC_CTRL_SSBD);
+        else
+            wrmsr(MSR_VIRT_SPEC_CTRL, val, 0);
         info->last_spec_ctrl = val;
     }
 
diff --git a/xen/arch/x86/include/asm/amd.h b/xen/arch/x86/include/asm/amd.h
index a82382e6bf..823e2f3bd2 100644
--- a/xen/arch/x86/include/asm/amd.h
+++ b/xen/arch/x86/include/asm/amd.h
@@ -151,4 +151,7 @@ void check_enable_amd_mmconf_dmi(void);
 extern bool amd_acpi_c1e_quirk;
 void amd_check_disable_c1e(unsigned int port, u8 value);
 
+bool amd_setup_legacy_ssbd(void);
+void amd_set_legacy_ssbd(bool enable);
+
 #endif /* __AMD_H__ */
diff --git a/xen/arch/x86/include/asm/cpufeatures.h b/xen/arch/x86/include/asm/cpufeatures.h
index a2c37bfdd4..f12d423fe9 100644
--- a/xen/arch/x86/include/asm/cpufeatures.h
+++ b/xen/arch/x86/include/asm/cpufeatures.h
@@ -40,6 +40,7 @@ XEN_CPUFEATURE(SC_VERW_HVM,       X86_SYNTH(24)) /* VERW used by Xen for HVM */
 XEN_CPUFEATURE(SC_VERW_IDLE,      X86_SYNTH(25)) /* VERW used by Xen for idle */
 XEN_CPUFEATURE(XEN_SHSTK,         X86_SYNTH(26)) /* Xen uses CET Shadow Stacks */
 XEN_CPUFEATURE(VIRT_SC_MSR_HVM,   X86_SYNTH(27)) /* MSR_VIRT_SPEC_CTRL exposed to HVM */
+XEN_CPUFEATURE(LEGACY_SSBD,       X86_SYNTH(28)) /* LS_CFG available for SSBD */
 
 /* Bug words follow the synthetic words. */
 #define X86_NR_BUG 1
diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
index 2c46e1485f..c7f8ec29f4 100644
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -22,6 +22,7 @@
 #include <xen/param.h>
 #include <xen/warning.h>
 
+#include <asm/amd.h>
 #include <asm/hvm/svm/svm.h>
 #include <asm/microcode.h>
 #include <asm/msr.h>
@@ -1056,7 +1057,8 @@ void __init init_speculation_mitigations(void)
             setup_force_cpu_cap(X86_FEATURE_SC_MSR_HVM);
     }
 
-    if ( opt_msr_sc_hvm && cpu_has_virt_ssbd )
+    if ( opt_msr_sc_hvm && (cpu_has_virt_ssbd ||
+         (boot_cpu_has(X86_FEATURE_LEGACY_SSBD) && amd_setup_legacy_ssbd())) )
         setup_force_cpu_cap(X86_FEATURE_VIRT_SC_MSR_HVM);
 
     /* If we have IBRS available, see whether we should use it. */
-- 
2.34.1
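
For context, a minimal sketch of the guest-facing side that this series virtualizes (not part of the patch; the MSR index and bit position are taken from the AMD SSBD whitepaper referenced in amd.c, and wrmsr_u64() / guest_set_ssbd() are hypothetical illustrative helpers): a guest that sees VIRT_SSBD enumerated toggles SSBD by writing bit 2 of MSR 0xc001011f, and Xen then applies that selection on vmentry/vmexit either through the real VIRT_SPEC_CTRL MSR or, with this patch, through the family-specific LS_CFG bit.

    #include <stdbool.h>
    #include <stdint.h>

    #define MSR_VIRT_SPEC_CTRL 0xc001011fU  /* AMD VIRT_SPEC_CTRL */
    #define SPEC_CTRL_SSBD     (1U << 2)    /* SSBD bit, same position as in SPEC_CTRL */

    /* Hypothetical raw WRMSR wrapper; a real guest kernel provides its own. */
    static inline void wrmsr_u64(uint32_t msr, uint64_t val)
    {
        asm volatile ("wrmsr"
                      :: "c" (msr), "a" ((uint32_t)val),
                         "d" ((uint32_t)(val >> 32))
                      : "memory");
    }

    /*
     * Guest-side view: request or drop SSBD via the virtualized MSR.
     * Must run at CPL0 inside the guest; the write is intercepted by Xen.
     */
    static void guest_set_ssbd(bool enable)
    {
        wrmsr_u64(MSR_VIRT_SPEC_CTRL, enable ? SPEC_CTRL_SSBD : 0);
    }

On Zen 1 such a toggle eventually reaches amd_set_legacy_ssbd() above, where the per-core reference count keeps the core-shared LS_CFG bit set for as long as at least one sibling thread still wants SSBD.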