
[PATCH v2 3/3] x86: Add support for AMD's Automatic IBRS



In cases where AutoIBRS is supported by the host:

* Prefer AutoIBRS over retpolines as the BTI mitigation in the
  heuristic calculations.
* Always enable AutoIBRS if IBRS is chosen as a BTI mitigation.
* Avoid stuffing the RAS/RSB on VMEXIT if AutoIBRS is enabled.

Signed-off-by: Alejandro Vallejo <alejandro.vallejo@xxxxxxxxx>
---
v2:
  * Gated the CPUID read into e21a on the presence of the leaf
  * Add auto-ibrs to trampoline_efer if chosen
  * Remove smpboot.c modifications, as they are not needed after
    trampoline_efer is modified
  * Avoid the AutoIBRS delay as it doesn't provide any benefit.
---
 xen/arch/x86/spec_ctrl.c | 45 ++++++++++++++++++++++++++++++----------
 1 file changed, 34 insertions(+), 11 deletions(-)
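
For reviewers: a rough, standalone sketch of the BTI selection order this
patch aims for (AutoIBRS > retpoline > plain IBRS).  The predicate names
below are illustrative stand-ins rather than the real Xen symbols; the
actual logic is in the init_speculation_mitigations() hunk further down.

/* Illustrative only -- compiles standalone, not part of the patch. */
#include <stdbool.h>
#include <stdio.h>

enum bti_mitigation { BTI_NONE, BTI_AUTO_IBRS, BTI_RETPOLINE, BTI_IBRS };

static enum bti_mitigation choose_bti(bool has_auto_ibrs, bool has_thunk,
                                      bool retpoline_is_safe, bool has_ibrs)
{
    /* AutoIBRS first: no manual RSB/RAS stuffing needed on VMEXIT. */
    if ( has_auto_ibrs )
        return BTI_AUTO_IBRS;

    /* Otherwise prefer the compiler-aided thunk, if safe on this hardware. */
    if ( has_thunk && retpoline_is_safe )
        return BTI_RETPOLINE;

    /* Fall back to plain IBRS when available. */
    if ( has_ibrs )
        return BTI_IBRS;

    return BTI_NONE;
}

int main(void)
{
    printf("%d\n", choose_bti(true, true, true, true));  /* BTI_AUTO_IBRS */
    printf("%d\n", choose_bti(false, true, true, true)); /* BTI_RETPOLINE */
    return 0;
}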

diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
index 50d467f74c..36231e65fb 100644
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -390,7 +390,7 @@ custom_param("pv-l1tf", parse_pv_l1tf);
 
 static void __init print_details(enum ind_thunk thunk)
 {
-    unsigned int _7d0 = 0, _7d2 = 0, e8b = 0, max = 0, tmp;
+    unsigned int _7d0 = 0, _7d2 = 0, e8b = 0, e21a = 0, max = 0, tmp;
     uint64_t caps = 0;
 
     /* Collect diagnostics about available mitigations. */
@@ -400,6 +400,8 @@ static void __init print_details(enum ind_thunk thunk)
         cpuid_count(7, 2, &tmp, &tmp, &tmp, &_7d2);
     if ( boot_cpu_data.extended_cpuid_level >= 0x80000008 )
         cpuid(0x80000008, &tmp, &e8b, &tmp, &tmp);
+    if ( boot_cpu_data.extended_cpuid_level >= 0x80000021 )
+        cpuid(0x80000021, &e21a, &tmp, &tmp, &tmp);
     if ( cpu_has_arch_caps )
         rdmsrl(MSR_ARCH_CAPABILITIES, caps);
 
@@ -430,11 +432,12 @@ static void __init print_details(enum ind_thunk thunk)
            (e8b  & cpufeat_mask(X86_FEATURE_IBPB_RET))       ? " IBPB_RET"      : "");
 
     /* Hardware features which need driving to mitigate issues. */
-    printk("  Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s\n",
+    printk("  Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
            (e8b  & cpufeat_mask(X86_FEATURE_IBPB)) ||
            (_7d0 & cpufeat_mask(X86_FEATURE_IBRSB))          ? " IBPB"          : "",
            (e8b  & cpufeat_mask(X86_FEATURE_IBRS)) ||
            (_7d0 & cpufeat_mask(X86_FEATURE_IBRSB))          ? " IBRS"          : "",
+           (e21a & cpufeat_mask(X86_FEATURE_AUTO_IBRS))      ? " AUTO_IBRS"     : "",
            (e8b  & cpufeat_mask(X86_FEATURE_AMD_STIBP)) ||
            (_7d0 & cpufeat_mask(X86_FEATURE_STIBP))          ? " STIBP"         : "",
            (e8b  & cpufeat_mask(X86_FEATURE_AMD_SSBD)) ||
@@ -468,7 +471,9 @@ static void __init print_details(enum ind_thunk thunk)
            thunk == THUNK_JMP       ? "JMP" : "?",
            (!boot_cpu_has(X86_FEATURE_IBRSB) &&
             !boot_cpu_has(X86_FEATURE_IBRS))         ? "No" :
-           (default_xen_spec_ctrl & SPEC_CTRL_IBRS)  ? "IBRS+" :  "IBRS-",
+           (cpu_has_auto_ibrs &&
+            (default_xen_spec_ctrl & SPEC_CTRL_IBRS)) ? "AUTO_IBRS+" :
+            (default_xen_spec_ctrl & SPEC_CTRL_IBRS)  ? "IBRS+" : "IBRS-",
            (!boot_cpu_has(X86_FEATURE_STIBP) &&
             !boot_cpu_has(X86_FEATURE_AMD_STIBP))    ? "" :
            (default_xen_spec_ctrl & SPEC_CTRL_STIBP) ? " STIBP+" : " STIBP-",
@@ -1150,15 +1155,20 @@ void __init init_speculation_mitigations(void)
     }
     else
     {
-        /*
-         * Evaluate the safest Branch Target Injection mitigations to use.
-         * First, begin with compiler-aided mitigations.
-         */
-        if ( IS_ENABLED(CONFIG_INDIRECT_THUNK) )
+        /* Evaluate the safest BTI mitigation with the lowest overhead. */
+        if ( cpu_has_auto_ibrs )
+        {
+            /*
+             * Prefer Automatic IBRS if present.  It lets us avoid having
+             * to stuff the RSB manually on every VMEXIT.
+             */
+            ibrs = true;
+        }
+        else if ( IS_ENABLED(CONFIG_INDIRECT_THUNK) )
         {
             /*
-             * On all hardware, we'd like to use retpoline in preference to
-             * IBRS, but only if it is safe on this hardware.
+             * Otherwise, we'd like to use retpoline in preference to
+             * plain IBRS, but only if it is safe on this hardware.
              */
             if ( retpoline_safe() )
                 thunk = THUNK_RETPOLINE;
@@ -1357,7 +1367,9 @@ void __init init_speculation_mitigations(void)
      */
     if ( opt_rsb_hvm )
     {
-        setup_force_cpu_cap(X86_FEATURE_SC_RSB_HVM);
+        /* Automatic IBRS wipes the RSB for us on VMEXIT */
+        if ( !(ibrs && cpu_has_auto_ibrs) )
+            setup_force_cpu_cap(X86_FEATURE_SC_RSB_HVM);
 
         /*
          * For SVM, Xen's RSB safety actions are performed before STGI, so
@@ -1594,6 +1606,17 @@ void __init init_speculation_mitigations(void)
             barrier();
         }
 
+        /*
+         * If we're to use AutoIBRS, then set it now for the BSP and mark
+         * it in trampoline_efer so it's picked up by the wakeup code. It
+         * will be used while starting up the APs and during S3 wakeups.
+         */
+        if ( ibrs && cpu_has_auto_ibrs )
+        {
+            write_efer(read_efer() | EFER_AIBRSE);
+            bootsym(trampoline_efer) |= EFER_AIBRSE;
+        }
+
         val = bsp_delay_spec_ctrl ? 0 : default_xen_spec_ctrl;
 
         wrmsrl(MSR_SPEC_CTRL, val);
-- 
2.34.1