[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[xen staging-4.19] x86/cpu-policy: Rearrange guest_common_*_feature_adjustments()



commit d45dfbcbc6797d038eca754aa2fe19d0b93a67b4
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Fri Jun 27 17:19:19 2025 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Mon Jul 7 22:52:15 2025 +0100

    x86/cpu-policy: Rearrange guest_common_*_feature_adjustments()
    
    Turn the if()s into switch()es, as we're going to need AMD sections.
    
    Move the RTM adjustments into the Intel section, where they ought to live.
    
    No functional change.
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    (cherry picked from commit 7c5025394dafe4b788ff9e1afc688f6becab9300)
---
 xen/arch/x86/cpu-policy.c | 69 +++++++++++++++++++++++++----------------------
 1 file changed, 37 insertions(+), 32 deletions(-)

diff --git a/xen/arch/x86/cpu-policy.c b/xen/arch/x86/cpu-policy.c
index e34cba189c..af2b4d7fa0 100644
--- a/xen/arch/x86/cpu-policy.c
+++ b/xen/arch/x86/cpu-policy.c
@@ -415,8 +415,9 @@ static void __init guest_common_default_leaves(struct cpu_policy *p)
 
 static void __init guest_common_max_feature_adjustments(uint32_t *fs)
 {
-    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+    switch ( boot_cpu_data.x86_vendor )
     {
+    case X86_VENDOR_INTEL:
         /*
          * MSR_ARCH_CAPS is just feature data, and we can offer it to guests
          * unconditionally, although limit it to Intel systems as it is highly
@@ -461,6 +462,22 @@ static void __init guest_common_max_feature_adjustments(uint32_t *fs)
              boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X &&
              raw_cpu_policy.feat.clwb )
             __set_bit(X86_FEATURE_CLWB, fs);
+
+        /*
+         * To mitigate Native-BHI, one option is to use a TSX Abort on capable
+         * systems.  This is safe even if RTM has been disabled for other
+         * reasons via MSR_TSX_{CTRL,FORCE_ABORT}.  However, a guest kernel
+         * doesn't get to know this type of information.
+         *
+         * Therefore the meaning of RTM_ALWAYS_ABORT has been adjusted, to
+         * instead mean "XBEGIN won't fault".  This is enough for a guest
+         * kernel to make an informed choice WRT mitigating Native-BHI.
+         *
+         * If RTM-capable, we can run a VM which has seen RTM_ALWAYS_ABORT.
+         */
+        if ( test_bit(X86_FEATURE_RTM, fs) )
+            __set_bit(X86_FEATURE_RTM_ALWAYS_ABORT, fs);
+        break;
     }
 
     /*
@@ -472,27 +489,13 @@ static void __init guest_common_max_feature_adjustments(uint32_t *fs)
      */
     __set_bit(X86_FEATURE_HTT, fs);
     __set_bit(X86_FEATURE_CMP_LEGACY, fs);
-
-    /*
-     * To mitigate Native-BHI, one option is to use a TSX Abort on capable
-     * systems.  This is safe even if RTM has been disabled for other reasons
-     * via MSR_TSX_{CTRL,FORCE_ABORT}.  However, a guest kernel doesn't get to
-     * know this type of information.
-     *
-     * Therefore the meaning of RTM_ALWAYS_ABORT has been adjusted, to instead
-     * mean "XBEGIN won't fault".  This is enough for a guest kernel to make
-     * an informed choice WRT mitigating Native-BHI.
-     *
-     * If RTM-capable, we can run a VM which has seen RTM_ALWAYS_ABORT.
-     */
-    if ( test_bit(X86_FEATURE_RTM, fs) )
-        __set_bit(X86_FEATURE_RTM_ALWAYS_ABORT, fs);
 }
 
 static void __init guest_common_default_feature_adjustments(uint32_t *fs)
 {
-    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+    switch ( boot_cpu_data.x86_vendor )
     {
+    case X86_VENDOR_INTEL:
         /*
         * IvyBridge client parts suffer from leakage of RDRAND data due to SRBDS
          * (XSA-320 / CVE-2020-0543), and won't be receiving microcode to
@@ -536,6 +539,23 @@ static void __init guest_common_default_feature_adjustments(uint32_t *fs)
              boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X &&
              raw_cpu_policy.feat.clwb )
             __clear_bit(X86_FEATURE_CLWB, fs);
+
+        /*
+         * On certain hardware, speculative or errata workarounds can result
+         * in TSX being placed in "force-abort" mode, where it doesn't
+         * actually function as expected, but is technically compatible with
+         * the ISA.
+         *
+         * Do not advertise RTM to guests by default if it won't actually
+         * work.  Instead, advertise RTM_ALWAYS_ABORT indicating that TSX
+         * Aborts are safe to use, e.g. for mitigating Native-BHI.
+         */
+        if ( rtm_disabled )
+        {
+            __clear_bit(X86_FEATURE_RTM, fs);
+            __set_bit(X86_FEATURE_RTM_ALWAYS_ABORT, fs);
+        }
+        break;
     }
 
     /*
@@ -547,21 +567,6 @@ static void __init guest_common_default_feature_adjustments(uint32_t *fs)
 
     if ( !cpu_has_cmp_legacy )
         __clear_bit(X86_FEATURE_CMP_LEGACY, fs);
-
-    /*
-     * On certain hardware, speculative or errata workarounds can result in
-     * TSX being placed in "force-abort" mode, where it doesn't actually
-     * function as expected, but is technically compatible with the ISA.
-     *
-     * Do not advertise RTM to guests by default if it won't actually work.
-     * Instead, advertise RTM_ALWAYS_ABORT indicating that TSX Aborts are safe
-     * to use, e.g. for mitigating Native-BHI.
-     */
-    if ( rtm_disabled )
-    {
-        __clear_bit(X86_FEATURE_RTM, fs);
-        __set_bit(X86_FEATURE_RTM_ALWAYS_ABORT, fs);
-    }
 }
 
 static void __init guest_common_feature_adjustments(uint32_t *fs)
--
generated by git-patchbot for /home/xen/git/xen.git#staging-4.19



 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.