
[Xen-devel] [PATCH v2 21/25] x86/cpuid: Move all leaf 7 handling into guest_cpuid()



All per-domain policy data concerning leaf 7 is accurate.  Handle it all in
guest_cpuid() by reading out of the raw array block, and introducing a dynamic
adjustment for OSPKE.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>

v2:
 * Extend the comment concerning dynamic adjustments out of current context.
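
For reference, a minimal standalone sketch of the shape of the new leaf 7
handling.  The types, constants and the example_leaf7() helper below are
illustrative stand-ins for the real Xen structures, and are not part of this
patch:

#include <stdint.h>
#include <stdbool.h>

#define EXAMPLE_CR4_PKE  (1ul << 22)  /* CR4.PKE */
#define EXAMPLE_OSPKE    (1u  <<  4)  /* CPUID.7[0].ecx bit 4 (OSPKE) */

typedef struct { uint32_t a, b, c, d; } example_leaf_t;

struct example_feat_policy {
    unsigned int max_subleaf;
    example_leaf_t raw[1];
};

/*
 * Return the leaf 7 policy value for @subleaf, applying the OSPKE dynamic
 * adjustment from the guest's CR4.  Out-of-range subleaves yield all zeros
 * (signalled here by returning false).
 */
bool example_leaf7(const struct example_feat_policy *feat, uint32_t subleaf,
                   unsigned long guest_cr4, example_leaf_t *res)
{
    if ( subleaf > feat->max_subleaf )
        return false;

    *res = feat->raw[subleaf];          /* Static, per-domain policy data. */

    /* OSPKE is clear in the policy; fast-forward CR4.PKE back in. */
    if ( subleaf == 0 && (guest_cr4 & EXAMPLE_CR4_PKE) )
        res->c |= EXAMPLE_OSPKE;

    return true;
}

The key point is that the static policy value comes straight out of the raw[]
block, and only the OSPKE bit is recalculated from the guest's CR4 at query
time.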
---
 xen/arch/x86/cpuid.c        | 46 +++++++++++++++++++++++++++++++++++++++++++--
 xen/arch/x86/hvm/hvm.c      | 17 ++++-------------
 xen/arch/x86/traps.c        | 28 ++++-----------------------
 xen/include/asm-x86/cpuid.h |  2 ++
 4 files changed, 54 insertions(+), 39 deletions(-)

diff --git a/xen/arch/x86/cpuid.c b/xen/arch/x86/cpuid.c
index d5a3bae..44f52cf 100644
--- a/xen/arch/x86/cpuid.c
+++ b/xen/arch/x86/cpuid.c
@@ -363,6 +363,7 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
      * First pass:
      * - Perform max_leaf/subleaf calculations.  Out-of-range leaves return
      *   all zeros, following the AMD model.
+     * - Fill in *res for leaves no longer handled on the legacy path.
      * - Dispatch the virtualised leaves to their respective handlers.
      */
     switch ( leaf )
@@ -376,12 +377,18 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
         case 0x7:
             if ( subleaf > p->feat.max_subleaf )
                 return;
+
+            BUG_ON(subleaf >= ARRAY_SIZE(p->feat.raw));
+            *res = p->feat.raw[subleaf];
             break;
 
         case XSTATE_CPUID:
             if ( subleaf > ARRAY_SIZE(p->xstate.raw) )
                 return;
-            break;
+
+            /* Fallthrough. */
+        default:
+            goto legacy;
         }
         break;
 
@@ -396,12 +403,47 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
     case 0x80000000 ... 0x80000000 + CPUID_GUEST_NR_EXTD - 1:
         if ( leaf > p->extd.max_leaf )
             return;
-        break;
+        goto legacy;
 
     default:
         return;
     }
 
+    /*
+     * Skip dynamic adjustments if we are in the wrong context.
+     *
+     * All dynamic adjustments depend on current register state, which will
+     * be stale if the vcpu is running elsewhere.  It is simpler, quicker, and
+     * more reliable for the caller to do nothing (consistently) than to hand
+     * back stale data which it can't use safely.
+     */
+    if ( v != curr )
+        return;
+
+    /*
+     * Second pass:
+     * - Dynamic adjustments
+     */
+    switch ( leaf )
+    {
+    case 0x7:
+        switch ( subleaf )
+        {
+        case 0:
+            /* OSPKE clear in policy.  Fast-forward CR4 back in. */
+            if ( (is_pv_vcpu(v)
+                  ? v->arch.pv_vcpu.ctrlreg[4]
+                  : v->arch.hvm_vcpu.guest_cr[4]) & X86_CR4_PKE )
+                res->c |= cpufeat_mask(X86_FEATURE_OSPKE);
+            break;
+        }
+        break;
+    }
+
+    /* Done. */
+    return;
+
+ legacy:
     /* {hvm,pv}_cpuid() have this expectation. */
     ASSERT(v == curr);
 
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index fb8f3d9..f706aad 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3355,19 +3355,6 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
 
         break;
 
-    case 0x7:
-        if ( count == 0 )
-        {
-            *ebx = d->arch.cpuid->feat._7b0;
-            *ecx = d->arch.cpuid->feat._7c0;
-            *edx = d->arch.cpuid->feat._7d0;
-
-            /* OSPKE clear in policy.  Fast-forward CR4 back in. */
-            if ( v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PKE )
-                *ecx |= cpufeat_mask(X86_FEATURE_OSPKE);
-        }
-        break;
-
     case 0xb:
         /* Fix the x2APIC identifier. */
         *edx = v->vcpu_id * 2;
@@ -3544,6 +3531,10 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
         else
             *eax = 0;
         break;
+
+    case 0x7:
+        ASSERT_UNREACHABLE();
+        /* Now handled in guest_cpuid(). */
     }
 }
 
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 443948c..47d313a 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1179,30 +1179,6 @@ void pv_cpuid(struct cpu_user_regs *regs)
         }
         break;
 
-    case 0x00000007:
-        if ( subleaf == 0 )
-        {
-            b = currd->arch.cpuid->feat._7b0;
-            c = currd->arch.cpuid->feat._7c0;
-            d = currd->arch.cpuid->feat._7d0;
-
-            if ( !is_pvh_domain(currd) )
-            {
-                /*
-                 * Delete the PVH condition when HVMLite formally replaces PVH,
-                 * and HVM guests no longer enter a PV codepath.
-                 */
-
-                /* OSPKE clear in policy.  Fast-forward CR4 back in. */
-                if ( curr->arch.pv_vcpu.ctrlreg[4] & X86_CR4_PKE )
-                    c |= cpufeat_mask(X86_FEATURE_OSPKE);
-            }
-        }
-        else
-            b = c = d = 0;
-        a = 0;
-        break;
-
     case 0x0000000a: /* Architectural Performance Monitor Features (Intel) */
         if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
              !vpmu_enabled(curr) )
@@ -1304,6 +1280,10 @@ void pv_cpuid(struct cpu_user_regs *regs)
     unsupported:
         a = b = c = d = 0;
         break;
+
+    case 0x7:
+        ASSERT_UNREACHABLE();
+        /* Now handled in guest_cpuid(). */
     }
 
     regs->rax = a;
diff --git a/xen/include/asm-x86/cpuid.h b/xen/include/asm-x86/cpuid.h
index b2ed725..c219f3c 100644
--- a/xen/include/asm-x86/cpuid.h
+++ b/xen/include/asm-x86/cpuid.h
@@ -81,12 +81,14 @@ struct cpuid_policy
      *   - {xcr0,xss}_{high,low}
      *
      * - Guest accurate:
+     *   - All of the feat union
      *   - max_{,sub}leaf
      *   - All FEATURESET_* words
      *
      * Per-domain objects:
      *
      * - Guest accurate:
+     *   - All of the feat union
      *   - max_{,sub}leaf
      *   - All FEATURESET_* words
      *
-- 
2.1.4

