
Re: [PATCH 4/4] x86/hvm: create hvm_funcs for {svm,vmx}_{set,clear}_msr_intercept()




On 2/28/23 16:58, Jan Beulich wrote:
On 27.02.2023 08:56, Xenia Ragiadakou wrote:
Add hvm_funcs hooks for {set,clear}_msr_intercept() so that the MSR
intercepts can be controlled from common vpmu code.

What is this going to buy us? All calls ...

--- a/xen/arch/x86/cpu/vpmu_amd.c
+++ b/xen/arch/x86/cpu/vpmu_amd.c
@@ -165,9 +165,9 @@ static void amd_vpmu_set_msr_bitmap(struct vcpu *v)
 
     for ( i = 0; i < num_counters; i++ )
     {
-        svm_clear_msr_intercept(v, counters[i], MSR_RW);
-        svm_set_msr_intercept(v, ctrls[i], MSR_W);
-        svm_clear_msr_intercept(v, ctrls[i], MSR_R);
+        hvm_clear_msr_intercept(v, counters[i], MSR_RW);
+        hvm_set_msr_intercept(v, ctrls[i], MSR_W);
+        hvm_clear_msr_intercept(v, ctrls[i], MSR_R);
     }
 
     msr_bitmap_on(vpmu);
@@ -180,8 +180,8 @@ static void amd_vpmu_unset_msr_bitmap(struct vcpu *v)
 
     for ( i = 0; i < num_counters; i++ )
     {
-        svm_set_msr_intercept(v, counters[i], MSR_RW);
-        svm_set_msr_intercept(v, ctrls[i], MSR_RW);
+        hvm_set_msr_intercept(v, counters[i], MSR_RW);
+        hvm_set_msr_intercept(v, ctrls[i], MSR_RW);
     }
 
     msr_bitmap_off(vpmu);

... here will go to the SVM functions anyway, while ...

--- a/xen/arch/x86/cpu/vpmu_intel.c
+++ b/xen/arch/x86/cpu/vpmu_intel.c
@@ -230,22 +230,22 @@ static void core2_vpmu_set_msr_bitmap(struct vcpu *v)
 
     /* Allow Read/Write PMU Counters MSR Directly. */
     for ( i = 0; i < fixed_pmc_cnt; i++ )
-        vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
+        hvm_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
 
     for ( i = 0; i < arch_pmc_cnt; i++ )
     {
-        vmx_clear_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
+        hvm_clear_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
 
         if ( full_width_write )
-            vmx_clear_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
+            hvm_clear_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
     }
 
     /* Allow Read PMU Non-global Controls Directly. */
     for ( i = 0; i < arch_pmc_cnt; i++ )
-        vmx_clear_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
+        hvm_clear_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
 
-    vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
-    vmx_clear_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
+    hvm_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
+    hvm_clear_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
 }
 
 static void core2_vpmu_unset_msr_bitmap(struct vcpu *v)
@@ -253,21 +253,21 @@ static void core2_vpmu_unset_msr_bitmap(struct vcpu *v)
     unsigned int i;
 
     for ( i = 0; i < fixed_pmc_cnt; i++ )
-        vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
+        hvm_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
 
     for ( i = 0; i < arch_pmc_cnt; i++ )
     {
-        vmx_set_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
+        hvm_set_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
 
         if ( full_width_write )
-            vmx_set_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
+            hvm_set_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
     }
 
     for ( i = 0; i < arch_pmc_cnt; i++ )
-        vmx_set_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
+        hvm_set_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
 
-    vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
-    vmx_set_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
+    hvm_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
+    hvm_set_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
 }
 
 static inline void __core2_vpmu_save(struct vcpu *v)

... all calls here will go to the VMX ones. To make either vpmu_<vendor>.c
build without that vendor's virtualization support enabled, isn't it
enough to have ...

@@ -916,6 +932,18 @@ static inline void hvm_set_reg(struct vcpu *v, unsigned int reg, uint64_t val)
     ASSERT_UNREACHABLE();
 }
 
+static inline void hvm_set_msr_intercept(struct vcpu *v, uint32_t msr,
+                                         int flags)
+{
+    ASSERT_UNREACHABLE();
+}
+
+static inline void hvm_clear_msr_intercept(struct vcpu *v, uint32_t msr,
+                                           int flags)
+{
+    ASSERT_UNREACHABLE();
+}

... respective SVM and VMX stubs in place instead?
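
I.e. something along these lines (only a sketch of the idea; the exact
Kconfig guard name and header placement are assumptions, not taken from
the patch):

#ifdef CONFIG_SVM
/* Normal declarations, resolving to the real SVM implementations. */
void svm_set_msr_intercept(struct vcpu *v, uint32_t msr, int flags);
void svm_clear_msr_intercept(struct vcpu *v, uint32_t msr, int flags);
#else
/* Stubs, so vpmu_amd.c still compiles when SVM support is configured out. */
static inline void svm_set_msr_intercept(struct vcpu *v, uint32_t msr,
                                         int flags)
{
    ASSERT_UNREACHABLE();
}

static inline void svm_clear_msr_intercept(struct vcpu *v, uint32_t msr,
                                           int flags)
{
    ASSERT_UNREACHABLE();
}
#endif

plus the analogous vmx_{set,clear}_msr_intercept() pair for vpmu_intel.c.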

IMO it is more readable, and these looked like very good candidates for abstraction, since they do exactly the same thing under both technologies. Are you suggesting that their use in common code should be discouraged, and that they should not be exported via the hvm_funcs interface? Or just that the amount of churn cannot be justified? IIUC Andrew also suggested using hvm_funcs for the MSR intercept handling, but I'm not sure whether he had this or something else in mind.
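
For reference, what I had in mind is just the usual thin-wrapper pattern:
two new hvm_funcs members, which SVM fills with svm_{set,clear}_msr_intercept()
and VMX with vmx_{set,clear}_msr_intercept(), plus wrappers along these
lines (a sketch; it assumes the alternative_vcall() dispatch used by the
neighbouring wrappers in hvm.h):

struct hvm_function_table {
    /* ... existing members ... */
    void (*set_msr_intercept)(struct vcpu *v, uint32_t msr, int flags);
    void (*clear_msr_intercept)(struct vcpu *v, uint32_t msr, int flags);
};

static inline void hvm_set_msr_intercept(struct vcpu *v, uint32_t msr,
                                         int flags)
{
    /* Dispatches to whichever vendor implementation filled the hook. */
    alternative_vcall(hvm_funcs.set_msr_intercept, v, msr, flags);
}

static inline void hvm_clear_msr_intercept(struct vcpu *v, uint32_t msr,
                                           int flags)
{
    alternative_vcall(hvm_funcs.clear_msr_intercept, v, msr, flags);
}

That way the vpmu code above stays vendor-agnostic, at the cost of an
indirect (albeit patched) call even in builds with only one vendor enabled.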

--
Xenia



 

