
Re: [PATCH 4/4] x86/hvm: create hvm_funcs for {svm,vmx}_{set,clear}_msr_intercept()


  • To: Xenia Ragiadakou <burzalodowa@xxxxxxxxx>
  • From: Jan Beulich <jbeulich@xxxxxxxx>
  • Date: Tue, 28 Feb 2023 15:58:10 +0100
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, Jun Nakajima <jun.nakajima@xxxxxxxxx>, Kevin Tian <kevin.tian@xxxxxxxxx>, xen-devel@xxxxxxxxxxxxxxxxxxxx
  • Delivery-date: Tue, 28 Feb 2023 14:58:26 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

On 27.02.2023 08:56, Xenia Ragiadakou wrote:
> Add hvm_funcs hooks for {set,clear}_msr_intercept() for controlling the msr
> intercept in common vpmu code.
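
The hook plumbing itself is outside the quoted context; presumably it follows
the usual hvm_funcs pattern, roughly as sketched below. The alternative_vcall()
dispatch is assumed from the other hvm_funcs wrappers, not quoted from the
patch:

    /* struct hvm_function_table gains two members: */
    void (*set_msr_intercept)(struct vcpu *v, uint32_t msr, int flags);
    void (*clear_msr_intercept)(struct vcpu *v, uint32_t msr, int flags);

    /* reached through CONFIG_HVM wrappers along the lines of: */
    static inline void hvm_set_msr_intercept(struct vcpu *v, uint32_t msr,
                                             int flags)
    {
        alternative_vcall(hvm_funcs.set_msr_intercept, v, msr, flags);
    }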

What is this going to buy us? All calls ...

> --- a/xen/arch/x86/cpu/vpmu_amd.c
> +++ b/xen/arch/x86/cpu/vpmu_amd.c
> @@ -165,9 +165,9 @@ static void amd_vpmu_set_msr_bitmap(struct vcpu *v)
>  
>      for ( i = 0; i < num_counters; i++ )
>      {
> -        svm_clear_msr_intercept(v, counters[i], MSR_RW);
> -        svm_set_msr_intercept(v, ctrls[i], MSR_W);
> -        svm_clear_msr_intercept(v, ctrls[i], MSR_R);
> +        hvm_clear_msr_intercept(v, counters[i], MSR_RW);
> +        hvm_set_msr_intercept(v, ctrls[i], MSR_W);
> +        hvm_clear_msr_intercept(v, ctrls[i], MSR_R);
>      }
>  
>      msr_bitmap_on(vpmu);
> @@ -180,8 +180,8 @@ static void amd_vpmu_unset_msr_bitmap(struct vcpu *v)
>  
>      for ( i = 0; i < num_counters; i++ )
>      {
> -        svm_set_msr_intercept(v, counters[i], MSR_RW);
> -        svm_set_msr_intercept(v, ctrls[i], MSR_RW);
> +        hvm_set_msr_intercept(v, counters[i], MSR_RW);
> +        hvm_set_msr_intercept(v, ctrls[i], MSR_RW);
>      }
>  
>      msr_bitmap_off(vpmu);

... here will go to the SVM functions anyway, while ...

> --- a/xen/arch/x86/cpu/vpmu_intel.c
> +++ b/xen/arch/x86/cpu/vpmu_intel.c
> @@ -230,22 +230,22 @@ static void core2_vpmu_set_msr_bitmap(struct vcpu *v)
>  
>      /* Allow Read/Write PMU Counters MSR Directly. */
>      for ( i = 0; i < fixed_pmc_cnt; i++ )
> -        vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
> +        hvm_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
>  
>      for ( i = 0; i < arch_pmc_cnt; i++ )
>      {
> -        vmx_clear_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
> +        hvm_clear_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
>  
>          if ( full_width_write )
> -            vmx_clear_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
> +            hvm_clear_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
>      }
>  
>      /* Allow Read PMU Non-global Controls Directly. */
>      for ( i = 0; i < arch_pmc_cnt; i++ )
> -        vmx_clear_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
> +        hvm_clear_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
>  
> -    vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
> -    vmx_clear_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
> +    hvm_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
> +    hvm_clear_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
>  }
>  
>  static void core2_vpmu_unset_msr_bitmap(struct vcpu *v)
> @@ -253,21 +253,21 @@ static void core2_vpmu_unset_msr_bitmap(struct vcpu *v)
>      unsigned int i;
>  
>      for ( i = 0; i < fixed_pmc_cnt; i++ )
> -        vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
> +        hvm_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
>  
>      for ( i = 0; i < arch_pmc_cnt; i++ )
>      {
> -        vmx_set_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
> +        hvm_set_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
>  
>          if ( full_width_write )
> -            vmx_set_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
> +            hvm_set_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
>      }
>  
>      for ( i = 0; i < arch_pmc_cnt; i++ )
> -        vmx_set_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
> +        hvm_set_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
>  
> -    vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
> -    vmx_set_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
> +    hvm_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
> +    hvm_set_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
>  }
>  
>  static inline void __core2_vpmu_save(struct vcpu *v)

... all calls here will go to the VMX ones. For making either vpmu_<vendor>.c
build without that vendor's virtualization enabled, isn't all it takes
to have ...

> @@ -916,6 +932,18 @@ static inline void hvm_set_reg(struct vcpu *v, unsigned int reg, uint64_t val)
>      ASSERT_UNREACHABLE();
>  }
>  
> +static inline void hvm_set_msr_intercept(struct vcpu *v, uint32_t msr,
> +                                         int flags)
> +{
> +    ASSERT_UNREACHABLE();
> +}
> +
> +static inline void hvm_clear_msr_intercept(struct vcpu *v, uint32_t msr,
> +                                           int flags)
> +{
> +    ASSERT_UNREACHABLE();
> +}

... respective SVM and VMX stubs in place instead?
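
Concretely, such stubs could mirror the hvm_* ones quoted above. A minimal
sketch, assuming the existing svm_/vmx_ helper signatures and leaving the
exact build-time guard open:

    /* Visible when the SVM code isn't built, in place of the real helpers: */
    static inline void svm_set_msr_intercept(struct vcpu *v, uint32_t msr,
                                             int flags)
    {
        ASSERT_UNREACHABLE();
    }

    static inline void svm_clear_msr_intercept(struct vcpu *v, uint32_t msr,
                                               int flags)
    {
        ASSERT_UNREACHABLE();
    }

with analogous vmx_{set,clear}_msr_intercept() stubs on the VMX side, keeping
the vpmu_<vendor>.c call sites unchanged.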

Jan