[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

RE: [PATCH v4 3/6] x86/monitor: wrap monitor_op under CONFIG_VM_EVENT


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>, Tamas K Lengyel <tamas@xxxxxxxxxxxxx>
  • From: "Penny, Zheng" <penny.zheng@xxxxxxx>
  • Date: Fri, 16 Jan 2026 07:39:04 +0000
  • Accept-language: en-US
  • Arc-authentication-results: i=1; mx.microsoft.com 1; spf=pass smtp.mailfrom=amd.com; dmarc=pass action=none header.from=amd.com; dkim=pass header.d=amd.com; arc=none
  • Arc-message-signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector10001; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1; bh=HGTSZ8piwgHaKl1GOzMZjZyoyyCvYA907ZSIxn3VuzY=; b=cS5dD8akyt3XahQYRGTVPd7UEohkTU8zaHCo1HHxCAZkC4gIzc436ZdTigOM6vamASn4yyxPVMHik6aAG4XU7+tKfFCpa2SMvTiwTu7Q2f58FBJFnq5bvwaWA6I+TI9vUq0kHp0YzPint26K7c8JEGNHLSUAZIJBN5Y4tTJ4USDXK8tja86olpkgSmyVgiq6rxdcaFfe4Nbgb5r0w907c8us594FlyUtqANvTnuK/Bux1fUXzRUmFDtsVP/a0aH02JLvvJhmzYBvfN3vgQll5WezREQAX/OkH/R2vGyEL8+bu+K06aCEcTcfsS1dZUgXsyxmxASLU15HEG4if38JKw==
  • Arc-seal: i=1; a=rsa-sha256; s=arcselector10001; d=microsoft.com; cv=none; b=i3i41AWBbuBycFApyXWQYU83XjB9cnR9j0ijo/zwKyyl8/9h9nIUMrYwIsUv4EdoR90PJo9RVrW4T7MdOLW+zHLnEsXgHv0BS91DbdKHAAUMnyruuCMuddr4D0uVFijMrvgzBqrYwt0IHz31iJn6j3UQcedkJf0HJO+6fg844zPHZ9eJzMZW0segHAlAF9pMG9U5VD7fXlncTNt8kxdFsczQQBRkB9lLKNUXmzd0H9+MR4Z84Y8NuEGQKU//cAM1H3ssS5UlkM+V/Amogyx9FGD7YyCgStA214X0JNbcHxPw5jLRppkHvmDNUCUITRC3eXCZ1KnAETKgJnZv+ZvukA==
  • Authentication-results: dkim=none (message not signed) header.d=none;dmarc=none action=none header.from=amd.com;
  • Delivery-date: Fri, 16 Jan 2026 07:39:21 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Msip_labels: MSIP_Label_f265efc6-e181-49d6-80f4-fae95cf838a0_Enabled=True;MSIP_Label_f265efc6-e181-49d6-80f4-fae95cf838a0_SiteId=3dd8961f-e488-4e60-8e11-a82d994e183d;MSIP_Label_f265efc6-e181-49d6-80f4-fae95cf838a0_SetDate=2026-01-16T07:38:56.0000000Z;MSIP_Label_f265efc6-e181-49d6-80f4-fae95cf838a0_Name=Open Source;MSIP_Label_f265efc6-e181-49d6-80f4-fae95cf838a0_ContentBits=3;MSIP_Label_f265efc6-e181-49d6-80f4-fae95cf838a0_Method=Privileged
  • Thread-index: AQHchgFzx+i/rzGyyUiVB7q9IfPMErVUadBg
  • Thread-topic: [PATCH v4 3/6] x86/monitor: wrap monitor_op under CONFIG_VM_EVENT

[Public]

Hi, Tamas

May I ask for a review on this commit?

Many thanks
Penny Zheng

> -----Original Message-----
> From: Penny, Zheng <penny.zheng@xxxxxxx>
> Sent: Thursday, January 15, 2026 5:29 PM
> To: xen-devel@xxxxxxxxxxxxxxxxxxxx; Andryuk, Jason <Jason.Andryuk@xxxxxxx>
> Cc: Huang, Ray <Ray.Huang@xxxxxxx>; Penny, Zheng
> <penny.zheng@xxxxxxx>; Jan Beulich <jbeulich@xxxxxxxx>; Andrew Cooper
> <andrew.cooper3@xxxxxxxxxx>; Roger Pau Monné <roger.pau@xxxxxxxxxx>; Tamas
> K Lengyel <tamas@xxxxxxxxxxxxx>; Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>;
> Petre Pircalabu <ppircalabu@xxxxxxxxxxxxxxx>
> Subject: [PATCH v4 3/6] x86/monitor: wrap monitor_op under CONFIG_VM_EVENT
>
> The monitor_op feature is based on the VM event subsystem, so monitor.o shall
> be wrapped under CONFIG_VM_EVENT.
> The following functions are only invoked by monitor-op, so they all shall be 
> wrapped
> with CONFIG_VM_EVENT (otherwise they will become unreachable and violate
> Misra rule 2.1 when VM_EVENT=n):
> - hvm_enable_msr_interception
>   - hvm_function_table.enable_msr_interception
> - hvm_has_set_descriptor_access_exiting
>   - hvm_function_table.set_descriptor_access_exiting
> - arch_monitor_get_capabilities
> Function monitored_msr() still needs a stub to pass compilation when
> VM_EVENT=n.
>
> Signed-off-by: Penny Zheng <Penny.Zheng@xxxxxxx>
> Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
> Reviewed-by: Jason Andryuk <jason.andryuk@xxxxxxx>
> ---
>  xen/arch/x86/hvm/Makefile          |  2 +-
>  xen/arch/x86/hvm/svm/svm.c         |  8 +++++++-
>  xen/arch/x86/hvm/vmx/vmx.c         | 10 ++++++++++
>  xen/arch/x86/include/asm/hvm/hvm.h | 18 +++++++++++-------
> xen/arch/x86/include/asm/monitor.h |  9 +++++++++
>  5 files changed, 38 insertions(+), 9 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/Makefile b/xen/arch/x86/hvm/Makefile index
> 1b97bdc624..ee4b45a4ee 100644
> --- a/xen/arch/x86/hvm/Makefile
> +++ b/xen/arch/x86/hvm/Makefile
> @@ -16,7 +16,7 @@ obj-y += io.o
>  obj-y += ioreq.o
>  obj-y += irq.o
>  obj-y += mmio.o
> -obj-y += monitor.o
> +obj-$(CONFIG_VM_EVENT) += monitor.o
>  obj-y += mtrr.o
>  obj-y += nestedhvm.o
>  obj-y += pmtimer.o
> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c index
> 21f355a657..5d23603fc1 100644
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -297,6 +297,7 @@ void svm_intercept_msr(struct vcpu *v, uint32_t msr, int
> flags)
>          __clear_bit(msr * 2 + 1, msr_bit);  }
>
> +#ifdef CONFIG_VM_EVENT
>  static void cf_check svm_enable_msr_interception(struct domain *d, uint32_t 
> msr)
> {
>      struct vcpu *v;
> @@ -304,6 +305,7 @@ static void cf_check svm_enable_msr_interception(struct
> domain *d, uint32_t msr)
>      for_each_vcpu ( d, v )
>          svm_intercept_msr(v, msr, MSR_INTERCEPT_WRITE);  }
> +#endif /* CONFIG_VM_EVENT */
>
>  static void svm_save_dr(struct vcpu *v)  { @@ -824,6 +826,7 @@ static void
> cf_check svm_set_rdtsc_exiting(struct vcpu *v, bool enable)
>      vmcb_set_general2_intercepts(vmcb, general2_intercepts);  }
>
> +#ifdef CONFIG_VM_EVENT
>  static void cf_check svm_set_descriptor_access_exiting(
>      struct vcpu *v, bool enable)
>  {
> @@ -841,6 +844,7 @@ static void cf_check svm_set_descriptor_access_exiting(
>
>      vmcb_set_general1_intercepts(vmcb, general1_intercepts);  }
> +#endif /* CONFIG_VM_EVENT */
>
>  static unsigned int cf_check svm_get_insn_bytes(struct vcpu *v, uint8_t *buf)
> { @@ -2454,9 +2458,11 @@ static struct hvm_function_table 
> __initdata_cf_clobber
> svm_function_table = {
>      .fpu_dirty_intercept  = svm_fpu_dirty_intercept,
>      .msr_read_intercept   = svm_msr_read_intercept,
>      .msr_write_intercept  = svm_msr_write_intercept,
> +#ifdef CONFIG_VM_EVENT
>      .enable_msr_interception = svm_enable_msr_interception,
> -    .set_rdtsc_exiting    = svm_set_rdtsc_exiting,
>      .set_descriptor_access_exiting = svm_set_descriptor_access_exiting,
> +#endif
> +    .set_rdtsc_exiting    = svm_set_rdtsc_exiting,
>      .get_insn_bytes       = svm_get_insn_bytes,
>
>      .nhvm_vcpu_initialise = nsvm_vcpu_initialise, diff --git
> a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c index
> 89f9d9c7f6..40e4c71244 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -1581,6 +1581,7 @@ static void cf_check vmx_set_rdtsc_exiting(struct vcpu
> *v, bool enable)
>      vmx_vmcs_exit(v);
>  }
>
> +#ifdef CONFIG_VM_EVENT
>  static void cf_check vmx_set_descriptor_access_exiting(
>      struct vcpu *v, bool enable)
>  {
> @@ -1595,6 +1596,7 @@ static void cf_check
> vmx_set_descriptor_access_exiting(
>      vmx_update_secondary_exec_control(v);
>      vmx_vmcs_exit(v);
>  }
> +#endif /* CONFIG_VM_EVENT */
>
>  static void cf_check vmx_init_hypercall_page(void *p)  { @@ -2474,6 +2476,7 
> @@
> static void cf_check vmx_handle_eoi(uint8_t vector, int isr)
>          printk_once(XENLOG_WARNING "EOI for %02x but SVI=%02x\n", vector,
> old_svi);  }
>
> +#ifdef CONFIG_VM_EVENT
>  static void cf_check vmx_enable_msr_interception(struct domain *d, uint32_t 
> msr)
> {
>      struct vcpu *v;
> @@ -2481,6 +2484,7 @@ static void cf_check
> vmx_enable_msr_interception(struct domain *d, uint32_t msr)
>      for_each_vcpu ( d, v )
>          vmx_set_msr_intercept(v, msr, VMX_MSR_W);  }
> +#endif /* CONFIG_VM_EVENT */
>
>  #ifdef CONFIG_ALTP2M
>
> @@ -2932,7 +2936,9 @@ static struct hvm_function_table __initdata_cf_clobber
> vmx_function_table = {
>      .nhvm_domain_relinquish_resources = nvmx_domain_relinquish_resources,
>      .update_vlapic_mode = vmx_vlapic_msr_changed,
>      .nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
> +#ifdef CONFIG_VM_EVENT
>      .enable_msr_interception = vmx_enable_msr_interception,
> +#endif
>  #ifdef CONFIG_ALTP2M
>      .altp2m_vcpu_update_p2m = vmx_vcpu_update_eptp,
>      .altp2m_vcpu_update_vmfunc_ve = vmx_vcpu_update_vmfunc_ve, @@ -
> 3141,9 +3147,11 @@ const struct hvm_function_table * __init start_vmx(void)
>
>      vmx_function_table.caps.singlestep = cpu_has_monitor_trap_flag;
>
> +#ifdef CONFIG_VM_EVENT
>      if ( cpu_has_vmx_dt_exiting )
>          vmx_function_table.set_descriptor_access_exiting =
>              vmx_set_descriptor_access_exiting;
> +#endif
>
>      /*
>       * Do not enable EPT when (!cpu_has_vmx_pat), to prevent security hole 
> @@ -
> 3214,8 +3222,10 @@ void __init vmx_fill_funcs(void)
>      if ( !cpu_has_xen_ibt )
>          return;
>
> +#ifdef CONFIG_VM_EVENT
>      vmx_function_table.set_descriptor_access_exiting =
>          vmx_set_descriptor_access_exiting;
> +#endif
>
>      vmx_function_table.update_eoi_exit_bitmap = vmx_update_eoi_exit_bitmap;
>      vmx_function_table.process_isr            = vmx_process_isr;
> diff --git a/xen/arch/x86/include/asm/hvm/hvm.h
> b/xen/arch/x86/include/asm/hvm/hvm.h
> index 666fa402a8..af042ae858 100644
> --- a/xen/arch/x86/include/asm/hvm/hvm.h
> +++ b/xen/arch/x86/include/asm/hvm/hvm.h
> @@ -192,7 +192,11 @@ struct hvm_function_table {
>      void (*handle_cd)(struct vcpu *v, unsigned long value);
>      void (*set_info_guest)(struct vcpu *v);
>      void (*set_rdtsc_exiting)(struct vcpu *v, bool enable);
> +
> +#ifdef CONFIG_VM_EVENT
>      void (*set_descriptor_access_exiting)(struct vcpu *v, bool enable);
> +    void (*enable_msr_interception)(struct domain *d, uint32_t msr);
> +#endif
>
>      /* Nested HVM */
>      int (*nhvm_vcpu_initialise)(struct vcpu *v); @@ -224,8 +228,6 @@ struct
> hvm_function_table {
>                                  paddr_t *L1_gpa, unsigned int *page_order,
>                                  uint8_t *p2m_acc, struct npfec npfec);
>
> -    void (*enable_msr_interception)(struct domain *d, uint32_t msr);
> -
>  #ifdef CONFIG_ALTP2M
>      /* Alternate p2m */
>      void (*altp2m_vcpu_update_p2m)(struct vcpu *v); @@ -435,11 +437,18 @@
> static inline bool using_svm(void)
>
>  #define hvm_long_mode_active(v) (!!((v)->arch.hvm.guest_efer & EFER_LMA))
>
> +#ifdef CONFIG_VM_EVENT
>  static inline bool hvm_has_set_descriptor_access_exiting(void)
>  {
>      return hvm_funcs.set_descriptor_access_exiting;
>  }
>
> +static inline void hvm_enable_msr_interception(struct domain *d,
> +uint32_t msr) {
> +    alternative_vcall(hvm_funcs.enable_msr_interception, d, msr); }
> +#endif /* CONFIG_VM_EVENT */
> +
>  static inline void hvm_domain_creation_finished(struct domain *d)  {
>      if ( hvm_funcs.domain_creation_finished ) @@ -681,11 +690,6 @@ static 
> inline
> int nhvm_hap_walk_L1_p2m(
>          v, L2_gpa, L1_gpa, page_order, p2m_acc, npfec);  }
>
> -static inline void hvm_enable_msr_interception(struct domain *d, uint32_t 
> msr) -{
> -    alternative_vcall(hvm_funcs.enable_msr_interception, d, msr);
> -}
> -
>  static inline bool hvm_is_singlestep_supported(void)  {
>      return hvm_funcs.caps.singlestep;
> diff --git a/xen/arch/x86/include/asm/monitor.h
> b/xen/arch/x86/include/asm/monitor.h
> index 3c64d8258f..9249324fd0 100644
> --- a/xen/arch/x86/include/asm/monitor.h
> +++ b/xen/arch/x86/include/asm/monitor.h
> @@ -71,6 +71,7 @@ int arch_monitor_domctl_op(struct domain *d, struct
> xen_domctl_monitor_op *mop)
>      return rc;
>  }
>
> +#ifdef CONFIG_VM_EVENT
>  static inline uint32_t arch_monitor_get_capabilities(struct domain *d)  {
>      uint32_t capabilities = 0;
> @@ -102,6 +103,7 @@ static inline uint32_t 
> arch_monitor_get_capabilities(struct
> domain *d)
>
>      return capabilities;
>  }
> +#endif /* CONFIG_VM_EVENT */
>
>  int arch_monitor_domctl_event(struct domain *d,
>                                struct xen_domctl_monitor_op *mop); @@ -123,7 
> +125,14
> @@ static inline void arch_monitor_cleanup_domain(struct domain *d) {}
>
>  #endif
>
> +#ifdef CONFIG_VM_EVENT
>  bool monitored_msr(const struct domain *d, u32 msr);
> +#else
> +static inline bool monitored_msr(const struct domain *d, u32 msr) {
> +    return false;
> +}
> +#endif
>  bool monitored_msr_onchangeonly(const struct domain *d, u32 msr);
>
>  #endif /* __ASM_X86_MONITOR_H__ */
> --
> 2.34.1




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.