RE: [PATCH v4 3/6] x86/monitor: wrap monitor_op under CONFIG_VM_EVENT
[Public]
Hi Tamas,
May I ask for a review of this patch?
Many thanks,
Penny Zheng
> -----Original Message-----
> From: Penny, Zheng <penny.zheng@xxxxxxx>
> Sent: Thursday, January 15, 2026 5:29 PM
> To: xen-devel@xxxxxxxxxxxxxxxxxxxx; Andryuk, Jason <Jason.Andryuk@xxxxxxx>
> Cc: Huang, Ray <Ray.Huang@xxxxxxx>; Penny, Zheng
> <penny.zheng@xxxxxxx>; Jan Beulich <jbeulich@xxxxxxxx>; Andrew Cooper
> <andrew.cooper3@xxxxxxxxxx>; Roger Pau Monné <roger.pau@xxxxxxxxxx>; Tamas
> K Lengyel <tamas@xxxxxxxxxxxxx>; Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>;
> Petre Pircalabu <ppircalabu@xxxxxxxxxxxxxxx>
> Subject: [PATCH v4 3/6] x86/monitor: wrap monitor_op under CONFIG_VM_EVENT
>
> The monitor_op feature is based on the VM event subsystem, so monitor.o
> shall be wrapped under CONFIG_VM_EVENT.
> The following functions are only invoked by monitor_op, so they shall all
> be wrapped with CONFIG_VM_EVENT as well (otherwise they would become
> unreachable and violate MISRA rule 2.1 when VM_EVENT=n):
> - hvm_enable_msr_interception
> - hvm_function_table.enable_msr_interception
> - hvm_has_set_descriptor_access_exiting
> - hvm_function_table.set_descriptor_access_exiting
> - arch_monitor_get_capabilities
> Function monitored_msr() still needs a stub to pass compilation when
> VM_EVENT=n.
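>
> For reference, the resulting declare-or-stub pattern in asm/monitor.h has
> roughly the following shape (an illustrative sketch only -- the exact hunk
> is in the diff below):
>
>     #ifdef CONFIG_VM_EVENT
>     /* Out-of-line implementation, built from monitor.o when VM_EVENT=y. */
>     bool monitored_msr(const struct domain *d, u32 msr);
>     #else
>     /* Inline stub so callers still compile when VM_EVENT=n. */
>     static inline bool monitored_msr(const struct domain *d, u32 msr)
>     {
>         return false;
>     }
>     #endif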
>
> Signed-off-by: Penny Zheng <Penny.Zheng@xxxxxxx>
> Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
> Reviewed-by: Jason Andryuk <jason.andryuk@xxxxxxx>
> ---
> xen/arch/x86/hvm/Makefile | 2 +-
> xen/arch/x86/hvm/svm/svm.c | 8 +++++++-
> xen/arch/x86/hvm/vmx/vmx.c | 10 ++++++++++
> xen/arch/x86/include/asm/hvm/hvm.h | 18 +++++++++++-------
> xen/arch/x86/include/asm/monitor.h | 9 +++++++++
> 5 files changed, 38 insertions(+), 9 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/Makefile b/xen/arch/x86/hvm/Makefile
> index 1b97bdc624..ee4b45a4ee 100644
> --- a/xen/arch/x86/hvm/Makefile
> +++ b/xen/arch/x86/hvm/Makefile
> @@ -16,7 +16,7 @@ obj-y += io.o
> obj-y += ioreq.o
> obj-y += irq.o
> obj-y += mmio.o
> -obj-y += monitor.o
> +obj-$(CONFIG_VM_EVENT) += monitor.o
> obj-y += mtrr.o
> obj-y += nestedhvm.o
> obj-y += pmtimer.o
> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
> index 21f355a657..5d23603fc1 100644
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -297,6 +297,7 @@ void svm_intercept_msr(struct vcpu *v, uint32_t msr, int flags)
>         __clear_bit(msr * 2 + 1, msr_bit);
> }
>
> +#ifdef CONFIG_VM_EVENT
> static void cf_check svm_enable_msr_interception(struct domain *d, uint32_t msr)
> {
> struct vcpu *v;
> @@ -304,6 +305,7 @@ static void cf_check svm_enable_msr_interception(struct domain *d, uint32_t msr)
> for_each_vcpu ( d, v )
>         svm_intercept_msr(v, msr, MSR_INTERCEPT_WRITE);
> }
> +#endif /* CONFIG_VM_EVENT */
>
> static void svm_save_dr(struct vcpu *v)
> {
> @@ -824,6 +826,7 @@ static void cf_check svm_set_rdtsc_exiting(struct vcpu *v, bool enable)
>     vmcb_set_general2_intercepts(vmcb, general2_intercepts);
> }
>
> +#ifdef CONFIG_VM_EVENT
> static void cf_check svm_set_descriptor_access_exiting(
> struct vcpu *v, bool enable)
> {
> @@ -841,6 +844,7 @@ static void cf_check svm_set_descriptor_access_exiting(
>
>     vmcb_set_general1_intercepts(vmcb, general1_intercepts);
> }
> +#endif /* CONFIG_VM_EVENT */
>
> static unsigned int cf_check svm_get_insn_bytes(struct vcpu *v, uint8_t *buf)
> {
> @@ -2454,9 +2458,11 @@ static struct hvm_function_table __initdata_cf_clobber svm_function_table = {
> .fpu_dirty_intercept = svm_fpu_dirty_intercept,
> .msr_read_intercept = svm_msr_read_intercept,
> .msr_write_intercept = svm_msr_write_intercept,
> +#ifdef CONFIG_VM_EVENT
> .enable_msr_interception = svm_enable_msr_interception,
> - .set_rdtsc_exiting = svm_set_rdtsc_exiting,
> .set_descriptor_access_exiting = svm_set_descriptor_access_exiting,
> +#endif
> + .set_rdtsc_exiting = svm_set_rdtsc_exiting,
> .get_insn_bytes = svm_get_insn_bytes,
>
>     .nhvm_vcpu_initialise = nsvm_vcpu_initialise,
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index 89f9d9c7f6..40e4c71244 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -1581,6 +1581,7 @@ static void cf_check vmx_set_rdtsc_exiting(struct vcpu *v, bool enable)
> vmx_vmcs_exit(v);
> }
>
> +#ifdef CONFIG_VM_EVENT
> static void cf_check vmx_set_descriptor_access_exiting(
> struct vcpu *v, bool enable)
> {
> @@ -1595,6 +1596,7 @@ static void cf_check vmx_set_descriptor_access_exiting(
> vmx_update_secondary_exec_control(v);
> vmx_vmcs_exit(v);
> }
> +#endif /* CONFIG_VM_EVENT */
>
> static void cf_check vmx_init_hypercall_page(void *p)
> {
> @@ -2474,6 +2476,7 @@ static void cf_check vmx_handle_eoi(uint8_t vector, int isr)
>     printk_once(XENLOG_WARNING "EOI for %02x but SVI=%02x\n", vector, old_svi);
> }
>
> +#ifdef CONFIG_VM_EVENT
> static void cf_check vmx_enable_msr_interception(struct domain *d, uint32_t msr)
> {
> struct vcpu *v;
> @@ -2481,6 +2484,7 @@ static void cf_check vmx_enable_msr_interception(struct domain *d, uint32_t msr)
> for_each_vcpu ( d, v )
>         vmx_set_msr_intercept(v, msr, VMX_MSR_W);
> }
> +#endif /* CONFIG_VM_EVENT */
>
> #ifdef CONFIG_ALTP2M
>
> @@ -2932,7 +2936,9 @@ static struct hvm_function_table __initdata_cf_clobber vmx_function_table = {
> .nhvm_domain_relinquish_resources = nvmx_domain_relinquish_resources,
> .update_vlapic_mode = vmx_vlapic_msr_changed,
> .nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
> +#ifdef CONFIG_VM_EVENT
> .enable_msr_interception = vmx_enable_msr_interception,
> +#endif
> #ifdef CONFIG_ALTP2M
> .altp2m_vcpu_update_p2m = vmx_vcpu_update_eptp,
>     .altp2m_vcpu_update_vmfunc_ve = vmx_vcpu_update_vmfunc_ve,
> @@ -3141,9 +3147,11 @@ const struct hvm_function_table * __init start_vmx(void)
>
> vmx_function_table.caps.singlestep = cpu_has_monitor_trap_flag;
>
> +#ifdef CONFIG_VM_EVENT
> if ( cpu_has_vmx_dt_exiting )
> vmx_function_table.set_descriptor_access_exiting =
> vmx_set_descriptor_access_exiting;
> +#endif
>
> /*
> * Do not enable EPT when (!cpu_has_vmx_pat), to prevent security hole
> @@ -3214,8 +3222,10 @@ void __init vmx_fill_funcs(void)
> if ( !cpu_has_xen_ibt )
> return;
>
> +#ifdef CONFIG_VM_EVENT
> vmx_function_table.set_descriptor_access_exiting =
> vmx_set_descriptor_access_exiting;
> +#endif
>
> vmx_function_table.update_eoi_exit_bitmap = vmx_update_eoi_exit_bitmap;
> vmx_function_table.process_isr = vmx_process_isr;
> diff --git a/xen/arch/x86/include/asm/hvm/hvm.h b/xen/arch/x86/include/asm/hvm/hvm.h
> index 666fa402a8..af042ae858 100644
> --- a/xen/arch/x86/include/asm/hvm/hvm.h
> +++ b/xen/arch/x86/include/asm/hvm/hvm.h
> @@ -192,7 +192,11 @@ struct hvm_function_table {
> void (*handle_cd)(struct vcpu *v, unsigned long value);
> void (*set_info_guest)(struct vcpu *v);
> void (*set_rdtsc_exiting)(struct vcpu *v, bool enable);
> +
> +#ifdef CONFIG_VM_EVENT
> void (*set_descriptor_access_exiting)(struct vcpu *v, bool enable);
> + void (*enable_msr_interception)(struct domain *d, uint32_t msr);
> +#endif
>
> /* Nested HVM */
>     int (*nhvm_vcpu_initialise)(struct vcpu *v);
> @@ -224,8 +228,6 @@ struct hvm_function_table {
> paddr_t *L1_gpa, unsigned int *page_order,
> uint8_t *p2m_acc, struct npfec npfec);
>
> - void (*enable_msr_interception)(struct domain *d, uint32_t msr);
> -
> #ifdef CONFIG_ALTP2M
> /* Alternate p2m */
>     void (*altp2m_vcpu_update_p2m)(struct vcpu *v);
> @@ -435,11 +437,18 @@ static inline bool using_svm(void)
>
> #define hvm_long_mode_active(v) (!!((v)->arch.hvm.guest_efer & EFER_LMA))
>
> +#ifdef CONFIG_VM_EVENT
> static inline bool hvm_has_set_descriptor_access_exiting(void)
> {
> return hvm_funcs.set_descriptor_access_exiting;
> }
>
> +static inline void hvm_enable_msr_interception(struct domain *d, uint32_t msr)
> +{
> +    alternative_vcall(hvm_funcs.enable_msr_interception, d, msr);
> +}
> +#endif /* CONFIG_VM_EVENT */
> +
> static inline void hvm_domain_creation_finished(struct domain *d)
> {
>     if ( hvm_funcs.domain_creation_finished )
> @@ -681,11 +690,6 @@ static inline int nhvm_hap_walk_L1_p2m(
>         v, L2_gpa, L1_gpa, page_order, p2m_acc, npfec);
> }
>
> -static inline void hvm_enable_msr_interception(struct domain *d, uint32_t msr)
> -{
> - alternative_vcall(hvm_funcs.enable_msr_interception, d, msr);
> -}
> -
> static inline bool hvm_is_singlestep_supported(void)
> {
> return hvm_funcs.caps.singlestep;
> diff --git a/xen/arch/x86/include/asm/monitor.h b/xen/arch/x86/include/asm/monitor.h
> index 3c64d8258f..9249324fd0 100644
> --- a/xen/arch/x86/include/asm/monitor.h
> +++ b/xen/arch/x86/include/asm/monitor.h
> @@ -71,6 +71,7 @@ int arch_monitor_domctl_op(struct domain *d, struct xen_domctl_monitor_op *mop)
> return rc;
> }
>
> +#ifdef CONFIG_VM_EVENT
> static inline uint32_t arch_monitor_get_capabilities(struct domain *d)
> {
> uint32_t capabilities = 0;
> @@ -102,6 +103,7 @@ static inline uint32_t arch_monitor_get_capabilities(struct domain *d)
>
> return capabilities;
> }
> +#endif /* CONFIG_VM_EVENT */
>
> int arch_monitor_domctl_event(struct domain *d,
>                               struct xen_domctl_monitor_op *mop);
> @@ -123,7 +125,14 @@ static inline void arch_monitor_cleanup_domain(struct domain *d) {}
>
> #endif
>
> +#ifdef CONFIG_VM_EVENT
> bool monitored_msr(const struct domain *d, u32 msr);
> +#else
> +static inline bool monitored_msr(const struct domain *d, u32 msr)
> +{
> + return false;
> +}
> +#endif
> bool monitored_msr_onchangeonly(const struct domain *d, u32 msr);
>
> #endif /* __ASM_X86_MONITOR_H__ */
> --
> 2.34.1