[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [Xen-devel] [Patch v5 0/2] bug fixes for APICV
Hi Jan,
Eddie had already acked the patches.
BR,
Li Jiongxi
> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@xxxxxxxx]
> Sent: Wednesday, February 13, 2013 12:53 AM
> To: Dong, Eddie; Nakajima, Jun
> Cc: Keir Fraser; Li, Jiongxi; xen-devel
> Subject: Re: [Xen-devel] [Patch v5 0/2] bug fixes for APICV
>
> >>> On 30.01.13 at 04:23, "Li, Jiongxi" <jiongxi.li@xxxxxxxxx> wrote:
> > This patchset fixes some APICV issues, including a potential issue
> > while doing live migration and enabling APICV while guest is in X2APIC mode.
> >
> > PATCH 1/2: Xen: Fix live migration while enabling APICV.
> >
> > PATCH 2/2: Xen: Fix VMCS setting for x2APIC mode guest while enabling
> APICV.
>
> Jun, Eddie - we're still waiting for an ack of at least one of you on these
> two...
>
> Thanks, Jan
>
> > Changes v4 to v5
> > Correct coding style. Add debug log in msr enable/disable intercept
> > function for out of range case.
> >
> > Changes v3 to v4
> > According to Jan's comment, add define for numbers used in
> > GUEST_INTR_STATUS, change function name.
> > Use 'set_bit/clear_bit' instead of '__set_bit/__clear_bit'
> >
> > Changes v2 to v3
> > When enabling APICV for x2apic mode, ppr, tmict, and tmcct MSR reads still
> > need to be intercepted.
> >
> > Changes v1 to v2
> > According to Keir's comment, don't change the save/restore order for
> > compatibility.
>
>
--- Begin Message ---
Acked-by: Eddie Dong <eddie.dong@xxxxxxxxx>
> -----Original Message-----
> From: xen-devel-bounces@xxxxxxxxxxxxx
> [mailto:xen-devel-bounces@xxxxxxxxxxxxx] On Behalf Of Li, Jiongxi
> Sent: Wednesday, January 30, 2013 11:23 AM
> To: 'xen-devel@xxxxxxxxxxxxx'; Jan Beulich
> Cc: Keir Fraser
> Subject: [Xen-devel] [PATCH v5 1/2] Xen: Fix live migration while enabling
> APICV
>
> SVI should be restored in case the guest is processing a virtual interrupt
> while saving a domain state. Otherwise SVI would be missed when
> virtual interrupt delivery is enabled.
>
> Signed-off-by: Jiongxi Li <jiongxi.li@xxxxxxxxx>
>
> diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
> index ee2294c..38ff216 100644
> --- a/xen/arch/x86/hvm/vlapic.c
> +++ b/xen/arch/x86/hvm/vlapic.c
> @@ -1198,6 +1198,9 @@ static int lapic_load_regs(struct domain *d,
> hvm_domain_context_t *h)
> if ( hvm_load_entry(LAPIC_REGS, h, s->regs) != 0 )
> return -EINVAL;
>
> + if ( hvm_funcs.process_isr )
> + hvm_funcs.process_isr(vlapic_find_highest_isr(s), v);
> +
> vlapic_adjust_i8259_target(d);
> lapic_rearm(s);
> return 0;
> diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c
> index c5c503e..c961782 100644
> --- a/xen/arch/x86/hvm/vmx/intr.c
> +++ b/xen/arch/x86/hvm/vmx/intr.c
> @@ -290,8 +290,8 @@ void vmx_intr_assist(void)
> vmx_set_eoi_exit_bitmap(v, pt_vector);
>
> /* we need update the RVI field */
> - status &= ~(unsigned long)0x0FF;
> - status |= (unsigned long)0x0FF &
> + status &= ~VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK;
> + status |= VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK &
> intack.vector;
> __vmwrite(GUEST_INTR_STATUS, status);
> if (v->arch.hvm_vmx.eoi_exitmap_changed) {
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index 4d7c93f..362273b 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -1419,6 +1419,29 @@ static int
> vmx_virtual_intr_delivery_enabled(void)
> return cpu_has_vmx_virtual_intr_delivery;
> }
>
> +static void vmx_process_isr(int isr, struct vcpu *v)
> +{
> + unsigned long status;
> + u8 old;
> +
> + if ( !cpu_has_vmx_virtual_intr_delivery )
> + return;
> +
> + if ( isr < 0 )
> + isr = 0;
> +
> + vmx_vmcs_enter(v);
> + status = __vmread(GUEST_INTR_STATUS);
> + old = status >> VMX_GUEST_INTR_STATUS_SVI_OFFSET;
> + if ( isr != old )
> + {
> + status &= VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK;
> + status |= isr << VMX_GUEST_INTR_STATUS_SVI_OFFSET;
> + __vmwrite(GUEST_INTR_STATUS, status);
> + }
> + vmx_vmcs_exit(v);
> +}
> +
> static struct hvm_function_table __read_mostly vmx_function_table = {
> .name = "VMX",
> .cpu_up_prepare = vmx_cpu_up_prepare,
> @@ -1468,6 +1491,7 @@ static struct hvm_function_table __read_mostly
> vmx_function_table = {
> .nhvm_domain_relinquish_resources =
> nvmx_domain_relinquish_resources,
> .update_eoi_exit_bitmap = vmx_update_eoi_exit_bitmap,
> .virtual_intr_delivery_enabled = vmx_virtual_intr_delivery_enabled,
> + .process_isr = vmx_process_isr,
> .nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
> };
>
> diff --git a/xen/include/asm-x86/hvm/hvm.h
> b/xen/include/asm-x86/hvm/hvm.h
> index 76e9cc8..011a6a3 100644
> --- a/xen/include/asm-x86/hvm/hvm.h
> +++ b/xen/include/asm-x86/hvm/hvm.h
> @@ -183,6 +183,7 @@ struct hvm_function_table {
> /* Virtual interrupt delivery */
> void (*update_eoi_exit_bitmap)(struct vcpu *v, u8 vector, u8 trig);
> int (*virtual_intr_delivery_enabled)(void);
> + void (*process_isr)(int isr, struct vcpu *v);
>
> /*Walk nested p2m */
> int (*nhvm_hap_walk_L1_p2m)(struct vcpu *v, paddr_t L2_gpa,
> diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h
> b/xen/include/asm-x86/hvm/vmx/vmcs.h
> index 51df81e..d4958c3 100644
> --- a/xen/include/asm-x86/hvm/vmx/vmcs.h
> +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
> @@ -257,6 +257,10 @@ extern bool_t cpu_has_vmx_ins_outs_instr_info;
> */
> #define VMX_BASIC_DEFAULT1_ZERO (1ULL << 55)
>
> +/* Guest interrupt status */
> +#define VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK 0x0FF
> +#define VMX_GUEST_INTR_STATUS_SVI_OFFSET 8
> +
> /* VMCS field encodings. */
> enum vmcs_field {
> VIRTUAL_PROCESSOR_ID = 0x00000000,
> --
> 1.7.1
>
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@xxxxxxxxxxxxx
> http://lists.xen.org/xen-devel
--- End Message ---
--- Begin Message ---
Acked-by: Eddie Dong <eddie.dong@xxxxxxxxx>
> -----Original Message-----
> From: xen-devel-bounces@xxxxxxxxxxxxx
> [mailto:xen-devel-bounces@xxxxxxxxxxxxx] On Behalf Of Li, Jiongxi
> Sent: Wednesday, January 30, 2013 11:24 AM
> To: 'xen-devel@xxxxxxxxxxxxx'; Jan Beulich
> Cc: Keir Fraser
> Subject: [Xen-devel] [PATCH v5 2/2] Xen: Fix VMCS setting for x2APIC mode
> guest while enabling APICV
>
> The "APIC-register virtualization" and "virtual-interrupt delivery"
> VM-execution controls have no effect on the behavior of RDMSR/WRMSR if
> the "virtualize x2APIC mode" VM-execution control is 0.
> When guest uses x2APIC mode, we should enable "virtualize x2APIC mode"
> for APICV first.
>
> Signed-off-by: Jiongxi Li <jiongxi.li@xxxxxxxxx>
>
> diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
> index de22e03..8abdf6d 100644
> --- a/xen/arch/x86/hvm/vmx/vmcs.c
> +++ b/xen/arch/x86/hvm/vmx/vmcs.c
> @@ -190,7 +190,8 @@ static int vmx_init_vmcs_config(void)
> */
> if ( _vmx_cpu_based_exec_control &
> CPU_BASED_TPR_SHADOW )
> opt |= SECONDARY_EXEC_APIC_REGISTER_VIRT |
> - SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
> + SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
> + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
>
>
> _vmx_secondary_exec_control = adjust_vmx_controls(
> @@ -658,19 +659,60 @@ void vmx_disable_intercept_for_msr(struct vcpu
> *v, u32 msr, int type)
> */
> if ( msr <= 0x1fff )
> {
> - if (type & MSR_TYPE_R)
> - __clear_bit(msr, msr_bitmap + 0x000/BYTES_PER_LONG); /*
> read-low */
> - if (type & MSR_TYPE_W)
> - __clear_bit(msr, msr_bitmap + 0x800/BYTES_PER_LONG); /*
> write-low */
> + if ( type & MSR_TYPE_R )
> + clear_bit(msr, msr_bitmap + 0x000/BYTES_PER_LONG); /*
> read-low */
> + if ( type & MSR_TYPE_W )
> + clear_bit(msr, msr_bitmap + 0x800/BYTES_PER_LONG); /*
> write-low */
> }
> else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
> {
> msr &= 0x1fff;
> - if (type & MSR_TYPE_R)
> - __clear_bit(msr, msr_bitmap + 0x400/BYTES_PER_LONG); /*
> read-high */
> - if (type & MSR_TYPE_W)
> - __clear_bit(msr, msr_bitmap + 0xc00/BYTES_PER_LONG); /*
> write-high */
> + if ( type & MSR_TYPE_R )
> + clear_bit(msr, msr_bitmap + 0x400/BYTES_PER_LONG); /*
> read-high */
> + if ( type & MSR_TYPE_W )
> + clear_bit(msr, msr_bitmap + 0xc00/BYTES_PER_LONG); /*
> write-high */
> }
> + else
> + HVM_DBG_LOG(DBG_LEVEL_0,
> + "msr %x is out of the control range"
> + "0x00000000-0x00001fff and
> 0xc0000000-0xc0001fff"
> + "RDMSR or WRMSR will cause a VM exit", msr);
> +
> +}
> +
> +void vmx_enable_intercept_for_msr(struct vcpu *v, u32 msr, int type)
> +{
> + unsigned long *msr_bitmap = v->arch.hvm_vmx.msr_bitmap;
> +
> + /* VMX MSR bitmap supported? */
> + if ( msr_bitmap == NULL )
> + return;
> +
> + /*
> + * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
> + * have the write-low and read-high bitmap offsets the wrong way
> round.
> + * We can control MSRs 0x00000000-0x00001fff and
> 0xc0000000-0xc0001fff.
> + */
> + if ( msr <= 0x1fff )
> + {
> + if ( type & MSR_TYPE_R )
> + set_bit(msr, msr_bitmap + 0x000/BYTES_PER_LONG); /*
> read-low */
> + if ( type & MSR_TYPE_W )
> + set_bit(msr, msr_bitmap + 0x800/BYTES_PER_LONG); /*
> write-low */
> + }
> + else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
> + {
> + msr &= 0x1fff;
> + if ( type & MSR_TYPE_R )
> + set_bit(msr, msr_bitmap + 0x400/BYTES_PER_LONG); /*
> read-high */
> + if ( type & MSR_TYPE_W )
> + set_bit(msr, msr_bitmap + 0xc00/BYTES_PER_LONG); /*
> write-high */
> + }
> + else
> + HVM_DBG_LOG(DBG_LEVEL_0,
> + "msr %x is out of the control range"
> + "0x00000000-0x00001fff and
> 0xc0000000-0xc0001fff"
> + "RDMSR or WRMSR will cause a VM exit", msr);
> }
>
> /*
> @@ -764,6 +806,9 @@ static int construct_vmcs(struct vcpu *v)
> vmentry_ctl &= ~VM_ENTRY_LOAD_GUEST_PAT;
> }
>
> + /* Disable Virtualize x2APIC mode by default. */
> + v->arch.hvm_vmx.secondary_exec_control &=
> ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
> +
> /* Do not enable Monitor Trap Flag unless start single step debug */
> v->arch.hvm_vmx.exec_control &=
> ~CPU_BASED_MONITOR_TRAP_FLAG;
>
> @@ -800,18 +845,6 @@ static int construct_vmcs(struct vcpu *v)
> vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_EIP,
> MSR_TYPE_R | MSR_TYPE_W);
> if ( cpu_has_vmx_pat && paging_mode_hap(d) )
> vmx_disable_intercept_for_msr(v, MSR_IA32_CR_PAT,
> MSR_TYPE_R | MSR_TYPE_W);
> - if ( cpu_has_vmx_apic_reg_virt )
> - {
> - int msr;
> - for (msr = MSR_IA32_APICBASE_MSR; msr <=
> MSR_IA32_APICBASE_MSR + 0xff; msr++)
> - vmx_disable_intercept_for_msr(v, msr, MSR_TYPE_R);
> - }
> - if ( cpu_has_vmx_virtual_intr_delivery )
> - {
> - vmx_disable_intercept_for_msr(v, MSR_IA32_APICTPR_MSR,
> MSR_TYPE_W);
> - vmx_disable_intercept_for_msr(v, MSR_IA32_APICEOI_MSR,
> MSR_TYPE_W);
> - vmx_disable_intercept_for_msr(v, MSR_IA32_APICSELF_MSR,
> MSR_TYPE_W);
> - }
> }
>
> /* I/O access bitmap. */
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index 362273b..855a003 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -1887,18 +1887,61 @@ static void vmx_install_vlapic_mapping(struct
> vcpu *v)
>
> void vmx_vlapic_msr_changed(struct vcpu *v)
> {
> + int virtualize_x2apic_mode;
> struct vlapic *vlapic = vcpu_vlapic(v);
>
> - if ( !cpu_has_vmx_virtualize_apic_accesses )
> + virtualize_x2apic_mode = ( (cpu_has_vmx_apic_reg_virt ||
> + cpu_has_vmx_virtual_intr_delivery)
> &&
> +
> cpu_has_vmx_virtualize_x2apic_mode );
> +
> + if ( !cpu_has_vmx_virtualize_apic_accesses &&
> + !virtualize_x2apic_mode )
> return;
>
> vmx_vmcs_enter(v);
> v->arch.hvm_vmx.secondary_exec_control &=
> - ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
> + ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
> + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
> if ( !vlapic_hw_disabled(vlapic) &&
> (vlapic_base_address(vlapic) == APIC_DEFAULT_PHYS_BASE) )
> - v->arch.hvm_vmx.secondary_exec_control |=
> - SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
> + {
> + if ( virtualize_x2apic_mode && vlapic_x2apic_mode(vlapic) )
> + {
> + v->arch.hvm_vmx.secondary_exec_control |=
> + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
> + if ( cpu_has_vmx_apic_reg_virt )
> + {
> + for (int msr = MSR_IA32_APICBASE_MSR;
> + msr <= MSR_IA32_APICBASE_MSR + 0xff; msr++)
> + vmx_disable_intercept_for_msr(v, msr,
> MSR_TYPE_R);
> +
> + vmx_enable_intercept_for_msr(v,
> MSR_IA32_APICPPR_MSR,
> + MSR_TYPE_R);
> + vmx_enable_intercept_for_msr(v,
> MSR_IA32_APICTMICT_MSR,
> + MSR_TYPE_R);
> + vmx_enable_intercept_for_msr(v,
> MSR_IA32_APICTMCCT_MSR,
> + MSR_TYPE_R);
> + }
> + if ( cpu_has_vmx_virtual_intr_delivery )
> + {
> + vmx_disable_intercept_for_msr(v,
> MSR_IA32_APICTPR_MSR,
> + MSR_TYPE_W);
> + vmx_disable_intercept_for_msr(v,
> MSR_IA32_APICEOI_MSR,
> + MSR_TYPE_W);
> + vmx_disable_intercept_for_msr(v,
> MSR_IA32_APICSELF_MSR,
> + MSR_TYPE_W);
> + }
> + }
> + else
> + {
> + v->arch.hvm_vmx.secondary_exec_control |=
> + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
> + for (int msr = MSR_IA32_APICBASE_MSR;
> + msr <= MSR_IA32_APICBASE_MSR + 0xff; msr++)
> + vmx_enable_intercept_for_msr(v, msr,
> + MSR_TYPE_R |
> MSR_TYPE_W);
> + }
> + }
> vmx_update_secondary_exec_control(v);
> vmx_vmcs_exit(v);
> }
> diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h
> b/xen/include/asm-x86/hvm/vmx/vmcs.h
> index d4958c3..25c94a6 100644
> --- a/xen/include/asm-x86/hvm/vmx/vmcs.h
> +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
> @@ -184,6 +184,7 @@ extern u32 vmx_vmentry_control;
> #define SECONDARY_EXEC_ENABLE_EPT 0x00000002
> #define SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING 0x00000004
> #define SECONDARY_EXEC_ENABLE_RDTSCP 0x00000008
> +#define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010
> #define SECONDARY_EXEC_ENABLE_VPID 0x00000020
> #define SECONDARY_EXEC_WBINVD_EXITING 0x00000040
> #define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080
> @@ -244,6 +245,8 @@ extern bool_t cpu_has_vmx_ins_outs_instr_info;
> (vmx_secondary_exec_control &
> SECONDARY_EXEC_APIC_REGISTER_VIRT)
> #define cpu_has_vmx_virtual_intr_delivery \
> (vmx_secondary_exec_control &
> SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
> +#define cpu_has_vmx_virtualize_x2apic_mode \
> + (vmx_secondary_exec_control &
> SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)
>
> /* GUEST_INTERRUPTIBILITY_INFO flags. */
> #define VMX_INTR_SHADOW_STI 0x00000001
> @@ -434,6 +437,7 @@ enum vmcs_field {
> #define MSR_TYPE_R 1
> #define MSR_TYPE_W 2
> void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr, int type);
> +void vmx_enable_intercept_for_msr(struct vcpu *v, u32 msr, int type);
> int vmx_read_guest_msr(u32 msr, u64 *val);
> int vmx_write_guest_msr(u32 msr, u64 val);
> int vmx_add_guest_msr(u32 msr);
> diff --git a/xen/include/asm-x86/msr-index.h
> b/xen/include/asm-x86/msr-index.h
> index 5c1de6e..f500efd 100644
> --- a/xen/include/asm-x86/msr-index.h
> +++ b/xen/include/asm-x86/msr-index.h
> @@ -300,7 +300,10 @@
> #define MSR_IA32_APICBASE_BASE (0xfffff<<12)
> #define MSR_IA32_APICBASE_MSR 0x800
> #define MSR_IA32_APICTPR_MSR 0x808
> +#define MSR_IA32_APICPPR_MSR 0x80a
> #define MSR_IA32_APICEOI_MSR 0x80b
> +#define MSR_IA32_APICTMICT_MSR 0x838
> +#define MSR_IA32_APICTMCCT_MSR 0x839
> #define MSR_IA32_APICSELF_MSR 0x83f
>
> #define MSR_IA32_UCODE_WRITE 0x00000079
> --
> 1.7.1
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@xxxxxxxxxxxxx
> http://lists.xen.org/xen-devel
--- End Message ---
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|