[Xen-devel] [PATCH RFC 1/3] VMX: fix interaction of APIC-V and Viridian emulation
Viridian using a synthetic MSR for issuing EOI notifications bypasses
the normal in-processor handling, which would clear
GUEST_INTR_STATUS.SVI. Hence we need to do this in software in order
for future interrupts to get delivered.

Based on analysis by Yang Z Zhang <yang.z.zhang@xxxxxxxxx>.

Also get the other virtual interrupt delivery related actors in sync
with the newly added one in clearing the respective pointers (thus
avoiding the call from generic code) when the feature is unavailable,
instead of checking feature availability in the actors.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -386,6 +386,9 @@ void vlapic_EOI_set(struct vlapic *vlapi
 
     vlapic_clear_vector(vector, &vlapic->regs->data[APIC_ISR]);
 
+    if ( hvm_funcs.handle_eoi )
+        hvm_funcs.handle_eoi(vector);
+
     if ( vlapic_test_and_clear_vector(vector, &vlapic->regs->data[APIC_TMR]) )
         vioapic_update_EOI(vlapic_domain(vlapic), vector);
 
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1411,13 +1411,10 @@ static void vmx_set_info_guest(struct vc
 
 static void vmx_update_eoi_exit_bitmap(struct vcpu *v, u8 vector, u8 trig)
 {
-    if ( cpu_has_vmx_virtual_intr_delivery )
-    {
-        if (trig)
-            vmx_set_eoi_exit_bitmap(v, vector);
-        else
-            vmx_clear_eoi_exit_bitmap(v, vector);
-    }
+    if ( trig )
+        vmx_set_eoi_exit_bitmap(v, vector);
+    else
+        vmx_clear_eoi_exit_bitmap(v, vector);
 }
 
 static int vmx_virtual_intr_delivery_enabled(void)
@@ -1430,9 +1427,6 @@ static void vmx_process_isr(int isr, str
     unsigned long status;
     u8 old;
 
-    if ( !cpu_has_vmx_virtual_intr_delivery )
-        return;
-
     if ( isr < 0 )
         isr = 0;
 
@@ -1502,6 +1496,15 @@ static void vmx_sync_pir_to_irr(struct v
         vlapic_set_vector(i, &vlapic->regs->data[APIC_IRR]);
 }
 
+static void vmx_handle_eoi(u8 vector)
+{
+    unsigned long status = __vmread(GUEST_INTR_STATUS);
+
+    /* We need to clear the SVI field. */
+    status &= VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK;
+    __vmwrite(GUEST_INTR_STATUS, status);
+}
+
 static struct hvm_function_table __initdata vmx_function_table = {
     .name = "VMX",
     .cpu_up_prepare = vmx_cpu_up_prepare,
@@ -1554,6 +1557,7 @@ static struct hvm_function_table __initd
     .process_isr = vmx_process_isr,
     .deliver_posted_intr = vmx_deliver_posted_intr,
     .sync_pir_to_irr = vmx_sync_pir_to_irr,
+    .handle_eoi = vmx_handle_eoi,
     .nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
 };
 
@@ -1580,6 +1584,13 @@ const struct hvm_function_table * __init
         setup_ept_dump();
     }
+
+    if ( !cpu_has_vmx_virtual_intr_delivery )
+    {
+        vmx_function_table.update_eoi_exit_bitmap = NULL;
+        vmx_function_table.process_isr = NULL;
+        vmx_function_table.handle_eoi = NULL;
+    }
 
     if ( cpu_has_vmx_posted_intr_processing )
         alloc_direct_apic_vector(&posted_intr_vector, event_check_interrupt);
 
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -186,6 +186,7 @@ struct hvm_function_table {
     void (*process_isr)(int isr, struct vcpu *v);
     void (*deliver_posted_intr)(struct vcpu *v, u8 vector);
     void (*sync_pir_to_irr)(struct vcpu *v);
+    void (*handle_eoi)(u8 vector);
 
     /*Walk nested p2m */
     int (*nhvm_hap_walk_L1_p2m)(struct vcpu *v, paddr_t L2_gpa,
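For background on why vmx_handle_eoi() only masks the status word: the guest
interrupt status packs RVI (requesting vector) into the low byte and SVI
(in-service vector) into the high byte, and a hardware virtual EOI would have
cleared SVI automatically. The standalone sketch below is not part of the
patch; the 16-bit layout and the low-byte mask value are assumptions, and all
names in it are local to the sketch, not Xen symbols.

/*
 * Minimal sketch of the SVI clearing performed by the new handle_eoi hook.
 * Assumption: status is a 16-bit field, RVI in bits 0-7, SVI in bits 8-15,
 * so masking with 0x00ff drops the in-service vector but keeps RVI.
 */
#include <stdint.h>
#include <stdio.h>

#define INTR_STATUS_RVI_MASK 0x00ffu   /* keep only the requesting vector */

static uint16_t clear_svi(uint16_t intr_status)
{
    /* Drop SVI (high byte), preserve RVI (low byte). */
    return intr_status & INTR_STATUS_RVI_MASK;
}

int main(void)
{
    uint16_t status = 0x3121;  /* SVI = 0x31 in service, RVI = 0x21 pending */

    printf("before EOI: %#06x, after EOI: %#06x\n",
           status, clear_svi(status));
    return 0;
}

With SVI cleared this way after the Viridian MSR-based EOI, a vector still
pending in RVI can again be delivered to the guest, which is the effect the
commit message describes.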
Attachment: x86-VMX-Viridian-APICV.patch

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel