[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH for-4.17 v2] hvm/apic: repurpose the reporting of the APIC assist options
The current reporting of the hardware assisted APIC options is done by checking "virtualize APIC accesses" which is not very helpful, as that feature doesn't avoid a vmexit, instead it does provide some help in order to detect APIC MMIO accesses in vmexit processing. Repurpose the current reporting of xAPIC assistance to instead report such feature as present when there's support for "TPR shadow" and "APIC register virtualization" because in that case some xAPIC MMIO register accesses are handled directly by the hardware, without requiring a vmexit. For symmetry also change assisted x2APIC reporting to require "virtualize x2APIC mode" and "APIC register virtualization", dropping the option to also be reported when "virtual interrupt delivery" is available. Presence of the "virtual interrupt delivery" feature will be reported using a different option. Fixes: 2ce11ce249 ('x86/HVM: allow per-domain usage of hardware virtualized APIC') Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx> --- I find the logic in vmx_vlapic_msr_changed() hard to follow, but I don't want to rewrite the function logic at this point. --- Changes since v1: - Fix Viridian MSR tip conditions. 
--- xen/arch/x86/hvm/viridian/viridian.c | 2 +- xen/arch/x86/hvm/vmx/vmcs.c | 8 ++++---- xen/arch/x86/hvm/vmx/vmx.c | 25 ++++++++++++++++++------- xen/arch/x86/traps.c | 4 +--- 4 files changed, 24 insertions(+), 15 deletions(-) diff --git a/xen/arch/x86/hvm/viridian/viridian.c b/xen/arch/x86/hvm/viridian/viridian.c index 25dca93e8b..44eb3d0519 100644 --- a/xen/arch/x86/hvm/viridian/viridian.c +++ b/xen/arch/x86/hvm/viridian/viridian.c @@ -197,7 +197,7 @@ void cpuid_viridian_leaves(const struct vcpu *v, uint32_t leaf, res->a = CPUID4A_RELAX_TIMER_INT; if ( viridian_feature_mask(d) & HVMPV_hcall_remote_tlb_flush ) res->a |= CPUID4A_HCALL_REMOTE_TLB_FLUSH; - if ( !cpu_has_vmx_apic_reg_virt ) + if ( !has_assisted_xapic(d) ) res->a |= CPUID4A_MSR_BASED_APIC; if ( viridian_feature_mask(d) & HVMPV_hcall_ipi ) res->a |= CPUID4A_SYNTHETIC_CLUSTER_IPI; diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c index a1aca1ec04..7bb96e1a8e 100644 --- a/xen/arch/x86/hvm/vmx/vmcs.c +++ b/xen/arch/x86/hvm/vmx/vmcs.c @@ -1136,7 +1136,7 @@ static int construct_vmcs(struct vcpu *v) if ( !has_assisted_xapic(d) ) v->arch.hvm.vmx.secondary_exec_control &= - ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; + ~SECONDARY_EXEC_APIC_REGISTER_VIRT; if ( cpu_has_vmx_secondary_exec_control ) __vmwrite(SECONDARY_VM_EXEC_CONTROL, @@ -2156,10 +2156,10 @@ int __init vmx_vmcs_init(void) if ( !ret ) { /* Check whether hardware supports accelerated xapic and x2apic. 
*/ - assisted_xapic_available = cpu_has_vmx_virtualize_apic_accesses; + assisted_xapic_available = cpu_has_vmx_tpr_shadow && + cpu_has_vmx_apic_reg_virt; assisted_x2apic_available = cpu_has_vmx_virtualize_x2apic_mode && - (cpu_has_vmx_apic_reg_virt || - cpu_has_vmx_virtual_intr_delivery); + cpu_has_vmx_apic_reg_virt; register_keyhandler('v', vmcs_dump, "dump VT-x VMCSs", 1); } diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c index e624b415c9..bf0fe3355c 100644 --- a/xen/arch/x86/hvm/vmx/vmx.c +++ b/xen/arch/x86/hvm/vmx/vmx.c @@ -3405,25 +3405,29 @@ static void vmx_install_vlapic_mapping(struct vcpu *v) void vmx_vlapic_msr_changed(struct vcpu *v) { + bool virtualize_x2apic_mode = has_assisted_x2apic(v->domain) || + (cpu_has_vmx_virtualize_x2apic_mode && + cpu_has_vmx_virtual_intr_delivery); struct vlapic *vlapic = vcpu_vlapic(v); unsigned int msr; - if ( !has_assisted_xapic(v->domain) && - !has_assisted_x2apic(v->domain) ) + if ( !cpu_has_vmx_virtualize_apic_accesses && + !virtualize_x2apic_mode ) return; vmx_vmcs_enter(v); v->arch.hvm.vmx.secondary_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | - SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE); + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | + SECONDARY_EXEC_APIC_REGISTER_VIRT); if ( !vlapic_hw_disabled(vlapic) && (vlapic_base_address(vlapic) == APIC_DEFAULT_PHYS_BASE) ) { - if ( has_assisted_x2apic(v->domain) && vlapic_x2apic_mode(vlapic) ) + if ( virtualize_x2apic_mode && vlapic_x2apic_mode(vlapic) ) { v->arch.hvm.vmx.secondary_exec_control |= SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; - if ( cpu_has_vmx_apic_reg_virt ) + if ( has_assisted_x2apic(v->domain) ) { for ( msr = MSR_X2APIC_FIRST; msr <= MSR_X2APIC_LAST; msr++ ) @@ -3432,6 +3436,10 @@ void vmx_vlapic_msr_changed(struct vcpu *v) vmx_set_msr_intercept(v, MSR_X2APIC_PPR, VMX_MSR_R); vmx_set_msr_intercept(v, MSR_X2APIC_TMICT, VMX_MSR_R); vmx_set_msr_intercept(v, MSR_X2APIC_TMCCT, VMX_MSR_R); + + v->arch.hvm.vmx.secondary_exec_control |= + 
SECONDARY_EXEC_APIC_REGISTER_VIRT; + } if ( cpu_has_vmx_virtual_intr_delivery ) { @@ -3440,9 +3448,12 @@ void vmx_vlapic_msr_changed(struct vcpu *v) vmx_clear_msr_intercept(v, MSR_X2APIC_SELF, VMX_MSR_W); } } - else if ( has_assisted_xapic(v->domain) ) + else if ( vlapic_xapic_mode(vlapic) ) v->arch.hvm.vmx.secondary_exec_control |= - SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; + (cpu_has_vmx_virtualize_apic_accesses ? + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES : 0) | + (has_assisted_xapic(v->domain) ? + SECONDARY_EXEC_APIC_REGISTER_VIRT : 0); } if ( !(v->arch.hvm.vmx.secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE) ) diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c index 7207390118..5c0aabe8a3 100644 --- a/xen/arch/x86/traps.c +++ b/xen/arch/x86/traps.c @@ -1124,8 +1124,7 @@ void cpuid_hypervisor_leaves(const struct vcpu *v, uint32_t leaf, if ( !is_hvm_domain(d) || subleaf != 0 ) break; - if ( cpu_has_vmx_apic_reg_virt && - has_assisted_xapic(d) ) + if ( has_assisted_xapic(d) ) res->a |= XEN_HVM_CPUID_APIC_ACCESS_VIRT; /* @@ -1135,7 +1134,6 @@ void cpuid_hypervisor_leaves(const struct vcpu *v, uint32_t leaf, * vmx_vlapic_msr_changed()). */ if ( has_assisted_x2apic(d) && - cpu_has_vmx_apic_reg_virt && cpu_has_vmx_virtual_intr_delivery ) res->a |= XEN_HVM_CPUID_X2APIC_VIRT; -- 2.37.3
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |