[Xen-devel] [PATCH 6/6] x86/hvm: Move hvm_hypervisor_cpuid_leaf() handling into cpuid_hypervisor_leaves()
This reduces the net complexity of CPUID handling by having all
adjustments in the same place.  Remove the now-unused
hvm_funcs.hypervisor_cpuid_leaf() infrastructure.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Jun Nakajima <jun.nakajima@xxxxxxxxx>
CC: Kevin Tian <kevin.tian@xxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c        | 22 ----------------------
 xen/arch/x86/hvm/vmx/vmx.c    | 19 -------------------
 xen/arch/x86/traps.c          | 34 ++++++++++++++++++++++++++++++----
 xen/include/asm-x86/hvm/hvm.h |  7 -------
 4 files changed, 30 insertions(+), 52 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 862ab76..5ae8270 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3376,28 +3376,6 @@ unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len)
     return rc ? len : 0; /* fake a copy_from_user() return code */
 }
 
-void hvm_hypervisor_cpuid_leaf(uint32_t sub_idx,
-                               uint32_t *eax, uint32_t *ebx,
-                               uint32_t *ecx, uint32_t *edx)
-{
-    *eax = *ebx = *ecx = *edx = 0;
-    if ( hvm_funcs.hypervisor_cpuid_leaf )
-        hvm_funcs.hypervisor_cpuid_leaf(sub_idx, eax, ebx, ecx, edx);
-
-    if ( sub_idx == 0 )
-    {
-        /*
-         * Indicate that memory mapped from other domains (either grants or
-         * foreign pages) has valid IOMMU entries.
-         */
-        *eax |= XEN_HVM_CPUID_IOMMU_MAPPINGS;
-
-        /* Indicate presence of vcpu id and set it in ebx */
-        *eax |= XEN_HVM_CPUID_VCPU_ID_PRESENT;
-        *ebx = current->vcpu_id;
-    }
-}
-
 void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
                                    unsigned int *ecx, unsigned int *edx)
 {
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 5a73076..2a5551d 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1969,24 +1969,6 @@ static void vmx_handle_eoi(u8 vector)
     __vmwrite(GUEST_INTR_STATUS, status);
 }
 
-void vmx_hypervisor_cpuid_leaf(uint32_t sub_idx,
-                               uint32_t *eax, uint32_t *ebx,
-                               uint32_t *ecx, uint32_t *edx)
-{
-    if ( sub_idx != 0 )
-        return;
-    if ( cpu_has_vmx_apic_reg_virt )
-        *eax |= XEN_HVM_CPUID_APIC_ACCESS_VIRT;
-    /*
-     * We want to claim that x2APIC is virtualized if APIC MSR accesses are
-     * not intercepted. When all three of these are true both rdmsr and wrmsr
-     * in the guest will run without VMEXITs (see vmx_vlapic_msr_changed()).
-     */
-    if ( cpu_has_vmx_virtualize_x2apic_mode && cpu_has_vmx_apic_reg_virt &&
-         cpu_has_vmx_virtual_intr_delivery )
-        *eax |= XEN_HVM_CPUID_X2APIC_VIRT;
-}
-
 static void vmx_enable_msr_interception(struct domain *d, uint32_t msr)
 {
     struct vcpu *v;
@@ -2187,7 +2169,6 @@ static struct hvm_function_table __initdata vmx_function_table = {
     .sync_pir_to_irr      = vmx_sync_pir_to_irr,
     .handle_eoi           = vmx_handle_eoi,
     .nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
-    .hypervisor_cpuid_leaf = vmx_hypervisor_cpuid_leaf,
     .enable_msr_interception = vmx_enable_msr_interception,
     .is_singlestep_supported = vmx_is_singlestep_supported,
     .set_mode = vmx_set_mode,
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 9be9fe3..59fe98d 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -869,7 +869,8 @@ int wrmsr_hypervisor_regs(uint32_t idx, uint64_t val)
 int cpuid_hypervisor_leaves( uint32_t idx, uint32_t sub_idx,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
 {
-    struct domain *currd = current->domain;
+    struct vcpu *curr = current;
+    struct domain *currd = curr->domain;
     /* Optionally shift out of the way of Viridian architectural leaves. */
     uint32_t base = is_viridian_domain(currd) ? 0x40000100 : 0x40000000;
     uint32_t limit, dummy;
@@ -961,13 +962,38 @@ int cpuid_hypervisor_leaves( uint32_t idx, uint32_t sub_idx,
         }
         break;
 
-    case 4:
-        if ( !has_hvm_container_domain(currd) )
+    case 4: /* HVM hypervisor leaf. */
+        if ( !has_hvm_container_domain(currd) || sub_idx != 0 )
+        {
+            *eax = *ebx = *ecx = *edx = 0;
             break;
-        hvm_hypervisor_cpuid_leaf(sub_idx, eax, ebx, ecx, edx);
+        }
+
+        if ( cpu_has_vmx_apic_reg_virt )
+            *eax |= XEN_HVM_CPUID_APIC_ACCESS_VIRT;
+
+        /*
+         * We want to claim that x2APIC is virtualized if APIC MSR accesses
+         * are not intercepted. When all three of these are true both rdmsr
+         * and wrmsr in the guest will run without VMEXITs (see
+         * vmx_vlapic_msr_changed()).
+         */
+        if ( cpu_has_vmx_virtualize_x2apic_mode &&
+             cpu_has_vmx_apic_reg_virt &&
+             cpu_has_vmx_virtual_intr_delivery )
+            *eax |= XEN_HVM_CPUID_X2APIC_VIRT;
+
+        /*
+         * Indicate that memory mapped from other domains (either grants or
+         * foreign pages) has valid IOMMU entries.
+         */
+        *eax |= XEN_HVM_CPUID_IOMMU_MAPPINGS;
+
+        /* Indicate presence of vcpu id and set it in ebx */
+        *eax |= XEN_HVM_CPUID_VCPU_ID_PRESENT;
+        *ebx = curr->vcpu_id;
+
+        *ecx = *edx = 0;
         break;
 
     default:
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 8e366c0..1b9bbaa 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -207,10 +207,6 @@ struct hvm_function_table {
                                 uint8_t *p2m_acc, bool_t access_r,
                                 bool_t access_w, bool_t access_x);
 
-    void (*hypervisor_cpuid_leaf)(uint32_t sub_idx,
-                                  uint32_t *eax, uint32_t *ebx,
-                                  uint32_t *ecx, uint32_t *edx);
-
     void (*enable_msr_interception)(struct domain *d, uint32_t msr);
     bool_t (*is_singlestep_supported)(void);
     int (*set_mode)(struct vcpu *v, int mode);
@@ -406,9 +402,6 @@ bool hvm_set_guest_bndcfgs(struct vcpu *v, u64 val);
 #define has_viridian_apic_assist(d) \
     (is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_apic_assist))
 
-void hvm_hypervisor_cpuid_leaf(uint32_t sub_idx,
-                               uint32_t *eax, uint32_t *ebx,
-                               uint32_t *ecx, uint32_t *edx);
 void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
                unsigned int *ecx, unsigned int *edx);
 bool hvm_check_cpuid_faulting(struct vcpu *v);
-- 
2.1.4
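
For readers wanting to see the consumer side of the leaf being reworked
here, below is a minimal guest-side sketch of probing it. It is
illustrative only, not part of the patch: it assumes the Xen leaves sit
at the default 0x40000000 base (no Viridian offset, which would shift
them to 0x40000100), and the XEN_HVM_CPUID_* flag values mirror
xen/include/public/arch-x86/cpuid.h.

/*
 * Illustrative probe of the Xen HVM hypervisor leaf (base + 4) from
 * inside an HVM guest.  Assumes the default 0x40000000 base; flag
 * values mirror xen/include/public/arch-x86/cpuid.h.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define XEN_HVM_CPUID_APIC_ACCESS_VIRT (1u << 0) /* Virtualized APIC regs */
#define XEN_HVM_CPUID_X2APIC_VIRT      (1u << 1) /* Virtualized x2APIC    */
#define XEN_HVM_CPUID_IOMMU_MAPPINGS   (1u << 2) /* IOMMU mappings valid  */
#define XEN_HVM_CPUID_VCPU_ID_PRESENT  (1u << 3) /* vcpu id in EBX        */

static void cpuid(uint32_t idx, uint32_t sub_idx, uint32_t *eax,
                  uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    asm volatile ( "cpuid"
                   : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
                   : "0" (idx), "2" (sub_idx) );
}

int main(void)
{
    const uint32_t base = 0x40000000;
    uint32_t eax, ebx, ecx, edx;
    char signature[13] = { 0 };

    /* Leaf 0: EAX = max hypervisor leaf, EBX:ECX:EDX = "XenVMMXenVMM". */
    cpuid(base, 0, &eax, &ebx, &ecx, &edx);
    memcpy(&signature[0], &ebx, 4);
    memcpy(&signature[4], &ecx, 4);
    memcpy(&signature[8], &edx, 4);

    if ( strcmp(signature, "XenVMMXenVMM") || eax < base + 4 )
    {
        printf("Not Xen, or the HVM leaf is not exposed\n");
        return 1;
    }

    /* Leaf 4, subleaf 0: HVM feature flags in EAX, vcpu id in EBX. */
    cpuid(base + 4, 0, &eax, &ebx, &ecx, &edx);

    if ( eax & XEN_HVM_CPUID_VCPU_ID_PRESENT )
        printf("vcpu id: %u\n", ebx);
    if ( eax & XEN_HVM_CPUID_X2APIC_VIRT )
        printf("x2APIC MSR accesses run without vmexit\n");
    if ( eax & XEN_HVM_CPUID_IOMMU_MAPPINGS )
        printf("grant/foreign mappings have valid IOMMU entries\n");

    return 0;
}

A robust guest would instead scan candidate bases from 0x40000000 to
0x40010000 in steps of 0x100 to cope with the Viridian offset; that
loop is omitted here for brevity.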