[PATCH] x86/traps: Move cpuid_hypervisor_leaves() into cpuid.c
It's out of place in traps.c, and only has a single caller.  Make it static
inside cpuid.c.

No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
 xen/arch/x86/cpuid.c                 | 136 +++++++++++++++++++++++++++
 xen/arch/x86/include/asm/processor.h |   2 -
 xen/arch/x86/traps.c                 | 136 ---------------------------
 3 files changed, 136 insertions(+), 138 deletions(-)

diff --git a/xen/arch/x86/cpuid.c b/xen/arch/x86/cpuid.c
index 2a777436ee27..8dc68945f7ae 100644
--- a/xen/arch/x86/cpuid.c
+++ b/xen/arch/x86/cpuid.c
@@ -1,6 +1,8 @@
 #include <xen/sched.h>
 #include <xen/types.h>
+#include <xen/version.h>
 
+#include <public/arch-x86/cpuid.h>
 #include <public/hvm/params.h>
 
 #include <asm/cpu-policy.h>
@@ -32,6 +34,140 @@ bool recheck_cpu_features(unsigned int cpu)
     return okay;
 }
 
+static void cpuid_hypervisor_leaves(const struct vcpu *v, uint32_t leaf,
+                                    uint32_t subleaf, struct cpuid_leaf *res)
+{
+    const struct domain *d = v->domain;
+    const struct cpu_policy *p = d->arch.cpu_policy;
+    uint32_t base = is_viridian_domain(d) ? 0x40000100 : 0x40000000;
+    uint32_t idx = leaf - base;
+    unsigned int limit = is_viridian_domain(d) ? p->hv2_limit : p->hv_limit;
+
+    if ( limit == 0 )
+        /* Default number of leaves */
+        limit = XEN_CPUID_MAX_NUM_LEAVES;
+    else
+        /* Clamp toolstack value between 2 and MAX_NUM_LEAVES. */
+        limit = min(max(limit, 2u), XEN_CPUID_MAX_NUM_LEAVES + 0u);
+
+    if ( idx > limit )
+        return;
+
+    switch ( idx )
+    {
+    case 0:
+        res->a = base + limit; /* Largest leaf */
+        res->b = XEN_CPUID_SIGNATURE_EBX;
+        res->c = XEN_CPUID_SIGNATURE_ECX;
+        res->d = XEN_CPUID_SIGNATURE_EDX;
+        break;
+
+    case 1:
+        res->a = (xen_major_version() << 16) | xen_minor_version();
+        break;
+
+    case 2:
+        res->a = 1;            /* Number of hypercall-transfer pages */
+                               /* MSR base address */
+        res->b = is_viridian_domain(d) ? 0x40000200 : 0x40000000;
+        if ( is_pv_domain(d) ) /* Features */
+            res->c |= XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD;
+        break;
+
+    case 3: /* Time leaf. */
+        switch ( subleaf )
+        {
+        case 0: /* features */
+            res->a = ((d->arch.vtsc << 0) |
+                      (!!host_tsc_is_safe() << 1) |
+                      (!!boot_cpu_has(X86_FEATURE_RDTSCP) << 2));
+            res->b = d->arch.tsc_mode;
+            res->c = d->arch.tsc_khz;
+            res->d = d->arch.incarnation;
+            break;
+
+        case 1: /* scale and offset */
+        {
+            uint64_t offset;
+
+            if ( !d->arch.vtsc )
+                offset = d->arch.vtsc_offset;
+            else
+                /* offset already applied to value returned by virtual rdtscp */
+                offset = 0;
+            res->a = offset;
+            res->b = offset >> 32;
+            res->c = d->arch.vtsc_to_ns.mul_frac;
+            res->d = d->arch.vtsc_to_ns.shift;
+            break;
+        }
+
+        case 2: /* physical cpu_khz */
+            res->a = cpu_khz;
+            break;
+        }
+        break;
+
+    case 4: /* HVM hypervisor leaf. */
+        if ( !is_hvm_domain(d) || subleaf != 0 )
+            break;
+
+        if ( cpu_has_vmx_apic_reg_virt )
+            res->a |= XEN_HVM_CPUID_APIC_ACCESS_VIRT;
+
+        /*
+         * We want to claim that x2APIC is virtualized if APIC MSR accesses
+         * are not intercepted.  When all three of these are true both rdmsr
+         * and wrmsr in the guest will run without VMEXITs (see
+         * vmx_vlapic_msr_changed()).
+         */
+        if ( cpu_has_vmx_virtualize_x2apic_mode &&
+             cpu_has_vmx_apic_reg_virt &&
+             cpu_has_vmx_virtual_intr_delivery )
+            res->a |= XEN_HVM_CPUID_X2APIC_VIRT;
+
+        /*
+         * 1) Xen 4.10 and older was broken WRT grant maps requesting a DMA
+         * mapping, and forgot to honour the guest's request.
+         * 2) 4.11 (and presumably backports) fixed the bug, so the map
+         * hypercall actually did what the guest asked.
+         * 3) To work around the bug, guests must bounce buffer all DMA that
+         * would otherwise use a grant map, because it doesn't know whether the
+         * DMA is originating from an emulated or a real device.
+         * 4) This flag tells guests it is safe not to bounce-buffer all DMA to
+         * work around the bug.
+         */
+        res->a |= XEN_HVM_CPUID_IOMMU_MAPPINGS;
+
+        /* Indicate presence of vcpu id and set it in ebx */
+        res->a |= XEN_HVM_CPUID_VCPU_ID_PRESENT;
+        res->b = v->vcpu_id;
+
+        /* Indicate presence of domain id and set it in ecx */
+        res->a |= XEN_HVM_CPUID_DOMID_PRESENT;
+        res->c = d->domain_id;
+
+        /*
+         * Per-vCPU event channel upcalls are implemented and work
+         * correctly with PIRQs routed over event channels.
+         */
+        res->a |= XEN_HVM_CPUID_UPCALL_VECTOR;
+
+        break;
+
+    case 5: /* PV-specific parameters */
+        if ( is_hvm_domain(d) || subleaf != 0 )
+            break;
+
+        res->b = flsl(get_upper_mfn_bound()) + PAGE_SHIFT;
+        break;
+
+    default:
+        ASSERT_UNREACHABLE();
+        break;
+    }
+}
+
 void guest_cpuid(const struct vcpu *v, uint32_t leaf, uint32_t subleaf,
                  struct cpuid_leaf *res)
 {
diff --git a/xen/arch/x86/include/asm/processor.h b/xen/arch/x86/include/asm/processor.h
index d247ef8dd226..4f176bc575ef 100644
--- a/xen/arch/x86/include/asm/processor.h
+++ b/xen/arch/x86/include/asm/processor.h
@@ -472,8 +472,6 @@ struct stubs {
 DECLARE_PER_CPU(struct stubs, stubs);
 unsigned long alloc_stub_page(unsigned int cpu, unsigned long *mfn);
 
-void cpuid_hypervisor_leaves(const struct vcpu *v, uint32_t leaf,
-                             uint32_t subleaf, struct cpuid_leaf *res);
 int guest_rdmsr_xen(const struct vcpu *v, uint32_t idx, uint64_t *val);
 int guest_wrmsr_xen(struct vcpu *v, uint32_t idx, uint64_t val);
 
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index dca11a613dbd..91af814badf7 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -34,7 +33,6 @@
 #include <xen/domain_page.h>
 #include <xen/symbols.h>
 #include <xen/iocap.h>
-#include <xen/version.h>
 #include <xen/kexec.h>
 #include <xen/trace.h>
 #include <xen/paging.h>
@@ -65,7 +64,6 @@
 #include <asm/mc146818rtc.h>
 #include <asm/hpet.h>
 #include <asm/vpmu.h>
-#include <public/arch-x86/cpuid.h>
 #include <public/hvm/params.h>
 #include <asm/cpuid.h>
 #include <xsm/xsm.h>
@@ -1053,140 +1051,6 @@ int guest_wrmsr_xen(struct vcpu *v, uint32_t idx, uint64_t val)
     }
 }
 
-void cpuid_hypervisor_leaves(const struct vcpu *v, uint32_t leaf,
-                             uint32_t subleaf, struct cpuid_leaf *res)
-{
-    const struct domain *d = v->domain;
-    const struct cpu_policy *p = d->arch.cpu_policy;
-    uint32_t base = is_viridian_domain(d) ? 0x40000100 : 0x40000000;
-    uint32_t idx = leaf - base;
-    unsigned int limit = is_viridian_domain(d) ? p->hv2_limit : p->hv_limit;
-
-    if ( limit == 0 )
-        /* Default number of leaves */
-        limit = XEN_CPUID_MAX_NUM_LEAVES;
-    else
-        /* Clamp toolstack value between 2 and MAX_NUM_LEAVES. */
-        limit = min(max(limit, 2u), XEN_CPUID_MAX_NUM_LEAVES + 0u);
-
-    if ( idx > limit )
-        return;
-
-    switch ( idx )
-    {
-    case 0:
-        res->a = base + limit; /* Largest leaf */
-        res->b = XEN_CPUID_SIGNATURE_EBX;
-        res->c = XEN_CPUID_SIGNATURE_ECX;
-        res->d = XEN_CPUID_SIGNATURE_EDX;
-        break;
-
-    case 1:
-        res->a = (xen_major_version() << 16) | xen_minor_version();
-        break;
-
-    case 2:
-        res->a = 1;            /* Number of hypercall-transfer pages */
-                               /* MSR base address */
-        res->b = is_viridian_domain(d) ? 0x40000200 : 0x40000000;
-        if ( is_pv_domain(d) ) /* Features */
-            res->c |= XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD;
-        break;
-
-    case 3: /* Time leaf. */
-        switch ( subleaf )
-        {
-        case 0: /* features */
-            res->a = ((d->arch.vtsc << 0) |
-                      (!!host_tsc_is_safe() << 1) |
-                      (!!boot_cpu_has(X86_FEATURE_RDTSCP) << 2));
-            res->b = d->arch.tsc_mode;
-            res->c = d->arch.tsc_khz;
-            res->d = d->arch.incarnation;
-            break;
-
-        case 1: /* scale and offset */
-        {
-            uint64_t offset;
-
-            if ( !d->arch.vtsc )
-                offset = d->arch.vtsc_offset;
-            else
-                /* offset already applied to value returned by virtual rdtscp */
-                offset = 0;
-            res->a = offset;
-            res->b = offset >> 32;
-            res->c = d->arch.vtsc_to_ns.mul_frac;
-            res->d = d->arch.vtsc_to_ns.shift;
-            break;
-        }
-
-        case 2: /* physical cpu_khz */
-            res->a = cpu_khz;
-            break;
-        }
-        break;
-
-    case 4: /* HVM hypervisor leaf. */
-        if ( !is_hvm_domain(d) || subleaf != 0 )
-            break;
-
-        if ( cpu_has_vmx_apic_reg_virt )
-            res->a |= XEN_HVM_CPUID_APIC_ACCESS_VIRT;
-
-        /*
-         * We want to claim that x2APIC is virtualized if APIC MSR accesses
-         * are not intercepted.  When all three of these are true both rdmsr
-         * and wrmsr in the guest will run without VMEXITs (see
-         * vmx_vlapic_msr_changed()).
-         */
-        if ( cpu_has_vmx_virtualize_x2apic_mode &&
-             cpu_has_vmx_apic_reg_virt &&
-             cpu_has_vmx_virtual_intr_delivery )
-            res->a |= XEN_HVM_CPUID_X2APIC_VIRT;
-
-        /*
-         * 1) Xen 4.10 and older was broken WRT grant maps requesting a DMA
-         * mapping, and forgot to honour the guest's request.
-         * 2) 4.11 (and presumably backports) fixed the bug, so the map
-         * hypercall actually did what the guest asked.
-         * 3) To work around the bug, guests must bounce buffer all DMA that
-         * would otherwise use a grant map, because it doesn't know whether the
-         * DMA is originating from an emulated or a real device.
-         * 4) This flag tells guests it is safe not to bounce-buffer all DMA to
-         * work around the bug.
-         */
-        res->a |= XEN_HVM_CPUID_IOMMU_MAPPINGS;
-
-        /* Indicate presence of vcpu id and set it in ebx */
-        res->a |= XEN_HVM_CPUID_VCPU_ID_PRESENT;
-        res->b = v->vcpu_id;
-
-        /* Indicate presence of domain id and set it in ecx */
-        res->a |= XEN_HVM_CPUID_DOMID_PRESENT;
-        res->c = d->domain_id;
-
-        /*
-         * Per-vCPU event channel upcalls are implemented and work
-         * correctly with PIRQs routed over event channels.
-         */
-        res->a |= XEN_HVM_CPUID_UPCALL_VECTOR;
-
-        break;
-
-    case 5: /* PV-specific parameters */
-        if ( is_hvm_domain(d) || subleaf != 0 )
-            break;
-
-        res->b = flsl(get_upper_mfn_bound()) + PAGE_SHIFT;
-        break;
-
-    default:
-        ASSERT_UNREACHABLE();
-        break;
-    }
-}
-
 void asmlinkage do_invalid_op(struct cpu_user_regs *regs)
 {
     u8 bug_insn[2];
-- 
2.39.5
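
For reference (not part of the patch): the leaves built by
cpuid_hypervisor_leaves() are the ones a guest finds by scanning the
hypervisor CPUID range for the Xen signature, with the base shifted up to
0x40000100 when viridian leaves occupy 0x40000000.  Below is a minimal
guest-side sketch of that discovery, assuming an HVM guest and the
__cpuid_count() helper from GCC/Clang's <cpuid.h>; it is illustrative only,
not something the patch adds.

    /* Illustrative sketch: locate the Xen hypervisor CPUID leaves. */
    #include <cpuid.h>      /* __cpuid_count() */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* Hypervisor leaf bases are conventionally spaced 0x100 apart. */
        for ( uint32_t base = 0x40000000; base < 0x40010000; base += 0x100 )
        {
            uint32_t eax, ebx, ecx, edx;
            char sig[13] = "";

            __cpuid_count(base, 0, eax, ebx, ecx, edx);
            memcpy(&sig[0], &ebx, 4);
            memcpy(&sig[4], &ecx, 4);
            memcpy(&sig[8], &edx, 4);

            if ( !strcmp(sig, "XenVMMXenVMM") )
            {
                uint32_t max_leaf = eax;   /* "Largest leaf", case 0 above */

                /* Leaf base+1: major version in 31:16, minor in 15:0. */
                __cpuid_count(base + 1, 0, eax, ebx, ecx, edx);
                printf("Xen %u.%u, base %#x, max leaf %#x\n",
                       eax >> 16, eax & 0xffff, base, max_leaf);
                return 0;
            }
        }

        return 1;   /* Signature not found. */
    }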