|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH 5/6] x86: Remove x86 prefixed names misc
Rename the following fields of struct cpuinfo_x86:
.x86        => .family
.x86_vendor => .vendor
.x86_model  => .model
.x86_mask   => .stepping
No functional change.
This work is part of making Xen safe for Intel families 18 and 19.
Signed-off-by: Kevin Lampis <kevin.lampis@xxxxxxxxxx>
---
xen/arch/x86/alternative.c | 8 ++++----
xen/arch/x86/apic.c | 2 +-
xen/arch/x86/cpuid.c | 2 +-
xen/arch/x86/dom0_build.c | 4 ++--
xen/arch/x86/domain.c | 12 ++++++------
xen/arch/x86/e820.c | 2 +-
xen/arch/x86/i8259.c | 2 +-
xen/arch/x86/irq.c | 4 ++--
xen/arch/x86/mpparse.c | 6 +++---
xen/arch/x86/msr.c | 4 ++--
xen/arch/x86/nmi.c | 12 ++++++------
xen/arch/x86/platform_hypercall.c | 8 ++++----
xen/arch/x86/pv/domain.c | 2 +-
xen/arch/x86/setup.c | 6 +++---
xen/arch/x86/traps-setup.c | 4 ++--
xen/arch/x86/tsx.c | 4 ++--
xen/drivers/passthrough/amd/iommu_init.c | 6 +++---
17 files changed, 44 insertions(+), 44 deletions(-)
diff --git a/xen/arch/x86/alternative.c b/xen/arch/x86/alternative.c
index 9f844241bc..5ed0c26725 100644
--- a/xen/arch/x86/alternative.c
+++ b/xen/arch/x86/alternative.c
@@ -89,7 +89,7 @@ static bool init_or_livepatch_read_mostly toolchain_nops_are_ideal;
static void __init arch_init_ideal_nops(void)
{
- switch ( boot_cpu_data.x86_vendor )
+ switch ( boot_cpu_data.vendor )
{
case X86_VENDOR_INTEL:
/*
@@ -97,10 +97,10 @@ static void __init arch_init_ideal_nops(void)
* actually perform better with the "k8_nops" than with the SDM-
* recommended NOPs.
*/
- if ( boot_cpu_data.x86 != 6 )
+ if ( boot_cpu_data.family != 6 )
break;
- switch ( boot_cpu_data.x86_model )
+ switch ( boot_cpu_data.model )
{
case 0x0f ... 0x1b:
case 0x1d ... 0x25:
@@ -111,7 +111,7 @@ static void __init arch_init_ideal_nops(void)
break;
case X86_VENDOR_AMD:
- if ( boot_cpu_data.x86 <= 0xf )
+ if ( boot_cpu_data.family <= 0xf )
ideal_nops = k8_nops;
break;
}
diff --git a/xen/arch/x86/apic.c b/xen/arch/x86/apic.c
index fb38be7ec3..d69ef9e16a 100644
--- a/xen/arch/x86/apic.c
+++ b/xen/arch/x86/apic.c
@@ -406,7 +406,7 @@ void __init init_bsp_APIC(void)
value |= APIC_SPIV_APIC_ENABLED;
/* This bit is reserved on P4/Xeon and should be cleared */
- if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 15))
+ if ((boot_cpu_data.vendor == X86_VENDOR_INTEL) && (boot_cpu_data.family == 15))
value &= ~APIC_SPIV_FOCUS_DISABLED;
else
value |= APIC_SPIV_FOCUS_DISABLED;
diff --git a/xen/arch/x86/cpuid.c b/xen/arch/x86/cpuid.c
index 5decfad8cd..6e9b15c9c3 100644
--- a/xen/arch/x86/cpuid.c
+++ b/xen/arch/x86/cpuid.c
@@ -437,7 +437,7 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
case 0xa:
/* TODO: Rework vPMU control in terms of toolstack choices. */
- if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+ if ( boot_cpu_data.vendor != X86_VENDOR_INTEL ||
!vpmu_available(v) )
*res = EMPTY_LEAF;
else
diff --git a/xen/arch/x86/dom0_build.c b/xen/arch/x86/dom0_build.c
index 0b467fd4a4..864dd9e53e 100644
--- a/xen/arch/x86/dom0_build.c
+++ b/xen/arch/x86/dom0_build.c
@@ -572,10 +572,10 @@ int __init dom0_setup_permissions(struct domain *d)
rc |= iomem_deny_access(d, mfn, mfn);
}
/* HyperTransport range. */
- if ( boot_cpu_data.x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON) )
+ if ( boot_cpu_data.vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON) )
{
mfn = paddr_to_pfn(1UL <<
- (boot_cpu_data.x86 < 0x17 ? 40 : paddr_bits));
+ (boot_cpu_data.family < 0x17 ? 40 : paddr_bits));
rc |= iomem_deny_access(d, mfn - paddr_to_pfn(3UL << 32), mfn - 1);
}
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 8eb1509782..b36e113724 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -319,7 +319,7 @@ void domain_cpu_policy_changed(struct domain *d)
if ( cpu_has_htt )
edx |= cpufeat_mask(X86_FEATURE_HTT);
- switch ( boot_cpu_data.x86_vendor )
+ switch ( boot_cpu_data.vendor )
{
case X86_VENDOR_INTEL:
/*
@@ -372,7 +372,7 @@ void domain_cpu_policy_changed(struct domain *d)
{
uint64_t mask = cpuidmask_defaults._6c;
- if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
+ if ( boot_cpu_data.vendor == X86_VENDOR_AMD )
mask &= (~0ULL << 32) | p->basic.raw[6].c;
d->arch.pv.cpuidmasks->_6c = mask;
@@ -387,7 +387,7 @@ void domain_cpu_policy_changed(struct domain *d)
* wholesale from the policy, but clamp the features in 7[0].ebx
* per usual.
*/
- if ( boot_cpu_data.x86_vendor &
+ if ( boot_cpu_data.vendor &
(X86_VENDOR_AMD | X86_VENDOR_HYGON) )
mask = (((uint64_t)p->feat.max_subleaf << 32) |
((uint32_t)mask & p->feat._7b0));
@@ -400,7 +400,7 @@ void domain_cpu_policy_changed(struct domain *d)
uint64_t mask = cpuidmask_defaults.Da1;
uint32_t eax = p->xstate.Da1;
- if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+ if ( boot_cpu_data.vendor == X86_VENDOR_INTEL )
mask &= (~0ULL << 32) | eax;
d->arch.pv.cpuidmasks->Da1 = mask;
@@ -427,7 +427,7 @@ void domain_cpu_policy_changed(struct domain *d)
if ( !(p->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) )
edx &= ~CPUID_COMMON_1D_FEATURES;
- switch ( boot_cpu_data.x86_vendor )
+ switch ( boot_cpu_data.vendor )
{
case X86_VENDOR_INTEL:
mask &= ((uint64_t)edx << 32) | ecx;
@@ -457,7 +457,7 @@ void domain_cpu_policy_changed(struct domain *d)
cpu_policy_updated(v);
/* If PMU version is zero then the guest doesn't have VPMU */
- if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ if ( boot_cpu_data.vendor == X86_VENDOR_INTEL &&
p->basic.pmu_version == 0 )
vpmu_destroy(v);
}
diff --git a/xen/arch/x86/e820.c b/xen/arch/x86/e820.c
index ca577c0bde..872208ab37 100644
--- a/xen/arch/x86/e820.c
+++ b/xen/arch/x86/e820.c
@@ -426,7 +426,7 @@ static uint64_t __init mtrr_top_of_ram(void)
/* By default we check only Intel systems. */
if ( e820_mtrr_clip == -1 )
- e820_mtrr_clip = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
+ e820_mtrr_clip = boot_cpu_data.vendor == X86_VENDOR_INTEL;
if ( !e820_mtrr_clip )
return 0;
diff --git a/xen/arch/x86/i8259.c b/xen/arch/x86/i8259.c
index 5c7e21a751..d2a08661f8 100644
--- a/xen/arch/x86/i8259.c
+++ b/xen/arch/x86/i8259.c
@@ -419,7 +419,7 @@ void __init init_IRQ(void)
* the interrupt.
*/
cpumask_copy(desc->arch.cpu_mask,
- (boot_cpu_data.x86_vendor &
+ (boot_cpu_data.vendor &
(X86_VENDOR_AMD | X86_VENDOR_HYGON) ? &cpumask_all
: cpumask_of(cpu)));
desc->arch.vector = LEGACY_VECTOR(irq);
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index cc2934bfca..739fc04bd1 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -2011,8 +2011,8 @@ void do_IRQ(struct cpu_user_regs *regs)
* interrupts have been delivered to CPUs
* different than the BSP.
*/
- (boot_cpu_data.x86_vendor & (X86_VENDOR_AMD |
- X86_VENDOR_HYGON))) &&
+ (boot_cpu_data.vendor & (X86_VENDOR_AMD |
+ X86_VENDOR_HYGON))) &&
bogus_8259A_irq(vector - FIRST_LEGACY_VECTOR)) )
{
printk("CPU%u: No irq handler for vector %02x (IRQ %d%s)\n",
diff --git a/xen/arch/x86/mpparse.c b/xen/arch/x86/mpparse.c
index c1171f7a3a..1b41aa573f 100644
--- a/xen/arch/x86/mpparse.c
+++ b/xen/arch/x86/mpparse.c
@@ -511,9 +511,9 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
/* Either an integrated APIC or a discrete 82489DX. */
processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
processor.mpc_cpuflag = CPU_ENABLED;
- processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
- (boot_cpu_data.x86_model << 4) |
- boot_cpu_data.x86_mask;
+ processor.mpc_cpufeature = (boot_cpu_data.family << 8) |
+ (boot_cpu_data.model << 4) |
+ boot_cpu_data.stepping;
processor.mpc_featureflag =
boot_cpu_data.x86_capability[FEATURESET_1d];
processor.mpc_reserved[0] = 0;
diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index ad75a2e108..6a97be59d5 100644
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -170,7 +170,7 @@ int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val)
case MSR_IA32_PLATFORM_ID:
if ( !(cp->x86_vendor & X86_VENDOR_INTEL) ||
- !(boot_cpu_data.x86_vendor & X86_VENDOR_INTEL) )
+ !(boot_cpu_data.vendor & X86_VENDOR_INTEL) )
goto gp_fault;
rdmsrl(MSR_IA32_PLATFORM_ID, *val);
break;
@@ -190,7 +190,7 @@ int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val)
* the guest.
*/
if ( !(cp->x86_vendor & (X86_VENDOR_INTEL | X86_VENDOR_AMD)) ||
- !(boot_cpu_data.x86_vendor &
+ !(boot_cpu_data.vendor &
(X86_VENDOR_INTEL | X86_VENDOR_AMD)) ||
rdmsr_safe(MSR_AMD_PATCHLEVEL, val) )
goto gp_fault;
diff --git a/xen/arch/x86/nmi.c b/xen/arch/x86/nmi.c
index a0c9194ff0..447fac19d3 100644
--- a/xen/arch/x86/nmi.c
+++ b/xen/arch/x86/nmi.c
@@ -216,12 +216,12 @@ void disable_lapic_nmi_watchdog(void)
{
if (nmi_active <= 0)
return;
- switch (boot_cpu_data.x86_vendor) {
+ switch (boot_cpu_data.vendor) {
case X86_VENDOR_AMD:
wrmsrns(MSR_K7_EVNTSEL0, 0);
break;
case X86_VENDOR_INTEL:
- switch (boot_cpu_data.x86) {
+ switch (boot_cpu_data.family) {
case 6:
wrmsrns(MSR_P6_EVNTSEL(0), 0);
break;
@@ -362,7 +362,7 @@ static void setup_p4_watchdog(void)
clear_msr_range(0x3F1, 2);
/* MSR 0x3F0 seems to have a default value of 0xFC00, but current
docs doesn't fully define it, so leave it alone for now. */
- if (boot_cpu_data.x86_model >= 0x3) {
+ if (boot_cpu_data.model >= 0x3) {
/* MSR_P4_IQ_ESCR0/1 (0x3ba/0x3bb) removed */
clear_msr_range(0x3A0, 26);
clear_msr_range(0x3BC, 3);
@@ -387,16 +387,16 @@ void setup_apic_nmi_watchdog(void)
if ( nmi_watchdog == NMI_NONE )
return;
- switch ( boot_cpu_data.x86_vendor )
+ switch ( boot_cpu_data.vendor )
{
case X86_VENDOR_AMD:
setup_k7_watchdog();
break;
case X86_VENDOR_INTEL:
- switch (boot_cpu_data.x86) {
+ switch (boot_cpu_data.family) {
case 6:
- setup_p6_watchdog((boot_cpu_data.x86_model < 14)
+ setup_p6_watchdog((boot_cpu_data.model < 14)
? P6_EVENT_CPU_CLOCKS_NOT_HALTED
: CORE_EVENT_CPU_CLOCKS_NOT_HALTED);
break;
diff --git a/xen/arch/x86/platform_hypercall.c b/xen/arch/x86/platform_hypercall.c
index cd4f0ae5e5..0431f875af 100644
--- a/xen/arch/x86/platform_hypercall.c
+++ b/xen/arch/x86/platform_hypercall.c
@@ -104,7 +104,7 @@ static bool msr_read_allowed(unsigned int msr)
* the platform doesn't actually support this MSR.
*/
case MSR_TEMPERATURE_TARGET:
- return boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ return boot_cpu_data.vendor == X86_VENDOR_INTEL &&
host_cpu_policy.basic.digital_temp_sensor;
case MSR_PACKAGE_THERM_STATUS:
@@ -688,9 +688,9 @@ ret_t do_platform_op(
const struct cpuinfo_x86 *c = &cpu_data[ver->xen_cpuid];
memcpy(ver->vendor_id, c->x86_vendor_id, sizeof(ver->vendor_id));
- ver->family = c->x86;
- ver->model = c->x86_model;
- ver->stepping = c->x86_mask;
+ ver->family = c->family;
+ ver->model = c->model;
+ ver->stepping = c->stepping;
}
ver->max_present = cpumask_last(&cpu_present_map);
diff --git a/xen/arch/x86/pv/domain.c b/xen/arch/x86/pv/domain.c
index d16583a745..0fd05c7059 100644
--- a/xen/arch/x86/pv/domain.c
+++ b/xen/arch/x86/pv/domain.c
@@ -174,7 +174,7 @@ static int __init cf_check pge_init(void)
{
if ( opt_global_pages == -1 )
opt_global_pages = !cpu_has_hypervisor ||
- !(boot_cpu_data.x86_vendor &
+ !(boot_cpu_data.vendor &
(X86_VENDOR_AMD | X86_VENDOR_HYGON));
return 0;
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 27c63d1d97..e5f1b4b787 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -1407,7 +1407,7 @@ void asmlinkage __init noreturn __start_xen(void)
* supervisor shadow stacks are now safe to use.
*/
bool cpu_has_bug_shstk_fracture =
- boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ boot_cpu_data.vendor == X86_VENDOR_INTEL &&
!boot_cpu_has(X86_FEATURE_CET_SSS);
/*
@@ -2038,10 +2038,10 @@ void asmlinkage __init noreturn __start_xen(void)
/* Do not enable SMEP/SMAP in PV shim on AMD and Hygon by default */
if ( opt_smep == -1 )
- opt_smep = !pv_shim || !(boot_cpu_data.x86_vendor &
+ opt_smep = !pv_shim || !(boot_cpu_data.vendor &
(X86_VENDOR_AMD | X86_VENDOR_HYGON));
if ( opt_smap == -1 )
- opt_smap = !pv_shim || !(boot_cpu_data.x86_vendor &
+ opt_smap = !pv_shim || !(boot_cpu_data.vendor &
(X86_VENDOR_AMD | X86_VENDOR_HYGON));
if ( !opt_smep )
diff --git a/xen/arch/x86/traps-setup.c b/xen/arch/x86/traps-setup.c
index d77be8f839..1aff0327dc 100644
--- a/xen/arch/x86/traps-setup.c
+++ b/xen/arch/x86/traps-setup.c
@@ -243,10 +243,10 @@ static void __init init_ler(void)
* Intel Pentium 4 is the only known CPU to not use the architectural MSR
* indicies.
*/
- switch ( boot_cpu_data.x86_vendor )
+ switch ( boot_cpu_data.vendor )
{
case X86_VENDOR_INTEL:
- if ( boot_cpu_data.x86 == 0xf )
+ if ( boot_cpu_data.family == 0xf )
{
msr = MSR_P4_LER_FROM_LIP;
break;
diff --git a/xen/arch/x86/tsx.c b/xen/arch/x86/tsx.c
index fe9f0ab4f7..38371487b9 100644
--- a/xen/arch/x86/tsx.c
+++ b/xen/arch/x86/tsx.c
@@ -148,8 +148,8 @@ void tsx_init(void)
{
printk(XENLOG_ERR
"FIRMWARE BUG: CPU %02x-%02x-%02x, ucode 0x%08x: RTM_ALWAYS_ABORT vs RTM mismatch\n",
- boot_cpu_data.x86, boot_cpu_data.x86_model,
- boot_cpu_data.x86_mask, this_cpu(cpu_sig).rev);
+ boot_cpu_data.family, boot_cpu_data.model,
+ boot_cpu_data.stepping, this_cpu(cpu_sig).rev);
setup_clear_cpu_cap(X86_FEATURE_RTM_ALWAYS_ABORT);
setup_clear_cpu_cap(X86_FEATURE_TSX_FORCE_ABORT);
diff --git a/xen/drivers/passthrough/amd/iommu_init.c b/xen/drivers/passthrough/amd/iommu_init.c
index 00d2c46cbc..d77dd85112 100644
--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -840,9 +840,9 @@ static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
{
u32 value;
- if ( (boot_cpu_data.x86 != 0x15) ||
- (boot_cpu_data.x86_model < 0x10) ||
- (boot_cpu_data.x86_model > 0x1f) )
+ if ( (boot_cpu_data.family != 0x15) ||
+ (boot_cpu_data.model < 0x10) ||
+ (boot_cpu_data.model > 0x1f) )
return;
pci_conf_write32(iommu->sbdf, 0xf0, 0x90);
--
2.51.1
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |