[Xen-devel] [PATCH 4/4] x86/capabilities: set/clear them using non-locked bitops
Their initialization happens without races, so there's no point in
using atomic (locked) operations to update the respective flags.
(There's one case where the clear_bit() was completely pointless.)
Also drop a neighboring stale comment from AMD code.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
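For background (not part of the patch): set_bit()/clear_bit() are atomic
read-modify-write operations -- LOCK-prefixed BTS/BTR on x86 -- while the
__set_bit()/__clear_bit() variants are plain RMWs that are only safe while
no other CPU can update the same word, which is exactly the situation
during boot-time capability setup. A rough sketch of the distinction
follows; the demo_* names are made up for illustration and this is not
Xen's actual implementation:

#define DEMO_BITS_PER_LONG (8 * sizeof(unsigned long))

/* Atomic flavour: safe against concurrent writers to the same word;
 * compiles to a locked instruction on x86. */
static inline void demo_set_bit(unsigned int nr, unsigned long *addr)
{
	__atomic_fetch_or(&addr[nr / DEMO_BITS_PER_LONG],
			  1UL << (nr % DEMO_BITS_PER_LONG),
			  __ATOMIC_SEQ_CST);
}

/* Non-atomic flavour: a plain load/OR/store, correct only while a
 * single CPU owns the word -- the boot-time situation the patch
 * relies on. */
static inline void demo___set_bit(unsigned int nr, unsigned long *addr)
{
	addr[nr / DEMO_BITS_PER_LONG] |= 1UL << (nr % DEMO_BITS_PER_LONG);
}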
--- a/xen/arch/x86/apic.c
+++ b/xen/arch/x86/apic.c
@@ -872,7 +872,7 @@ static int __init detect_init_APIC (void
return -1;
}
- set_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
+ __set_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
/* The BIOS may have set up the APIC at some other address */
@@ -1369,7 +1369,7 @@ void pmu_apic_interrupt(struct cpu_user_
int __init APIC_init_uniprocessor (void)
{
if (enable_local_apic < 0)
- clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
+ __clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
if (!smp_found_config && !cpu_has_apic) {
skip_ioapic_setup = 1;
@@ -1382,7 +1382,6 @@ int __init APIC_init_uniprocessor (void)
if (!cpu_has_apic &&
APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
boot_cpu_physical_apicid);
- clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
skip_ioapic_setup = 1;
return -1;
}
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -442,15 +442,9 @@ static void __devinit init_amd(struct cp
wrmsrl(MSR_K7_HWCR, value);
}
- /*
- * FIXME: We should handle the K5 here. Set up the write
- * range and also turn on MSR 83 bits 4 and 31 (write alloc,
- * no bus pipeline)
- */
-
/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
- clear_bit(0*32+31, c->x86_capability);
+ __clear_bit(0*32+31, c->x86_capability);
if (c->x86 == 0xf && c->x86_model < 0x14
&& cpu_has(c, X86_FEATURE_LAHF_LM)) {
@@ -459,7 +453,7 @@ static void __devinit init_amd(struct cp
* revision D (model = 0x14) and later actually support it.
* (AMD Erratum #110, docId: 25759).
*/
- clear_bit(X86_FEATURE_LAHF_LM, c->x86_capability);
+ __clear_bit(X86_FEATURE_LAHF_LM, c->x86_capability);
if (!rdmsr_amd_safe(0xc001100d, &l, &h))
wrmsr_amd_safe(0xc001100d, l, h & ~1);
}
@@ -482,10 +476,11 @@ static void __devinit init_amd(struct cp
if (c->extended_cpuid_level >= 0x80000007) {
c->x86_power = cpuid_edx(0x80000007);
if (c->x86_power & (1<<8)) {
- set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
- set_bit(X86_FEATURE_NONSTOP_TSC, c->x86_capability);
+ __set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
+ __set_bit(X86_FEATURE_NONSTOP_TSC, c->x86_capability);
if (c->x86 != 0x11)
- set_bit(X86_FEATURE_TSC_RELIABLE, c->x86_capability);
+ __set_bit(X86_FEATURE_TSC_RELIABLE,
+ c->x86_capability);
}
}
@@ -498,7 +493,7 @@ static void __devinit init_amd(struct cp
wrmsr_safe(MSR_K8_EXT_FEATURE_MASK, value);
rdmsrl(MSR_K8_EXT_FEATURE_MASK, value);
if (value & (1ULL << 54)) {
- set_bit(X86_FEATURE_TOPOEXT, c->x86_capability);
+ __set_bit(X86_FEATURE_TOPOEXT, c->x86_capability);
printk(KERN_INFO "CPU: Re-enabling disabled "
"Topology Extensions Support\n");
}
@@ -516,7 +511,7 @@ static void __devinit init_amd(struct cp
/* Pointless to use MWAIT on Family10 as it does not deep sleep. */
if (c->x86 >= 0x10 && !force_mwait)
- clear_bit(X86_FEATURE_MWAIT, c->x86_capability);
+ __clear_bit(X86_FEATURE_MWAIT, c->x86_capability);
if (!cpu_has_amd_erratum(c, AMD_ERRATUM_121))
opt_allow_unsafe = 1;
@@ -566,7 +561,7 @@ static void __devinit init_amd(struct cp
}
/* AMD CPUs do not support SYSENTER outside of legacy mode. */
- clear_bit(X86_FEATURE_SEP, c->x86_capability);
+ __clear_bit(X86_FEATURE_SEP, c->x86_capability);
if (c->x86 == 0x10) {
/* do this for boot cpu */
@@ -592,7 +587,7 @@ static void __devinit init_amd(struct cp
* running in deep C states.
*/
if ( opt_arat && c->x86 > 0x11 )
- set_bit(X86_FEATURE_ARAT, c->x86_capability);
+ __set_bit(X86_FEATURE_ARAT, c->x86_capability);
/*
* Prior to Family 0x14, perf counters are not reset during warm reboot.
--- a/xen/arch/x86/cpu/centaur.c
+++ b/xen/arch/x86/cpu/centaur.c
@@ -47,7 +47,7 @@ static void __init init_c3(struct cpuinf
if (c->x86 == 0x6 && c->x86_model >= 0xf) {
c->x86_cache_alignment = c->x86_clflush_size * 2;
- set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
+ __set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
}
get_model_name(c);
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -312,7 +312,7 @@ void __cpuinit identify_cpu(struct cpuin
/* Initialize xsave/xrstor features */
if ( !use_xsave )
- clear_bit(X86_FEATURE_XSAVE, boot_cpu_data.x86_capability);
+ __clear_bit(X86_FEATURE_XSAVE, boot_cpu_data.x86_capability);
if ( cpu_has_xsave )
xstate_init(c);
@@ -392,7 +392,7 @@ void __cpuinit detect_extended_topology(
if ( ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE) )
return;
- set_bit(X86_FEATURE_XTOPOLOGY, c->x86_capability);
+ __set_bit(X86_FEATURE_XTOPOLOGY, c->x86_capability);
initial_apicid = edx;
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -209,7 +209,7 @@ static void __devinit Intel_errata_worka
if (c->x86 == 6 && cpu_has_clflush &&
(c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
- set_bit(X86_FEATURE_CLFLUSH_MONITOR, c->x86_capability);
+ __set_bit(X86_FEATURE_CLFLUSH_MONITOR, c->x86_capability);
}
@@ -244,7 +244,7 @@ static void __devinit init_intel(struct
unsigned eax = cpuid_eax(10);
/* Check for version and the number of counters */
if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
- set_bit(X86_FEATURE_ARCH_PERFMON, c->x86_capability);
+ __set_bit(X86_FEATURE_ARCH_PERFMON, c->x86_capability);
}
if ( !cpu_has(c, X86_FEATURE_XTOPOLOGY) )
@@ -255,10 +255,11 @@ static void __devinit init_intel(struct
if (c == &boot_cpu_data && c->x86 == 6) {
if (probe_intel_cpuid_faulting())
- set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
+ __set_bit(X86_FEATURE_CPUID_FAULTING,
+ c->x86_capability);
} else if (boot_cpu_has(X86_FEATURE_CPUID_FAULTING)) {
BUG_ON(!probe_intel_cpuid_faulting());
- set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
+ __set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
}
if (!cpu_has_cpuid_faulting)
@@ -274,16 +275,16 @@ static void __devinit init_intel(struct
if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
(c->x86 == 0x6 && c->x86_model >= 0x0e))
- set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
+ __set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
if (cpuid_edx(0x80000007) & (1u<<8)) {
- set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
- set_bit(X86_FEATURE_NONSTOP_TSC, c->x86_capability);
- set_bit(X86_FEATURE_TSC_RELIABLE, c->x86_capability);
+ __set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
+ __set_bit(X86_FEATURE_NONSTOP_TSC, c->x86_capability);
+ __set_bit(X86_FEATURE_TSC_RELIABLE, c->x86_capability);
}
if ( opt_arat &&
( c->cpuid_level >= 0x00000006 ) &&
( cpuid_eax(0x00000006) & (1u<<2) ) )
- set_bit(X86_FEATURE_ARAT, c->x86_capability);
+ __set_bit(X86_FEATURE_ARAT, c->x86_capability);
}
static const struct cpu_dev intel_cpu_dev = {
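Regarding the one "completely pointless" clear_bit() the description
mentions: the call dropped from APIC_init_uniprocessor() sat on a path
guarded by !cpu_has_apic, and cpu_has_apic tests the very bit being
cleared, so the bit was provably 0 already. A self-contained schematic
(the demo_* names and the bit index are illustrative, not the literal
Xen code):

#include <stdio.h>

#define DEMO_FEATURE_APIC  9			/* illustrative bit index */
static unsigned long demo_caps[8];		/* stand-in for x86_capability */

static int demo_has_apic(void)
{
	unsigned int bits = 8 * sizeof(unsigned long);
	return !!(demo_caps[DEMO_FEATURE_APIC / bits] &
		  (1UL << (DEMO_FEATURE_APIC % bits)));
}

int main(void)
{
	if (!demo_has_apic()) {
		/* The removed clear_bit(X86_FEATURE_APIC, ...) lived here:
		 * the guard above already proved the bit is 0, so clearing
		 * it again changed nothing. */
		printf("bit already clear -- nothing to do\n");
	}
	return 0;
}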
Attachment:
x86-caps-use-non-atomic-bitops.patch