[Xen-devel] [PATCH v6 3/7] x86: initialize per socket cpu map
For each socket in the system, we create a separate bitmap to tag its
related CPUs. This per-socket bitmap is initialized at system start-up
and adjusted when a CPU is dynamically brought online or offline.

Signed-off-by: Dongxiao Xu <dongxiao.xu@xxxxxxxxx>
---
 xen/arch/x86/smp.c        |    7 ++++++-
 xen/arch/x86/smpboot.c    |   19 +++++++++++++++++--
 xen/include/asm-x86/smp.h |    2 ++
 xen/include/xen/cpumask.h |    1 +
 4 files changed, 26 insertions(+), 3 deletions(-)

diff --git a/xen/arch/x86/smp.c b/xen/arch/x86/smp.c
index 0433f30..7959447 100644
--- a/xen/arch/x86/smp.c
+++ b/xen/arch/x86/smp.c
@@ -283,6 +283,9 @@ void smp_send_call_function_mask(const cpumask_t *mask)
 
 void __stop_this_cpu(void)
 {
+    int cpu = smp_processor_id();
+    int socket = cpu_to_socket(cpu);
+
     ASSERT(!local_irq_is_enabled());
 
     disable_local_APIC();
@@ -296,7 +299,9 @@ void __stop_this_cpu(void)
     clts();
     asm volatile ( "fninit" );
 
-    cpumask_clear_cpu(smp_processor_id(), &cpu_online_map);
+    cpumask_clear_cpu(cpu, &cpu_online_map);
+    if ( socket >= 0 )
+        cpumask_clear_cpu(cpu, &socket_cpu_map[socket]);
 }
 
 static void stop_this_cpu(void *dummy)
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 9f81c7b..bfebc2b 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -59,6 +59,9 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_mask);
 cpumask_t cpu_online_map __read_mostly;
 EXPORT_SYMBOL(cpu_online_map);
 
+cpumask_t socket_cpu_map[MAX_NUM_SOCKETS] __read_mostly;
+EXPORT_SYMBOL(socket_cpu_map);
+
 struct cpuinfo_x86 cpu_data[NR_CPUS];
 
 u32 x86_cpu_to_apicid[NR_CPUS] __read_mostly =
@@ -319,6 +322,7 @@ void start_secondary(void *unused)
      * want to limit the things done here to the most necessary things.
      */
     unsigned int cpu = booting_cpu;
+    int socket;
 
     set_processor_id(cpu);
     set_current(idle_vcpu[cpu]);
@@ -381,6 +385,9 @@ void start_secondary(void *unused)
     cpumask_set_cpu(cpu, &cpu_online_map);
     unlock_vector_lock();
 
+    if ( (socket = cpu_to_socket(cpu)) >= 0 )
+        cpumask_set_cpu(cpu, &socket_cpu_map[socket]);
+
     init_percpu_time();
 
     /* We can take interrupts now: we're officially "up". */
@@ -788,8 +795,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 
 void __init smp_prepare_boot_cpu(void)
 {
-    cpumask_set_cpu(smp_processor_id(), &cpu_online_map);
-    cpumask_set_cpu(smp_processor_id(), &cpu_present_map);
+    int cpu = smp_processor_id();
+    int socket = cpu_to_socket(cpu);
+
+    cpumask_set_cpu(cpu, &cpu_online_map);
+    cpumask_set_cpu(cpu, &cpu_present_map);
+    if ( socket >= 0 )
+        cpumask_set_cpu(cpu, &socket_cpu_map[socket]);
 }
 
 static void
@@ -819,6 +831,7 @@ remove_siblinginfo(int cpu)
 void __cpu_disable(void)
 {
     int cpu = smp_processor_id();
+    int socket = cpu_to_socket(cpu);
 
     set_cpu_state(CPU_STATE_DYING);
 
@@ -836,6 +849,8 @@ void __cpu_disable(void)
     /* It's now safe to remove this processor from the online map */
     cpumask_clear_cpu(cpu, cpupool0->cpu_valid);
     cpumask_clear_cpu(cpu, &cpu_online_map);
+    if ( socket >= 0 )
+        cpumask_clear_cpu(cpu, &socket_cpu_map[socket]);
     fixup_irqs();
 
     if ( cpu_disable_scheduler(cpu) )
diff --git a/xen/include/asm-x86/smp.h b/xen/include/asm-x86/smp.h
index 81f8610..f47fa1b 100644
--- a/xen/include/asm-x86/smp.h
+++ b/xen/include/asm-x86/smp.h
@@ -57,6 +57,8 @@ int hard_smp_processor_id(void);
 
 void __stop_this_cpu(void);
 
+#define MAX_NUM_SOCKETS 256
+
 #endif /* !__ASSEMBLY__ */
 
 #endif
diff --git a/xen/include/xen/cpumask.h b/xen/include/xen/cpumask.h
index 850b4a2..883a52a 100644
--- a/xen/include/xen/cpumask.h
+++ b/xen/include/xen/cpumask.h
@@ -419,6 +419,7 @@ static inline void free_cpumask_var(cpumask_var_t mask)
 extern cpumask_t cpu_possible_map;
 extern cpumask_t cpu_online_map;
 extern cpumask_t cpu_present_map;
+extern cpumask_t socket_cpu_map[];
 
 #if NR_CPUS > 1
 #define num_online_cpus()    cpumask_weight(&cpu_online_map)
-- 
1.7.9.5
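
As a quick illustration of how a caller might consume the new map, here is a
minimal sketch (not part of the patch): cpus_on_socket() is a hypothetical
helper, and the include lines are indicative; only socket_cpu_map and
MAX_NUM_SOCKETS come from this series, while for_each_cpu() and cpu_online()
are existing cpumask helpers.

/* Hypothetical example only -- not part of this patch. */
#include <xen/cpumask.h>   /* for_each_cpu(), cpu_online() */
#include <asm/smp.h>       /* MAX_NUM_SOCKETS, socket_cpu_map (added here) */

/* Count the online CPUs tagged as belonging to 'socket'. */
static unsigned int cpus_on_socket(int socket)
{
    unsigned int cpu, count = 0;

    if ( socket < 0 || socket >= MAX_NUM_SOCKETS )
        return 0;

    for_each_cpu ( cpu, &socket_cpu_map[socket] )
        if ( cpu_online(cpu) )
            count++;

    return count;
}

Iterating with for_each_cpu() rather than AND-ing into a scratch mask avoids
putting a cpumask_t on the stack, which can be large on big NR_CPUS builds.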