
[Xen-devel] [PATCH 5/5] xen/vcpu: Handle xen_vcpu_setup() failure at boot



On PVH and PVHVM, on failure of the VCPUOP_register_vcpu_info hypercall
we limit the number of cpus to MAX_VIRT_CPUS. However, if this
failure occurs for a cpu beyond MAX_VIRT_CPUS, we continue
to function with more than MAX_VIRT_CPUS cpus.

This leads to problems at the next save/restore cycle: more than
MAX_VIRT_CPUS threads go into stop_machine(), but on coming back
up there is valid state for only the first MAX_VIRT_CPUS.

This patch pulls the excess CPUs down via cpu_down().

Reviewed-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
Signed-off-by: Ankur Arora <ankur.a.arora@xxxxxxxxxx>
---
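[ Not part of the patch; an illustrative note for reviewers. The reason
  cpus beyond MAX_VIRT_CPUS cannot stay online: once vcpu_info placement
  has failed, each cpu's vcpu_info falls back to the fixed array in the
  shared_info page, which has only MAX_VIRT_CPUS slots. Roughly, the
  xen_vcpu_info_reset() used below (introduced earlier in this series;
  body approximated here, not quoted from this mail) behaves like: ]

	/* Approximate sketch, for illustration only. */
	void xen_vcpu_info_reset(int cpu)
	{
		if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS)
			/* Fall back to the fixed slot in shared_info. */
			per_cpu(xen_vcpu, cpu) =
				&HYPERVISOR_shared_info->vcpu_info[xen_vcpu_nr(cpu)];
		else
			/* No usable vcpu_info; this cpu must not come up. */
			per_cpu(xen_vcpu, cpu) = NULL;
	}

[ So after a registration failure, any online cpu whose xen_vcpu_nr() is
  >= MAX_VIRT_CPUS has no backing vcpu_info, which is what
  xen_smp_cpus_done() below detects and offlines via cpu_down(). ]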
 arch/x86/xen/smp.c     | 31 +++++++++++++++++++++++++++++++
 arch/x86/xen/smp.h     |  2 ++
 arch/x86/xen/smp_hvm.c |  1 +
 arch/x86/xen/smp_pv.c  |  6 +-----
 4 files changed, 35 insertions(+), 5 deletions(-)

diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 82ac611f2fc1..e7f02eb73727 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -1,4 +1,5 @@
 #include <linux/smp.h>
+#include <linux/cpu.h>
 #include <linux/slab.h>
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
@@ -114,6 +115,36 @@ int xen_smp_intr_init(unsigned int cpu)
        return rc;
 }
 
+void __init xen_smp_cpus_done(unsigned int max_cpus)
+{
+       int cpu, rc, count = 0;
+
+       if (xen_hvm_domain())
+               native_smp_cpus_done(max_cpus);
+
+       if (xen_have_vcpu_info_placement)
+               return;
+
+       for_each_online_cpu(cpu) {
+               if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS)
+                       continue;
+
+               rc = cpu_down(cpu);
+
+               if (rc == 0) {
+                       /*
+                        * Reset vcpu_info so this cpu cannot be onlined again.
+                        */
+                       xen_vcpu_info_reset(cpu);
+                       count++;
+               } else {
+                       pr_warn("%s: failed to bring CPU %d down, error %d\n",
+                               __func__, cpu, rc);
+               }
+       }
+       WARN(count, "%s: brought %d CPUs offline\n", __func__, count);
+}
+
 void xen_smp_send_reschedule(int cpu)
 {
        xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
diff --git a/arch/x86/xen/smp.h b/arch/x86/xen/smp.h
index 8ebb6acca64a..87d3c76cba37 100644
--- a/arch/x86/xen/smp.h
+++ b/arch/x86/xen/smp.h
@@ -14,6 +14,8 @@ extern void xen_smp_intr_free(unsigned int cpu);
 int xen_smp_intr_init_pv(unsigned int cpu);
 void xen_smp_intr_free_pv(unsigned int cpu);
 
+void xen_smp_cpus_done(unsigned int max_cpus);
+
 void xen_smp_send_reschedule(int cpu);
 void xen_smp_send_call_function_ipi(const struct cpumask *mask);
 void xen_smp_send_call_function_single_ipi(int cpu);
diff --git a/arch/x86/xen/smp_hvm.c b/arch/x86/xen/smp_hvm.c
index 6c8a805819ff..fd60abedf658 100644
--- a/arch/x86/xen/smp_hvm.c
+++ b/arch/x86/xen/smp_hvm.c
@@ -71,4 +71,5 @@ void __init xen_hvm_smp_init(void)
        smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
        smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
        smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
+       smp_ops.smp_cpus_done = xen_smp_cpus_done;
 }
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index aae32535f4ec..1ea598e5f030 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -371,10 +371,6 @@ static int xen_pv_cpu_up(unsigned int cpu, struct task_struct *idle)
        return 0;
 }
 
-static void xen_pv_smp_cpus_done(unsigned int max_cpus)
-{
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 static int xen_pv_cpu_disable(void)
 {
@@ -469,7 +465,7 @@ static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
 static const struct smp_ops xen_smp_ops __initconst = {
        .smp_prepare_boot_cpu = xen_pv_smp_prepare_boot_cpu,
        .smp_prepare_cpus = xen_pv_smp_prepare_cpus,
-       .smp_cpus_done = xen_pv_smp_cpus_done,
+       .smp_cpus_done = xen_smp_cpus_done,
 
        .cpu_up = xen_pv_cpu_up,
        .cpu_die = xen_pv_cpu_die,
-- 
2.7.4

