[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH 1/2] xen/x86: Move irq allocation from Xen smp_op.cpu_up()



On Thu, Mar 17, 2016 at 09:33:32AM -0400, Boris Ostrovsky wrote:
> Commit ce0d3c0a6fb1 ("genirq: Revert sparse irq locking around
> __cpu_up() and move it to x86 for now") reverted irq locking
> introduced by commit a89941816726 ("hotplug: Prevent alloc/free
> of irq descriptors during cpu up/down") because of Xen allocating
> irqs in both of its cpu_up ops.
> 
> We can move those allocations into CPU notifiers so that the original
> patch can be reinstated.

Original being "hotplug: Prevent alloc/free..." ?

> 
> Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
> ---
>  arch/x86/xen/enlighten.c |   53 ++++++++++++++++++++++++++++++++++++++-------
>  arch/x86/xen/smp.c       |   45 +-------------------------------------
>  arch/x86/xen/smp.h       |    3 ++
>  3 files changed, 49 insertions(+), 52 deletions(-)
> 
> diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
> index 2c26108..d1a86db 100644
> --- a/arch/x86/xen/enlighten.c
> +++ b/arch/x86/xen/enlighten.c
> @@ -137,6 +137,8 @@ RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
>  __read_mostly int xen_have_vector_callback;
>  EXPORT_SYMBOL_GPL(xen_have_vector_callback);
>  
> +static struct notifier_block xen_cpu_notifier;
> +
>  /*
>   * Point at some empty memory to start with. We map the real shared_info
>   * page as soon as fixmap is up and running.
> @@ -1596,6 +1598,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
>       xen_initial_gdt = &per_cpu(gdt_page, 0);
>  
>       xen_smp_init();
> +     register_cpu_notifier(&xen_cpu_notifier);
>  
>  #ifdef CONFIG_ACPI_NUMA
>       /*
> @@ -1783,17 +1786,49 @@ static void __init init_hvm_pv_info(void)
>       xen_domain_type = XEN_HVM_DOMAIN;
>  }
>  
> -static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
> -                           void *hcpu)
> +static int xen_cpu_notify(struct notifier_block *self, unsigned long action,
> +                       void *hcpu)
>  {
>       int cpu = (long)hcpu;
> +     int rc;
> +
>       switch (action) {
>       case CPU_UP_PREPARE:
> -             xen_vcpu_setup(cpu);
> -             if (xen_have_vector_callback) {
> -                     if (xen_feature(XENFEAT_hvm_safe_pvclock))
> -                             xen_setup_timer(cpu);
> +             if (xen_hvm_domain()) {
> +                     /*
> +                      * This can happen if CPU was offlined earlier and
> +                      * offlining timed out in common_cpu_die().
> +                      */
> +                     if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
> +                             xen_smp_intr_free(cpu);
> +                             xen_uninit_lock_cpu(cpu);
> +                     }
> +
> +                     xen_vcpu_setup(cpu);
>               }
> +
> +             if (xen_pv_domain() ||
> +                 (xen_have_vector_callback &&
> +                  xen_feature(XENFEAT_hvm_safe_pvclock)))
> +                     xen_setup_timer(cpu);
> +
> +             rc = xen_smp_intr_init(cpu);
> +             if (rc) {
> +                     WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
> +                          cpu, rc);
> +                     return NOTIFY_BAD;
> +             }
> +
> +             break;
> +     case CPU_ONLINE:
> +             xen_init_lock_cpu(cpu);
> +             break;
> +     case CPU_UP_CANCELED:
> +             xen_smp_intr_free(cpu);

xen_uninit_lock_cpu ?


> +             if (xen_pv_domain() ||
> +                 (xen_have_vector_callback &&
> +                  xen_feature(XENFEAT_hvm_safe_pvclock)))
> +                     xen_teardown_timer(cpu);
>               break;
>       default:
>               break;
> @@ -1801,8 +1836,8 @@ static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
>       return NOTIFY_OK;
>  }
>  
> -static struct notifier_block xen_hvm_cpu_notifier = {
> -     .notifier_call  = xen_hvm_cpu_notify,
> +static struct notifier_block xen_cpu_notifier = {
> +     .notifier_call  = xen_cpu_notify,
>  };
>  
>  #ifdef CONFIG_KEXEC_CORE
> @@ -1834,7 +1869,7 @@ static void __init xen_hvm_guest_init(void)
>       if (xen_feature(XENFEAT_hvm_callback_vector))
>               xen_have_vector_callback = 1;
>       xen_hvm_smp_init();
> -     register_cpu_notifier(&xen_hvm_cpu_notifier);
> +     register_cpu_notifier(&xen_cpu_notifier);
>       xen_unplug_emulated_devices();
>       x86_init.irqs.intr_init = xen_init_IRQ;
>       xen_hvm_init_time_ops();
> diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
> index 719cf29..09d5cc0 100644
> --- a/arch/x86/xen/smp.c
> +++ b/arch/x86/xen/smp.c
> @@ -115,7 +115,7 @@ asmlinkage __visible void cpu_bringup_and_idle(int cpu)
>       cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
>  }
>  
> -static void xen_smp_intr_free(unsigned int cpu)
> +void xen_smp_intr_free(unsigned int cpu)
>  {
>       if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
>               unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
> @@ -159,7 +159,7 @@ static void xen_smp_intr_free(unsigned int cpu)
>               per_cpu(xen_pmu_irq, cpu).name = NULL;
>       }
>  };
> -static int xen_smp_intr_init(unsigned int cpu)
> +int xen_smp_intr_init(unsigned int cpu)
>  {
>       int rc;
>       char *resched_name, *callfunc_name, *debug_name, *pmu_name;
> @@ -468,8 +468,6 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
>       common_cpu_up(cpu, idle);
>  
>       xen_setup_runstate_info(cpu);
> -     xen_setup_timer(cpu);
> -     xen_init_lock_cpu(cpu);
>  
>       /*
>        * PV VCPUs are always successfully taken down (see 'while' loop
> @@ -488,10 +486,6 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
>  
>       xen_pmu_init(cpu);
>  
> -     rc = xen_smp_intr_init(cpu);
> -     if (rc)
> -             return rc;
> -
>       rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
>       BUG_ON(rc);
>  
> @@ -761,47 +755,12 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
>       xen_init_lock_cpu(0);
>  }
>  
> -static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
> -{
> -     int rc;
> -
> -     /*
> -      * This can happen if CPU was offlined earlier and
> -      * offlining timed out in common_cpu_die().
> -      */
> -     if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
> -             xen_smp_intr_free(cpu);
> -             xen_uninit_lock_cpu(cpu);
> -     }
> -
> -     /*
> -      * xen_smp_intr_init() needs to run before native_cpu_up()
> -      * so that IPI vectors are set up on the booting CPU before
> -      * it is marked online in native_cpu_up().
> -     */
> -     rc = xen_smp_intr_init(cpu);
> -     WARN_ON(rc);
> -     if (!rc)
> -             rc =  native_cpu_up(cpu, tidle);
> -
> -     /*
> -      * We must initialize the slowpath CPU kicker _after_ the native
> -      * path has executed. If we initialized it before none of the
> -      * unlocker IPI kicks would reach the booting CPU as the booting
> -      * CPU had not set itself 'online' in cpu_online_mask. That mask
> -      * is checked when IPIs are sent (on HVM at least).
> -      */
> -     xen_init_lock_cpu(cpu);
> -     return rc;
> -}
> -
>  void __init xen_hvm_smp_init(void)
>  {
>       if (!xen_have_vector_callback)
>               return;
>       smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
>       smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
> -     smp_ops.cpu_up = xen_hvm_cpu_up;
>       smp_ops.cpu_die = xen_cpu_die;
>       smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
>       smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
> diff --git a/arch/x86/xen/smp.h b/arch/x86/xen/smp.h
> index 963d62a..45faaf3 100644
> --- a/arch/x86/xen/smp.h
> +++ b/arch/x86/xen/smp.h
> @@ -8,6 +8,9 @@ extern void xen_send_IPI_allbutself(int vector);
>  extern void xen_send_IPI_all(int vector);
>  extern void xen_send_IPI_self(int vector);
>  
> +extern int xen_smp_intr_init(unsigned int cpu);
> +extern void xen_smp_intr_free(unsigned int cpu);
> +
>  #ifdef CONFIG_XEN_PVH
>  extern void xen_pvh_early_cpu_init(int cpu, bool entry);
>  #else
> -- 
> 1.7.1
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.