Re: [Xen-devel] [PATCH v2 2/4] x86: split PV dom0 builder to pv/dom0_builder.c



On Mon, Mar 20, 2017 at 04:40:49AM -0600, Jan Beulich wrote:
> >>> On 16.03.17 at 18:54, <wei.liu2@xxxxxxxxxx> wrote:
> > @@ -154,11 +155,11 @@ static void __init parse_dom0_nodes(const char *s)
> >  }
> >  custom_param("dom0_nodes", parse_dom0_nodes);
> >  
> > -static cpumask_t __initdata dom0_cpus;
> > +cpumask_t __initdata dom0_cpus;
> 
> I'd prefer if this variable remained static, and I think this is doable:
> 
> > -static struct vcpu *__init setup_dom0_vcpu(struct domain *d,
> > -                                           unsigned int vcpu_id,
> > -                                           unsigned int cpu)
> > +struct vcpu *__init dom0_setup_vcpu(struct domain *d,
> > +                                    unsigned int vcpu_id,
> > +                                    unsigned int cpu)
> >  {
> 
> It's needed only by the callers of this function afaics, and the
> cpumask_first() / cpumask_cycle() invocations could easily move
> into the function, with the callers updating their "cpu" variables
> from v->processor (with v assumed to be a variable to store the
> return value of the function, and checked to be non-NULL).
> 

Like this?

From 6b066814a424fdaf9ee0a1d2afc6b0765961e932 Mon Sep 17 00:00:00 2001
From: Wei Liu <wei.liu2@xxxxxxxxxx>
Date: Mon, 20 Mar 2017 13:05:08 +0000
Subject: [PATCH] x86: modify setup_dom0_vcpu to keep dom0_cpus static

We will later move the dom0 builders to a different directory. To avoid
having to make dom0_cpus visible outside its original file, modify
setup_dom0_vcpu to cycle through dom0_cpus internally instead of
relying on the callers to do that.

No functional change.

Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 xen/arch/x86/dom0_build.c | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)

diff --git a/xen/arch/x86/dom0_build.c b/xen/arch/x86/dom0_build.c
index 1c723c9ef1..102d3daea1 100644
--- a/xen/arch/x86/dom0_build.c
+++ b/xen/arch/x86/dom0_build.c
@@ -158,8 +158,9 @@ static cpumask_t __initdata dom0_cpus;
 
 static struct vcpu *__init setup_dom0_vcpu(struct domain *d,
                                            unsigned int vcpu_id,
-                                           unsigned int cpu)
+                                           unsigned int prev_cpu)
 {
+    unsigned int cpu = cpumask_cycle(prev_cpu, &dom0_cpus);
     struct vcpu *v = alloc_vcpu(d, vcpu_id, cpu);
 
     if ( v )
@@ -215,7 +216,8 @@ struct vcpu *__init alloc_dom0_vcpu0(struct domain *dom0)
         return NULL;
     dom0->max_vcpus = max_vcpus;
 
-    return setup_dom0_vcpu(dom0, 0, cpumask_first(&dom0_cpus));
+    return setup_dom0_vcpu(dom0, 0,
+                           cpumask_last(&dom0_cpus) /* so it wraps around to first pcpu */);
 }
 
 #ifdef CONFIG_SHADOW_PAGING
@@ -1155,8 +1157,11 @@ static int __init construct_dom0_pv(
     cpu = v->processor;
     for ( i = 1; i < d->max_vcpus; i++ )
     {
-        cpu = cpumask_cycle(cpu, &dom0_cpus);
-        setup_dom0_vcpu(d, i, cpu);
+        struct vcpu *p = setup_dom0_vcpu(d, i, cpu);
+        if ( !p )
+            panic("Cannot allocate vcpu%u for Dom0", i);
+
+        cpu = p->processor;
     }
 
     d->arch.paging.mode = 0;
@@ -1902,8 +1907,11 @@ static int __init pvh_setup_cpus(struct domain *d, paddr_t entry,
     cpu = v->processor;
     for ( i = 1; i < d->max_vcpus; i++ )
     {
-        cpu = cpumask_cycle(cpu, &dom0_cpus);
-        setup_dom0_vcpu(d, i, cpu);
+        struct vcpu *p = setup_dom0_vcpu(d, i, cpu);
+        if ( !p )
+            panic("Cannot allocate vcpu%u for Dom0", i);
+
+        cpu = p->processor;
     }
 
     rc = arch_set_info_hvm_guest(v, &cpu_ctx);
-- 
2.11.0
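
For reference (not part of the patch): the cpumask_last() call above relies
on cpumask_cycle() returning the next set bit after the given CPU and
wrapping back to the first set bit, so cycling from the last pCPU yields the
first one. Below is a toy, self-contained model of that wrap-around, using
plain-bitmask stand-ins for Xen's cpumask helpers (the mask_* names are made
up for this sketch, and a 64-bit unsigned long is assumed):

    #include <stdio.h>

    /* Simplified stand-ins for Xen's cpumask_first/last/cycle, with a
     * plain 64-bit unsigned long as the CPU mask. */
    static unsigned int mask_first(unsigned long m) { return __builtin_ctzl(m); }
    static unsigned int mask_last(unsigned long m)  { return 63 - __builtin_clzl(m); }
    static unsigned int mask_cycle(unsigned int n, unsigned long m)
    {
        /* Next set bit strictly above n, else wrap to the first set bit. */
        unsigned long above = (n < 63) ? m & (~0UL << (n + 1)) : 0;
        return above ? __builtin_ctzl(above) : mask_first(m);
    }

    int main(void)
    {
        unsigned long dom0_cpus = 0xa4; /* pCPUs 2, 5 and 7 */
        unsigned int i, cpu = mask_last(dom0_cpus); /* start at 7 ... */

        for ( i = 0; i < 4; i++ )
        {
            cpu = mask_cycle(cpu, dom0_cpus); /* ... first call wraps to 2 */
            printf("vcpu%u -> pcpu%u\n", i, cpu);
        }

        return 0;
    }

This prints vcpu0 -> pcpu2, vcpu1 -> pcpu5, vcpu2 -> pcpu7, vcpu3 -> pcpu2,
i.e. vCPU0 lands on the first pCPU in the mask, exactly as the old
cpumask_first() call did, and subsequent vCPUs cycle through the mask.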

