Re: [Xen-devel] [PATCH] Adjust number of domains in cpupools when destroying domain
On 12/11/14 10:40, Juergen Gross wrote:
> Commit bac6334b51d9bcfe57ecf4a4cb5288348fcf044a (move domain to
> cpupool0 before destroying it) introduced an error in the accounting
> of cpupools regarding the number of domains. The number of domains
> is not adjusted when a domain is moved to cpupool0 in domain_kill().
>
> Correct this by introducing a cpupool function doing the move
> instead of open coding it by calling sched_move_domain().
>
> Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
> Tested-by: Dietmar Hahn <dietmar.hahn@xxxxxxxxxxxxxx>
> ---
>  xen/common/cpupool.c    | 47 +++++++++++++++++++++++++++++++++--------------
>  xen/common/domain.c     |  2 +-
>  xen/include/xen/sched.h |  1 +
>  3 files changed, 35 insertions(+), 15 deletions(-)
>
> diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
> index 73249d3..c6e3869 100644
> --- a/xen/common/cpupool.c
> +++ b/xen/common/cpupool.c
> @@ -225,6 +225,35 @@ static int cpupool_destroy(struct cpupool *c)
> }
>
> /*
> + * Move domain to another cpupool
> + */
> +static int cpupool_move_domain_unlocked(struct domain *d, struct cpupool *c)
This isn't an unlocked function. It is strictly called with the
cpupool_lock held. Per prevailing style, it should be named
"__cpupool_move_domain()".
> +{
> +    int ret;
> +
> +    d->cpupool->n_dom--;
> +    ret = sched_move_domain(d, c);
> +    if ( ret )
> +        d->cpupool->n_dom++;
> +    else
> +        c->n_dom++;
> +
> +    return ret;
> +}
Newline here please.
Once these two issues are fixed, content-wise Reviewed-by: Andrew Cooper
<andrew.cooper3@xxxxxxxxxx>
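As a usage note on the new interface: callers outside cpupool.c, such as
domain_kill(), are expected to go through the locking wrapper rather than
calling sched_move_domain() directly, so both pools' n_dom counts get adjusted
under cpupool_lock. A rough sketch of such a caller (hypothetical function,
not the actual domain_kill()):

    /* Hypothetical caller -- illustrative only. */
    static int example_retire_domain(struct domain *d)
    {
        /*
         * cpupool_move_domain() takes cpupool_lock itself and keeps the
         * n_dom counts of the source pool and cpupool0 in sync; calling
         * sched_move_domain() directly is what left them stale.
         */
        if ( cpupool_move_domain(d, cpupool0) )
            return -EAGAIN;   /* caller may retry, as domain_kill() does */

        return 0;
    }
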
> +int cpupool_move_domain(struct domain *d, struct cpupool *c)
> +{
> +    int ret;
> +
> +    spin_lock(&cpupool_lock);
> +
> +    ret = cpupool_move_domain_unlocked(d, c);
> +
> +    spin_unlock(&cpupool_lock);
> +
> +    return ret;
> +}
> +
> +/*
>   * assign a specific cpu to a cpupool
>   * cpupool_lock must be held
>   */
> @@ -338,14 +367,9 @@ static int cpupool_unassign_cpu(struct cpupool *c, unsigned int cpu)
>                  ret = -EBUSY;
>                  break;
>              }
> -            c->n_dom--;
> -            ret = sched_move_domain(d, cpupool0);
> +            ret = cpupool_move_domain_unlocked(d, cpupool0);
>              if ( ret )
> -            {
> -                c->n_dom++;
>                  break;
> -            }
> -            cpupool0->n_dom++;
>          }
>          rcu_read_unlock(&domlist_read_lock);
>          if ( ret )
> @@ -613,16 +637,11 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
>                          d->domain_id, op->cpupool_id);
>          ret = -ENOENT;
>          spin_lock(&cpupool_lock);
> +
>          c = cpupool_find_by_id(op->cpupool_id);
>          if ( (c != NULL) && cpumask_weight(c->cpu_valid) )
> -        {
> -            d->cpupool->n_dom--;
> -            ret = sched_move_domain(d, c);
> -            if ( ret )
> -                d->cpupool->n_dom++;
> -            else
> -                c->n_dom++;
> -        }
> +            ret = cpupool_move_domain_unlocked(d, c);
> +
>          spin_unlock(&cpupool_lock);
>          cpupool_dprintk("cpupool move_domain(dom=%d)->pool=%d ret %d\n",
>                          d->domain_id, op->cpupool_id, ret);
> diff --git a/xen/common/domain.c b/xen/common/domain.c
> index a3f51ec..4a62c1d 100644
> --- a/xen/common/domain.c
> +++ b/xen/common/domain.c
> @@ -621,7 +621,7 @@ int domain_kill(struct domain *d)
>              rc = -EAGAIN;
>              break;
>          }
> -        if ( sched_move_domain(d, cpupool0) )
> +        if ( cpupool_move_domain(d, cpupool0) )
>              return -EAGAIN;
>          for_each_vcpu ( d, v )
>              unmap_vcpu_info(v);
> diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
> index c5157e6..46fc6e3 100644
> --- a/xen/include/xen/sched.h
> +++ b/xen/include/xen/sched.h
> @@ -871,6 +871,7 @@ struct cpupool *cpupool_get_by_id(int poolid);
> void cpupool_put(struct cpupool *pool);
> int cpupool_add_domain(struct domain *d, int poolid);
> void cpupool_rm_domain(struct domain *d);
> +int cpupool_move_domain(struct domain *d, struct cpupool *c);
> int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op);
> void schedule_dump(struct cpupool *c);
> extern void dump_runq(unsigned char key);
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel