[Xen-devel] [PATCH] domctl: cleanup
- drop redundant "ret = 0" statements
- drop unnecessary braces
- eliminate a few single use local variables
- move break statements inside case-specific braced scopes
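(Schematically, the four transformations look like the sketch below; XEN_DOMCTL_example, do_something(), and the other names are hypothetical, not taken from domctl.c. Dropping "ret = 0" is safe only because ret is already 0 when these cases are entered -- that is the patch's premise in calling the assignments redundant.)

    /* before */
    case XEN_DOMCTL_example:
    {
        int value = op->u.example.value;    /* single-use local */
        do_something(d, value);
        ret = 0;                            /* redundant: ret is already 0 */
    }
    break;

    /* after: braces and local dropped, ret left alone */
    case XEN_DOMCTL_example:
        do_something(d, op->u.example.value);
        break;

    /* where a case keeps its braces for locals, the break moves inside */
    case XEN_DOMCTL_example2:
    {
        struct example_state *s = lookup_state(op);
        ret = use_state(d, s);
        break;
    }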
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -517,8 +517,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
}
free_vcpu_guest_context(c.nat);
+ break;
}
- break;
case XEN_DOMCTL_pausedomain:
ret = -EINVAL;
@@ -531,11 +531,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
break;
case XEN_DOMCTL_resumedomain:
- {
domain_resume(d);
- ret = 0;
- }
- break;
+ break;
case XEN_DOMCTL_createdomain:
{
@@ -608,8 +605,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
op->domain = d->domain_id;
copyback = 1;
d = NULL;
+ break;
}
- break;
case XEN_DOMCTL_max_vcpus:
{
@@ -698,8 +695,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
maxvcpu_out_novcpulock:
domain_unpause(d);
+ break;
}
- break;
case XEN_DOMCTL_destroydomain:
ret = domain_kill(d);
@@ -715,14 +710,13 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
&op->u.nodeaffinity.nodemap);
if ( !ret )
ret = domain_set_node_affinity(d, &new_affinity);
+ break;
}
- break;
+
case XEN_DOMCTL_getnodeaffinity:
- {
ret = nodemask_to_xenctl_bitmap(&op->u.nodeaffinity.nodemap,
&d->node_affinity);
- }
- break;
+ break;
case XEN_DOMCTL_setvcpuaffinity:
case XEN_DOMCTL_getvcpuaffinity:
@@ -831,15 +825,13 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
ret = cpumask_to_xenctl_bitmap(&vcpuaff->cpumap_soft,
v->cpu_soft_affinity);
}
+ break;
}
- break;
case XEN_DOMCTL_scheduler_op:
- {
ret = sched_adjust(d, &op->u.scheduler_op);
copyback = 1;
- }
- break;
+ break;
case XEN_DOMCTL_getdomaininfo:
{
@@ -851,12 +843,9 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
if ( d->domain_id >= dom )
break;
+ ret = -ESRCH;
if ( d == NULL )
- {
- rcu_read_unlock(&domlist_read_lock);
- ret = -ESRCH;
- break;
- }
+ goto getdomaininfo_out;
ret = xsm_getdomaininfo(XSM_HOOK, d);
if ( ret )
@@ -870,8 +859,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
getdomaininfo_out:
rcu_read_unlock(&domlist_read_lock);
d = NULL;
+ break;
}
- break;
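(The getdomaininfo change consolidates the error path: the -ESRCH assignment moves ahead of the NULL check so the existing getdomaininfo_out label, which already does the rcu_read_unlock(), serves the failure case too instead of open-coding a second unlock. A standalone toy of the pattern, with lock()/lookup() as hypothetical stand-ins:)

    #include <stdio.h>
    #include <stdlib.h>

    static void lock(void)   { /* stands in for rcu_read_lock()   */ }
    static void unlock(void) { /* stands in for rcu_read_unlock() */ }

    static int lookup(int id)
    {
        int ret;
        void *d;

        lock();
        d = (id == 42) ? malloc(1) : NULL;  /* stand-in for the domain search */

        ret = -3 /* -ESRCH */;
        if ( d == NULL )
            goto out;                       /* shared unlock path */

        ret = 0;                            /* success path overwrites ret */
        free(d);
    out:
        unlock();
        return ret;
    }

    int main(void)
    {
        printf("%d %d\n", lookup(42), lookup(7));   /* prints: 0 -3 */
        return 0;
    }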
case XEN_DOMCTL_getvcpucontext:
{
@@ -919,8 +908,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
getvcpucontext_out:
xfree(c.nat);
+ break;
}
- break;
case XEN_DOMCTL_getvcpuinfo:
{
@@ -944,15 +933,12 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
op->u.getvcpuinfo.cpu = v->processor;
ret = 0;
copyback = 1;
+ break;
}
- break;
case XEN_DOMCTL_max_mem:
{
- unsigned long new_max;
-
- ret = -EINVAL;
- new_max = op->u.max_mem.max_memkb >> (PAGE_SHIFT-10);
+ unsigned long new_max = op->u.max_mem.max_memkb >> (PAGE_SHIFT - 10);
spin_lock(&d->page_alloc_lock);
/*
@@ -961,21 +947,16 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
* the meantime, while tot > max, all new allocations are disallowed.
*/
d->max_pages = new_max;
- ret = 0;
spin_unlock(&d->page_alloc_lock);
+ break;
}
- break;
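(Aside on the max_mem expression this hunk touches: max_memkb >> (PAGE_SHIFT - 10) converts kilobytes to pages, since PAGE_SHIFT is log2 of the page size in bytes and 10 is log2(1024). A runnable check, assuming 4 KiB pages:)

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* log2(4096), i.e. 4 KiB pages */

    int main(void)
    {
        unsigned long max_memkb = 1024 * 1024;                 /* 1 GiB in kB */
        unsigned long pages = max_memkb >> (PAGE_SHIFT - 10);  /* kB -> pages */
        printf("%lu kB = %lu pages\n", max_memkb, pages);      /* 262144 pages */
        return 0;
    }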
case XEN_DOMCTL_setdomainhandle:
- {
memcpy(d->handle, op->u.setdomainhandle.handle,
sizeof(xen_domain_handle_t));
- ret = 0;
- }
- break;
+ break;
case XEN_DOMCTL_setdebugging:
- {
ret = -EINVAL;
if ( d == current->domain ) /* no domain_pause() */
break;
@@ -983,9 +964,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
domain_pause(d);
d->debugger_attached = !!op->u.setdebugging.enable;
domain_unpause(d); /* causes guest to latch new status */
- ret = 0;
- }
- break;
+ break;
case XEN_DOMCTL_irq_permission:
{
@@ -1004,8 +983,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
ret = irq_permit_access(d, irq);
else
ret = irq_deny_access(d, irq);
+ break;
}
- break;
case XEN_DOMCTL_iomem_permission:
{
@@ -1027,8 +1006,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
if ( !ret )
memory_type_changed(d);
+ break;
}
- break;
case XEN_DOMCTL_memory_mapping:
{
@@ -1079,15 +1058,12 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
}
/* Do this unconditionally to cover errors on above failure paths. */
memory_type_changed(d);
+ break;
}
- break;
case XEN_DOMCTL_settimeoffset:
- {
domain_set_time_offset(d, op->u.settimeoffset.time_offset_seconds);
- ret = 0;
- }
- break;
+ break;
case XEN_DOMCTL_set_target:
{
@@ -1113,16 +1089,12 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
/* Hold reference on @e until we destroy @d. */
d->target = e;
-
- ret = 0;
+ break;
}
- break;
case XEN_DOMCTL_subscribe:
- {
d->suspend_evtchn = op->u.subscribe.port;
- }
- break;
+ break;
case XEN_DOMCTL_mem_event_op:
ret = mem_event_domctl(d, &op->u.mem_event_op,
@@ -1131,41 +1103,28 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
break;
case XEN_DOMCTL_disable_migrate:
- {
d->disable_migrate = op->u.disable_migrate.disable;
- }
- break;
+ break;
#ifdef HAS_MEM_ACCESS
case XEN_DOMCTL_set_access_required:
- {
- struct p2m_domain* p2m;
-
- ret = -EPERM;
if ( current->domain == d )
- break;
-
- ret = 0;
- p2m = p2m_get_hostp2m(d);
- p2m->access_required = op->u.access_required.access_required;
- }
- break;
+ ret = -EPERM;
+ else
+ p2m_get_hostp2m(d)->access_required =
+ op->u.access_required.access_required;
+ break;
#endif
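(The set_access_required rewrite folds the "pre-set error, break early, reset on success" shape into an if/else; the two are equivalent given ret == 0 on entry. Schematically, with cond and do_work() as hypothetical placeholders:)

    /* old shape: pre-set the error, bail out early, reset on success */
    ret = -EPERM;
    if ( cond )
        break;
    ret = 0;
    do_work();

    /* new shape: one branch, ret untouched on the success path */
    if ( cond )
        ret = -EPERM;
    else
        do_work();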
case XEN_DOMCTL_set_virq_handler:
- {
- uint32_t virq = op->u.set_virq_handler.virq;
- ret = set_global_virq_handler(d, virq);
- }
- break;
+ ret = set_global_virq_handler(d, op->u.set_virq_handler.virq);
+ break;
case XEN_DOMCTL_set_max_evtchn:
- {
d->max_evtchn_port = min_t(unsigned int,
op->u.set_max_evtchn.max_port,
INT_MAX);
- }
- break;
+ break;
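(The min_t(unsigned int, max_port, INT_MAX) expression clamps the guest-supplied limit so it still fits in a signed int. A standalone sketch -- the min_t below is a simplified stand-in for Xen's macro, using the same GNU C statement-expression style:)

    #include <limits.h>
    #include <stdio.h>

    #define min_t(type, x, y) ({ type _x = (x); type _y = (y); _x < _y ? _x : _y; })

    int main(void)
    {
        unsigned int requested = 0x90000000u;   /* hypothetical guest value > INT_MAX */
        unsigned int capped = min_t(unsigned int, requested, INT_MAX);
        printf("%u -> %u\n", requested, capped);   /* 2415919104 -> 2147483647 */
        return 0;
    }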
case XEN_DOMCTL_setvnumainfo:
{
@@ -1184,9 +1143,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
d->vnuma = vnuma;
write_unlock(&d->vnuma_rwlock);
- ret = 0;
+ break;
}
- break;
default:
ret = arch_do_domctl(op, d, u_domctl);
Attachment: domctl-cleanup.patch