
[Xen-devel] [PATCH v2] domctl: cleanup



- drop redundant "ret = 0" statements
- drop unnecessary braces
- eliminate a few single-use local variables
- move break statements inside case-specific braced scopes (see sketch below)
- fold the open-coded error exit in getdomaininfo into its existing label
- eliminate trailing whitespace

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
v2: Fix XEN_DOMCTL_setdebugging case and eliminate trailing whitespace
    (both pointed out by Andrew).
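
For reference, a minimal compilable sketch (hypothetical dispatcher and
command values, not part of the patch) of the brace/break convention this
cleanup converges on: braces only where a case needs its own locals, the
break kept inside that braced scope, and no redundant "ret = 0" where an
assignment already covers it.

    #include <errno.h>

    /* Illustrative dispatcher in the same style; not Xen code. */
    static long example_dispatch(unsigned int cmd, unsigned int arg)
    {
        long ret = 0;

        switch ( cmd )
        {
        case 0: /* single statement: no braces, no redundant "ret = 0" */
            ret = arg ? 0 : -EINVAL;
            break;

        case 1: /* needs a local: braced scope, with break inside it */
        {
            unsigned int half = arg >> 1;

            ret = half;
            break;
        }

        default:
            ret = -ENOSYS;
            break;
        }

        return ret;
    }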

--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -1,8 +1,8 @@
 /******************************************************************************
  * domctl.c
- * 
+ *
  * Domain management operations. For use by node control stack.
- * 
+ *
  * Copyright (c) 2002-2006, K A Fraser
  */
 
@@ -154,13 +154,13 @@ void getdomaininfo(struct domain *d, str
     u64 cpu_time = 0;
     int flags = XEN_DOMINF_blocked;
     struct vcpu_runstate_info runstate;
-    
+
     info->domain = d->domain_id;
     info->max_vcpu_id = XEN_INVALID_MAX_VCPU_ID;
     info->nr_online_vcpus = 0;
     info->ssidref = 0;
-    
-    /* 
+
+    /*
      * - domain is marked as blocked only if all its vcpus are blocked
      * - domain is marked as running if any of its vcpus is running
      */
@@ -237,7 +237,7 @@ static unsigned int default_vcpu0_locati
     }
 
     /*
-     * If we're on a HT system, we only auto-allocate to a non-primary HT. We 
+     * If we're on a HT system, we only auto-allocate to a non-primary HT. We
      * favour high numbered CPUs in the event of a tie.
      */
     cpumask_copy(&cpu_exclude_map, per_cpu(cpu_sibling_mask, 0));
@@ -517,8 +517,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
         }
 
         free_vcpu_guest_context(c.nat);
+        break;
     }
-    break;
 
     case XEN_DOMCTL_pausedomain:
         ret = -EINVAL;
@@ -531,11 +531,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
         break;
 
     case XEN_DOMCTL_resumedomain:
-    {
         domain_resume(d);
-        ret = 0;
-    }
-    break;
+        break;
 
     case XEN_DOMCTL_createdomain:
     {
@@ -608,8 +605,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
         op->domain = d->domain_id;
         copyback = 1;
         d = NULL;
+        break;
     }
-    break;
 
     case XEN_DOMCTL_max_vcpus:
     {
@@ -698,8 +695,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
 
     maxvcpu_out_novcpulock:
         domain_unpause(d);
+        break;
     }
-    break;
 
     case XEN_DOMCTL_destroydomain:
         ret = domain_kill(d);
@@ -715,14 +710,13 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
                                         &op->u.nodeaffinity.nodemap);
         if ( !ret )
             ret = domain_set_node_affinity(d, &new_affinity);
+        break;
     }
-    break;
+
     case XEN_DOMCTL_getnodeaffinity:
-    {
         ret = nodemask_to_xenctl_bitmap(&op->u.nodeaffinity.nodemap,
                                         &d->node_affinity);
-    }
-    break;
+        break;
 
     case XEN_DOMCTL_setvcpuaffinity:
     case XEN_DOMCTL_getvcpuaffinity:
@@ -831,18 +825,16 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
                 ret = cpumask_to_xenctl_bitmap(&vcpuaff->cpumap_soft,
                                                v->cpu_soft_affinity);
         }
+        break;
     }
-    break;
 
     case XEN_DOMCTL_scheduler_op:
-    {
         ret = sched_adjust(d, &op->u.scheduler_op);
         copyback = 1;
-    }
-    break;
+        break;
 
     case XEN_DOMCTL_getdomaininfo:
-    { 
+    {
         domid_t dom = op->domain;
 
         rcu_read_lock(&domlist_read_lock);
@@ -851,12 +843,9 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
             if ( d->domain_id >= dom )
                 break;
 
+        ret = -ESRCH;
         if ( d == NULL )
-        {
-            rcu_read_unlock(&domlist_read_lock);
-            ret = -ESRCH;
-            break;
-        }
+            goto getdomaininfo_out;
 
         ret = xsm_getdomaininfo(XSM_HOOK, d);
         if ( ret )
@@ -870,11 +859,11 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
     getdomaininfo_out:
         rcu_read_unlock(&domlist_read_lock);
         d = NULL;
+        break;
     }
-    break;
 
     case XEN_DOMCTL_getvcpucontext:
-    { 
+    {
         vcpu_guest_context_u c = { .nat = NULL };
         struct vcpu         *v;
 
@@ -919,11 +908,11 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
 
     getvcpucontext_out:
         xfree(c.nat);
+        break;
     }
-    break;
 
     case XEN_DOMCTL_getvcpuinfo:
-    { 
+    {
         struct vcpu   *v;
         struct vcpu_runstate_info runstate;
 
@@ -944,15 +933,12 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
         op->u.getvcpuinfo.cpu      = v->processor;
         ret = 0;
         copyback = 1;
+        break;
     }
-    break;
 
     case XEN_DOMCTL_max_mem:
     {
-        unsigned long new_max;
-
-        ret = -EINVAL;
-        new_max = op->u.max_mem.max_memkb >> (PAGE_SHIFT-10);
+        unsigned long new_max = op->u.max_mem.max_memkb >> (PAGE_SHIFT - 10);
 
         spin_lock(&d->page_alloc_lock);
         /*
@@ -961,31 +947,25 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
          * the meantime, while tot > max, all new allocations are disallowed.
          */
         d->max_pages = new_max;
-        ret = 0;
         spin_unlock(&d->page_alloc_lock);
+        break;
     }
-    break;
 
     case XEN_DOMCTL_setdomainhandle:
-    {
         memcpy(d->handle, op->u.setdomainhandle.handle,
                sizeof(xen_domain_handle_t));
-        ret = 0;
-    }
-    break;
+        break;
 
     case XEN_DOMCTL_setdebugging:
-    {
-        ret = -EINVAL;
-        if ( d == current->domain ) /* no domain_pause() */
-            break;
-
-        domain_pause(d);
-        d->debugger_attached = !!op->u.setdebugging.enable;
-        domain_unpause(d); /* causes guest to latch new status */
-        ret = 0;
-    }
-    break;
+        if ( unlikely(d == current->domain) ) /* no domain_pause() */
+            ret = -EINVAL;
+        else
+        {
+            domain_pause(d);
+            d->debugger_attached = !!op->u.setdebugging.enable;
+            domain_unpause(d); /* causes guest to latch new status */
+        }
+        break;
 
     case XEN_DOMCTL_irq_permission:
     {
@@ -1004,8 +984,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
             ret = irq_permit_access(d, irq);
         else
             ret = irq_deny_access(d, irq);
+        break;
     }
-    break;
 
     case XEN_DOMCTL_iomem_permission:
     {
@@ -1027,8 +1007,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
             ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
         if ( !ret )
             memory_type_changed(d);
+        break;
     }
-    break;
 
     case XEN_DOMCTL_memory_mapping:
     {
@@ -1079,15 +1059,12 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
         }
         /* Do this unconditionally to cover errors on above failure paths. */
         memory_type_changed(d);
+        break;
     }
-    break;
 
     case XEN_DOMCTL_settimeoffset:
-    {
         domain_set_time_offset(d, op->u.settimeoffset.time_offset_seconds);
-        ret = 0;
-    }
-    break;
+        break;
 
     case XEN_DOMCTL_set_target:
     {
@@ -1113,16 +1090,12 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
 
         /* Hold reference on @e until we destroy @d. */
         d->target = e;
-
-        ret = 0;
+        break;
     }
-    break;
 
     case XEN_DOMCTL_subscribe:
-    {
         d->suspend_evtchn = op->u.subscribe.port;
-    }
-    break;
+        break;
 
     case XEN_DOMCTL_mem_event_op:
         ret = mem_event_domctl(d, &op->u.mem_event_op,
@@ -1131,41 +1104,28 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
         break;
 
     case XEN_DOMCTL_disable_migrate:
-    {
         d->disable_migrate = op->u.disable_migrate.disable;
-    }
-    break;
+        break;
 
 #ifdef HAS_MEM_ACCESS
     case XEN_DOMCTL_set_access_required:
-    {
-        struct p2m_domain* p2m;
-
-        ret = -EPERM;
-        if ( current->domain == d )
-            break;
-
-        ret = 0;
-        p2m = p2m_get_hostp2m(d);
-        p2m->access_required = op->u.access_required.access_required;
-    }
-    break;
+        if ( unlikely(current->domain == d) )
+            ret = -EPERM;
+        else
+            p2m_get_hostp2m(d)->access_required =
+                op->u.access_required.access_required;
+        break;
 #endif
 
     case XEN_DOMCTL_set_virq_handler:
-    {
-        uint32_t virq = op->u.set_virq_handler.virq;
-        ret = set_global_virq_handler(d, virq);
-    }
-    break;
+        ret = set_global_virq_handler(d, op->u.set_virq_handler.virq);
+        break;
 
     case XEN_DOMCTL_set_max_evtchn:
-    {
         d->max_evtchn_port = min_t(unsigned int,
                                    op->u.set_max_evtchn.max_port,
                                    INT_MAX);
-    }
-    break;
+        break;
 
     case XEN_DOMCTL_setvnumainfo:
     {
@@ -1184,9 +1144,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
         d->vnuma = vnuma;
         write_unlock(&d->vnuma_rwlock);
 
-        ret = 0;
+        break;
     }
-    break;
 
     default:
         ret = arch_do_domctl(op, d, u_domctl);


