
[Xen-devel] [PATCH v7 07/12] x86/hvm/ioreq: use bool rather than bool_t

This patch changes the use of bool_t to bool in the ioreq server code. It
also fixes the incorrect indentation of a continuation line.

This patch is purely cosmetic. No semantic or functional change.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Reviewed-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
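[Not part of the patch; a minimal, self-contained sketch of the idiom being
adopted. The bool_t stand-in below is hypothetical, illustrating the old
integer-typedef style: unlike such a typedef, a C99 bool normalizes any
assigned value to 0 or 1, so direct truth tests and comparisons against
true cannot silently misbehave.]

#include <stdbool.h>
#include <stdio.h>

typedef char bool_t;              /* hypothetical stand-in for the old typedef */

int main(void)
{
    bool_t old_style = 2;         /* keeps the value 2; ( old_style == 1 ) is false */
    bool   new_style = 2;         /* normalized to true, i.e. 1 */

    printf("old=%d new=%d\n", old_style, new_style);  /* prints "old=2 new=1" */
    return 0;
}

[This is why assignments such as "sv->pending = true" in the diff below are
safe regardless of what expression produced the value.]
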
 xen/arch/x86/hvm/dm.c            |   2 +-
 xen/arch/x86/hvm/hvm.c           |   2 +-
 xen/arch/x86/hvm/io.c            |   4 +-
 xen/arch/x86/hvm/ioreq.c         | 100 +++++++++++++++++++--------------------
 xen/include/asm-x86/hvm/domain.h |   6 +--
 xen/include/asm-x86/hvm/ioreq.h  |  14 +++---
 6 files changed, 64 insertions(+), 64 deletions(-)

diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index f7cb883fec..87ef4b6ca9 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -409,7 +409,7 @@ static int dm_op(const struct dmop_args *op_args)
         if ( data->pad[0] || data->pad[1] || data->pad[2] )
             break;
 
-        rc = hvm_create_ioreq_server(d, curr_d->domain_id, 0,
+        rc = hvm_create_ioreq_server(d, curr_d->domain_id, false,
                                      data->handle_bufioreq, &data->id);
         break;
     }
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 58b4afa1d1..031d07baf0 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4361,7 +4361,7 @@ static int hvmop_get_param(
         {
             domid_t domid = d->arch.hvm_domain.params[HVM_PARAM_DM_DOMAIN];
 
-            rc = hvm_create_ioreq_server(d, domid, 1,
+            rc = hvm_create_ioreq_server(d, domid, true,
                                          HVM_IOREQSRV_BUFIOREQ_LEGACY, NULL);
             if ( rc != 0 && rc != -EEXIST )
                 goto out;
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index bf41954f59..1ddcaba52e 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -59,7 +59,7 @@ void send_timeoffset_req(unsigned long timeoff)
     if ( timeoff == 0 )
         return;
 
-    if ( hvm_broadcast_ioreq(&p, 1) != 0 )
+    if ( hvm_broadcast_ioreq(&p, true) != 0 )
         gprintk(XENLOG_ERR, "Unsuccessful timeoffset update\n");
 }
 
@@ -73,7 +73,7 @@ void send_invalidate_req(void)
         .data = ~0UL, /* flush all */
     };
 
-    if ( hvm_broadcast_ioreq(&p, 0) != 0 )
+    if ( hvm_broadcast_ioreq(&p, false) != 0 )
         gprintk(XENLOG_ERR, "Unsuccessful map-cache invalidate\n");
 }
 
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 69913cf3cd..f2e0b3f74a 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -43,7 +43,7 @@ static ioreq_t *get_ioreq(struct hvm_ioreq_server *s, struct vcpu *v)
     return &p->vcpu_ioreq[v->vcpu_id];
 }
 
-bool_t hvm_io_pending(struct vcpu *v)
+bool hvm_io_pending(struct vcpu *v)
 {
     struct domain *d = v->domain;
     struct hvm_ioreq_server *s;
@@ -59,11 +59,11 @@ bool_t hvm_io_pending(struct vcpu *v)
                               list_entry )
         {
             if ( sv->vcpu == v && sv->pending )
-                return 1;
+                return true;
         }
     }
 
-    return 0;
+    return false;
 }
 
 static void hvm_io_assist(struct hvm_ioreq_vcpu *sv, uint64_t data)
@@ -82,10 +82,10 @@ static void hvm_io_assist(struct hvm_ioreq_vcpu *sv, uint64_t data)
     msix_write_completion(v);
     vcpu_end_shutdown_deferral(v);
 
-    sv->pending = 0;
+    sv->pending = false;
 }
 
-static bool_t hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
+static bool hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
 {
     while ( sv->pending )
     {
@@ -112,16 +112,16 @@ static bool_t hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
             break;
         default:
             gdprintk(XENLOG_ERR, "Weird HVM iorequest state %u\n", state);
-            sv->pending = 0;
+            sv->pending = false;
             domain_crash(sv->vcpu->domain);
-            return 0; /* bail */
+            return false; /* bail */
         }
     }
 
-    return 1;
+    return true;
 }
 
-bool_t handle_hvm_io_completion(struct vcpu *v)
+bool handle_hvm_io_completion(struct vcpu *v)
 {
     struct domain *d = v->domain;
     struct hvm_vcpu_io *vio = &v->arch.hvm_vcpu.hvm_io;
@@ -141,7 +141,7 @@ bool_t handle_hvm_io_completion(struct vcpu *v)
             if ( sv->vcpu == v && sv->pending )
             {
                 if ( !hvm_wait_for_io(sv, get_ioreq(s, v)) )
-                    return 0;
+                    return false;
 
                 break;
             }
@@ -178,7 +178,7 @@ bool_t handle_hvm_io_completion(struct vcpu *v)
         break;
     }
 
-    return 1;
+    return true;
 }
 
 static int hvm_alloc_ioreq_gfn(struct domain *d, unsigned long *gfn)
@@ -208,7 +208,7 @@ static void hvm_free_ioreq_gfn(struct domain *d, unsigned long gfn)
         set_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask);
 }
 
-static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, bool_t buf)
+static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, bool buf)
 {
     struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
 
@@ -216,7 +216,7 @@ static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, bool_t buf)
 }
 
 static int hvm_map_ioreq_page(
-    struct hvm_ioreq_server *s, bool_t buf, unsigned long gfn)
+    struct hvm_ioreq_server *s, bool buf, unsigned long gfn)
 {
     struct domain *d = s->domain;
     struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
@@ -240,10 +240,10 @@ static int hvm_map_ioreq_page(
     return 0;
 }
 
-bool_t is_ioreq_server_page(struct domain *d, const struct page_info *page)
+bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
 {
     const struct hvm_ioreq_server *s;
-    bool_t found = 0;
+    bool found = false;
 
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
@@ -254,7 +254,7 @@ bool_t is_ioreq_server_page(struct domain *d, const struct page_info *page)
         if ( (s->ioreq.va && s->ioreq.page == page) ||
              (s->bufioreq.va && s->bufioreq.page == page) )
         {
-            found = 1;
+            found = true;
             break;
         }
     }
@@ -302,7 +302,7 @@ static void hvm_update_ioreq_evtchn(struct hvm_ioreq_server *s,
 }
 
 static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
-                                     bool_t is_default, struct vcpu *v)
+                                     bool is_default, struct vcpu *v)
 {
     struct hvm_ioreq_vcpu *sv;
     int rc;
@@ -417,22 +417,22 @@ static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
 {
     int rc;
 
-    rc = hvm_map_ioreq_page(s, 0, ioreq_gfn);
+    rc = hvm_map_ioreq_page(s, false, ioreq_gfn);
     if ( rc )
         return rc;
 
     if ( bufioreq_gfn != gfn_x(INVALID_GFN) )
-        rc = hvm_map_ioreq_page(s, 1, bufioreq_gfn);
+        rc = hvm_map_ioreq_page(s, true, bufioreq_gfn);
 
     if ( rc )
-        hvm_unmap_ioreq_page(s, 0);
+        hvm_unmap_ioreq_page(s, false);
 
     return rc;
 }
 
 static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
-                                        bool_t is_default,
-                                        bool_t handle_bufioreq)
+                                        bool is_default,
+                                        bool handle_bufioreq)
 {
     struct domain *d = s->domain;
     unsigned long ioreq_gfn = gfn_x(INVALID_GFN);
@@ -469,15 +469,15 @@ static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
 }
 
 static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s,
-                                         bool_t is_default)
+                                         bool is_default)
 {
     struct domain *d = s->domain;
-    bool_t handle_bufioreq = ( s->bufioreq.va != NULL );
+    bool handle_bufioreq = !!s->bufioreq.va;
 
     if ( handle_bufioreq )
-        hvm_unmap_ioreq_page(s, 1);
+        hvm_unmap_ioreq_page(s, true);
 
-    hvm_unmap_ioreq_page(s, 0);
+    hvm_unmap_ioreq_page(s, false);
 
     if ( !is_default )
     {
@@ -489,7 +489,7 @@ static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s,
 }
 
 static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s,
-                                            bool_t is_default)
+                                            bool is_default)
 {
     unsigned int i;
 
@@ -501,7 +501,7 @@ static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s,
 }
 
 static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
-                                            bool_t is_default)
+                                            bool is_default)
 {
     unsigned int i;
     int rc;
@@ -537,17 +537,17 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
     return 0;
 
  fail:
-    hvm_ioreq_server_free_rangesets(s, 0);
+    hvm_ioreq_server_free_rangesets(s, false);
 
     return rc;
 }
 
 static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
-                                    bool_t is_default)
+                                    bool is_default)
 {
     struct domain *d = s->domain;
     struct hvm_ioreq_vcpu *sv;
-    bool_t handle_bufioreq = ( s->bufioreq.va != NULL );
+    bool handle_bufioreq = !!s->bufioreq.va;
 
     spin_lock(&s->lock);
 
@@ -562,7 +562,7 @@ static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
             hvm_remove_ioreq_gfn(d, &s->bufioreq);
     }
 
-    s->enabled = 1;
+    s->enabled = true;
 
     list_for_each_entry ( sv,
                           &s->ioreq_vcpu_list,
@@ -574,10 +574,10 @@ static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
 }
 
 static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
-                                    bool_t is_default)
+                                     bool is_default)
 {
     struct domain *d = s->domain;
-    bool_t handle_bufioreq = ( s->bufioreq.va != NULL );
+    bool handle_bufioreq = !!s->bufioreq.va;
 
     spin_lock(&s->lock);
 
@@ -592,7 +592,7 @@ static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
         hvm_add_ioreq_gfn(d, &s->ioreq);
     }
 
-    s->enabled = 0;
+    s->enabled = false;
 
  done:
     spin_unlock(&s->lock);
@@ -600,7 +600,7 @@ static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
 
 static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
                                  struct domain *d, domid_t domid,
-                                 bool_t is_default, int bufioreq_handling,
+                                 bool is_default, int bufioreq_handling,
                                  ioservid_t id)
 {
     struct vcpu *v;
@@ -619,7 +619,7 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
         return rc;
 
     if ( bufioreq_handling == HVM_IOREQSRV_BUFIOREQ_ATOMIC )
-        s->bufioreq_atomic = 1;
+        s->bufioreq_atomic = true;
 
     rc = hvm_ioreq_server_setup_pages(
              s, is_default, bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF);
@@ -646,7 +646,7 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
 }
 
 static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s,
-                                    bool_t is_default)
+                                    bool is_default)
 {
     ASSERT(!s->enabled);
     hvm_ioreq_server_remove_all_vcpus(s);
@@ -681,7 +681,7 @@ static ioservid_t next_ioservid(struct domain *d)
 }
 
 int hvm_create_ioreq_server(struct domain *d, domid_t domid,
-                            bool_t is_default, int bufioreq_handling,
+                            bool is_default, int bufioreq_handling,
                             ioservid_t *id)
 {
     struct hvm_ioreq_server *s;
@@ -713,7 +713,7 @@ int hvm_create_ioreq_server(struct domain *d, domid_t domid,
     if ( is_default )
     {
         d->arch.hvm_domain.default_ioreq_server = s;
-        hvm_ioreq_server_enable(s, 1);
+        hvm_ioreq_server_enable(s, true);
     }
 
     if ( id )
@@ -756,11 +756,11 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
 
         p2m_set_ioreq_server(d, 0, s);
 
-        hvm_ioreq_server_disable(s, 0);
+        hvm_ioreq_server_disable(s, false);
 
         list_del(&s->list_entry);
 
-        hvm_ioreq_server_deinit(s, 0);
+        hvm_ioreq_server_deinit(s, false);
 
         domain_unpause(d);
 
@@ -968,7 +968,7 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
 }
 
 int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
-                               bool_t enabled)
+                               bool enabled)
 {
     struct list_head *entry;
     int rc;
@@ -992,9 +992,9 @@ int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
         domain_pause(d);
 
         if ( enabled )
-            hvm_ioreq_server_enable(s, 0);
+            hvm_ioreq_server_enable(s, false);
         else
-            hvm_ioreq_server_disable(s, 0);
+            hvm_ioreq_server_disable(s, false);
 
         domain_unpause(d);
 
@@ -1017,7 +1017,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
                           &d->arch.hvm_domain.ioreq_server.list,
                           list_entry )
     {
-        bool_t is_default = (s == d->arch.hvm_domain.default_ioreq_server);
+        bool is_default = (s == d->arch.hvm_domain.default_ioreq_server);
 
         rc = hvm_ioreq_server_add_vcpu(s, is_default, v);
         if ( rc )
@@ -1066,7 +1066,7 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
                                &d->arch.hvm_domain.ioreq_server.list,
                                list_entry )
     {
-        bool_t is_default = (s == d->arch.hvm_domain.default_ioreq_server);
+        bool is_default = (s == d->arch.hvm_domain.default_ioreq_server);
 
         hvm_ioreq_server_disable(s, is_default);
 
@@ -1347,7 +1347,7 @@ static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
 }
 
 int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p,
-                   bool_t buffered)
+                   bool buffered)
 {
     struct vcpu *curr = current;
     struct domain *d = curr->domain;
@@ -1398,7 +1398,7 @@ int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p,
             p->state = STATE_IOREQ_READY;
             notify_via_xen_event_channel(d, port);
 
-            sv->pending = 1;
+            sv->pending = true;
             return X86EMUL_RETRY;
         }
     }
@@ -1406,7 +1406,7 @@ int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p,
     return X86EMUL_UNHANDLEABLE;
 }
 
-unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool_t buffered)
+unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered)
 {
     struct domain *d = current->domain;
     struct hvm_ioreq_server *s;
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index ce536f75ef..7f128c05ff 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -45,7 +45,7 @@ struct hvm_ioreq_vcpu {
     struct list_head list_entry;
     struct vcpu      *vcpu;
     evtchn_port_t    ioreq_evtchn;
-    bool_t           pending;
+    bool             pending;
 };
 
 #define NR_IO_RANGE_TYPES (XEN_DMOP_IO_RANGE_PCI + 1)
@@ -69,8 +69,8 @@ struct hvm_ioreq_server {
     spinlock_t             bufioreq_lock;
     evtchn_port_t          bufioreq_evtchn;
     struct rangeset        *range[NR_IO_RANGE_TYPES];
-    bool_t                 enabled;
-    bool_t                 bufioreq_atomic;
+    bool                   enabled;
+    bool                   bufioreq_atomic;
 };
 
 /*
diff --git a/xen/include/asm-x86/hvm/ioreq.h b/xen/include/asm-x86/hvm/ioreq.h
index 43fbe115dc..1829fcf43e 100644
--- a/xen/include/asm-x86/hvm/ioreq.h
+++ b/xen/include/asm-x86/hvm/ioreq.h
@@ -19,12 +19,12 @@
 #ifndef __ASM_X86_HVM_IOREQ_H__
 #define __ASM_X86_HVM_IOREQ_H__
 
-bool_t hvm_io_pending(struct vcpu *v);
-bool_t handle_hvm_io_completion(struct vcpu *v);
-bool_t is_ioreq_server_page(struct domain *d, const struct page_info *page);
+bool hvm_io_pending(struct vcpu *v);
+bool handle_hvm_io_completion(struct vcpu *v);
+bool is_ioreq_server_page(struct domain *d, const struct page_info *page);
 
 int hvm_create_ioreq_server(struct domain *d, domid_t domid,
-                            bool_t is_default, int bufioreq_handling,
+                            bool is_default, int bufioreq_handling,
                             ioservid_t *id);
 int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id);
 int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
@@ -40,7 +40,7 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
 int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
                                      uint32_t type, uint32_t flags);
 int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
-                               bool_t enabled);
+                               bool enabled);
 
 int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v);
 void hvm_all_ioreq_servers_remove_vcpu(struct domain *d, struct vcpu *v);
@@ -51,8 +51,8 @@ int hvm_set_dm_domain(struct domain *d, domid_t domid);
 struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
                                                  ioreq_t *p);
 int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p,
-                   bool_t buffered);
-unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool_t buffered);
+                   bool buffered);
+unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered);
 
 void hvm_ioreq_init(struct domain *d);
 
-- 
2.11.0

