[Xen-devel] [PATCH v4 06/12] x86/hvm/ioreq: rename .*pfn and .*gmfn to .*gfn



Since ioreq servers are only relevant to HVM guests, and all the names in
question unequivocally refer to guest frame numbers, rename them all to
.*gfn to avoid any confusion.

This patch is purely cosmetic. No semantic or functional change.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Reviewed-by: Wei Liu <wei.liu2@xxxxxxxxxx>
Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 tools/libs/devicemodel/core.c                   | 10 ++--
 tools/libs/devicemodel/include/xendevicemodel.h | 12 ++--
 xen/arch/x86/hvm/dm.c                           |  4 +-
 xen/arch/x86/hvm/hvm.c                          |  6 +-
 xen/arch/x86/hvm/ioreq.c                        | 74 ++++++++++++-------------
 xen/include/asm-x86/hvm/domain.h                |  4 +-
 xen/include/asm-x86/hvm/ioreq.h                 |  4 +-
 xen/include/public/hvm/dm_op.h                  | 20 +++----
 8 files changed, 67 insertions(+), 67 deletions(-)
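
Background for reviewers: Xen distinguishes guest frame numbers (gfns,
indices into a guest's pseudo-physical address space) from machine frame
numbers (mfns, indices into host memory), and the _gfn()/gfn_x() wrappers
visible in the hunks below convert between the typesafe and raw
representations. The following is a minimal illustrative sketch of that
convention; the struct-wrapped types are stand-ins for illustration, not
Xen's actual TYPE_SAFE definitions:

    #include <stdint.h>

    /* Illustrative stand-ins for Xen's typesafe frame-number types. */
    typedef struct { uint64_t val; } gfn_t;   /* guest frame number   */
    typedef struct { uint64_t val; } mfn_t;   /* machine frame number */

    #define PAGE_SHIFT 12

    /* Wrap a raw value (cf. Xen's _gfn()). */
    static inline gfn_t _gfn(uint64_t g) { return (gfn_t){ g }; }

    /* Unwrap back to a raw value (cf. Xen's gfn_x()). */
    static inline uint64_t gfn_x(gfn_t g) { return g.val; }

    /* All-ones sentinel, mirroring the INVALID_GFN checks below. */
    #define INVALID_GFN _gfn(~0ULL)

    /* A gfn is just a guest-physical address shifted down by the page
     * size; it says nothing about which host frame (mfn) backs it. */
    static inline gfn_t gaddr_to_gfn(uint64_t gaddr)
    {
        return _gfn(gaddr >> PAGE_SHIFT);
    }

Note that struct hvm_ioreq_page continues to store the frame number as a
raw unsigned long, which is why the hunks compare against
gfn_x(INVALID_GFN) rather than against INVALID_GFN directly.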

diff --git a/tools/libs/devicemodel/core.c b/tools/libs/devicemodel/core.c
index d7c6476006..fcb260d29b 100644
--- a/tools/libs/devicemodel/core.c
+++ b/tools/libs/devicemodel/core.c
@@ -174,7 +174,7 @@ int xendevicemodel_create_ioreq_server(
 
 int xendevicemodel_get_ioreq_server_info(
     xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
-    xen_pfn_t *ioreq_pfn, xen_pfn_t *bufioreq_pfn,
+    xen_pfn_t *ioreq_gfn, xen_pfn_t *bufioreq_gfn,
     evtchn_port_t *bufioreq_port)
 {
     struct xen_dm_op op;
@@ -192,11 +192,11 @@ int xendevicemodel_get_ioreq_server_info(
     if (rc)
         return rc;
 
-    if (ioreq_pfn)
-        *ioreq_pfn = data->ioreq_pfn;
+    if (ioreq_gfn)
+        *ioreq_gfn = data->ioreq_gfn;
 
-    if (bufioreq_pfn)
-        *bufioreq_pfn = data->bufioreq_pfn;
+    if (bufioreq_gfn)
+        *bufioreq_gfn = data->bufioreq_gfn;
 
     if (bufioreq_port)
         *bufioreq_port = data->bufioreq_port;
diff --git a/tools/libs/devicemodel/include/xendevicemodel.h b/tools/libs/devicemodel/include/xendevicemodel.h
index 580fad2f49..13216db04a 100644
--- a/tools/libs/devicemodel/include/xendevicemodel.h
+++ b/tools/libs/devicemodel/include/xendevicemodel.h
@@ -60,17 +60,17 @@ int xendevicemodel_create_ioreq_server(
  * @parm dmod a handle to an open devicemodel interface.
  * @parm domid the domain id to be serviced
  * @parm id the IOREQ Server id.
- * @parm ioreq_pfn pointer to a xen_pfn_t to receive the synchronous ioreq
- *                  gmfn
- * @parm bufioreq_pfn pointer to a xen_pfn_t to receive the buffered ioreq
- *                    gmfn
+ * @parm ioreq_gfn pointer to a xen_pfn_t to receive the synchronous ioreq
+ *                  gfn
+ * @parm bufioreq_gfn pointer to a xen_pfn_t to receive the buffered ioreq
+ *                    gfn
  * @parm bufioreq_port pointer to a evtchn_port_t to receive the buffered
  *                     ioreq event channel
  * @return 0 on success, -1 on failure.
  */
 int xendevicemodel_get_ioreq_server_info(
     xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
-    xen_pfn_t *ioreq_pfn, xen_pfn_t *bufioreq_pfn,
+    xen_pfn_t *ioreq_gfn, xen_pfn_t *bufioreq_gfn,
     evtchn_port_t *bufioreq_port);
 
 /**
@@ -168,7 +168,7 @@ int xendevicemodel_destroy_ioreq_server(
  * This function sets IOREQ Server state. An IOREQ Server
  * will not be passed emulation requests until it is in
  * the enabled state.
- * Note that the contents of the ioreq_pfn and bufioreq_pfn are
+ * Note that the contents of the ioreq_gfn and bufioreq_gfn are
  * not meaningful until the IOREQ Server is in the enabled state.
  *
  * @parm dmod a handle to an open devicemodel interface.
diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index 4cf6deedc7..f7cb883fec 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -426,8 +426,8 @@ static int dm_op(const struct dmop_args *op_args)
             break;
 
         rc = hvm_get_ioreq_server_info(d, data->id,
-                                       &data->ioreq_pfn,
-                                       &data->bufioreq_pfn,
+                                       &data->ioreq_gfn,
+                                       &data->bufioreq_gfn,
                                        &data->bufioreq_port);
         break;
     }
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 6cb903def5..58b4afa1d1 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4185,20 +4185,20 @@ static int hvmop_set_param(
             rc = -EINVAL;
         break;
     case HVM_PARAM_IOREQ_SERVER_PFN:
-        d->arch.hvm_domain.ioreq_gmfn.base = a.value;
+        d->arch.hvm_domain.ioreq_gfn.base = a.value;
         break;
     case HVM_PARAM_NR_IOREQ_SERVER_PAGES:
     {
         unsigned int i;
 
         if ( a.value == 0 ||
-             a.value > sizeof(d->arch.hvm_domain.ioreq_gmfn.mask) * 8 )
+             a.value > sizeof(d->arch.hvm_domain.ioreq_gfn.mask) * 8 )
         {
             rc = -EINVAL;
             break;
         }
         for ( i = 0; i < a.value; i++ )
-            set_bit(i, &d->arch.hvm_domain.ioreq_gmfn.mask);
+            set_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask);
 
         break;
     }
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 752976d16d..69913cf3cd 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -181,17 +181,17 @@ bool_t handle_hvm_io_completion(struct vcpu *v)
     return 1;
 }
 
-static int hvm_alloc_ioreq_gmfn(struct domain *d, unsigned long *gmfn)
+static int hvm_alloc_ioreq_gfn(struct domain *d, unsigned long *gfn)
 {
     unsigned int i;
     int rc;
 
     rc = -ENOMEM;
-    for ( i = 0; i < sizeof(d->arch.hvm_domain.ioreq_gmfn.mask) * 8; i++ )
+    for ( i = 0; i < sizeof(d->arch.hvm_domain.ioreq_gfn.mask) * 8; i++ )
     {
-        if ( test_and_clear_bit(i, &d->arch.hvm_domain.ioreq_gmfn.mask) )
+        if ( test_and_clear_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask) )
         {
-            *gmfn = d->arch.hvm_domain.ioreq_gmfn.base + i;
+            *gfn = d->arch.hvm_domain.ioreq_gfn.base + i;
             rc = 0;
             break;
         }
@@ -200,12 +200,12 @@ static int hvm_alloc_ioreq_gmfn(struct domain *d, unsigned long *gmfn)
     return rc;
 }
 
-static void hvm_free_ioreq_gmfn(struct domain *d, unsigned long gmfn)
+static void hvm_free_ioreq_gfn(struct domain *d, unsigned long gfn)
 {
-    unsigned int i = gmfn - d->arch.hvm_domain.ioreq_gmfn.base;
+    unsigned int i = gfn - d->arch.hvm_domain.ioreq_gfn.base;
 
-    if ( gmfn != gfn_x(INVALID_GFN) )
-        set_bit(i, &d->arch.hvm_domain.ioreq_gmfn.mask);
+    if ( gfn != gfn_x(INVALID_GFN) )
+        set_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask);
 }
 
 static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, bool_t buf)
@@ -216,7 +216,7 @@ static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, bool_t buf)
 }
 
 static int hvm_map_ioreq_page(
-    struct hvm_ioreq_server *s, bool_t buf, unsigned long gmfn)
+    struct hvm_ioreq_server *s, bool_t buf, unsigned long gfn)
 {
     struct domain *d = s->domain;
     struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
@@ -224,7 +224,7 @@ static int hvm_map_ioreq_page(
     void *va;
     int rc;
 
-    if ( (rc = prepare_ring_for_helper(d, gmfn, &page, &va)) )
+    if ( (rc = prepare_ring_for_helper(d, gfn, &page, &va)) )
         return rc;
 
     if ( (iorp->va != NULL) || d->is_dying )
@@ -235,7 +235,7 @@ static int hvm_map_ioreq_page(
 
     iorp->va = va;
     iorp->page = page;
-    iorp->gmfn = gmfn;
+    iorp->gfn = gfn;
 
     return 0;
 }
@@ -264,23 +264,23 @@ bool_t is_ioreq_server_page(struct domain *d, const struct page_info *page)
     return found;
 }
 
-static void hvm_remove_ioreq_gmfn(
+static void hvm_remove_ioreq_gfn(
     struct domain *d, struct hvm_ioreq_page *iorp)
 {
-    if ( guest_physmap_remove_page(d, _gfn(iorp->gmfn),
+    if ( guest_physmap_remove_page(d, _gfn(iorp->gfn),
                                    _mfn(page_to_mfn(iorp->page)), 0) )
         domain_crash(d);
     clear_page(iorp->va);
 }
 
-static int hvm_add_ioreq_gmfn(
+static int hvm_add_ioreq_gfn(
     struct domain *d, struct hvm_ioreq_page *iorp)
 {
     int rc;
 
     clear_page(iorp->va);
 
-    rc = guest_physmap_add_page(d, _gfn(iorp->gmfn),
+    rc = guest_physmap_add_page(d, _gfn(iorp->gfn),
                                 _mfn(page_to_mfn(iorp->page)), 0);
     if ( rc == 0 )
         paging_mark_dirty(d, _mfn(page_to_mfn(iorp->page)));
@@ -412,17 +412,17 @@ static void hvm_ioreq_server_remove_all_vcpus(struct hvm_ioreq_server *s)
 }
 
 static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
-                                      unsigned long ioreq_pfn,
-                                      unsigned long bufioreq_pfn)
+                                      unsigned long ioreq_gfn,
+                                      unsigned long bufioreq_gfn)
 {
     int rc;
 
-    rc = hvm_map_ioreq_page(s, 0, ioreq_pfn);
+    rc = hvm_map_ioreq_page(s, 0, ioreq_gfn);
     if ( rc )
         return rc;
 
-    if ( bufioreq_pfn != gfn_x(INVALID_GFN) )
-        rc = hvm_map_ioreq_page(s, 1, bufioreq_pfn);
+    if ( bufioreq_gfn != gfn_x(INVALID_GFN) )
+        rc = hvm_map_ioreq_page(s, 1, bufioreq_gfn);
 
     if ( rc )
         hvm_unmap_ioreq_page(s, 0);
@@ -435,8 +435,8 @@ static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
                                         bool_t handle_bufioreq)
 {
     struct domain *d = s->domain;
-    unsigned long ioreq_pfn = gfn_x(INVALID_GFN);
-    unsigned long bufioreq_pfn = gfn_x(INVALID_GFN);
+    unsigned long ioreq_gfn = gfn_x(INVALID_GFN);
+    unsigned long bufioreq_gfn = gfn_x(INVALID_GFN);
     int rc;
 
     if ( is_default )
@@ -451,18 +451,18 @@ static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
                    d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN]);
     }
 
-    rc = hvm_alloc_ioreq_gmfn(d, &ioreq_pfn);
+    rc = hvm_alloc_ioreq_gfn(d, &ioreq_gfn);
 
     if ( !rc && handle_bufioreq )
-        rc = hvm_alloc_ioreq_gmfn(d, &bufioreq_pfn);
+        rc = hvm_alloc_ioreq_gfn(d, &bufioreq_gfn);
 
     if ( !rc )
-        rc = hvm_ioreq_server_map_pages(s, ioreq_pfn, bufioreq_pfn);
+        rc = hvm_ioreq_server_map_pages(s, ioreq_gfn, bufioreq_gfn);
 
     if ( rc )
     {
-        hvm_free_ioreq_gmfn(d, ioreq_pfn);
-        hvm_free_ioreq_gmfn(d, bufioreq_pfn);
+        hvm_free_ioreq_gfn(d, ioreq_gfn);
+        hvm_free_ioreq_gfn(d, bufioreq_gfn);
     }
 
     return rc;
@@ -482,9 +482,9 @@ static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s,
     if ( !is_default )
     {
         if ( handle_bufioreq )
-            hvm_free_ioreq_gmfn(d, s->bufioreq.gmfn);
+            hvm_free_ioreq_gfn(d, s->bufioreq.gfn);
 
-        hvm_free_ioreq_gmfn(d, s->ioreq.gmfn);
+        hvm_free_ioreq_gfn(d, s->ioreq.gfn);
     }
 }
 
@@ -556,10 +556,10 @@ static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
 
     if ( !is_default )
     {
-        hvm_remove_ioreq_gmfn(d, &s->ioreq);
+        hvm_remove_ioreq_gfn(d, &s->ioreq);
 
         if ( handle_bufioreq )
-            hvm_remove_ioreq_gmfn(d, &s->bufioreq);
+            hvm_remove_ioreq_gfn(d, &s->bufioreq);
     }
 
     s->enabled = 1;
@@ -587,9 +587,9 @@ static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
     if ( !is_default )
     {
         if ( handle_bufioreq )
-            hvm_add_ioreq_gmfn(d, &s->bufioreq);
+            hvm_add_ioreq_gfn(d, &s->bufioreq);
 
-        hvm_add_ioreq_gmfn(d, &s->ioreq);
+        hvm_add_ioreq_gfn(d, &s->ioreq);
     }
 
     s->enabled = 0;
@@ -776,8 +776,8 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
 }
 
 int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
-                              unsigned long *ioreq_pfn,
-                              unsigned long *bufioreq_pfn,
+                              unsigned long *ioreq_gfn,
+                              unsigned long *bufioreq_gfn,
                               evtchn_port_t *bufioreq_port)
 {
     struct hvm_ioreq_server *s;
@@ -796,11 +796,11 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
         if ( s->id != id )
             continue;
 
-        *ioreq_pfn = s->ioreq.gmfn;
+        *ioreq_gfn = s->ioreq.gfn;
 
         if ( s->bufioreq.va != NULL )
         {
-            *bufioreq_pfn = s->bufioreq.gmfn;
+            *bufioreq_gfn = s->bufioreq.gfn;
             *bufioreq_port = s->bufioreq_evtchn;
         }
 
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index d2899c9bb2..ce536f75ef 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -36,7 +36,7 @@
 #include <public/hvm/dm_op.h>
 
 struct hvm_ioreq_page {
-    unsigned long gmfn;
+    unsigned long gfn;
     struct page_info *page;
     void *va;
 };
@@ -105,7 +105,7 @@ struct hvm_domain {
     struct {
         unsigned long base;
         unsigned long mask;
-    } ioreq_gmfn;
+    } ioreq_gfn;
 
     /* Lock protects all other values in the sub-struct and the default */
     struct {
diff --git a/xen/include/asm-x86/hvm/ioreq.h b/xen/include/asm-x86/hvm/ioreq.h
index b43667a367..43fbe115dc 100644
--- a/xen/include/asm-x86/hvm/ioreq.h
+++ b/xen/include/asm-x86/hvm/ioreq.h
@@ -28,8 +28,8 @@ int hvm_create_ioreq_server(struct domain *d, domid_t domid,
                             ioservid_t *id);
 int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id);
 int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
-                              unsigned long *ioreq_pfn,
-                              unsigned long *bufioreq_pfn,
+                              unsigned long *ioreq_gfn,
+                              unsigned long *bufioreq_gfn,
                               evtchn_port_t *bufioreq_port);
 int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
                                      uint32_t type, uint64_t start,
diff --git a/xen/include/public/hvm/dm_op.h b/xen/include/public/hvm/dm_op.h
index 2a4c3d938d..6bbab5fca3 100644
--- a/xen/include/public/hvm/dm_op.h
+++ b/xen/include/public/hvm/dm_op.h
@@ -41,9 +41,9 @@
  * A domain supports a single 'legacy' IOREQ Server which is instantiated if
  * parameter...
  *
- * HVM_PARAM_IOREQ_PFN is read (to get the gmfn containing the synchronous
+ * HVM_PARAM_IOREQ_PFN is read (to get the gfn containing the synchronous
  * ioreq structures), or...
- * HVM_PARAM_BUFIOREQ_PFN is read (to get the gmfn containing the buffered
+ * HVM_PARAM_BUFIOREQ_PFN is read (to get the gfn containing the buffered
  * ioreq ring), or...
  * HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
  * to request buffered I/O emulation).
@@ -81,14 +81,14 @@ struct xen_dm_op_create_ioreq_server {
  *
  * The emulator needs to map the synchronous ioreq structures and buffered
  * ioreq ring (if it exists) that Xen uses to request emulation. These are
- * hosted in the target domain's gmfns <ioreq_pfn> and <bufioreq_pfn>
+ * hosted in the target domain's gfns <ioreq_gfn> and <bufioreq_gfn>
  * respectively. In addition, if the IOREQ Server is handling buffered
  * emulation requests, the emulator needs to bind to event channel
  * <bufioreq_port> to listen for them. (The event channels used for
  * synchronous emulation requests are specified in the per-CPU ioreq
- * structures in <ioreq_pfn>).
+ * structures in <ioreq_gfn>).
  * If the IOREQ Server is not handling buffered emulation requests then the
- * values handed back in <bufioreq_pfn> and <bufioreq_port> will both be 0.
+ * values handed back in <bufioreq_gfn> and <bufioreq_port> will both be 0.
  */
 #define XEN_DMOP_get_ioreq_server_info 2
 
@@ -98,10 +98,10 @@ struct xen_dm_op_get_ioreq_server_info {
     uint16_t pad;
     /* OUT - buffered ioreq port */
     evtchn_port_t bufioreq_port;
-    /* OUT - sync ioreq pfn */
-    uint64_aligned_t ioreq_pfn;
-    /* OUT - buffered ioreq pfn */
-    uint64_aligned_t bufioreq_pfn;
+    /* OUT - sync ioreq gfn */
+    uint64_aligned_t ioreq_gfn;
+    /* OUT - buffered ioreq gfn */
+    uint64_aligned_t bufioreq_gfn;
 };
 
 /*
@@ -150,7 +150,7 @@ struct xen_dm_op_ioreq_server_range {
  *
  * The IOREQ Server will not be passed any emulation requests until it is
  * in the enabled state.
- * Note that the contents of the ioreq_pfn and bufioreq_fn (see
+ * Note that the contents of the ioreq_gfn and bufioreq_gfn (see
  * XEN_DMOP_get_ioreq_server_info) are not meaningful until the IOREQ Server
  * is in the enabled state.
  */
-- 
2.11.0
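
As a usage note, a hypothetical toolstack-side caller of the renamed
libxendevicemodel entry point might look like the sketch below; the
helper name, the printf reporting, and the error handling are
placeholders here, and only the xendevicemodel_get_ioreq_server_info()
signature is taken from the patch:

    #include <stdio.h>
    #include <xendevicemodel.h>

    /* Hypothetical helper; 'dmod', 'domid' and 'id' would come from
     * the emulator's own setup. */
    static int print_ioreq_server_info(xendevicemodel_handle *dmod,
                                       domid_t domid, ioservid_t id)
    {
        xen_pfn_t ioreq_gfn, bufioreq_gfn;
        evtchn_port_t bufioreq_port;
        int rc;

        rc = xendevicemodel_get_ioreq_server_info(dmod, domid, id,
                                                  &ioreq_gfn,
                                                  &bufioreq_gfn,
                                                  &bufioreq_port);
        if (rc)
            return rc;

        /* Per the dm_op.h comment above, bufioreq_gfn and
         * bufioreq_port are both 0 if the server does not handle
         * buffered requests. */
        printf("ioreq gfn %#lx, bufioreq gfn %#lx, port %u\n",
               (unsigned long)ioreq_gfn, (unsigned long)bufioreq_gfn,
               bufioreq_port);
        return 0;
    }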

