
[Xen-devel] [V1 PATCH 3/3] P2M error code propagation



Because some of the leaf p2m functions return 0 for failure and
1 (TRUE) for success, the real errno is lost by the time the failure
reaches the original caller. Change those functions to return a
proper -errno on failure and 0 on success, and propagate that value
up through the callers. Also, fix up any code in the immediate
vicinity that violates coding style.

No functional change is intended.
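
To illustrate the convention change, here is a minimal sketch; the
helpers below are made up for illustration and are not code from
this patch:

    #include <errno.h>

    /* Old convention: return 1 (TRUE) on success, 0 on failure. */
    static int leaf_set_old(int arg)
    {
        if ( arg < 0 )
            return 0;    /* -EINVAL? -ENOMEM? The caller can't tell. */
        return 1;
    }

    static int caller_old(int arg)
    {
        if ( !leaf_set_old(arg) )
            return -EIO; /* forced to invent an error code */
        return 0;
    }

    /* New convention: return 0 on success, -errno on failure. */
    static int leaf_set_new(int arg)
    {
        if ( arg < 0 )
            return -EINVAL;         /* the real reason survives */
        return 0;
    }

    static int caller_new(int arg)
    {
        return leaf_set_new(arg);   /* -errno propagates unmodified */
    }

The hunks below apply this transformation to p2m_next_level(),
set_mmio_p2m_entry(), clear_mmio_p2m_entry(), set_shared_p2m_entry()
and the EPT/PT set_entry paths, adjusting the callers accordingly.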

Signed-off-by: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>
---
 xen/arch/x86/domctl.c            | 19 ++++----
 xen/arch/x86/mm/hap/nested_hap.c | 11 ++---
 xen/arch/x86/mm/mem_sharing.c    | 10 ++---
 xen/arch/x86/mm/p2m-ept.c        | 17 ++++---
 xen/arch/x86/mm/p2m-pod.c        |  7 ++-
 xen/arch/x86/mm/p2m-pt.c         | 48 ++++++++++----------
 xen/arch/x86/mm/p2m.c            | 96 +++++++++++++++++++---------------------
 7 files changed, 102 insertions(+), 106 deletions(-)

diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 26635ff..cd58e37 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -671,13 +671,12 @@ long arch_do_domctl(
             if ( !ret && paging_mode_translate(d) )
             {
                 for ( i = 0; !ret && i < nr_mfns; i++ )
-                    if ( !set_mmio_p2m_entry(d, gfn + i, _mfn(mfn + i)) )
-                        ret = -EIO;
+                    ret = set_mmio_p2m_entry(d, gfn + i, _mfn(mfn + i));
                 if ( ret )
                 {
                     printk(XENLOG_G_WARNING
-                           "memory_map:fail: dom%d gfn=%lx mfn=%lx\n",
-                           d->domain_id, gfn + i, mfn + i);
+                           "memory_map:fail: dom%d gfn=%lx mfn=%lx ret:%ld\n",
+                           d->domain_id, gfn + i, mfn + i, ret);
                     while ( i-- )
                         clear_mmio_p2m_entry(d, gfn + i);
                     if ( iomem_deny_access(d, mfn, mfn + nr_mfns - 1) &&
@@ -690,20 +689,22 @@ long arch_do_domctl(
         }
         else
         {
+            int tmp_rc;
             printk(XENLOG_G_INFO
                    "memory_map:remove: dom%d gfn=%lx mfn=%lx nr=%lx\n",
                    d->domain_id, gfn, mfn, nr_mfns);
 
             if ( paging_mode_translate(d) )
                 for ( i = 0; i < nr_mfns; i++ )
-                    add |= !clear_mmio_p2m_entry(d, gfn + i);
-            ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
-            if ( !ret && add )
-                ret = -EIO;
+                    if ( (tmp_rc = clear_mmio_p2m_entry(d, gfn + i)) )
+                        ret = tmp_rc;
+            tmp_rc = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
+            if ( !ret && tmp_rc )
+                ret = tmp_rc;
             if ( ret && is_hardware_domain(current->domain) )
                 printk(XENLOG_ERR
                        "memory_map: error %ld %s dom%d access to [%lx,%lx]\n",
-                       ret, add ? "removing" : "denying", d->domain_id,
+                       ret, tmp_rc ? "denying" : "removing", d->domain_id,
                        mfn, mfn + nr_mfns - 1);
         }
     }
diff --git a/xen/arch/x86/mm/hap/nested_hap.c b/xen/arch/x86/mm/hap/nested_hap.c
index 78701dc..5c41725 100644
--- a/xen/arch/x86/mm/hap/nested_hap.c
+++ b/xen/arch/x86/mm/hap/nested_hap.c
@@ -103,7 +103,7 @@ nestedhap_fix_p2m(struct vcpu *v, struct p2m_domain *p2m,
                   paddr_t L2_gpa, paddr_t L0_gpa,
                   unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
 {
-    int rv = 1;
+    int rc = 0;
     ASSERT(p2m);
     ASSERT(p2m->set_entry);
 
@@ -124,15 +124,16 @@ nestedhap_fix_p2m(struct vcpu *v, struct p2m_domain *p2m,
         gfn = (L2_gpa >> PAGE_SHIFT) & mask;
         mfn = _mfn((L0_gpa >> PAGE_SHIFT) & mask);
 
-        rv = p2m_set_entry(p2m, gfn, mfn, page_order, p2mt, p2ma);
+        rc = p2m_set_entry(p2m, gfn, mfn, page_order, p2mt, p2ma);
     }
 
     p2m_unlock(p2m);
 
-    if (rv == 0) {
+    if ( rc )
+    {
         gdprintk(XENLOG_ERR,
-               "failed to set entry for %#"PRIx64" -> %#"PRIx64"\n",
-               L2_gpa, L0_gpa);
+                 "failed to set entry for %#"PRIx64" -> %#"PRIx64" rc:%d\n",
+                 L2_gpa, L0_gpa, rc);
         BUG();
     }
 }
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index 0968681..61f0b22 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -1021,7 +1021,7 @@ int mem_sharing_share_pages(struct domain *sd, unsigned long sgfn, shr_handle_t
         put_page_and_type(cpage);
         d = get_domain_by_id(gfn->domain);
         BUG_ON(!d);
-        BUG_ON(set_shared_p2m_entry(d, gfn->gfn, smfn) == 0);
+        BUG_ON(set_shared_p2m_entry(d, gfn->gfn, smfn));
         put_domain(d);
     }
     ASSERT(list_empty(&cpage->sharing->gfns));
@@ -1095,13 +1095,11 @@ int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle
     ret = p2m_set_entry(p2m, cgfn, smfn, PAGE_ORDER_4K, p2m_ram_shared, a);
 
     /* Tempted to turn this into an assert */
-    if ( !ret )
+    if ( ret )
     {
-        ret = -ENOENT;
         mem_sharing_gfn_destroy(spage, cd, gfn_info);
         put_page_and_type(spage);
     } else {
-        ret = 0;
         /* There is a chance we're plugging a hole where a paged out page was */
         if ( p2m_is_paging(cmfn_type) && (cmfn_type != p2m_ram_paging_out) )
         {
@@ -1232,7 +1230,7 @@ int __mem_sharing_unshare_page(struct domain *d,
     unmap_domain_page(s);
     unmap_domain_page(t);
 
-    BUG_ON(set_shared_p2m_entry(d, gfn, page_to_mfn(page)) == 0);
+    BUG_ON(set_shared_p2m_entry(d, gfn, page_to_mfn(page)));
     mem_sharing_gfn_destroy(old_page, d, gfn_info);
     mem_sharing_page_unlock(old_page);
     put_page_and_type(old_page);
@@ -1288,7 +1286,7 @@ int relinquish_shared_pages(struct domain *d)
              * we hold the p2m lock. */
             set_rc = p2m->set_entry(p2m, gfn, _mfn(0), PAGE_ORDER_4K,
                                     p2m_invalid, p2m_access_rwx);
-            ASSERT(set_rc != 0);
+            ASSERT(set_rc == 0);
             count += 0x10;
         }
         else
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 99a1084..a219f8b 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -273,6 +273,8 @@ static int ept_next_level(struct p2m_domain *p2m, bool_t read_only,
 /*
  * ept_set_entry() computes 'need_modify_vtd_table' for itself,
  * by observing whether any gfn->mfn translations are modified.
+ *
+ * Returns: 0 for success, -errno for failure
  */
 static int
 ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, 
@@ -281,7 +283,7 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
     ept_entry_t *table, *ept_entry = NULL;
     unsigned long gfn_remainder = gfn;
     int i, target = order / EPT_TABLE_ORDER;
-    int rv = 0;
+    int rc = 0;
     int ret = 0;
     bool_t direct_mmio = (p2mt == p2m_mmio_direct);
     uint8_t ipat = 0;
@@ -302,7 +304,7 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
     if ( ((gfn | mfn_x(mfn)) & ((1UL << order) - 1)) ||
          ((u64)gfn >> ((ept_get_wl(ept) + 1) * EPT_TABLE_ORDER)) ||
          (order % EPT_TABLE_ORDER) )
-        return 0;
+        return -EINVAL;
 
     ASSERT((target == 2 && hvm_hap_has_1gb()) ||
            (target == 1 && hvm_hap_has_2mb()) ||
@@ -314,7 +316,10 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, 
mfn_t mfn,
     {
         ret = ept_next_level(p2m, 0, &table, &gfn_remainder, i);
         if ( !ret )
+        {
+            rc = -ENOENT;
             goto out;
+        }
         else if ( ret != GUEST_TABLE_NORMAL_PAGE )
             break;
     }
@@ -386,6 +391,7 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
         if ( !ept_split_super_page(p2m, &split_ept_entry, i, target) )
         {
             ept_free_entry(p2m, &split_ept_entry, i);
+            rc = -ENOMEM;
             goto out;
         }
 
@@ -426,9 +432,6 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
          (gfn + (1UL << order) - 1 > p2m->max_mapped_pfn) )
         p2m->max_mapped_pfn = gfn + (1UL << order) - 1;
 
-    /* Success */
-    rv = 1;
-
 out:
     unmap_domain_page(table);
 
@@ -436,7 +439,7 @@ out:
         ept_sync_domain(p2m);
 
     /* For non-nested p2m, may need to change VT-d page table.*/
-    if ( rv && !p2m_is_nestedp2m(p2m) && need_iommu(d) &&
+    if ( rc == 0 && !p2m_is_nestedp2m(p2m) && need_iommu(d) &&
          need_modify_vtd_table )
     {
         if ( iommu_hap_pt_share )
@@ -460,7 +463,7 @@ out:
     if ( is_epte_present(&old_entry) )
         ept_free_entry(p2m, &old_entry, target);
 
-    return rv;
+    return rc;
 }
 
 /* Read ept p2m entries */
diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index 3c86255..68db937 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -1150,10 +1150,9 @@ guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
     }
 
     /* Now, actually do the two-way mapping */
-    if ( !p2m_set_entry(p2m, gfn, _mfn(0), order,
-                        p2m_populate_on_demand, p2m->default_access) )
-        rc = -EINVAL;
-    else
+    rc = p2m_set_entry(p2m, gfn, _mfn(0), order, p2m_populate_on_demand,
+                       p2m->default_access);
+    if ( rc == 0 )
     {
         pod_lock(p2m);
         p2m->pod.entry_count += 1 << order;
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 849ae08..3c4e688 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -154,6 +154,7 @@ static void p2m_add_iommu_flags(l1_pgentry_t *p2m_entry,
         l1e_add_flags(*p2m_entry, iommu_nlevel_to_flags(nlevel, flags));
 }
 
+/* Returns: 0 for success, -errno for failure */
 static int
 p2m_next_level(struct p2m_domain *p2m, mfn_t *table_mfn, void **table,
                unsigned long *gfn_remainder, unsigned long gfn, u32 shift,
@@ -167,7 +168,7 @@ p2m_next_level(struct p2m_domain *p2m, mfn_t *table_mfn, void **table,
 
     if ( !(p2m_entry = p2m_find_entry(*table, gfn_remainder, gfn,
                                       shift, max)) )
-        return 0;
+        return -ENOENT;
 
     /* PoD/paging: Not present doesn't imply empty. */
     if ( !l1e_get_flags(*p2m_entry) )
@@ -176,7 +177,7 @@ p2m_next_level(struct p2m_domain *p2m, mfn_t *table_mfn, void **table,
 
         pg = p2m_alloc_ptp(p2m, type);
         if ( pg == NULL )
-            return 0;
+            return -ENOMEM;
 
         new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
                                  __PAGE_HYPERVISOR | _PAGE_USER);
@@ -210,7 +211,7 @@ p2m_next_level(struct p2m_domain *p2m, mfn_t *table_mfn, void **table,
 
         pg = p2m_alloc_ptp(p2m, PGT_l2_page_table);
         if ( pg == NULL )
-            return 0;
+            return -ENOMEM;
 
         flags = l1e_get_flags(*p2m_entry);
         pfn = l1e_get_pfn(*p2m_entry);
@@ -239,7 +240,7 @@ p2m_next_level(struct p2m_domain *p2m, mfn_t *table_mfn, void **table,
 
         pg = p2m_alloc_ptp(p2m, PGT_l1_page_table);
         if ( pg == NULL )
-            return 0;
+            return -ENOMEM;
 
         /* New splintered mappings inherit the flags of the old superpage, 
          * with a little reorganisation for the _PAGE_PSE_PAT bit. */
@@ -272,23 +273,23 @@ p2m_next_level(struct p2m_domain *p2m, mfn_t *table_mfn, void **table,
     unmap_domain_page(*table);
     *table = next;
 
-    return 1;
+    return 0;
 }
 
-// Returns 0 on error (out of memory)
+/* Returns: 0 for success, -errno for failure */
 static int
 p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
                  unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
 {
-    // XXX -- this might be able to be faster iff current->domain == d
+    /* XXX -- this might be able to be faster iff current->domain == d */
     mfn_t table_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
-    void *table =map_domain_page(mfn_x(table_mfn));
+    void *table = map_domain_page(mfn_x(table_mfn));
     unsigned long i, gfn_remainder = gfn;
     l1_pgentry_t *p2m_entry;
     l1_pgentry_t entry_content;
     l2_pgentry_t l2e_content;
     l3_pgentry_t l3e_content;
-    int rv=0;
+    int rc;
     unsigned int iommu_pte_flags = (p2mt == p2m_ram_rw) ?
                                    IOMMUF_readable|IOMMUF_writable:
                                    0; 
@@ -311,9 +312,10 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
         __trace_var(TRC_MEM_SET_P2M_ENTRY, 0, sizeof(t), &t);
     }
 
-    if ( !p2m_next_level(p2m, &table_mfn, &table, &gfn_remainder, gfn,
-                         L4_PAGETABLE_SHIFT - PAGE_SHIFT,
-                         L4_PAGETABLE_ENTRIES, PGT_l3_page_table) )
+    rc = p2m_next_level(p2m, &table_mfn, &table, &gfn_remainder, gfn,
+                        L4_PAGETABLE_SHIFT - PAGE_SHIFT,
+                        L4_PAGETABLE_ENTRIES, PGT_l3_page_table);
+    if ( rc )
         goto out;
 
     /*
@@ -354,17 +356,18 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
         if ( l1e_get_flags(old_entry) & _PAGE_PRESENT )
             p2m_free_entry(p2m, &old_entry, page_order);
     }
-    else if ( !p2m_next_level(p2m, &table_mfn, &table, &gfn_remainder, gfn,
-                              L3_PAGETABLE_SHIFT - PAGE_SHIFT,
-                              L3_PAGETABLE_ENTRIES,
-                              PGT_l2_page_table) )
+    else if ( (rc = p2m_next_level(p2m, &table_mfn, &table, &gfn_remainder,
+                                   gfn, L3_PAGETABLE_SHIFT - PAGE_SHIFT,
+                                   L3_PAGETABLE_ENTRIES,
+                                   PGT_l2_page_table)) )
         goto out;
 
     if ( page_order == PAGE_ORDER_4K )
     {
-        if ( !p2m_next_level(p2m, &table_mfn, &table, &gfn_remainder, gfn,
-                             L2_PAGETABLE_SHIFT - PAGE_SHIFT,
-                             L2_PAGETABLE_ENTRIES, PGT_l1_page_table) )
+        rc = p2m_next_level(p2m, &table_mfn, &table, &gfn_remainder, gfn,
+                            L2_PAGETABLE_SHIFT - PAGE_SHIFT,
+                            L2_PAGETABLE_ENTRIES, PGT_l1_page_table);
+        if ( rc )
             goto out;
 
         p2m_entry = p2m_find_entry(table, &gfn_remainder, gfn,
@@ -452,12 +455,9 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
         }
     }
 
-    /* Success */
-    rv = 1;
-
-out:
+ out:
     unmap_domain_page(table);
-    return rv;
+    return rc;
 }
 
 static mfn_t
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index e16f31a..0c300a3 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -308,14 +308,14 @@ struct page_info *get_page_from_gfn_p2m(
     return page;
 }
 
-
+/* Returns: 0 for success, -errno for failure */
 int p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
                   unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
 {
     struct domain *d = p2m->domain;
     unsigned long todo = 1ul << page_order;
     unsigned int order;
-    int rc = 1;
+    int set_rc, rc = 1;
 
     ASSERT(gfn_locked_by_me(p2m, gfn));
 
@@ -329,8 +329,8 @@ int p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
         else
             order = 0;
 
-        if ( !p2m->set_entry(p2m, gfn, mfn, order, p2mt, p2ma) )
-            rc = 0;
+        if ( (set_rc = p2m->set_entry(p2m, gfn, mfn, order, p2mt, p2ma)) )
+            rc = set_rc;
         gfn += 1ul << order;
         if ( mfn_x(mfn) != INVALID_MFN )
             mfn = _mfn(mfn_x(mfn) + (1ul << order));
@@ -370,17 +370,19 @@ void p2m_free_ptp(struct p2m_domain *p2m, struct page_info *pg)
     return;
 }
 
-// Allocate a new p2m table for a domain.
-//
-// The structure of the p2m table is that of a pagetable for xen (i.e. it is
-// controlled by CONFIG_PAGING_LEVELS).
-//
-// Returns 0 for success or -errno.
-//
+/*
+ * Allocate a new p2m table for a domain.
+ *
+ * The structure of the p2m table is that of a pagetable for xen (i.e. it is
+ * controlled by CONFIG_PAGING_LEVELS).
+ *
+ * Returns 0 for success, -errno for failure.
+ */
 int p2m_alloc_table(struct p2m_domain *p2m)
 {
     struct page_info *p2m_top;
     struct domain *d = p2m->domain;
+    int rc = 0;
 
     p2m_lock(p2m);
 
@@ -417,8 +419,8 @@ int p2m_alloc_table(struct p2m_domain *p2m)
 
     /* Initialise physmap tables for slot zero. Other code assumes this. */
     p2m->defer_nested_flush = 1;
-    if ( !p2m_set_entry(p2m, 0, _mfn(INVALID_MFN), PAGE_ORDER_4K,
-                        p2m_invalid, p2m->default_access) )
+    if ( (rc = p2m_set_entry(p2m, 0, _mfn(INVALID_MFN), PAGE_ORDER_4K,
+                             p2m_invalid, p2m->default_access)) )
         goto error;
     p2m->defer_nested_flush = 0;
 
@@ -428,9 +430,9 @@ int p2m_alloc_table(struct p2m_domain *p2m)
 
     spin_unlock(&p2m->domain->page_alloc_lock);
  error:
-    P2M_PRINTK("failed to initialise p2m table for slot zero\n");
+    P2M_PRINTK("failed to initialise p2m table for slot zero. rc:%d\n", rc);
     p2m_unlock(p2m);
-    return -ENOMEM;
+    return rc;
 }
 
 void p2m_teardown(struct p2m_domain *p2m)
@@ -636,12 +638,10 @@ guest_physmap_add_entry(struct domain *d, unsigned long gfn,
     /* Now, actually do the two-way mapping */
     if ( mfn_valid(_mfn(mfn)) ) 
     {
-        if ( !p2m_set_entry(p2m, gfn, _mfn(mfn), page_order, t,
-                            p2m->default_access) )
-        {
-            rc = -EINVAL;
+        if ( (rc = p2m_set_entry(p2m, gfn, _mfn(mfn), page_order, t,
+                            p2m->default_access)) )
             goto out; /* Failed to update p2m, bail without updating m2p. */
-        }
+
         if ( !p2m_is_grant(t) )
         {
             for ( i = 0; i < (1UL << page_order); i++ )
@@ -652,10 +652,8 @@ guest_physmap_add_entry(struct domain *d, unsigned long gfn,
     {
         gdprintk(XENLOG_WARNING, "Adding bad mfn to p2m map (%#lx -> %#lx)\n",
                  gfn, mfn);
-        if ( !p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), page_order,
-                            p2m_invalid, p2m->default_access) )
-            rc = -EINVAL;
-        else
+        if ( (rc = p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), page_order,
+                            p2m_invalid, p2m->default_access)) == 0 )
         {
             pod_lock(p2m);
             p2m->pod.entry_count -= pod_count;
@@ -742,9 +740,8 @@ void p2m_change_type_range(struct domain *d,
 }
 
 
-
-int
-set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
+/* Returns: 0 for success, -errno for failure */
+int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
 {
     int rc = 0;
     p2m_access_t a;
@@ -753,7 +750,7 @@ set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
     if ( !paging_mode_translate(d) )
-        return 0;
+        return -EPERM;
 
     gfn_lock(p2m, gfn, 0);
     omfn = p2m->get_entry(p2m, gfn, &ot, &a, 0, NULL);
@@ -761,7 +758,7 @@ set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
     {
         p2m_unlock(p2m);
         domain_crash(d);
-        return 0;
+        return -ENOENT;
     }
     else if ( p2m_is_ram(ot) )
     {
@@ -773,24 +770,24 @@ set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
     rc = p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_mmio_direct,
                        p2m->default_access);
     gfn_unlock(p2m, gfn, 0);
-    if ( 0 == rc )
+    if ( rc )
         gdprintk(XENLOG_ERR,
-            "set_mmio_p2m_entry: p2m_set_entry failed! mfn=%08lx\n",
-            mfn_x(get_gfn_query_unlocked(p2m->domain, gfn, &ot)));
+                 "p2m_set_entry failed! mfn=%08lx rc:%d\n",
+                 mfn_x(get_gfn_query_unlocked(p2m->domain, gfn, &ot)), rc);
     return rc;
 }
 
-int
-clear_mmio_p2m_entry(struct domain *d, unsigned long gfn)
+/* Returns: 0 for success, -errno for failure */
+int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn)
 {
-    int rc = 0;
+    int rc = -EINVAL;
     mfn_t mfn;
     p2m_access_t a;
     p2m_type_t t;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
     if ( !paging_mode_translate(d) )
-        return 0;
+        return -EPERM;
 
     gfn_lock(p2m, gfn, 0);
     mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL);
@@ -799,20 +796,20 @@ clear_mmio_p2m_entry(struct domain *d, unsigned long gfn)
     if ( (INVALID_MFN == mfn_x(mfn)) || (t != p2m_mmio_direct) )
     {
         gdprintk(XENLOG_ERR,
-            "clear_mmio_p2m_entry: gfn_to_mfn failed! gfn=%08lx\n", gfn);
+                 "gfn_to_mfn failed! gfn=%08lx type:%d\n", gfn, t);
         goto out;
     }
     rc = p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_4K, p2m_invalid,
                        p2m->default_access);
 
-out:
+ out:
     gfn_unlock(p2m, gfn, 0);
 
     return rc;
 }
 
-int
-set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
+/* Returns: 0 for success, -errno for failure */
+int set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
 {
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     int rc = 0;
@@ -822,7 +819,7 @@ set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
     unsigned long pg_type;
 
     if ( !paging_mode_translate(p2m->domain) )
-        return 0;
+        return -EPERM;
 
     gfn_lock(p2m, gfn, 0);
     omfn = p2m->get_entry(p2m, gfn, &ot, &a, 0, NULL);
@@ -841,10 +838,10 @@ set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
     rc = p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_shared,
                        p2m->default_access);
     gfn_unlock(p2m, gfn, 0);
-    if ( 0 == rc )
+    if ( rc )
         gdprintk(XENLOG_ERR,
-            "set_shared_p2m_entry: p2m_set_entry failed! mfn=%08lx\n",
-            mfn_x(get_gfn_query_unlocked(p2m->domain, gfn, &ot)));
+                 "p2m_set_entry failed! mfn=%08lx rc:%d\n",
+                 mfn_x(get_gfn_query_unlocked(p2m->domain, gfn, &ot)), rc);
     return rc;
 }
 
@@ -1263,7 +1260,7 @@ bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
     if ( access_w && p2ma == p2m_access_rx2rw ) 
     {
         rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rw);
-        ASSERT(rc);
+        ASSERT(rc == 0);
         gfn_unlock(p2m, gfn, 0);
         return 1;
     }
@@ -1272,7 +1269,7 @@ bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
         ASSERT(access_w || access_r || access_x);
         rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
                             p2mt, p2m_access_rwx);
-        ASSERT(rc);
+        ASSERT(rc == 0);
     }
     gfn_unlock(p2m, gfn, 0);
 
@@ -1299,7 +1296,7 @@ bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
                  * gfn locked and just did a successful get_entry(). */
                 rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
                                     p2mt, p2m_access_rwx);
-                ASSERT(rc);
+                ASSERT(rc == 0);
             }
             gfn_unlock(p2m, gfn, 0);
             return 1;
@@ -1401,11 +1398,8 @@ long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
     for ( pfn += start; nr > start; ++pfn )
     {
         mfn = p2m->get_entry(p2m, pfn, &t, &_a, 0, NULL);
-        if ( p2m->set_entry(p2m, pfn, mfn, PAGE_ORDER_4K, t, a) == 0 )
-        {
-            rc = -ENOMEM;
+        if ( (rc = p2m->set_entry(p2m, pfn, mfn, PAGE_ORDER_4K, t, a)) )
             break;
-        }
 
         /* Check for continuation if it's not the last iteration. */
         if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
-- 
1.8.3.1


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel