|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH v4 1/2] This patch only changes indentation, to make the next patch in the series easier to review.
Signed-off-by: Julian Vetter <julian.vetter@xxxxxxxxxx>
---
Changes in v4:
- No changes to this patch
---
xen/arch/x86/hvm/ioreq.c | 58 ++++++++++++++++---------------
xen/common/ioreq.c | 74 +++++++++++++++++++++-------------------
2 files changed, 70 insertions(+), 62 deletions(-)
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index a5fa97e149..355b2ba12c 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -125,14 +125,16 @@ static void hvm_unmap_ioreq_gfn(struct ioreq_server *s,
bool buf)
{
struct ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
- if ( gfn_eq(iorp->gfn, INVALID_GFN) )
- return;
+ {
+ if ( gfn_eq(iorp->gfn, INVALID_GFN) )
+ return;
- destroy_ring_for_helper(&iorp->va, iorp->page);
- iorp->page = NULL;
+ destroy_ring_for_helper(&iorp->va, iorp->page);
+ iorp->page = NULL;
- hvm_free_ioreq_gfn(s, iorp->gfn);
- iorp->gfn = INVALID_GFN;
+ hvm_free_ioreq_gfn(s, iorp->gfn);
+ iorp->gfn = INVALID_GFN;
+ }
}
static int hvm_map_ioreq_gfn(struct ioreq_server *s, bool buf)
@@ -141,34 +143,36 @@ static int hvm_map_ioreq_gfn(struct ioreq_server *s, bool
buf)
struct ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
int rc;
- if ( iorp->page )
{
- /*
- * If a page has already been allocated (which will happen on
- * demand if ioreq_server_get_frame() is called), then
- * mapping a guest frame is not permitted.
- */
- if ( gfn_eq(iorp->gfn, INVALID_GFN) )
- return -EPERM;
-
- return 0;
- }
+ if ( iorp->page )
+ {
+ /*
+ * If a page has already been allocated (which will happen on
+ * demand if ioreq_server_get_frame() is called), then
+ * mapping a guest frame is not permitted.
+ */
+ if ( gfn_eq(iorp->gfn, INVALID_GFN) )
+ return -EPERM;
+
+ return 0;
+ }
- if ( d->is_dying )
- return -EINVAL;
+ if ( d->is_dying )
+ return -EINVAL;
- iorp->gfn = hvm_alloc_ioreq_gfn(s);
+ iorp->gfn = hvm_alloc_ioreq_gfn(s);
- if ( gfn_eq(iorp->gfn, INVALID_GFN) )
- return -ENOMEM;
+ if ( gfn_eq(iorp->gfn, INVALID_GFN) )
+ return -ENOMEM;
- rc = prepare_ring_for_helper(d, gfn_x(iorp->gfn), &iorp->page,
- &iorp->va);
+ rc = prepare_ring_for_helper(d, gfn_x(iorp->gfn), &iorp->page,
+ &iorp->va);
- if ( rc )
- hvm_unmap_ioreq_gfn(s, buf);
+ if ( rc )
+ hvm_unmap_ioreq_gfn(s, buf);
- return rc;
+ return rc;
+ }
}
static void hvm_remove_ioreq_gfn(struct ioreq_server *s, bool buf)
diff --git a/xen/common/ioreq.c b/xen/common/ioreq.c
index f5fd30ce12..2e284ad26c 100644
--- a/xen/common/ioreq.c
+++ b/xen/common/ioreq.c
@@ -263,41 +263,43 @@ static int ioreq_server_alloc_mfn(struct ioreq_server *s,
bool buf)
struct ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
struct page_info *page;
- if ( iorp->page )
{
- /*
- * If a guest frame has already been mapped (which may happen
- * on demand if ioreq_server_get_info() is called), then
- * allocating a page is not permitted.
- */
- if ( !gfn_eq(iorp->gfn, INVALID_GFN) )
- return -EPERM;
+ if ( iorp->page )
+ {
+ /*
+ * If a guest frame has already been mapped (which may happen
+ * on demand if ioreq_server_get_info() is called), then
+ * allocating a page is not permitted.
+ */
+ if ( !gfn_eq(iorp->gfn, INVALID_GFN) )
+ return -EPERM;
- return 0;
- }
+ return 0;
+ }
- page = alloc_domheap_page(s->target, MEMF_no_refcount);
+ page = alloc_domheap_page(s->target, MEMF_no_refcount);
- if ( !page )
- return -ENOMEM;
+ if ( !page )
+ return -ENOMEM;
- if ( !get_page_and_type(page, s->target, PGT_writable_page) )
- {
- /*
- * The domain can't possibly know about this page yet, so failure
- * here is a clear indication of something fishy going on.
- */
- domain_crash(s->emulator);
- return -ENODATA;
- }
+ if ( !get_page_and_type(page, s->target, PGT_writable_page) )
+ {
+ /*
+ * The domain can't possibly know about this page yet, so failure
+ * here is a clear indication of something fishy going on.
+ */
+ domain_crash(s->emulator);
+ return -ENODATA;
+ }
- iorp->va = __map_domain_page_global(page);
- if ( !iorp->va )
- goto fail;
+ iorp->va = __map_domain_page_global(page);
+ if ( !iorp->va )
+ goto fail;
- iorp->page = page;
- clear_page(iorp->va);
- return 0;
+ iorp->page = page;
+ clear_page(iorp->va);
+ return 0;
+ }
fail:
put_page_alloc_ref(page);
@@ -311,16 +313,18 @@ static void ioreq_server_free_mfn(struct ioreq_server *s,
bool buf)
struct ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
struct page_info *page = iorp->page;
- if ( !page )
- return;
+ {
+ if ( !page )
+ return;
- iorp->page = NULL;
+ iorp->page = NULL;
- unmap_domain_page_global(iorp->va);
- iorp->va = NULL;
+ unmap_domain_page_global(iorp->va);
+ iorp->va = NULL;
- put_page_alloc_ref(page);
- put_page_and_type(page);
+ put_page_alloc_ref(page);
+ put_page_and_type(page);
+ }
}
bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
--
2.51.0
--
Julian Vetter | Vates Hypervisor & Kernel Developer
XCP-ng & Xen Orchestra - Vates solutions
web: https://vates.tech
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |