[Xen-devel] [PATCH for-next] x86/mm: Correctly indent the pin_page block of do_mmuext_op()
The pin_page block is missing one level of indentation, which makes the
MMUEXT_UNPIN_TABLE case label appear to be outside of the switch statement.

While making this adjustment, delete one other piece of trailing whitespace.

No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
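
A minimal standalone sketch of the construct the first hunk below re-indents
(not Xen code; the enum, function, and label names are invented purely for
illustration): several case arms funnel into one labelled block, and both that
block and the case label following it are inside the switch, even though the
old indentation in mm.c read as if they were not.

#include <stdio.h>

enum cmd { CMD_PIN_A, CMD_PIN_B, CMD_UNPIN };

static void handle(enum cmd cmd)
{
    int type;

    switch ( cmd )
    {
    case CMD_PIN_A:
        type = 1;
        goto pin;

    case CMD_PIN_B:
        type = 2;
        /* Fall through into the shared block, as the L4 case does in mm.c. */

    pin:
    {
        /* Shared tail for the PIN cases; break leaves the enclosing switch. */
        printf("pin type %d\n", type);
        break;
    }

    case CMD_UNPIN:   /* Still a label of the same switch statement. */
        printf("unpin\n");
        break;
    }
}

int main(void)
{
    handle(CMD_PIN_A);
    handle(CMD_UNPIN);
    return 0;
}
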
---
 xen/arch/x86/mm.c | 115 +++++++++++++++++++++++++++---------------------------
 1 file changed, 58 insertions(+), 57 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 77b0af1..a54b000 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -3249,73 +3249,74 @@ long do_mmuext_op(
                 break;
             type = PGT_l4_page_table;
 
-    pin_page: {
-        struct page_info *page;
+        pin_page:
+        {
+            struct page_info *page;
 
-        /* Ignore pinning of invalid paging levels. */
-        if ( (op.cmd - MMUEXT_PIN_L1_TABLE) > (CONFIG_PAGING_LEVELS - 1) )
-            break;
+            /* Ignore pinning of invalid paging levels. */
+            if ( (op.cmd - MMUEXT_PIN_L1_TABLE) > (CONFIG_PAGING_LEVELS - 1) )
+                break;
 
-        if ( paging_mode_refcounts(pg_owner) )
-            break;
+            if ( paging_mode_refcounts(pg_owner) )
+                break;
 
-        page = get_page_from_gfn(pg_owner, op.arg1.mfn, NULL, P2M_ALLOC);
-        if ( unlikely(!page) )
-        {
-            rc = -EINVAL;
-            break;
-        }
+            page = get_page_from_gfn(pg_owner, op.arg1.mfn, NULL, P2M_ALLOC);
+            if ( unlikely(!page) )
+            {
+                rc = -EINVAL;
+                break;
+            }
 
-        rc = get_page_type_preemptible(page, type);
-        if ( unlikely(rc) )
-        {
-            if ( rc == -EINTR )
-                rc = -ERESTART;
-            else if ( rc != -ERESTART )
-                gdprintk(XENLOG_WARNING,
-                         "Error %d while pinning mfn %" PRI_mfn "\n",
-                         rc, page_to_mfn(page));
-            if ( page != curr->arch.old_guest_table )
-                put_page(page);
-            break;
-        }
+            rc = get_page_type_preemptible(page, type);
+            if ( unlikely(rc) )
+            {
+                if ( rc == -EINTR )
+                    rc = -ERESTART;
+                else if ( rc != -ERESTART )
+                    gdprintk(XENLOG_WARNING,
+                             "Error %d while pinning mfn %" PRI_mfn "\n",
+                             rc, page_to_mfn(page));
+                if ( page != curr->arch.old_guest_table )
+                    put_page(page);
+                break;
+            }
 
-        rc = xsm_memory_pin_page(XSM_HOOK, d, pg_owner, page);
-        if ( !rc && unlikely(test_and_set_bit(_PGT_pinned,
-                                              &page->u.inuse.type_info)) )
-        {
-            gdprintk(XENLOG_WARNING,
-                     "mfn %" PRI_mfn " already pinned\n", page_to_mfn(page));
-            rc = -EINVAL;
-        }
+            rc = xsm_memory_pin_page(XSM_HOOK, d, pg_owner, page);
+            if ( !rc && unlikely(test_and_set_bit(_PGT_pinned,
+                                                  &page->u.inuse.type_info)) )
+            {
+                gdprintk(XENLOG_WARNING,
+                         "mfn %" PRI_mfn " already pinned\n", page_to_mfn(page));
+                rc = -EINVAL;
+            }
 
-        if ( unlikely(rc) )
-            goto pin_drop;
+            if ( unlikely(rc) )
+                goto pin_drop;
 
-        /* A page is dirtied when its pin status is set. */
-        paging_mark_dirty(pg_owner, _mfn(page_to_mfn(page)));
+            /* A page is dirtied when its pin status is set. */
+            paging_mark_dirty(pg_owner, _mfn(page_to_mfn(page)));
 
-        /* We can race domain destruction (domain_relinquish_resources). */
-        if ( unlikely(pg_owner != d) )
-        {
-            int drop_ref;
-            spin_lock(&pg_owner->page_alloc_lock);
-            drop_ref = (pg_owner->is_dying &&
-                        test_and_clear_bit(_PGT_pinned,
-                                           &page->u.inuse.type_info));
-            spin_unlock(&pg_owner->page_alloc_lock);
-            if ( drop_ref )
+            /* We can race domain destruction (domain_relinquish_resources). */
+            if ( unlikely(pg_owner != d) )
             {
-    pin_drop:
-                if ( type == PGT_l1_page_table )
-                    put_page_and_type(page);
-                else
-                    curr->arch.old_guest_table = page;
+                int drop_ref;
+                spin_lock(&pg_owner->page_alloc_lock);
+                drop_ref = (pg_owner->is_dying &&
+                            test_and_clear_bit(_PGT_pinned,
+                                               &page->u.inuse.type_info));
+                spin_unlock(&pg_owner->page_alloc_lock);
+                if ( drop_ref )
+                {
+        pin_drop:
+                    if ( type == PGT_l1_page_table )
+                        put_page_and_type(page);
+                    else
+                        curr->arch.old_guest_table = page;
+                }
             }
-        }
-        break;
-    }
+            break;
+        }
 
         case MMUEXT_UNPIN_TABLE: {
             struct page_info *page;
@@ -3478,7 +3479,7 @@ long do_mmuext_op(
             else
                 rc = -EPERM;
             break;
-
+
         case MMUEXT_INVLPG_ALL:
             if ( unlikely(d != pg_owner) )
                 rc = -EPERM;
--
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel