Re: [Xen-devel] [PATCH 1/1] x86/hvm: prevent hvm_free_ioreq_gmfn() clobber of arbitrary memory
At 12:32 +0100 on 16 Apr (1429187564), Jan Beulich wrote:
> >>> On 16.04.15 at 12:53, <tim@xxxxxxx> wrote:
> > I would be inclined to use a bigger hammer here.  IMO refactoring like
> > this makes it easier to reason about (compile tested only):
>
> This looks like a pretty nice cleanup; I particularly like the 4 labels
> going away.

OK, here it is as a proper patch.  I still haven't tested it -- indeed
I'm not sure how to test multiple ioreq clients.  Any suggestions?

From 67957b954f2b8d58b635a8e5fdc818154ec9e4ff Mon Sep 17 00:00:00 2001
From: Tim Deegan <tim@xxxxxxx>
Date: Thu, 16 Apr 2015 17:34:24 +0100
Subject: [PATCH] x86/hvm: refactor code that allocates ioreq gfns.

It was confusing GCC's uninitialized-variable detection.

Signed-off-by: Tim Deegan <tim@xxxxxxx>
---
 xen/arch/x86/hvm/hvm.c | 79 ++++++++++++++++++++++++--------------------------
 1 file changed, 38 insertions(+), 41 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 21cf744..db7bac3 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -495,7 +495,8 @@ static void hvm_free_ioreq_gmfn(struct domain *d, unsigned long gmfn)
 {
     unsigned int i = gmfn - d->arch.hvm_domain.ioreq_gmfn.base;
 
-    clear_bit(i, &d->arch.hvm_domain.ioreq_gmfn.mask);
+    if ( gmfn != INVALID_GFN )
+        clear_bit(i, &d->arch.hvm_domain.ioreq_gmfn.mask);
 }
 
 static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, bool_t buf)
@@ -729,63 +730,59 @@ static void hvm_ioreq_server_remove_all_vcpus(struct hvm_ioreq_server *s)
 }
 
 static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
-                                      bool_t is_default, bool_t handle_bufioreq)
+                                      unsigned long ioreq_pfn,
+                                      unsigned long bufioreq_pfn)
+{
+    int rc;
+
+    rc = hvm_map_ioreq_page(s, 0, ioreq_pfn);
+    if ( rc )
+        return rc;
+
+    if ( bufioreq_pfn != INVALID_GFN )
+        rc = hvm_map_ioreq_page(s, 1, bufioreq_pfn);
+
+    if ( rc )
+        hvm_unmap_ioreq_page(s, 0);
+
+    return rc;
+}
+
+static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
+                                        bool_t is_default,
+                                        bool_t handle_bufioreq)
 {
     struct domain *d = s->domain;
-    unsigned long ioreq_pfn;
-    unsigned long bufioreq_pfn = ~0UL; /* gcc uninitialised var warning */
+    unsigned long ioreq_pfn = INVALID_GFN;
+    unsigned long bufioreq_pfn = INVALID_GFN;
     int rc;
 
     if ( is_default )
     {
-        ioreq_pfn = d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN];
-
         /*
          * The default ioreq server must handle buffered ioreqs, for
          * backwards compatibility.
          */
         ASSERT(handle_bufioreq);
-        bufioreq_pfn = d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN];
-    }
-    else
-    {
-        rc = hvm_alloc_ioreq_gmfn(d, &ioreq_pfn);
-        if ( rc )
-            goto fail1;
-
-        if ( handle_bufioreq )
-        {
-            rc = hvm_alloc_ioreq_gmfn(d, &bufioreq_pfn);
-            if ( rc )
-                goto fail2;
-        }
+        return hvm_ioreq_server_map_pages(s,
+                   d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN],
+                   d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN]);
     }
 
-    rc = hvm_map_ioreq_page(s, 0, ioreq_pfn);
-    if ( rc )
-        goto fail3;
+    rc = hvm_alloc_ioreq_gmfn(d, &ioreq_pfn);
 
-    if ( handle_bufioreq )
-    {
-        rc = hvm_map_ioreq_page(s, 1, bufioreq_pfn);
-        if ( rc )
-            goto fail4;
-    }
+    if ( !rc && handle_bufioreq )
+        rc = hvm_alloc_ioreq_gmfn(d, &bufioreq_pfn);
 
-    return 0;
-
- fail4:
-    hvm_unmap_ioreq_page(s, 0);
-
- fail3:
-    if ( !is_default && handle_bufioreq )
-        hvm_free_ioreq_gmfn(d, bufioreq_pfn);
+    if ( !rc )
+        rc = hvm_ioreq_server_map_pages(s, ioreq_pfn, bufioreq_pfn);
 
- fail2:
-    if ( !is_default )
+    if ( rc )
+    {
         hvm_free_ioreq_gmfn(d, ioreq_pfn);
+        hvm_free_ioreq_gmfn(d, bufioreq_pfn);
+    }
 
- fail1:
     return rc;
 }
 
@@ -938,7 +935,7 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s, struct domain *d,
     if ( rc )
         return rc;
 
-    rc = hvm_ioreq_server_map_pages(s, is_default, handle_bufioreq);
+    rc = hvm_ioreq_server_setup_pages(s, is_default, handle_bufioreq);
     if ( rc )
         goto fail_map;
-- 
2.1.4
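
[Editorial note: the shape of the refactor is easy to see in isolation.
Below is a minimal standalone sketch -- not Xen code; alloc_slot(),
free_slot() and INVALID_SLOT are invented stand-ins for
hvm_alloc_ioreq_gmfn(), hvm_free_ioreq_gmfn() and INVALID_GFN -- of the
two ideas the patch combines: initialise every resource slot to a
sentinel value, and make the free routine a no-op for the sentinel, so
one unconditional cleanup path can replace the cascaded
fail1/fail2/fail3/fail4 labels.]

/* Sketch of the sentinel-based cleanup pattern used in the patch
 * above.  All names here are hypothetical, for illustration only. */
#include <stdio.h>
#include <stdlib.h>

#define INVALID_SLOT (~0UL)

static unsigned long next_slot = 100; /* toy allocator state */

static int alloc_slot(unsigned long *out)
{
    *out = next_slot++;
    return 0; /* make this non-zero to simulate an allocation failure */
}

static void free_slot(unsigned long slot)
{
    /* Like the hvm_free_ioreq_gmfn() change: freeing a slot that was
     * never allocated (still the sentinel) is a harmless no-op, so the
     * caller needs no bookkeeping about how far it got. */
    if ( slot == INVALID_SLOT )
        return;
    printf("freeing slot %lu\n", slot);
}

static int setup(int want_buffered)
{
    unsigned long ioreq = INVALID_SLOT;
    unsigned long bufioreq = INVALID_SLOT;
    int rc;

    rc = alloc_slot(&ioreq);

    if ( !rc && want_buffered )
        rc = alloc_slot(&bufioreq);

    /* ... mapping steps would go here, each just setting rc ... */

    if ( rc )
    {
        /* One unconditional cleanup path instead of fail1..fail4:
         * each call frees only what was actually allocated. */
        free_slot(ioreq);
        free_slot(bufioreq);
    }

    return rc;
}

int main(void)
{
    return setup(1) ? EXIT_FAILURE : EXIT_SUCCESS;
}

[Note also how the patch keeps the default server's pfns, which come
from HVM params rather than the gmfn pool, out of the common path via
an early return, so the unified free path can never touch them.]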