[Xen-changelog] [xen-unstable] hvm: Limit the size of large HVM op batches
# HG changeset patch
# User Tim Deegan <tim@xxxxxxx>
# Date 1354646285 0
# Node ID 96fed5bcd0971c74e8751489278011407f9f91f3
# Parent  90a697f3e78c7e44129dde455df6fc457275b7e8
hvm: Limit the size of large HVM op batches

Doing large p2m updates for HVMOP_track_dirty_vram without preemption
ties up the physical processor.  Integrating preemption into the p2m
updates is hard, so simply limit the batch to 1GB, which is sufficient
for a 15000 * 15000 * 32bpp framebuffer.

For HVMOP_modified_memory and HVMOP_set_mem_type, add the necessary
machinery to make them preemptible.

This is CVE-2012-5511 / XSA-27.

Signed-off-by: Tim Deegan <tim@xxxxxxx>
Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Acked-by: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Committed-by: Ian Jackson <ian.jackson.citrix.com>
---

diff -r 90a697f3e78c -r 96fed5bcd097 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c	Tue Dec 04 18:38:00 2012 +0000
+++ b/xen/arch/x86/hvm/hvm.c	Tue Dec 04 18:38:05 2012 +0000
@@ -3984,6 +3984,9 @@ long do_hvm_op(unsigned long op, XEN_GUE
         if ( !is_hvm_domain(d) )
             goto param_fail2;
 
+        if ( a.nr > GB(1) >> PAGE_SHIFT )
+            goto param_fail2;
+
         rc = xsm_hvm_param(d, op);
         if ( rc )
             goto param_fail2;
@@ -4010,7 +4013,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
     {
         struct xen_hvm_modified_memory a;
         struct domain *d;
-        unsigned long pfn;
 
         if ( copy_from_guest(&a, arg, 1) )
             return -EFAULT;
@@ -4037,9 +4039,11 @@ long do_hvm_op(unsigned long op, XEN_GUE
         if ( !paging_mode_log_dirty(d) )
             goto param_fail3;
 
-        for ( pfn = a.first_pfn; pfn < a.first_pfn + a.nr; pfn++ )
+        while ( a.nr > 0 )
         {
+            unsigned long pfn = a.first_pfn;
             struct page_info *page;
+
             page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);
             if ( page )
             {
@@ -4049,6 +4053,19 @@ long do_hvm_op(unsigned long op, XEN_GUE
                 sh_remove_shadows(d->vcpu[0], _mfn(page_to_mfn(page)), 1, 0);
                 put_page(page);
             }
+
+            a.first_pfn++;
+            a.nr--;
+
+            /* Check for continuation if it's not the last iteration */
+            if ( a.nr > 0 && hypercall_preempt_check() )
+            {
+                if ( copy_to_guest(arg, &a, 1) )
+                    rc = -EFAULT;
+                else
+                    rc = -EAGAIN;
+                break;
+            }
         }
 
     param_fail3:
@@ -4104,7 +4121,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
     {
         struct xen_hvm_set_mem_type a;
         struct domain *d;
-        unsigned long pfn;
 
         /* Interface types to internal p2m types */
         p2m_type_t memtype[] = {
@@ -4137,8 +4153,9 @@ long do_hvm_op(unsigned long op, XEN_GUE
         if ( a.hvmmem_type >= ARRAY_SIZE(memtype) )
            goto param_fail4;
 
-        for ( pfn = a.first_pfn; pfn < a.first_pfn + a.nr; pfn++ )
+        while ( a.nr )
         {
+            unsigned long pfn = a.first_pfn;
             p2m_type_t t;
             p2m_type_t nt;
             mfn_t mfn;
@@ -4178,6 +4195,19 @@ long do_hvm_op(unsigned long op, XEN_GUE
                 }
             }
             put_gfn(d, pfn);
+
+            a.first_pfn++;
+            a.nr--;
+
+            /* Check for continuation if it's not the last iteration */
+            if ( a.nr > 0 && hypercall_preempt_check() )
+            {
+                if ( copy_to_guest(arg, &a, 1) )
+                    rc = -EFAULT;
+                else
+                    rc = -EAGAIN;
+                goto param_fail4;
+            }
         }
 
         rc = 0;
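A quick check of the 1GB bound quoted in the commit message: with the usual
4kB x86 pages (PAGE_SHIFT == 12), GB(1) >> PAGE_SHIFT allows 262,144 pages
per batch, while the largest framebuffer the message cites needs

    15000 * 15000 * 4 bytes = 900,000,000 bytes
    900,000,000 / 4096      = ~219,727 pages   (< 262,144)

so a legitimate HVMOP_track_dirty_vram request for that framebuffer still
fits under the new limit.

The preemption machinery added for the other two ops follows one pattern:
do one page of work, record progress in the argument block, and if another
vCPU is waiting, copy the updated arguments back to the guest and return
-EAGAIN so the caller re-issues the hypercall from where it stopped. The
sketch below isolates that pattern from the surrounding hvm.c code; note
that batch_args, batch_args_t, process_one_pfn() and do_batch_op() are
invented names for illustration only, while hypercall_preempt_check(),
copy_to_guest()/copy_from_guest() and the -EAGAIN convention are the real
pieces used in the patch above.

    /*
     * Minimal sketch of the preemptible-batch pattern from the patch.
     * batch_args, process_one_pfn() and do_batch_op() are illustrative
     * stand-ins, not real Xen interfaces.
     */
    struct batch_args {
        uint64_t first_pfn;   /* next pfn to process */
        uint64_t nr;          /* pfns still to go    */
    };

    static long do_batch_op(XEN_GUEST_HANDLE(batch_args_t) arg)
    {
        struct batch_args a;
        long rc = 0;

        if ( copy_from_guest(&a, arg, 1) )
            return -EFAULT;

        while ( a.nr > 0 )
        {
            process_one_pfn(a.first_pfn);  /* the real p2m work, one page */

            /* Record progress so the operation can be restarted. */
            a.first_pfn++;
            a.nr--;

            /* If someone is waiting for this CPU, checkpoint the updated
             * arguments back into guest memory and ask the caller to
             * re-issue the hypercall; it resumes where we stopped. */
            if ( a.nr > 0 && hypercall_preempt_check() )
            {
                if ( copy_to_guest(arg, &a, 1) )
                    rc = -EFAULT;
                else
                    rc = -EAGAIN;
                break;
            }
        }

        return rc;
    }

Checking hypercall_preempt_check() only while a.nr > 0 avoids asking the
guest to retry a batch that has in fact just completed.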
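One design point worth noting: because progress lives entirely in the
guest-visible argument block (first_pfn advances as nr shrinks), the
hypervisor keeps no per-call state across the -EAGAIN return, which is
what makes this continuation style safe to interrupt at any iteration.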