Re: [Xen-devel] [PATCH 2/3] XSA-60 security hole: remove the problematic vmx_set_uc_mode logic
On 17/10/13 10:58, Jan Beulich wrote:
>>>> On 16.10.13 at 20:36, "Liu, Jinsong" <jinsong.liu@xxxxxxxxx> wrote:
>> From 2a0dc13d14d63af67d12f181655dcc04783da83a Mon Sep 17 00:00:00 2001
>> From: Liu Jinsong <jinsong.liu@xxxxxxxxx>
>> Date: Thu, 17 Oct 2013 04:45:11 +0800
>> Subject: [PATCH 2/3] XSA-60 security hole: remove the problematic
>> vmx_set_uc_mode logic
>>
>> The XSA-60 security hole stems from the problematic vmx_set_uc_mode.
>> This patch removes the vmx_set_uc_mode logic, which will be replaced
>> by a PAT-based approach in a later patch.
>>
>> Signed-off-by: Liu Jinsong <jinsong.liu@xxxxxxxxx>
> Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
>
>> ---
>> xen/arch/x86/hvm/hvm.c | 2 -
>> xen/arch/x86/hvm/mtrr.c | 3 -
>> xen/arch/x86/hvm/vmx/vmx.c | 9 ---
>> xen/arch/x86/mm/p2m-ept.c | 120 -----------------------------------------
>> xen/include/asm-x86/hvm/hvm.h | 1 -
>> 5 files changed, 0 insertions(+), 135 deletions(-)
>>
>> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
>> index de81e45..688a943 100644
>> --- a/xen/arch/x86/hvm/hvm.c
>> +++ b/xen/arch/x86/hvm/hvm.c
>> @@ -1619,8 +1619,6 @@ static void hvm_set_uc_mode(struct vcpu *v, bool_t is_in_uc_mode)
>> {
>> v->domain->arch.hvm_domain.is_in_uc_mode = is_in_uc_mode;
>> shadow_blow_tables_per_domain(v->domain);
>> - if ( hvm_funcs.set_uc_mode )
>> - return hvm_funcs.set_uc_mode(v);
>> }
>>
>> int hvm_mov_to_cr(unsigned int cr, unsigned int gpr)
>> diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
>> index ef51a8d..4ff1e55 100644
>> --- a/xen/arch/x86/hvm/mtrr.c
>> +++ b/xen/arch/x86/hvm/mtrr.c
>> @@ -696,9 +696,6 @@ uint8_t epte_get_entry_emt(struct domain *d, unsigned long gfn, mfn_t mfn,
>> if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_IDENT_PT] )
>> return MTRR_TYPE_WRBACK;
>>
>> - if ( (v == current) && v->domain->arch.hvm_domain.is_in_uc_mode )
>> - return MTRR_TYPE_UNCACHABLE;
>> -
>> if ( !mfn_valid(mfn_x(mfn)) )
>> return MTRR_TYPE_UNCACHABLE;
>>
>> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
>> index b59bf59..6dedb29 100644
>> --- a/xen/arch/x86/hvm/vmx/vmx.c
>> +++ b/xen/arch/x86/hvm/vmx/vmx.c
>> @@ -1392,14 +1392,6 @@ static int vmx_event_pending(struct vcpu *v)
>> return intr_info & INTR_INFO_VALID_MASK;
>> }
>>
>> -static void vmx_set_uc_mode(struct vcpu *v)
>> -{
>> - if ( paging_mode_hap(v->domain) )
>> - ept_change_entry_emt_with_range(
>> - v->domain, 0, p2m_get_hostp2m(v->domain)->max_mapped_pfn);
>> - hvm_asid_flush_vcpu(v);
>> -}
>> -
>> static void vmx_set_info_guest(struct vcpu *v)
>> {
>> unsigned long intr_shadow;
>> @@ -1558,7 +1550,6 @@ static struct hvm_function_table __initdata vmx_function_table = {
>> .msr_read_intercept = vmx_msr_read_intercept,
>> .msr_write_intercept = vmx_msr_write_intercept,
>> .invlpg_intercept = vmx_invlpg_intercept,
>> - .set_uc_mode = vmx_set_uc_mode,
>> .set_info_guest = vmx_set_info_guest,
>> .set_rdtsc_exiting = vmx_set_rdtsc_exiting,
>> .nhvm_vcpu_initialise = nvmx_vcpu_initialise,
>> diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
>> index 595c6e7..92d9e2d 100644
>> --- a/xen/arch/x86/mm/p2m-ept.c
>> +++ b/xen/arch/x86/mm/p2m-ept.c
>> @@ -587,44 +587,6 @@ out:
>> return mfn;
>> }
>>
>> -/* WARNING: Only caller doesn't care about PoD pages. So this function will
>> - * always return 0 for PoD pages, not populate them. If that becomes necessary,
>> - * pass a p2m_query_t type along to distinguish. */
>> -static ept_entry_t ept_get_entry_content(struct p2m_domain *p2m,
>> - unsigned long gfn, int *level)
>> -{
>> - ept_entry_t *table = map_domain_page(pagetable_get_pfn(p2m_get_pagetable(p2m)));
>> - unsigned long gfn_remainder = gfn;
>> - ept_entry_t *ept_entry;
>> - ept_entry_t content = { .epte = 0 };
>> - u32 index;
>> - int i;
>> - int ret=0;
>> - struct ept_data *ept = &p2m->ept;
>> -
>> - /* This pfn is higher than the highest the p2m map currently holds */
>> - if ( gfn > p2m->max_mapped_pfn )
>> - goto out;
>> -
>> - for ( i = ept_get_wl(ept); i > 0; i-- )
>> - {
>> - ret = ept_next_level(p2m, 1, &table, &gfn_remainder, i);
>> - if ( !ret || ret == GUEST_TABLE_POD_PAGE )
>> - goto out;
>> - else if ( ret == GUEST_TABLE_SUPER_PAGE )
>> - break;
>> - }
>> -
>> - index = gfn_remainder >> (i * EPT_TABLE_ORDER);
>> - ept_entry = table + index;
>> - content = *ept_entry;
>> - *level = i;
>> -
>> - out:
>> - unmap_domain_page(table);
>> - return content;
>> -}
>> -
>> void ept_walk_table(struct domain *d, unsigned long gfn)
>> {
>> struct p2m_domain *p2m = p2m_get_hostp2m(d);
>> @@ -677,88 +639,6 @@ out:
>> }
>>
>> /*
>> - * To test if the new emt type is the same with old,
>> - * return 1 to not to reset ept entry.
>> - */
>> -static int need_modify_ept_entry(struct p2m_domain *p2m, unsigned long gfn,
>> - mfn_t mfn, uint8_t o_ipat, uint8_t o_emt,
>> - p2m_type_t p2mt)
>> -{
>> - uint8_t ipat;
>> - uint8_t emt;
>> - bool_t direct_mmio = (p2mt == p2m_mmio_direct);
>> -
>> - emt = epte_get_entry_emt(p2m->domain, gfn, mfn, &ipat, direct_mmio);
>> -
>> - if ( (emt == o_emt) && (ipat == o_ipat) )
>> - return 0;
>> -
>> - return 1;
>> -}
>> -
>> -void ept_change_entry_emt_with_range(struct domain *d,
>> - unsigned long start_gfn,
>> - unsigned long end_gfn)
>> -{
>> - unsigned long gfn;
>> - ept_entry_t e;
>> - mfn_t mfn;
>> - int order = 0;
>> - struct p2m_domain *p2m = p2m_get_hostp2m(d);
>> - int rc;
>> -
>> - p2m_lock(p2m);
>> - for ( gfn = start_gfn; gfn <= end_gfn; gfn++ )
>> - {
>> - int level = 0;
>> - uint64_t trunk = 0;
>> -
>> - e = ept_get_entry_content(p2m, gfn, &level);
>> - if ( !is_epte_present(&e) || !p2m_has_emt(e.sa_p2mt) )
>> - continue;
>> -
>> - order = 0;
>> - mfn = _mfn(e.mfn);
>> -
>> - if ( is_epte_superpage(&e) )
>> - {
>> - while ( level )
>> - {
>> - trunk = (1UL << (level * EPT_TABLE_ORDER)) - 1;
>> - if ( !(gfn & trunk) && (gfn + trunk <= end_gfn) )
>> - {
>> - /* gfn assigned with 2M or 1G, and the end covers more than
>> - * the super page areas.
>> - * Set emt for super page.
>> - */
>> - order = level * EPT_TABLE_ORDER;
>> - if ( need_modify_ept_entry(p2m, gfn, mfn,
>> - e.ipat, e.emt, e.sa_p2mt) )
>> - {
>> - rc = ept_set_entry(p2m, gfn, mfn, order,
>> - e.sa_p2mt, e.access);
>> - ASSERT(rc);
>> - }
>> - gfn += trunk;
>> - break;
>> - }
>> - level--;
>> - }
>> - }
>> - else /* gfn assigned with 4k */
>> - {
>> - if ( need_modify_ept_entry(p2m, gfn, mfn,
>> - e.ipat, e.emt, e.sa_p2mt) )
>> - {
>> - rc = ept_set_entry(p2m, gfn, mfn, order, e.sa_p2mt, e.access);
>> - ASSERT(rc);
>> - }
>> - }
>> - }
>> - p2m_unlock(p2m);
>> -}
>> -
>> -/*
>> * Walk the whole p2m table, changing any entries of the old type
>> * to the new type. This is used in hardware-assisted paging to
>> * quickly enable or disable log-dirty tracking
>> diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
>> index 3376418..8dd2b40 100644
>> --- a/xen/include/asm-x86/hvm/hvm.h
>> +++ b/xen/include/asm-x86/hvm/hvm.h
>> @@ -156,7 +156,6 @@ struct hvm_function_table {
>> int (*msr_read_intercept)(unsigned int msr, uint64_t *msr_content);
>> int (*msr_write_intercept)(unsigned int msr, uint64_t msr_content);
>> void (*invlpg_intercept)(unsigned long vaddr);
>> - void (*set_uc_mode)(struct vcpu *v);
>> void (*set_info_guest)(struct vcpu *v);
>> void (*set_rdtsc_exiting)(struct vcpu *v, bool_t);
>>
>> --
>> 1.7.1
>
>
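For background on the direction the commit message mentions ("replaced by a PAT-based approach in a later patch"): the rough idea is that, instead of walking and rewriting EPT memory types whenever a guest toggles CR0.CD, the hypervisor can load an all-UC guest PAT while caching is disabled and restore the guest's own PAT once CD is cleared. The toy C sketch below only illustrates that idea; it is not the actual follow-up patch, and all names in it (toy_vcpu, toy_handle_cd, PAT_ALL_UC) are hypothetical.

/*
 * Toy sketch only -- NOT the real follow-up Xen patch.  It models the
 * PAT-based idea: when a guest sets CR0.CD, force an all-uncacheable
 * (UC) guest PAT rather than rewriting EPT memory types, and restore
 * the guest's own PAT when CR0.CD is cleared again.
 */
#include <stdint.h>
#include <stdio.h>

#define X86_CR0_CD   (1u << 30)               /* CR0 cache-disable bit */
/* All eight PAT entries set to type 0 (UC). */
#define PAT_ALL_UC   0x0000000000000000ULL

struct toy_vcpu {
    uint32_t cr0;
    uint64_t guest_pat;      /* PAT value the guest believes it has   */
    uint64_t effective_pat;  /* PAT value actually loaded on VM entry */
};

/* Hypothetical CR0.CD handler modelled on the PAT approach. */
static void toy_handle_cd(struct toy_vcpu *v, uint32_t new_cr0)
{
    if ( new_cr0 & X86_CR0_CD )
        /* Cache disabled: make all guest memory accesses uncacheable. */
        v->effective_pat = PAT_ALL_UC;
    else
        /* Cache re-enabled: go back to the guest's own PAT. */
        v->effective_pat = v->guest_pat;

    v->cr0 = new_cr0;
}

int main(void)
{
    /* 0x0007040600070406 is the architectural PAT reset value. */
    struct toy_vcpu v = { .cr0 = 0, .guest_pat = 0x0007040600070406ULL };

    v.effective_pat = v.guest_pat;

    toy_handle_cd(&v, v.cr0 | X86_CR0_CD);
    printf("CD set:     effective PAT = 0x%016llx\n",
           (unsigned long long)v.effective_pat);

    toy_handle_cd(&v, v.cr0 & ~X86_CR0_CD);
    printf("CD cleared: effective PAT = 0x%016llx\n",
           (unsigned long long)v.effective_pat);
    return 0;
}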
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel