[PATCH v3] x86/vmap: handle superpages in vmap_to_mfn()
From: Hongyan Xia <hongyxia@xxxxxxxxxx>

There is simply no guarantee that vmap won't return superpages to the
caller. It can happen if the list of MFNs is contiguous, or we simply
have a large granularity. Although rare, if such things do happen, we
will simply hit BUG_ON() and crash.

Introduce xen_map_to_mfn() to translate any mapped Xen address to mfn
regardless of page size, and wrap vmap_to_mfn() around it.

Signed-off-by: Hongyan Xia <hongyxia@xxxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>

---
Changed in v3:
- switch to do-while.
- move the declaration close to map_pages_to_xen().
- add missing parentheses to vmap_to_mfn().

Changed in v2:
- const pl*e
- introduce xen_map_to_mfn().
- goto to a single exit path.
- ASSERT_UNREACHABLE instead of ASSERT.
---
 xen/arch/x86/mm.c          | 56 ++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/page.h |  2 +-
 xen/include/xen/mm.h       |  1 +
 3 files changed, 58 insertions(+), 1 deletion(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 5a50339284c7..723cc1070f16 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5194,6 +5194,62 @@ l1_pgentry_t *virt_to_xen_l1e(unsigned long v)
     } \
 } while ( false )
 
+/* Translate mapped Xen address to MFN. */
+mfn_t xen_map_to_mfn(unsigned long va)
+{
+#define CHECK_MAPPED(cond) \
+    do { \
+        if ( !(cond) ) \
+        { \
+            ASSERT_UNREACHABLE(); \
+            ret = INVALID_MFN; \
+            goto out; \
+        } \
+    } while ( false )
+
+    bool locking = system_state > SYS_STATE_boot;
+    unsigned int l2_offset = l2_table_offset(va);
+    unsigned int l1_offset = l1_table_offset(va);
+    const l3_pgentry_t *pl3e = virt_to_xen_l3e(va);
+    const l2_pgentry_t *pl2e = NULL;
+    const l1_pgentry_t *pl1e = NULL;
+    struct page_info *l3page;
+    mfn_t ret;
+
+    L3T_INIT(l3page);
+    CHECK_MAPPED(pl3e);
+    l3page = virt_to_page(pl3e);
+    L3T_LOCK(l3page);
+
+    CHECK_MAPPED(l3e_get_flags(*pl3e) & _PAGE_PRESENT);
+    if ( l3e_get_flags(*pl3e) & _PAGE_PSE )
+    {
+        ret = mfn_add(l3e_get_mfn(*pl3e),
+                      (l2_offset << PAGETABLE_ORDER) + l1_offset);
+        goto out;
+    }
+
+    pl2e = map_l2t_from_l3e(*pl3e) + l2_offset;
+    CHECK_MAPPED(l2e_get_flags(*pl2e) & _PAGE_PRESENT);
+    if ( l2e_get_flags(*pl2e) & _PAGE_PSE )
+    {
+        ret = mfn_add(l2e_get_mfn(*pl2e), l1_offset);
+        goto out;
+    }
+
+    pl1e = map_l1t_from_l2e(*pl2e) + l1_offset;
+    CHECK_MAPPED(l1e_get_flags(*pl1e) & _PAGE_PRESENT);
+    ret = l1e_get_mfn(*pl1e);
+
+#undef CHECK_MAPPED
+ out:
+    L3T_UNLOCK(l3page);
+    unmap_domain_page(pl1e);
+    unmap_domain_page(pl2e);
+    unmap_domain_page(pl3e);
+    return ret;
+}
+
 int map_pages_to_xen(
     unsigned long virt,
     mfn_t mfn,
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index 7a771baf7cb3..082c14a66226 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -291,7 +291,7 @@ void copy_page_sse2(void *, const void *);
 #define pfn_to_paddr(pfn)   __pfn_to_paddr(pfn)
 #define paddr_to_pfn(pa)    __paddr_to_pfn(pa)
 #define paddr_to_pdx(pa)    pfn_to_pdx(paddr_to_pfn(pa))
-#define vmap_to_mfn(va)     l1e_get_mfn(*virt_to_xen_l1e((unsigned long)(va)))
+#define vmap_to_mfn(va)     xen_map_to_mfn((unsigned long)(va))
 #define vmap_to_page(va)    mfn_to_page(vmap_to_mfn(va))
 
 #endif /* !defined(__ASSEMBLY__) */
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index f7975b2df00b..1475f352e411 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -175,6 +175,7 @@ bool scrub_free_pages(void);
 } while ( false )
 #define FREE_XENHEAP_PAGE(p) FREE_XENHEAP_PAGES(p, 0)
 
+mfn_t xen_map_to_mfn(unsigned long va);
 /* Map machine page range in Xen virtual address space. */
 int map_pages_to_xen(
     unsigned long virt,
-- 
2.17.1
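
For readers less familiar with the offset arithmetic in the two superpage
cases of xen_map_to_mfn() above, the following standalone sketch, which is
not part of the patch, illustrates the same calculation outside of Xen. All
names, types and constants below are simplified stand-ins chosen only for
illustration (they are not the hypervisor's definitions), and the sketch
assumes conventional x86-64 paging with 512-entry tables and 4KiB base pages.

/*
 * Standalone illustration of the address-to-MFN arithmetic used in
 * xen_map_to_mfn().  All names here are simplified stand-ins, not Xen's.
 * Assumes x86-64 style paging: 512 entries per level, 4KiB base pages.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGETABLE_ORDER 9    /* 512 entries per page table */
#define PAGE_SHIFT      12   /* 4KiB base pages */

/* Index of a virtual address into the L2 (2MiB-granular) table. */
static unsigned int l2_offset(uint64_t va)
{
    return (va >> (PAGE_SHIFT + PAGETABLE_ORDER)) & 0x1ff;
}

/* Index of a virtual address into the L1 (4KiB-granular) table. */
static unsigned int l1_offset(uint64_t va)
{
    return (va >> PAGE_SHIFT) & 0x1ff;
}

/*
 * 1GiB mapping: the L2 and L1 indices together select one of 512*512
 * 4KiB frames, so mfn = base + (l2_offset << PAGETABLE_ORDER) + l1_offset.
 */
static uint64_t mfn_in_1g_superpage(uint64_t base_mfn, uint64_t va)
{
    return base_mfn +
           ((uint64_t)l2_offset(va) << PAGETABLE_ORDER) + l1_offset(va);
}

/* 2MiB mapping: only the L1 index matters, so mfn = base + l1_offset. */
static uint64_t mfn_in_2m_superpage(uint64_t base_mfn, uint64_t va)
{
    return base_mfn + l1_offset(va);
}

int main(void)
{
    uint64_t va = 0x40205000;   /* arbitrary example address */

    printf("1GiB case: mfn = %#llx\n",
           (unsigned long long)mfn_in_1g_superpage(0x100000, va));
    printf("2MiB case: mfn = %#llx\n",
           (unsigned long long)mfn_in_2m_superpage(0x100000, va));
    return 0;
}

The old vmap_to_mfn() definition assumed a present L1 entry always backs the
address, so, as the commit message notes, a vmap area mapped with a 2MiB or
1GiB superpage would hit BUG_ON() and crash; the new walk short-circuits at
whichever level has _PAGE_PSE set and applies the offset arithmetic above.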