[PATCH] x86/vmap: handle superpages in vmap_to_mfn()
From: Hongyan Xia <hongyxia@xxxxxxxxxx>
There is simply no guarantee that vmap won't return superpages to the
caller. It can happen if the list of MFNs is contiguous, or if we
simply have a large granularity. Although rare, if such things do
happen, we will simply hit the BUG_ON() and crash.
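As a minimal illustration (not part of the patch; base_mfn and the
2MiB worth of contiguous MFNs are assumptions for the example, while
vmap() and mfn_add() are the existing interfaces), such a mapping can
come about like this:

    mfn_t mfns[512];    /* one 2MiB superpage worth of 4K MFNs */
    unsigned int i;
    void *va;

    /* Hand vmap() a physically contiguous, suitably aligned range. */
    for ( i = 0; i < 512; i++ )
        mfns[i] = mfn_add(base_mfn, i);

    /* The underlying mapping may then be installed as a superpage, */
    va = vmap(mfns, 512);

    /* in which case the old vmap_to_mfn(va) has no L1 entry to read. */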
Introduce xen_map_to_mfn() to translate any mapped Xen address to an
MFN regardless of page size, and wrap vmap_to_mfn() around it.
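As a rough usage sketch (unchanged from the caller's point of view;
the INVALID_MFN check only matters for the unexpected case of an
unmapped address, which debug builds additionally flag via
ASSERT_UNREACHABLE()):

    mfn_t mfn = vmap_to_mfn(va);   /* now valid for 4K, 2M and 1G mappings */

    if ( mfn_eq(mfn, INVALID_MFN) )
        { /* va is not actually mapped */ }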
Signed-off-by: Hongyan Xia <hongyxia@xxxxxxxxxx>
---
Changed in v2:
- const pl*e
- introduce xen_map_to_mfn().
- goto to a single exit path.
- ASSERT_UNREACHABLE instead of ASSERT.
---
xen/arch/x86/mm.c | 54 ++++++++++++++++++++++++++++++++++++++
xen/include/asm-x86/mm.h | 1 +
xen/include/asm-x86/page.h | 2 +-
3 files changed, 56 insertions(+), 1 deletion(-)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 5a50339284c7..53cc5c6de2e6 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5194,6 +5194,60 @@ l1_pgentry_t *virt_to_xen_l1e(unsigned long v)
} \
} while ( false )
+/* Translate mapped Xen address to MFN. */
+mfn_t xen_map_to_mfn(unsigned long va)
+{
+#define CHECK_MAPPED(cond_)      \
+    if ( !(cond_) )              \
+    {                            \
+        ASSERT_UNREACHABLE();    \
+        ret = INVALID_MFN;       \
+        goto out;                \
+    }                            \
+
+    bool locking = system_state > SYS_STATE_boot;
+    unsigned int l2_offset = l2_table_offset(va);
+    unsigned int l1_offset = l1_table_offset(va);
+    const l3_pgentry_t *pl3e = virt_to_xen_l3e(va);
+    const l2_pgentry_t *pl2e = NULL;
+    const l1_pgentry_t *pl1e = NULL;
+    struct page_info *l3page;
+    mfn_t ret;
+
+    L3T_INIT(l3page);
+    CHECK_MAPPED(pl3e)
+    l3page = virt_to_page(pl3e);
+    L3T_LOCK(l3page);
+
+    CHECK_MAPPED(l3e_get_flags(*pl3e) & _PAGE_PRESENT)
+    if ( l3e_get_flags(*pl3e) & _PAGE_PSE )
+    {
+        ret = mfn_add(l3e_get_mfn(*pl3e),
+                      (l2_offset << PAGETABLE_ORDER) + l1_offset);
+        goto out;
+    }
+
+    pl2e = map_l2t_from_l3e(*pl3e) + l2_offset;
+    CHECK_MAPPED(l2e_get_flags(*pl2e) & _PAGE_PRESENT)
+    if ( l2e_get_flags(*pl2e) & _PAGE_PSE )
+    {
+        ret = mfn_add(l2e_get_mfn(*pl2e), l1_offset);
+        goto out;
+    }
+
+    pl1e = map_l1t_from_l2e(*pl2e) + l1_offset;
+    CHECK_MAPPED(l1e_get_flags(*pl1e) & _PAGE_PRESENT)
+    ret = l1e_get_mfn(*pl1e);
+
+#undef CHECK_MAPPED
+ out:
+    L3T_UNLOCK(l3page);
+    unmap_domain_page(pl1e);
+    unmap_domain_page(pl2e);
+    unmap_domain_page(pl3e);
+    return ret;
+}
+
int map_pages_to_xen(
unsigned long virt,
mfn_t mfn,
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index deeba75a1cbb..2fc8eeaf7aad 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -578,6 +578,7 @@ mfn_t alloc_xen_pagetable_new(void);
void free_xen_pagetable_new(mfn_t mfn);
l1_pgentry_t *virt_to_xen_l1e(unsigned long v);
+mfn_t xen_map_to_mfn(unsigned long va);
int __sync_local_execstate(void);
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index 7a771baf7cb3..886adf17a40c 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -291,7 +291,7 @@ void copy_page_sse2(void *, const void *);
#define pfn_to_paddr(pfn) __pfn_to_paddr(pfn)
#define paddr_to_pfn(pa) __paddr_to_pfn(pa)
#define paddr_to_pdx(pa) pfn_to_pdx(paddr_to_pfn(pa))
-#define vmap_to_mfn(va) l1e_get_mfn(*virt_to_xen_l1e((unsigned long)(va)))
+#define vmap_to_mfn(va) xen_map_to_mfn((unsigned long)(va))
#define vmap_to_page(va) mfn_to_page(vmap_to_mfn(va))
#endif /* !defined(__ASSEMBLY__) */
--
2.17.1