[xen staging] x86/vmap: handle superpages in vmap_to_mfn()
commit 3ec53aa79905edc3891b8267123d88a221553370
Author: Hongyan Xia <hongyxia@xxxxxxxxxx>
AuthorDate: Mon Dec 7 14:54:44 2020 +0100
Commit: Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon Dec 7 14:54:44 2020 +0100
x86/vmap: handle superpages in vmap_to_mfn()
There is simply no guarantee that vmap won't return superpages to the
caller. It can happen if the list of MFNs is contiguous, or if we simply
map with a large granularity. Although rare, when this does happen,
vmap_to_mfn() hits its BUG_ON() and crashes.
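For illustration, a standalone sketch (not Xen code; the helper name and
constants are assumptions) of the simplified condition under which a 2MiB
superpage can back part of a vmap range:

/*
 * Standalone sketch, not Xen code: a physically contiguous run of MFNs
 * can be backed by a single 2MiB superpage when there are 512 4KiB
 * pages and the first MFN is 512-aligned (the virtual address must be
 * similarly aligned, which is omitted here).
 */
#include <stdbool.h>
#include <stdint.h>

static bool can_use_2m_superpage(const uint64_t *mfns, unsigned int nr)
{
    if ( nr < 512 || (mfns[0] & 0x1ff) )
        return false;

    for ( unsigned int i = 1; i < 512; i++ )
        if ( mfns[i] != mfns[0] + i )
            return false;

    return true;
}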
Introduce xen_map_to_mfn() to translate any mapped Xen address to an MFN
regardless of the underlying page size, and wrap vmap_to_mfn() around it.
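As a worked example of the superpage offset arithmetic the new function
performs (standalone C, with the x86 values of PAGE_SHIFT and
PAGETABLE_ORDER hard-coded; the MFNs are hypothetical):

/*
 * Standalone worked example, not Xen code: the MFN-offset arithmetic
 * xen_map_to_mfn() applies when it finds a superpage entry.  x86
 * constants: 4KiB pages (PAGE_SHIFT 12), 512-entry tables
 * (PAGETABLE_ORDER 9).
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGETABLE_ORDER 9
#define PT_MASK         ((1u << PAGETABLE_ORDER) - 1)

int main(void)
{
    uint64_t va = 0x205000;  /* example address inside a superpage mapping */
    unsigned int l1_off = (va >> PAGE_SHIFT) & PT_MASK;                      /* 5 */
    unsigned int l2_off = (va >> (PAGE_SHIFT + PAGETABLE_ORDER)) & PT_MASK;  /* 1 */
    uint64_t mfn_2m = 0x80000;   /* hypothetical MFN from a 2MiB L2 entry */
    uint64_t mfn_1g = 0x100000;  /* hypothetical MFN from a 1GiB L3 entry */

    /* 2MiB superpage: add the index of the 4KiB page within the 2MiB region. */
    printf("2M: %" PRIx64 "\n", mfn_2m + l1_off);                    /* 80005 */

    /* 1GiB superpage: add the index of the 4KiB page within the 1GiB region. */
    printf("1G: %" PRIx64 "\n",
           mfn_1g + ((uint64_t)l2_off << PAGETABLE_ORDER) + l1_off); /* 100205 */

    return 0;
}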
Signed-off-by: Hongyan Xia <hongyxia@xxxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
xen/arch/x86/mm.c | 56 ++++++++++++++++++++++++++++++++++++++++++++++
xen/include/asm-x86/page.h | 2 +-
xen/include/xen/mm.h | 3 +++
3 files changed, 60 insertions(+), 1 deletion(-)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 5a50339284..723cc1070f 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5194,6 +5194,62 @@ l1_pgentry_t *virt_to_xen_l1e(unsigned long v)
} \
} while ( false )
+/* Translate mapped Xen address to MFN. */
+mfn_t xen_map_to_mfn(unsigned long va)
+{
+#define CHECK_MAPPED(cond) \
+ do { \
+ if ( !(cond) ) \
+ { \
+ ASSERT_UNREACHABLE(); \
+ ret = INVALID_MFN; \
+ goto out; \
+ } \
+ } while ( false )
+
+ bool locking = system_state > SYS_STATE_boot;
+ unsigned int l2_offset = l2_table_offset(va);
+ unsigned int l1_offset = l1_table_offset(va);
+ const l3_pgentry_t *pl3e = virt_to_xen_l3e(va);
+ const l2_pgentry_t *pl2e = NULL;
+ const l1_pgentry_t *pl1e = NULL;
+ struct page_info *l3page;
+ mfn_t ret;
+
+ L3T_INIT(l3page);
+ CHECK_MAPPED(pl3e);
+ l3page = virt_to_page(pl3e);
+ L3T_LOCK(l3page);
+
+ CHECK_MAPPED(l3e_get_flags(*pl3e) & _PAGE_PRESENT);
+ if ( l3e_get_flags(*pl3e) & _PAGE_PSE )
+ {
+ ret = mfn_add(l3e_get_mfn(*pl3e),
+ (l2_offset << PAGETABLE_ORDER) + l1_offset);
+ goto out;
+ }
+
+ pl2e = map_l2t_from_l3e(*pl3e) + l2_offset;
+ CHECK_MAPPED(l2e_get_flags(*pl2e) & _PAGE_PRESENT);
+ if ( l2e_get_flags(*pl2e) & _PAGE_PSE )
+ {
+ ret = mfn_add(l2e_get_mfn(*pl2e), l1_offset);
+ goto out;
+ }
+
+ pl1e = map_l1t_from_l2e(*pl2e) + l1_offset;
+ CHECK_MAPPED(l1e_get_flags(*pl1e) & _PAGE_PRESENT);
+ ret = l1e_get_mfn(*pl1e);
+
+#undef CHECK_MAPPED
+ out:
+ L3T_UNLOCK(l3page);
+ unmap_domain_page(pl1e);
+ unmap_domain_page(pl2e);
+ unmap_domain_page(pl3e);
+ return ret;
+}
+
int map_pages_to_xen(
unsigned long virt,
mfn_t mfn,
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index 7a771baf7c..082c14a662 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -291,7 +291,7 @@ void copy_page_sse2(void *, const void *);
#define pfn_to_paddr(pfn) __pfn_to_paddr(pfn)
#define paddr_to_pfn(pa) __paddr_to_pfn(pa)
#define paddr_to_pdx(pa) pfn_to_pdx(paddr_to_pfn(pa))
-#define vmap_to_mfn(va) l1e_get_mfn(*virt_to_xen_l1e((unsigned long)(va)))
+#define vmap_to_mfn(va) xen_map_to_mfn((unsigned long)(va))
#define vmap_to_page(va) mfn_to_page(vmap_to_mfn(va))
#endif /* !defined(__ASSEMBLY__) */
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index f7975b2df0..85a8df9948 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -184,6 +184,9 @@ int map_pages_to_xen(
/* Alter the permissions of a range of Xen virtual address space. */
int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int flags);
int destroy_xen_mappings(unsigned long v, unsigned long e);
+/* Retrieve the MFN mapped by VA in Xen virtual address space. */
+mfn_t xen_map_to_mfn(unsigned long va);
+
/*
* Create only non-leaf page table entries for the
* page range in Xen virtual address space.
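A hedged usage sketch (a hypothetical caller, not part of the patch,
assuming Xen's vmap()/vunmap() from xen/vmap.h): if the mapped MFNs
happen to be physically contiguous and suitably aligned, the backing
mapping may be a superpage, and vmap_to_mfn() must still resolve any
address in the range.

/*
 * Hypothetical caller, for illustration only.  vmap_to_mfn()
 * previously hit a BUG_ON() when a superpage backed the mapping; it
 * now walks down only as far as the present (super)page entry.
 */
static mfn_t second_page_mfn(const mfn_t *mfns, unsigned int nr)
{
    char *va = vmap(mfns, nr);
    mfn_t ret = INVALID_MFN;

    if ( va )
    {
        if ( nr > 1 )
            ret = vmap_to_mfn(va + PAGE_SIZE); /* fine on superpage backing */
        vunmap(va);
    }

    return ret;
}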
--
generated by git-patchbot for /home/xen/git/xen.git#staging