|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen master] x86/p2m-pt: simplify p2m_next_level()
commit 9f2e25c0042312071935047a93144914f969575f
Author: Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Mon Sep 4 16:25:59 2017 +0200
Commit: Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon Sep 4 16:25:59 2017 +0200
x86/p2m-pt: simplify p2m_next_level()
Calculate entry PFN and flags just once. Convert the two successive
main if()-s to an if/else-if chain. Restrict variable scope where
reasonable. Take the opportunity and also make the induction variable
unsigned.
This at once fixes excessive permissions granted in the 2M PTEs
resulting from splitting a 1G one - original permissions should be
inherited instead. This is not a security issue only because all of
this takes no effect anyway, as iommu_hap_pt_share is always false on
AMD systems for all supported branches.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: George Dunlap <george.dunlap@xxxxxxxxxx>
---
xen/arch/x86/mm/p2m-pt.c | 100 +++++++++++++++++++++++------------------------
1 file changed, 49 insertions(+), 51 deletions(-)
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 27d8760..c192e4d 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -192,18 +192,18 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
unsigned long *gfn_remainder, unsigned long gfn, u32 shift,
u32 max, unsigned long type, bool_t unmap)
{
- l1_pgentry_t *l1_entry;
- l1_pgentry_t *p2m_entry;
- l1_pgentry_t new_entry;
+ l1_pgentry_t *p2m_entry, new_entry;
void *next;
- int i;
+ unsigned int flags;
if ( !(p2m_entry = p2m_find_entry(*table, gfn_remainder, gfn,
shift, max)) )
return -ENOENT;
+ flags = l1e_get_flags(*p2m_entry);
+
/* PoD/paging: Not present doesn't imply empty. */
- if ( !l1e_get_flags(*p2m_entry) )
+ if ( !flags )
{
struct page_info *pg;
@@ -231,68 +231,66 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
break;
}
}
-
- ASSERT(l1e_get_flags(*p2m_entry) & (_PAGE_PRESENT|_PAGE_PSE));
-
- /* split 1GB pages into 2MB pages */
- if ( type == PGT_l2_page_table && (l1e_get_flags(*p2m_entry) & _PAGE_PSE) )
+ else if ( flags & _PAGE_PSE )
{
- unsigned long flags, pfn;
+ /* Split superpages pages into smaller ones. */
+ unsigned long pfn = l1e_get_pfn(*p2m_entry);
struct page_info *pg;
+ l1_pgentry_t *l1_entry;
+ unsigned int i, level;
- pg = p2m_alloc_ptp(p2m, PGT_l2_page_table);
- if ( pg == NULL )
- return -ENOMEM;
-
- flags = l1e_get_flags(*p2m_entry);
- pfn = l1e_get_pfn(*p2m_entry);
-
- l1_entry = __map_domain_page(pg);
- for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
+ switch ( type )
{
- new_entry = l1e_from_pfn(pfn | (i * L1_PAGETABLE_ENTRIES), flags);
- p2m_add_iommu_flags(&new_entry, 1, IOMMUF_readable|IOMMUF_writable);
- p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, 2);
- }
- unmap_domain_page(l1_entry);
- new_entry = l1e_from_page(pg, P2M_BASE_FLAGS | _PAGE_RW); /* disable PSE */
- p2m_add_iommu_flags(&new_entry, 2, IOMMUF_readable|IOMMUF_writable);
- p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 3);
- }
+ case PGT_l2_page_table:
+ level = 2;
+ break;
+
+ case PGT_l1_page_table:
+ /*
+ * New splintered mappings inherit the flags of the old superpage,
+ * with a little reorganisation for the _PAGE_PSE_PAT bit.
+ */
+ if ( pfn & 1 ) /* ==> _PAGE_PSE_PAT was set */
+ pfn -= 1; /* Clear it; _PAGE_PSE becomes _PAGE_PAT */
+ else
+ flags &= ~_PAGE_PSE; /* Clear _PAGE_PSE (== _PAGE_PAT) */
+ level = 1;
+ break;
- /* split single 2MB large page into 4KB page in P2M table */
- if ( type == PGT_l1_page_table && (l1e_get_flags(*p2m_entry) & _PAGE_PSE) )
- {
- unsigned long flags, pfn;
- struct page_info *pg;
+ default:
+ ASSERT_UNREACHABLE();
+ return -EINVAL;
+ }
- pg = p2m_alloc_ptp(p2m, PGT_l1_page_table);
+ pg = p2m_alloc_ptp(p2m, type);
if ( pg == NULL )
return -ENOMEM;
- /* New splintered mappings inherit the flags of the old superpage,
- * with a little reorganisation for the _PAGE_PSE_PAT bit. */
- flags = l1e_get_flags(*p2m_entry);
- pfn = l1e_get_pfn(*p2m_entry);
- if ( pfn & 1 ) /* ==> _PAGE_PSE_PAT was set */
- pfn -= 1; /* Clear it; _PAGE_PSE becomes _PAGE_PAT */
- else
- flags &= ~_PAGE_PSE; /* Clear _PAGE_PSE (== _PAGE_PAT) */
-
l1_entry = __map_domain_page(pg);
- for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
+
+ /* Inherit original IOMMU permissions, but update Next Level. */
+ if ( iommu_hap_pt_share )
+ {
+ flags &= ~iommu_nlevel_to_flags(~0, 0);
+ flags |= iommu_nlevel_to_flags(level - 1, 0);
+ }
+
+ for ( i = 0; i < (1u << PAGETABLE_ORDER); i++ )
{
- new_entry = l1e_from_pfn(pfn | i, flags);
- p2m_add_iommu_flags(&new_entry, 0, 0);
- p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, 1);
+ new_entry = l1e_from_pfn(pfn | (i << ((level - 1) * PAGETABLE_ORDER)),
+ flags);
+ p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, level);
}
+
unmap_domain_page(l1_entry);
-
+
new_entry = l1e_from_page(pg, P2M_BASE_FLAGS | _PAGE_RW);
- p2m_add_iommu_flags(&new_entry, 1, IOMMUF_readable|IOMMUF_writable);
- p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 2);
+ p2m_add_iommu_flags(&new_entry, level, IOMMUF_readable|IOMMUF_writable);
+ p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, level + 1);
}
+ else
+ ASSERT(flags & _PAGE_PRESENT);
next = map_domain_page(l1e_get_mfn(*p2m_entry));
if ( unmap )
--
generated by git-patchbot for /home/xen/git/xen.git#master
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog
|
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.