[Xen-devel] [PATCH RFC 21/55] x86_64/mm: introduce pl2e in paging_init
Introduce pl2e as the iterator over the MPT L2 entries, so that l2_ro_mpt
can be left pointing to the page table itself.
No functional change.
Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
xen/arch/x86/x86_64/mm.c | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)
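
As an aside, the idea is the same as in the following minimal sketch (plain
C, not Xen code; pte_t, ENTRIES_PER_TABLE and fill_table_sketch are
illustrative names, not Xen's): the allocation result stays in a base
pointer so it can still be installed into the upper-level entry, while a
separate cursor advances over the entries.

#include <stdint.h>
#include <stdlib.h>

#define ENTRIES_PER_TABLE 512          /* entries in one page table page */

typedef uint64_t pte_t;                /* simplified stand-in for l2_pgentry_t */

static int fill_table_sketch(pte_t *upper_entry)
{
    /* Stand-in for alloc_xen_pagetable(): one zeroed table. */
    pte_t *table = calloc(ENTRIES_PER_TABLE, sizeof(pte_t));
    pte_t *cursor = table;             /* plays the role of pl2e */
    unsigned int i;

    if ( table == NULL )
        return -1;                     /* "goto nomem" equivalent */

    /*
     * The base pointer is left untouched, so it can still be used to
     * point at the table itself when wiring it into the level above.
     */
    *upper_entry = (pte_t)(uintptr_t)table | 1 /* present bit, illustrative */;

    /* Only the cursor walks the entries. */
    for ( i = 0; i < ENTRIES_PER_TABLE; i++, cursor++ )
        *cursor = 0;

    return 0;
}

int main(void)
{
    pte_t upper_slot = 0;              /* illustrative upper-level entry */
    return fill_table_sketch(&upper_slot);
}
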
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index d8f558bc3a..83d62674c0 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -497,7 +497,7 @@ void __init paging_init(void)
unsigned long i, mpt_size, va;
unsigned int n, memflags;
l3_pgentry_t *l3_ro_mpt;
- l2_pgentry_t *l2_ro_mpt = NULL;
+ l2_pgentry_t *pl2e = NULL, *l2_ro_mpt;
struct page_info *l1_pg;
/*
@@ -547,7 +547,7 @@ void __init paging_init(void)
(L2_PAGETABLE_SHIFT - 3 + PAGE_SHIFT)));
if ( cpu_has_page1gb &&
- !((unsigned long)l2_ro_mpt & ~PAGE_MASK) &&
+ !((unsigned long)pl2e & ~PAGE_MASK) &&
(mpt_size >> L3_PAGETABLE_SHIFT) > (i >> PAGETABLE_ORDER) )
{
unsigned int k, holes;
@@ -606,7 +606,7 @@ void __init paging_init(void)
memset((void *)(RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT)),
0xFF, 1UL << L2_PAGETABLE_SHIFT);
}
- if ( !((unsigned long)l2_ro_mpt & ~PAGE_MASK) )
+ if ( !((unsigned long)pl2e & ~PAGE_MASK) )
{
if ( (l2_ro_mpt = alloc_xen_pagetable()) == NULL )
goto nomem;
@@ -614,13 +614,14 @@ void __init paging_init(void)
l3e_write(&l3_ro_mpt[l3_table_offset(va)],
l3e_from_paddr(__pa(l2_ro_mpt),
__PAGE_HYPERVISOR_RO | _PAGE_USER));
+ pl2e = l2_ro_mpt;
ASSERT(!l2_table_offset(va));
}
/* NB. Cannot be GLOBAL: guest user mode should not see it. */
if ( l1_pg )
- l2e_write(l2_ro_mpt, l2e_from_page(
+ l2e_write(pl2e, l2e_from_page(
l1_pg, /*_PAGE_GLOBAL|*/_PAGE_PSE|_PAGE_USER|_PAGE_PRESENT));
- l2_ro_mpt++;
+ pl2e++;
}
#undef CNT
#undef MFN
@@ -636,7 +637,8 @@ void __init paging_init(void)
clear_page(l2_ro_mpt);
l3e_write(&l3_ro_mpt[l3_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
l3e_from_paddr(__pa(l2_ro_mpt), __PAGE_HYPERVISOR_RO));
- l2_ro_mpt += l2_table_offset(HIRO_COMPAT_MPT_VIRT_START);
+ pl2e = l2_ro_mpt;
+ pl2e += l2_table_offset(HIRO_COMPAT_MPT_VIRT_START);
/* Allocate and map the compatibility mode machine-to-phys table. */
mpt_size = (mpt_size >> 1) + (1UL << (L2_PAGETABLE_SHIFT - 1));
if ( mpt_size > RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START )
@@ -649,7 +651,7 @@ void __init paging_init(void)
sizeof(*compat_machine_to_phys_mapping))
BUILD_BUG_ON((sizeof(*frame_table) & ~sizeof(*frame_table)) % \
sizeof(*compat_machine_to_phys_mapping));
- for ( i = 0; i < (mpt_size >> L2_PAGETABLE_SHIFT); i++, l2_ro_mpt++ )
+ for ( i = 0; i < (mpt_size >> L2_PAGETABLE_SHIFT); i++, pl2e++ )
{
memflags = MEMF_node(phys_to_nid(i <<
(L2_PAGETABLE_SHIFT - 2 + PAGE_SHIFT)));
@@ -671,7 +673,7 @@ void __init paging_init(void)
0x55,
1UL << L2_PAGETABLE_SHIFT);
/* NB. Cannot be GLOBAL as the ptes get copied into per-VM space. */
- l2e_write(l2_ro_mpt, l2e_from_page(l1_pg, _PAGE_PSE|_PAGE_PRESENT));
+ l2e_write(pl2e, l2e_from_page(l1_pg, _PAGE_PSE|_PAGE_PRESENT));
}
#undef CNT
#undef MFN
--
2.11.0