x86: consolidate initialization of PV guest L4 page tables

So far this has been repeated in 3 places, requiring to remember to
update all of them if a change is being made.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -290,13 +290,8 @@ static int setup_compat_l4(struct vcpu *
     pg->u.inuse.type_info = PGT_l4_page_table|PGT_validated|1;
     l4tab = page_to_virt(pg);
 
-    copy_page(l4tab, idle_pg_table);
-    l4tab[0] = l4e_empty();
-    l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
-        l4e_from_page(pg, __PAGE_HYPERVISOR);
-    l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
-        l4e_from_paddr(__pa(v->domain->arch.mm_perdomain_l3),
-                       __PAGE_HYPERVISOR);
+    clear_page(l4tab);
+    init_guest_l4_table(l4tab, v->domain);
 
     v->arch.guest_table = pagetable_from_page(pg);
     v->arch.guest_table_user = v->arch.guest_table;
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -622,13 +622,7 @@ int __init construct_dom0(
         l3start = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
     }
     clear_page(l4tab);
-    for ( i = l4_table_offset(HYPERVISOR_VIRT_START);
-          i < l4_table_offset(HYPERVISOR_VIRT_END); ++i )
-        l4tab[i] = idle_pg_table[i];
-    l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
-        l4e_from_paddr(__pa(l4start), __PAGE_HYPERVISOR);
-    l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
-        l4e_from_paddr(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
+    init_guest_l4_table(l4tab, d);
     v->arch.guest_table = pagetable_from_paddr(__pa(l4start));
     if ( is_pv_32on64_domain(d) )
         v->arch.guest_table_user = v->arch.guest_table;
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1315,6 +1315,18 @@ static int alloc_l3_table(struct page_in
     return rc > 0 ? 0 : rc;
 }
 
+void init_guest_l4_table(l4_pgentry_t l4tab[], const struct domain *d)
+{
+    /* Xen private mappings. */
+    memcpy(&l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT],
+           &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
+           ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));
+    l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
+        l4e_from_pfn(virt_to_mfn(l4tab), __PAGE_HYPERVISOR);
+    l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
+        l4e_from_pfn(virt_to_mfn(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
+}
+
 static int alloc_l4_table(struct page_info *page, int preemptible)
 {
     struct domain *d = page_get_owner(page);
@@ -1358,15 +1370,7 @@ static int alloc_l4_table(struct page_in
             adjust_guest_l4e(pl4e[i], d);
     }
 
-    /* Xen private mappings. */
-    memcpy(&pl4e[ROOT_PAGETABLE_FIRST_XEN_SLOT],
-           &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
-           ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));
-    pl4e[l4_table_offset(LINEAR_PT_VIRT_START)] =
-        l4e_from_pfn(pfn, __PAGE_HYPERVISOR);
-    pl4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
-        l4e_from_page(virt_to_page(d->arch.mm_perdomain_l3),
-                      __PAGE_HYPERVISOR);
+    init_guest_l4_table(pl4e, d);
 
     return rc > 0 ? 0 : rc;
 }
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -316,6 +316,8 @@ static inline void *__page_to_virt(const
 int free_page_type(struct page_info *page, unsigned long type,
                    int preemptible);
 
+void init_guest_l4_table(l4_pgentry_t[], const struct domain *);
+
 int is_iomem_page(unsigned long mfn);
 
 void clear_superpage_mark(struct page_info *page);