[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [RFC PATCH 2/3] PVH dom0: move some pv specific code to static functions
In this preparatory patch also, some pv specific code is carved out into static functions. No functionality change. Signed-off-by: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx> --- xen/arch/x86/domain_build.c | 358 +++++++++++++++++++++++------------------- 1 files changed, 196 insertions(+), 162 deletions(-) diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c index 232adf8..5125aa2 100644 --- a/xen/arch/x86/domain_build.c +++ b/xen/arch/x86/domain_build.c @@ -307,6 +307,197 @@ static void __init process_dom0_ioports_disable(void) } } +static __init void mark_pv_pt_pages_rdonly(struct domain *d, + l4_pgentry_t *l4start, + unsigned long vpt_start, + unsigned long nr_pt_pages) +{ + unsigned long count; + struct page_info *page; + l4_pgentry_t *l4tab; + l3_pgentry_t *l3tab, *l3start; + l2_pgentry_t *l2tab, *l2start; + l1_pgentry_t *l1tab, *l1start; + + /* Pages that are part of page tables must be read only. */ + l4tab = l4start + l4_table_offset(vpt_start); + l3start = l3tab = l4e_to_l3e(*l4tab); + l3tab += l3_table_offset(vpt_start); + l2start = l2tab = l3e_to_l2e(*l3tab); + l2tab += l2_table_offset(vpt_start); + l1start = l1tab = l2e_to_l1e(*l2tab); + l1tab += l1_table_offset(vpt_start); + for ( count = 0; count < nr_pt_pages; count++ ) + { + l1e_remove_flags(*l1tab, _PAGE_RW); + page = mfn_to_page(l1e_get_pfn(*l1tab)); + + /* Read-only mapping + PGC_allocated + page-table page. */ + page->count_info = PGC_allocated | 3; + page->u.inuse.type_info |= PGT_validated | 1; + + /* Top-level p.t. is pinned. */ + if ( (page->u.inuse.type_info & PGT_type_mask) == + (!is_pv_32on64_domain(d) ? + PGT_l4_page_table : PGT_l3_page_table) ) + { + page->count_info += 1; + page->u.inuse.type_info += 1 | PGT_pinned; + } + + /* Iterate. 
*/ + if ( !((unsigned long)++l1tab & (PAGE_SIZE - 1)) ) + { + if ( !((unsigned long)++l2tab & (PAGE_SIZE - 1)) ) + { + if ( !((unsigned long)++l3tab & (PAGE_SIZE - 1)) ) + l3start = l3tab = l4e_to_l3e(*++l4tab); + l2start = l2tab = l3e_to_l2e(*l3tab); + } + l1start = l1tab = l2e_to_l1e(*l2tab); + } + } +} + +static __init void setup_pv_p2m_table( + struct domain *d, struct vcpu *v, struct elf_dom_parms *parms, + unsigned long v_start, unsigned long vphysmap_start, + unsigned long vphysmap_end, unsigned long v_end, unsigned long nr_pages) +{ + struct page_info *page = NULL; + l4_pgentry_t *l4tab = NULL, *l4start = NULL; + l3_pgentry_t *l3tab = NULL; + l2_pgentry_t *l2tab = NULL; + l1_pgentry_t *l1tab = NULL; + + l4start = map_domain_page(pagetable_get_pfn(v->arch.guest_table)); + l3tab = NULL; + l2tab = NULL; + l1tab = NULL; + + /* Set up the phys->machine table if not part of the initial mapping. */ + if ( parms->p2m_base != UNSET_ADDR ) + { + unsigned long va = vphysmap_start; + + if ( v_start <= vphysmap_end && vphysmap_start <= v_end ) + panic("DOM0 P->M table overlaps initial mapping"); + + while ( va < vphysmap_end ) + { + if ( d->tot_pages + ((round_pgup(vphysmap_end) - va) + >> PAGE_SHIFT) + 3 > nr_pages ) + panic("Dom0 allocation too small for initial P->M table.\n"); + + if ( l1tab ) + { + unmap_domain_page(l1tab); + l1tab = NULL; + } + if ( l2tab ) + { + unmap_domain_page(l2tab); + l2tab = NULL; + } + if ( l3tab ) + { + unmap_domain_page(l3tab); + l3tab = NULL; + } + l4tab = l4start + l4_table_offset(va); + if ( !l4e_get_intpte(*l4tab) ) + { + page = alloc_domheap_page(d, 0); + if ( !page ) + break; + /* No mapping, PGC_allocated + page-table page. 
*/ + page->count_info = PGC_allocated | 2; + page->u.inuse.type_info = + PGT_l3_page_table | PGT_validated | 1; + l3tab = __map_domain_page(page); + clear_page(l3tab); + *l4tab = l4e_from_page(page, L4_PROT); + } else + l3tab = map_domain_page(l4e_get_pfn(*l4tab)); + l3tab += l3_table_offset(va); + if ( !l3e_get_intpte(*l3tab) ) + { + if ( cpu_has_page1gb && + !(va & ((1UL << L3_PAGETABLE_SHIFT) - 1)) && + vphysmap_end >= va + (1UL << L3_PAGETABLE_SHIFT) && + (page = alloc_domheap_pages(d, + L3_PAGETABLE_SHIFT - + PAGE_SHIFT, + 0)) != NULL ) + { + *l3tab = l3e_from_page(page, + L1_PROT|_PAGE_DIRTY|_PAGE_PSE); + va += 1UL << L3_PAGETABLE_SHIFT; + continue; + } + if ( (page = alloc_domheap_page(d, 0)) == NULL ) + break; + /* No mapping, PGC_allocated + page-table page. */ + page->count_info = PGC_allocated | 2; + page->u.inuse.type_info = + PGT_l2_page_table | PGT_validated | 1; + l2tab = __map_domain_page(page); + clear_page(l2tab); + *l3tab = l3e_from_page(page, L3_PROT); + } + else + l2tab = map_domain_page(l3e_get_pfn(*l3tab)); + l2tab += l2_table_offset(va); + if ( !l2e_get_intpte(*l2tab) ) + { + if ( !(va & ((1UL << L2_PAGETABLE_SHIFT) - 1)) && + vphysmap_end >= va + (1UL << L2_PAGETABLE_SHIFT) && + (page = alloc_domheap_pages(d, + L2_PAGETABLE_SHIFT - + PAGE_SHIFT, + 0)) != NULL ) + { + *l2tab = l2e_from_page(page, + L1_PROT|_PAGE_DIRTY|_PAGE_PSE); + if ( opt_allow_superpage ) + get_superpage(page_to_mfn(page), d); + va += 1UL << L2_PAGETABLE_SHIFT; + continue; + } + if ( (page = alloc_domheap_page(d, 0)) == NULL ) + break; + /* No mapping, PGC_allocated + page-table page. 
*/ + page->count_info = PGC_allocated | 2; + page->u.inuse.type_info = + PGT_l1_page_table | PGT_validated | 1; + l1tab = __map_domain_page(page); + clear_page(l1tab); + *l2tab = l2e_from_page(page, L2_PROT); + } + else + l1tab = map_domain_page(l2e_get_pfn(*l2tab)); + l1tab += l1_table_offset(va); + BUG_ON(l1e_get_intpte(*l1tab)); + page = alloc_domheap_page(d, 0); + if ( !page ) + break; + *l1tab = l1e_from_page(page, L1_PROT|_PAGE_DIRTY); + va += PAGE_SIZE; + va &= PAGE_MASK; + } + if ( !page ) + panic("Not enough RAM for DOM0 P->M table.\n"); + } + + if ( l1tab ) + unmap_domain_page(l1tab); + if ( l2tab ) + unmap_domain_page(l2tab); + if ( l3tab ) + unmap_domain_page(l3tab); + unmap_domain_page(l4start); +} + int __init construct_dom0( struct domain *d, const module_t *image, unsigned long image_headroom, @@ -705,44 +896,8 @@ int __init construct_dom0( COMPAT_L2_PAGETABLE_XEN_SLOTS(d) * sizeof(*l2tab)); } - /* Pages that are part of page tables must be read only. */ - l4tab = l4start + l4_table_offset(vpt_start); - l3start = l3tab = l4e_to_l3e(*l4tab); - l3tab += l3_table_offset(vpt_start); - l2start = l2tab = l3e_to_l2e(*l3tab); - l2tab += l2_table_offset(vpt_start); - l1start = l1tab = l2e_to_l1e(*l2tab); - l1tab += l1_table_offset(vpt_start); - for ( count = 0; count < nr_pt_pages; count++ ) - { - l1e_remove_flags(*l1tab, _PAGE_RW); - page = mfn_to_page(l1e_get_pfn(*l1tab)); - - /* Read-only mapping + PGC_allocated + page-table page. */ - page->count_info = PGC_allocated | 3; - page->u.inuse.type_info |= PGT_validated | 1; - - /* Top-level p.t. is pinned. */ - if ( (page->u.inuse.type_info & PGT_type_mask) == - (!is_pv_32on64_domain(d) ? - PGT_l4_page_table : PGT_l3_page_table) ) - { - page->count_info += 1; - page->u.inuse.type_info += 1 | PGT_pinned; - } - - /* Iterate. 
*/ - if ( !((unsigned long)++l1tab & (PAGE_SIZE - 1)) ) - { - if ( !((unsigned long)++l2tab & (PAGE_SIZE - 1)) ) - { - if ( !((unsigned long)++l3tab & (PAGE_SIZE - 1)) ) - l3start = l3tab = l4e_to_l3e(*++l4tab); - l2start = l2tab = l3e_to_l2e(*l3tab); - } - l1start = l1tab = l2e_to_l1e(*l2tab); - } - } + if ( is_pv_domain(d) ) + mark_pv_pt_pages_rdonly(d, l4start, vpt_start, nr_pt_pages); /* Mask all upcalls... */ for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ ) @@ -814,131 +969,10 @@ int __init construct_dom0( elf_64bit(&elf) ? 64 : 32, parms.pae ? "p" : ""); count = d->tot_pages; - l4start = map_domain_page(pagetable_get_pfn(v->arch.guest_table)); - l3tab = NULL; - l2tab = NULL; - l1tab = NULL; - /* Set up the phys->machine table if not part of the initial mapping. */ - if ( parms.p2m_base != UNSET_ADDR ) - { - unsigned long va = vphysmap_start; - if ( v_start <= vphysmap_end && vphysmap_start <= v_end ) - panic("DOM0 P->M table overlaps initial mapping"); - - while ( va < vphysmap_end ) - { - if ( d->tot_pages + ((round_pgup(vphysmap_end) - va) - >> PAGE_SHIFT) + 3 > nr_pages ) - panic("Dom0 allocation too small for initial P->M table.\n"); - - if ( l1tab ) - { - unmap_domain_page(l1tab); - l1tab = NULL; - } - if ( l2tab ) - { - unmap_domain_page(l2tab); - l2tab = NULL; - } - if ( l3tab ) - { - unmap_domain_page(l3tab); - l3tab = NULL; - } - l4tab = l4start + l4_table_offset(va); - if ( !l4e_get_intpte(*l4tab) ) - { - page = alloc_domheap_page(d, 0); - if ( !page ) - break; - /* No mapping, PGC_allocated + page-table page. 
*/ - page->count_info = PGC_allocated | 2; - page->u.inuse.type_info = - PGT_l3_page_table | PGT_validated | 1; - l3tab = __map_domain_page(page); - clear_page(l3tab); - *l4tab = l4e_from_page(page, L4_PROT); - } else - l3tab = map_domain_page(l4e_get_pfn(*l4tab)); - l3tab += l3_table_offset(va); - if ( !l3e_get_intpte(*l3tab) ) - { - if ( cpu_has_page1gb && - !(va & ((1UL << L3_PAGETABLE_SHIFT) - 1)) && - vphysmap_end >= va + (1UL << L3_PAGETABLE_SHIFT) && - (page = alloc_domheap_pages(d, - L3_PAGETABLE_SHIFT - - PAGE_SHIFT, - 0)) != NULL ) - { - *l3tab = l3e_from_page(page, - L1_PROT|_PAGE_DIRTY|_PAGE_PSE); - va += 1UL << L3_PAGETABLE_SHIFT; - continue; - } - if ( (page = alloc_domheap_page(d, 0)) == NULL ) - break; - /* No mapping, PGC_allocated + page-table page. */ - page->count_info = PGC_allocated | 2; - page->u.inuse.type_info = - PGT_l2_page_table | PGT_validated | 1; - l2tab = __map_domain_page(page); - clear_page(l2tab); - *l3tab = l3e_from_page(page, L3_PROT); - } - else - l2tab = map_domain_page(l3e_get_pfn(*l3tab)); - l2tab += l2_table_offset(va); - if ( !l2e_get_intpte(*l2tab) ) - { - if ( !(va & ((1UL << L2_PAGETABLE_SHIFT) - 1)) && - vphysmap_end >= va + (1UL << L2_PAGETABLE_SHIFT) && - (page = alloc_domheap_pages(d, - L2_PAGETABLE_SHIFT - - PAGE_SHIFT, - 0)) != NULL ) - { - *l2tab = l2e_from_page(page, - L1_PROT|_PAGE_DIRTY|_PAGE_PSE); - if ( opt_allow_superpage ) - get_superpage(page_to_mfn(page), d); - va += 1UL << L2_PAGETABLE_SHIFT; - continue; - } - if ( (page = alloc_domheap_page(d, 0)) == NULL ) - break; - /* No mapping, PGC_allocated + page-table page. 
*/ - page->count_info = PGC_allocated | 2; - page->u.inuse.type_info = - PGT_l1_page_table | PGT_validated | 1; - l1tab = __map_domain_page(page); - clear_page(l1tab); - *l2tab = l2e_from_page(page, L2_PROT); - } - else - l1tab = map_domain_page(l2e_get_pfn(*l2tab)); - l1tab += l1_table_offset(va); - BUG_ON(l1e_get_intpte(*l1tab)); - page = alloc_domheap_page(d, 0); - if ( !page ) - break; - *l1tab = l1e_from_page(page, L1_PROT|_PAGE_DIRTY); - va += PAGE_SIZE; - va &= PAGE_MASK; - } - if ( !page ) - panic("Not enough RAM for DOM0 P->M table.\n"); - } - - if ( l1tab ) - unmap_domain_page(l1tab); - if ( l2tab ) - unmap_domain_page(l2tab); - if ( l3tab ) - unmap_domain_page(l3tab); - unmap_domain_page(l4start); + if ( is_pv_domain(d) ) + setup_pv_p2m_table(d, v, &parms, v_start, vphysmap_start, + vphysmap_end, v_end, nr_pages); /* Write the phys->machine and machine->phys table entries. */ for ( pfn = 0; pfn < count; pfn++ ) -- 1.7.2.3 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |