[Xen-ia64-devel] Modify to introduce delayed p2m table destruction
Hi all,

We have modified the code to introduce delayed p2m table destruction, as the
x86 code does.  This change avoids a hypervisor crash while destroying a
domain that uses the copy receiver of VNIF.

Our modification:

  * splits relinquish_mm() into two parts:
    + shadow_teardown()
      - renamed from relinquish_mm().
      - the p2m table destruction code has been removed from it.
      - called at the domain_kill phase.
    + shadow_final_teardown()
      - contains the p2m table destruction code.
      - called at the domain_destruct phase.
  * removes the provisional crash-avoidance code:
    + gmfn_to_mfn_foreign()
      - the current code contains a provisional check to avoid the
        hypervisor crash.
      - this modification makes that check redundant.

In our tests, this successfully avoids the hypervisor crash while destroying
domains that use PV-on-HVM VNIF (which uses the copy receiver).

Thanks,
- Tsunehisa Doi
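For context, here is a minimal, self-contained sketch of the two-phase
teardown ordering described above.  It is not the Xen code: struct domain,
the stub bodies and main() below are invented stand-ins for illustration;
only the function names shadow_teardown()/shadow_final_teardown() and the
ordering of the two phases mirror the patch that follows.

/* teardown_order.c -- a minimal sketch, NOT the real Xen code.  The struct
 * layout, stub bodies and main() are invented; only the function names and
 * the two-phase ordering mirror the patch below. */
#include <stdio.h>
#include <stdlib.h>

struct domain {
    void *p2m_pgd;          /* stands in for d->arch.mm.pgd */
    void *shadow_bitmap;    /* stands in for d->arch.shadow_bitmap */
};

/* domain_kill phase (domain_relinquish_resources() in the patch): give back
 * guest pages found through the p2m, but keep the p2m tree itself so
 * grant-copy receivers can still translate this domain's gpfns. */
static void shadow_teardown(struct domain *d)
{
    printf("shadow_teardown: page refs dropped, p2m kept at %p\n", d->p2m_pgd);
}

/* domain_destruct phase (arch_domain_destroy() in the patch): nothing can
 * look this domain up any more, so the p2m tree and the shadow bitmap are
 * finally freed. */
static void shadow_final_teardown(struct domain *d)
{
    free(d->shadow_bitmap);
    d->shadow_bitmap = NULL;
    free(d->p2m_pgd);
    d->p2m_pgd = NULL;
    printf("shadow_final_teardown: p2m and shadow bitmap freed\n");
}

int main(void)
{
    struct domain d = { malloc(16), malloc(16) };

    shadow_teardown(&d);        /* domain_kill phase */
    /* ... a VNIF copy receiver may still look up this domain here ... */
    shadow_final_teardown(&d);  /* domain_destruct phase */
    return 0;
}

The point of the split is that the p2m root stays valid between the two
phases, so a grant-table copy issued by the VNIF copy receiver can still
translate the dying domain's gpfns after its memory has been relinquished.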
# HG changeset patch
# User Doi.Tsunehisa@xxxxxxxxxxxxxx
# Node ID a0fa08f7e778a21949e560e631f22365b19a4e4c
# Parent  9c649ca5c1ccf745ec4c22944475ae2d9322b30d
Modify to introduce delayed p2m table destruction

Signed-off-by: Hirofumi Tsujimura <tsujimura.hirof@xxxxxxxxxxxxxx>
Signed-off-by: Tsunehisa Doi <Doi.Tsunehisa@xxxxxxxxxxxxxx>

diff -r 9c649ca5c1cc -r a0fa08f7e778 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Tue Oct 31 22:25:40 2006 -0700
+++ b/xen/arch/ia64/xen/domain.c        Thu Nov 02 17:56:58 2006 +0900
@@ -456,11 +456,10 @@ fail_nomem1:
 
 void arch_domain_destroy(struct domain *d)
 {
-        BUG_ON(d->arch.mm.pgd != NULL);
+        shadow_final_teardown(d);
+
         if (d->shared_info != NULL)
             free_xenheap_pages(d->shared_info, get_order_from_shift(XSI_SHIFT));
-        if (d->arch.shadow_bitmap != NULL)
-                xfree(d->arch.shadow_bitmap);
 
         tlb_track_destroy(d);
 
@@ -607,14 +606,14 @@ static void relinquish_memory(struct dom
 
 void domain_relinquish_resources(struct domain *d)
 {
-        /* Relinquish every page of memory. */
-
-        // relase page traversing d->arch.mm.
-        relinquish_mm(d);
-
+        /* Relinquish guest resources for VT-i domain. */
         if (d->vcpu[0] && VMX_DOMAIN(d->vcpu[0]))
                 vmx_relinquish_guest_resources(d);
 
+        /* Tear down shadow mode stuff. */
+        shadow_teardown(d);
+
+        /* Relinquish every page of memory. */
         relinquish_memory(d, &d->xenpage_list);
         relinquish_memory(d, &d->page_list);
 
diff -r 9c649ca5c1cc -r a0fa08f7e778 xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c    Tue Oct 31 22:25:40 2006 -0700
+++ b/xen/arch/ia64/xen/mm.c    Thu Nov 02 17:56:58 2006 +0900
@@ -247,7 +247,7 @@ try_to_clear_PGC_allocate(struct domain*
 }
 
 static void
-relinquish_pte(struct domain* d, pte_t* pte)
+shadow_teardown_pte(struct domain* d, pte_t* pte)
 {
     unsigned long mfn = pte_pfn(*pte);
     struct page_info* page;
@@ -280,7 +280,7 @@ relinquish_pte(struct domain* d, pte_t*
 }
 
 static void
-relinquish_pmd(struct domain* d, pmd_t* pmd, unsigned long offset)
+shadow_teardown_pmd(struct domain* d, pmd_t* pmd, unsigned long offset)
 {
     unsigned long i;
     pte_t* pte = pte_offset_map(pmd, offset);
@@ -288,14 +288,12 @@ relinquish_pmd(struct domain* d, pmd_t*
     for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
         if (!pte_present(*pte))
             continue;
-
-        relinquish_pte(d, pte);
-    }
-    pte_free_kernel(pte_offset_map(pmd, offset));
+        shadow_teardown_pte(d, pte);
+    }
 }
 
 static void
-relinquish_pud(struct domain* d, pud_t *pud, unsigned long offset)
+shadow_teardown_pud(struct domain* d, pud_t *pud, unsigned long offset)
 {
     unsigned long i;
     pmd_t *pmd = pmd_offset(pud, offset);
@@ -303,14 +301,12 @@ relinquish_pud(struct domain* d, pud_t *
     for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
         if (!pmd_present(*pmd))
             continue;
-
-        relinquish_pmd(d, pmd, offset + (i << PMD_SHIFT));
-    }
-    pmd_free(pmd_offset(pud, offset));
+        shadow_teardown_pmd(d, pmd, offset + (i << PMD_SHIFT));
+    }
 }
 
 static void
-relinquish_pgd(struct domain* d, pgd_t *pgd, unsigned long offset)
+shadow_teardown_pgd(struct domain* d, pgd_t *pgd, unsigned long offset)
 {
     unsigned long i;
     pud_t *pud = pud_offset(pgd, offset);
@@ -318,14 +314,12 @@ relinquish_pgd(struct domain* d, pgd_t *
     for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
         if (!pud_present(*pud))
             continue;
-
-        relinquish_pud(d, pud, offset + (i << PUD_SHIFT));
-    }
-    pud_free(pud_offset(pgd, offset));
+        shadow_teardown_pud(d, pud, offset + (i << PUD_SHIFT));
+    }
 }
 
 void
-relinquish_mm(struct domain* d)
+shadow_teardown(struct domain* d)
 {
     struct mm_struct* mm = &d->arch.mm;
     unsigned long i;
@@ -338,11 +332,72 @@ relinquish_mm(struct domain* d)
     for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
         if (!pgd_present(*pgd))
             continue;
-
-        relinquish_pgd(d, pgd, i << PGDIR_SHIFT);
+        shadow_teardown_pgd(d, pgd, i << PGDIR_SHIFT);
+    }
+}
+
+static void
+shadow_p2m_teardown_pmd(struct domain* d, pmd_t* pmd, unsigned long offset)
+{
+    pte_free_kernel(pte_offset_map(pmd, offset));
+}
+
+static void
+shadow_p2m_teardown_pud(struct domain* d, pud_t *pud, unsigned long offset)
+{
+    unsigned long i;
+    pmd_t *pmd = pmd_offset(pud, offset);
+
+    for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
+        if (!pmd_present(*pmd))
+            continue;
+        shadow_p2m_teardown_pmd(d, pmd, offset + (i << PMD_SHIFT));
+    }
+    pmd_free(pmd_offset(pud, offset));
+}
+
+static void
+shadow_p2m_teardown_pgd(struct domain* d, pgd_t *pgd, unsigned long offset)
+{
+    unsigned long i;
+    pud_t *pud = pud_offset(pgd, offset);
+
+    for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
+        if (!pud_present(*pud))
+            continue;
+        shadow_p2m_teardown_pud(d, pud, offset + (i << PUD_SHIFT));
+    }
+    pud_free(pud_offset(pgd, offset));
+}
+
+static void
+shadow_p2m_teardown(struct domain* d)
+{
+    struct mm_struct* mm = &d->arch.mm;
+    unsigned long i;
+    pgd_t* pgd;
+
+    if (mm->pgd == NULL)
+        return;
+
+    pgd = pgd_offset(mm, 0);
+    for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
+        if (!pgd_present(*pgd))
+            continue;
+        shadow_p2m_teardown_pgd(d, pgd, i << PGDIR_SHIFT);
     }
     pgd_free(mm->pgd);
     mm->pgd = NULL;
+}
+
+void
+shadow_final_teardown(struct domain* d)
+{
+    if (d->arch.shadow_bitmap != NULL) {
+        xfree(d->arch.shadow_bitmap);
+        d->arch.shadow_bitmap = NULL;
+    }
+    shadow_p2m_teardown(d);
 }
 
 // stolen from share_xen_page_with_guest() in xen/arch/x86/mm.c
@@ -398,13 +453,6 @@ gmfn_to_mfn_foreign(struct domain *d, un
 {
     unsigned long pte;
 
-    // This function may be called from __gnttab_copy()
-    // during domain destruction with VNIF copy receiver.
-    // ** FIXME: This is not SMP-safe yet about p2m table. **
-    if (unlikely(d->arch.mm.pgd == NULL)) {
-        BUG();
-        return INVALID_MFN;
-    }
     pte = lookup_domain_mpa(d,gpfn << PAGE_SHIFT, NULL);
     if (!pte) {
         panic("gmfn_to_mfn_foreign: bad gpfn. spinning...\n");
@@ -1300,7 +1348,7 @@ expose_p2m_page(struct domain* d, unsign
     // pte page is allocated form xen heap.(see pte_alloc_one_kernel().)
     // so that the page has NULL page owner and it's reference count
     // is useless.
-    // see also relinquish_pte()'s page_get_owner() == NULL check.
+    // see also shadow_teardown_pte()'s page_get_owner() == NULL check.
     BUG_ON(page_get_owner(page) != NULL);
 
     return __assign_domain_page(d, mpaddr, page_to_maddr(page),
diff -r 9c649ca5c1cc -r a0fa08f7e778 xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h Tue Oct 31 22:25:40 2006 -0700
+++ b/xen/include/asm-ia64/mm.h Thu Nov 02 17:56:58 2006 +0900
@@ -422,7 +422,8 @@ extern int nr_swap_pages;
 extern int nr_swap_pages;
 
 extern void alloc_dom_xen_and_dom_io(void);
-extern void relinquish_mm(struct domain* d);
+extern void shadow_teardown(struct domain* d);
+extern void shadow_final_teardown(struct domain* d);
 extern struct page_info * assign_new_domain_page(struct domain *d, unsigned long mpaddr);
 extern void assign_new_domain0_page(struct domain *d, unsigned long mpaddr);
 extern int __assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr, unsigned long flags);
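As a complement to the gmfn_to_mfn_foreign() hunk above, the following sketch
(again not Xen code; the struct layout and the lookup helper are invented
stand-ins) models why the removed d->arch.mm.pgd == NULL guard becomes
unreachable once p2m destruction is delayed to arch_domain_destroy():

/* grant_copy_window.c -- illustrative only, NOT Xen code. */
#include <assert.h>
#include <stdio.h>

struct domain {
    void *p2m_pgd;   /* stand-in for d->arch.mm.pgd */
};

/* Stand-in for gmfn_to_mfn_foreign(): walking the p2m needs a live root. */
static unsigned long gmfn_lookup(struct domain *d, unsigned long gpfn)
{
    assert(d->p2m_pgd != NULL);   /* the old code could trip here and BUG() */
    return gpfn;                  /* pretend identity translation */
}

int main(void)
{
    static long root;
    struct domain d = { &root };

    /* domain_kill phase: shadow_teardown() has run and guest pages are gone,
     * but the p2m root is untouched, so a late grant copy still succeeds. */
    printf("copy receiver sees mfn %lu\n", gmfn_lookup(&d, 42));

    /* Only arch_domain_destroy() -> shadow_final_teardown(), which runs after
     * the last reference to the domain is dropped, clears the p2m root. */
    d.p2m_pgd = NULL;
    return 0;
}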
_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ia64-devel