[Xen-changelog] [xen-unstable] map_p2m_entry only needs gpfn and gmfn as input parameters.
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID b42b80403ddd93f42146ffddf190b56a86f54f69
# Parent  5fa2cd68d059dd32d6c23f196ec6eae3c1690ad6
map_p2m_entry only needs gpfn and gmfn as input parameters.

Current map_p2m_entry has a redundant input parameter 'va'; this patch
removes it.

Signed-off-by: Xin Li <xin.b.li@xxxxxxxxx>
---
 xen/arch/x86/shadow32.c      |   24 ++++++++----------------
 xen/arch/x86/shadow_public.c |   17 +++++++----------
 2 files changed, 15 insertions(+), 26 deletions(-)

diff -r 5fa2cd68d059 -r b42b80403ddd xen/arch/x86/shadow32.c
--- a/xen/arch/x86/shadow32.c   Thu Jul 27 13:17:17 2006 +0100
+++ b/xen/arch/x86/shadow32.c   Thu Jul 27 13:17:58 2006 +0100
@@ -835,12 +835,12 @@ void free_monitor_pagetable(struct vcpu
 }
 
 static int
-map_p2m_entry(l1_pgentry_t *l1tab, unsigned long va,
-              unsigned long gpa, unsigned long mfn)
+map_p2m_entry(l1_pgentry_t *l1tab, unsigned long gpfn, unsigned long mfn)
 {
     unsigned long *l0tab = NULL;
     l1_pgentry_t l1e = { 0 };
     struct page_info *page;
+    unsigned long va = RO_MPT_VIRT_START + (gpfn * sizeof(mfn));
 
     l1e = l1tab[l1_table_offset(va)];
     if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) )
@@ -858,7 +858,7 @@ map_p2m_entry(l1_pgentry_t *l1tab, unsig
     else
         l0tab = map_domain_page(l1e_get_pfn(l1e));
 
-    l0tab[gpa & ((PAGE_SIZE / sizeof(mfn)) - 1)] = mfn;
+    l0tab[gpfn & ((PAGE_SIZE / sizeof(mfn)) - 1)] = mfn;
 
     unmap_domain_page(l0tab);
 
@@ -877,15 +877,9 @@ set_p2m_entry(struct domain *d, unsigned
     unsigned long va = pfn << PAGE_SHIFT;
 
     if ( shadow_mode_external(d) )
-    {
         tabpfn = pagetable_get_pfn(d->vcpu[0]->arch.monitor_table);
-        va = RO_MPT_VIRT_START + (pfn * sizeof (unsigned long));
-    }
     else
-    {
         tabpfn = pagetable_get_pfn(d->arch.phys_table);
-        va = pfn << PAGE_SHIFT;
-    }
 
     ASSERT(tabpfn != 0);
     ASSERT(shadow_lock_is_acquired(d));
@@ -902,12 +896,12 @@ set_p2m_entry(struct domain *d, unsigned
         l1_pgentry_t *l1tab = NULL;
         l2_pgentry_t l2e;
 
-        l2e = l2[l2_table_offset(va)];
+        l2e = l2[l2_table_offset(RO_MPT_VIRT_START)];
 
         ASSERT( l2e_get_flags(l2e) & _PAGE_PRESENT );
 
         l1tab = map_domain_page(l2e_get_pfn(l2e));
-        if ( !(error = map_p2m_entry(l1tab, va, pfn, mfn)) )
+        if ( !(error = map_p2m_entry(l1tab, pfn, mfn)) )
             domain_crash(d);
 
         unmap_domain_page(l1tab);
@@ -952,7 +946,6 @@ alloc_p2m_table(struct domain *d)
 alloc_p2m_table(struct domain *d)
 {
     struct list_head *list_ent;
-    unsigned long va = RO_MPT_VIRT_START;   /* phys_to_machine_mapping */
 
     l2_pgentry_t *l2tab = NULL;
     l1_pgentry_t *l1tab = NULL;
@@ -965,14 +958,14 @@ alloc_p2m_table(struct domain *d)
     {
         l2tab = map_domain_page(
             pagetable_get_pfn(d->vcpu[0]->arch.monitor_table));
-        l2e = l2tab[l2_table_offset(va)];
+        l2e = l2tab[l2_table_offset(RO_MPT_VIRT_START)];
         if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
         {
             page = alloc_domheap_page(NULL);
 
             l1tab = map_domain_page(page_to_mfn(page));
             memset(l1tab, 0, PAGE_SIZE);
-            l2e = l2tab[l2_table_offset(va)] =
+            l2e = l2tab[l2_table_offset(RO_MPT_VIRT_START)] =
                 l2e_from_page(page, __PAGE_HYPERVISOR);
         }
         else
@@ -1002,14 +995,13 @@ alloc_p2m_table(struct domain *d)
         page = list_entry(list_ent, struct page_info, list);
         mfn = page_to_mfn(page);
 
-        if ( !(error = map_p2m_entry(l1tab, va, gpfn, mfn)) )
+        if ( !(error = map_p2m_entry(l1tab, gpfn, mfn)) )
         {
             domain_crash(d);
             break;
         }
 
         list_ent = frame_table[mfn].list.next;
-        va += sizeof(mfn);
     }
 
     unmap_domain_page(l1tab);

diff -r 5fa2cd68d059 -r b42b80403ddd xen/arch/x86/shadow_public.c
--- a/xen/arch/x86/shadow_public.c   Thu Jul 27 13:17:17 2006 +0100
+++ b/xen/arch/x86/shadow_public.c   Thu Jul 27 13:17:58 2006 +0100
@@ -1471,8 +1471,7 @@ int _shadow_mode_refcounts(struct domain
 }
 
 static int
-map_p2m_entry(pgentry_64_t *top_tab, unsigned long va,
-              unsigned long gpfn, unsigned long mfn)
+map_p2m_entry(pgentry_64_t *top_tab, unsigned long gpfn, unsigned long mfn)
 {
 #if CONFIG_PAGING_LEVELS >= 4
     pgentry_64_t l4e = { 0 };
@@ -1487,6 +1486,7 @@ map_p2m_entry(pgentry_64_t *top_tab, uns
     l2_pgentry_t l2e = { 0 };
     l1_pgentry_t l1e = { 0 };
    struct page_info *page;
+    unsigned long va = RO_MPT_VIRT_START + (gpfn * sizeof(mfn));
 
 #if CONFIG_PAGING_LEVELS >= 4
     l4e = top_tab[l4_table_offset(va)];
@@ -1568,7 +1568,7 @@ map_p2m_entry(pgentry_64_t *top_tab, uns
 
     unmap_domain_page(l1tab);
 
-    l0tab[gpfn & ((PAGE_SIZE / sizeof (mfn)) - 1) ] = mfn;
+    l0tab[gpfn & ((PAGE_SIZE / sizeof(mfn)) - 1)] = mfn;
 
     unmap_domain_page(l0tab);
 
@@ -1584,7 +1584,6 @@ set_p2m_entry(struct domain *d, unsigned
               struct domain_mmap_cache *l1cache)
 {
     unsigned long tabmfn = pagetable_get_pfn(d->vcpu[0]->arch.monitor_table);
-    unsigned long va = RO_MPT_VIRT_START + (gpfn * sizeof(unsigned long));
     pgentry_64_t *top_tab;
     int error;
 
@@ -1593,7 +1592,7 @@ set_p2m_entry(struct domain *d, unsigned
 
     top_tab = map_domain_page_with_cache(tabmfn, l2cache);
 
-    if ( !(error = map_p2m_entry(top_tab, va, gpfn, mfn)) )
+    if ( !(error = map_p2m_entry(top_tab, gpfn, mfn)) )
         domain_crash(d);
 
     unmap_domain_page_with_cache(top_tab, l2cache);
@@ -1605,10 +1604,9 @@ alloc_p2m_table(struct domain *d)
 alloc_p2m_table(struct domain *d)
 {
     struct list_head *list_ent;
-    unsigned long va = RO_MPT_VIRT_START;   /* phys_to_machine_mapping */
 
     pgentry_64_t *top_tab = NULL;
-    unsigned long mfn;
-    int gpfn, error = 0;
+    unsigned long gpfn, mfn;
+    int error = 0;
 
     ASSERT( pagetable_get_pfn(d->vcpu[0]->arch.monitor_table) );
 
@@ -1624,14 +1622,13 @@ alloc_p2m_table(struct domain *d)
         page = list_entry(list_ent, struct page_info, list);
         mfn = page_to_mfn(page);
 
-        if ( !(error = map_p2m_entry(top_tab, va, gpfn, mfn)) )
+        if ( !(error = map_p2m_entry(top_tab, gpfn, mfn)) )
        {
             domain_crash(d);
             break;
        }
 
         list_ent = frame_table[mfn].list.next;
-        va += sizeof(mfn);
     }
 
     unmap_domain_page(top_tab);
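
The change relies on a simple invariant: the p2m entry for a given gpfn always lives at RO_MPT_VIRT_START + gpfn * sizeof(mfn), so both the p2m page and the index within it are functions of gpfn alone, and callers never needed to pass 'va' separately. Below is a minimal standalone sketch of that equivalence; it is not Xen code, and the PAGE_SHIFT and RO_MPT_VIRT_START values are illustrative stand-ins rather than the real hypervisor constants.

/* Build and run: cc -std=c99 -o p2m_va p2m_va.c && ./p2m_va */
#include <assert.h>
#include <stdio.h>

/* Illustrative stand-ins for the Xen constants (hypothetical values). */
#define PAGE_SHIFT         12
#define PAGE_SIZE          (1UL << PAGE_SHIFT)
#define RO_MPT_VIRT_START  0xFC000000UL   /* must be page-aligned */
#define ENTRIES_PER_PAGE   (PAGE_SIZE / sizeof(unsigned long))

int main(void)
{
    unsigned long mfn = 0;  /* used only for sizeof(), as in the patch */

    for ( unsigned long gpfn = 0; gpfn < 4 * ENTRIES_PER_PAGE; gpfn++ )
    {
        /* The va that callers used to compute and pass down. */
        unsigned long va = RO_MPT_VIRT_START + gpfn * sizeof(mfn);

        /* Index within the l0 page, computed directly from gpfn as the
         * patched map_p2m_entry does ... */
        unsigned long idx_from_gpfn = gpfn & ((PAGE_SIZE / sizeof(mfn)) - 1);

        /* ... and the same index recovered from va's offset within its page. */
        unsigned long idx_from_va = (va & (PAGE_SIZE - 1)) / sizeof(mfn);
        assert(idx_from_gpfn == idx_from_va);

        /* Which p2m page holds the entry is likewise a function of gpfn. */
        assert(((va - RO_MPT_VIRT_START) >> PAGE_SHIFT) ==
               gpfn / ENTRIES_PER_PAGE);
    }

    puts("va is fully determined by gpfn; passing both was redundant.");
    return 0;
}

The same reasoning accounts for the loop hunks: the 'va += sizeof(mfn)' bookkeeping in both alloc_p2m_table variants disappears because map_p2m_entry now derives the address itself from the gpfn it is given.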