[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 4 of 4] Expand use of get_page_from_gfn
xen/arch/x86/mm.c | 48 ++++++++++++++++++---------------------------- xen/common/memory.c | 9 +++++++- xen/common/tmem_xen.c | 26 +++++++++--------------- xen/include/asm-x86/p2m.h | 11 ---------- xen/xsm/flask/hooks.c | 19 ++++++++++++++--- 5 files changed, 52 insertions(+), 61 deletions(-) Replace get_gfn* calls in common/memory.c, arch/x86/mm.c, xsm, and tmem. Fix bugs in xsm for get_gfn_untyped and get_page_from_gfn. Eliminate get_gfn_untyped altogether. Add appropriate ifdef'ery in common code so that ARM isn't trapped. Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx> diff -r 07fda1825c29 -r 8674ecae829c xen/arch/x86/mm.c --- a/xen/arch/x86/mm.c +++ b/xen/arch/x86/mm.c @@ -3731,18 +3731,17 @@ static int create_grant_pte_mapping( adjust_guest_l1e(nl1e, d); gmfn = pte_addr >> PAGE_SHIFT; - mfn = get_gfn_untyped(d, gmfn); - - if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) ) + page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC); + + if ( unlikely(!page) ) { - put_gfn(d, gmfn); MEM_LOG("Could not get page for normal update"); return GNTST_general_error; } + mfn = page_to_mfn(page); va = map_domain_page(mfn); va = (void *)((unsigned long)va + ((unsigned long)pte_addr & ~PAGE_MASK)); - page = mfn_to_page(mfn); if ( !page_lock(page) ) { @@ -3773,7 +3772,6 @@ static int create_grant_pte_mapping( failed: unmap_domain_page(va); put_page(page); - put_gfn(d, gmfn); return rc; } @@ -3788,18 +3786,17 @@ static int destroy_grant_pte_mapping( l1_pgentry_t ol1e; gmfn = addr >> PAGE_SHIFT; - mfn = get_gfn_untyped(d, gmfn); - - if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) ) + page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC); + + if ( unlikely(!page) ) { - put_gfn(d, gmfn); MEM_LOG("Could not get page for normal update"); return GNTST_general_error; } + mfn = page_to_mfn(page); va = map_domain_page(mfn); va = (void *)((unsigned long)va + ((unsigned long)addr & ~PAGE_MASK)); - page = mfn_to_page(mfn); if ( !page_lock(page) ) { @@ -3844,7 
+3841,6 @@ static int destroy_grant_pte_mapping( failed: unmap_domain_page(va); put_page(page); - put_gfn(d, gmfn); return rc; } @@ -4367,11 +4363,12 @@ long set_gdt(struct vcpu *v, /* Check the pages in the new GDT. */ for ( i = 0; i < nr_pages; i++ ) { + struct page_info *page; pfns[i] = frames[i]; - mfn = frames[i] = get_gfn_untyped(d, frames[i]); - if ( !mfn_valid(mfn) || - !get_page_and_type(mfn_to_page(mfn), d, PGT_seg_desc_page) ) + page = get_page_from_gfn(d, frames[i], NULL, P2M_ALLOC); + if ( !page || !get_page_type(page, PGT_seg_desc_page) ) goto fail; + mfn = frames[i] = page_to_mfn(page); } /* Tear down the old GDT. */ @@ -4384,7 +4381,6 @@ long set_gdt(struct vcpu *v, v->arch.pv_vcpu.gdt_frames[i] = frames[i]; l1e_write(&v->arch.perdomain_ptes[i], l1e_from_pfn(frames[i], __PAGE_HYPERVISOR)); - put_gfn(d, pfns[i]); } xfree(pfns); @@ -4394,7 +4390,6 @@ long set_gdt(struct vcpu *v, while ( i-- > 0 ) { put_page_and_type(mfn_to_page(frames[i])); - put_gfn(d, pfns[i]); } xfree(pfns); return -EINVAL; @@ -4440,21 +4435,16 @@ long do_update_descriptor(u64 pa, u64 de *(u64 *)&d = desc; - mfn = get_gfn_untyped(dom, gmfn); + page = get_page_from_gfn(dom, gmfn, NULL, P2M_ALLOC); if ( (((unsigned int)pa % sizeof(struct desc_struct)) != 0) || - !mfn_valid(mfn) || + !page || !check_descriptor(dom, &d) ) { - put_gfn(dom, gmfn); + if ( page ) + put_page(page); return -EINVAL; } - - page = mfn_to_page(mfn); - if ( unlikely(!get_page(page, dom)) ) - { - put_gfn(dom, gmfn); - return -EINVAL; - } + mfn = page_to_mfn(page); /* Check if the given frame is in use in an unsafe context. */ switch ( page->u.inuse.type_info & PGT_type_mask ) @@ -4482,7 +4472,6 @@ long do_update_descriptor(u64 pa, u64 de out: put_page(page); - put_gfn(dom, gmfn); return ret; } @@ -4529,6 +4518,7 @@ static int xenmem_add_to_physmap_once( unsigned long gfn = 0; /* gcc ... 
*/ unsigned long prev_mfn, mfn = 0, gpfn, idx; int rc; + p2m_type_t p2mt; switch ( xatp->space ) { @@ -4617,7 +4607,7 @@ static int xenmem_add_to_physmap_once( put_page(page); /* Remove previously mapped page if it was present. */ - prev_mfn = get_gfn_untyped(d, xatp->gpfn); + prev_mfn = mfn_x(get_gfn(d, xatp->gpfn, &p2mt)); if ( mfn_valid(prev_mfn) ) { if ( is_xen_heap_mfn(prev_mfn) ) diff -r 07fda1825c29 -r 8674ecae829c xen/common/memory.c --- a/xen/common/memory.c +++ b/xen/common/memory.c @@ -694,7 +694,14 @@ long do_memory_op(unsigned long cmd, XEN domain_lock(d); - mfn = get_gfn_untyped(d, xrfp.gpfn); +#ifdef CONFIG_X86 + { + p2m_type_t p2mt; + mfn = mfn_x(get_gfn(d, xrfp.gpfn, &p2mt)); + } +#else + mfn = gmfn_to_mfn(d, xrfp.gpfn); +#endif if ( mfn_valid(mfn) ) guest_physmap_remove_page(d, xrfp.gpfn, mfn, 0); diff -r 07fda1825c29 -r 8674ecae829c xen/common/tmem_xen.c --- a/xen/common/tmem_xen.c +++ b/xen/common/tmem_xen.c @@ -107,30 +107,26 @@ static inline void cli_put_page(tmem_cli static inline void *cli_get_page(tmem_cli_mfn_t cmfn, unsigned long *pcli_mfn, pfp_t **pcli_pfp, bool_t cli_write) { - unsigned long cli_mfn; p2m_type_t t; struct page_info *page; - int ret; - cli_mfn = mfn_x(get_gfn(current->domain, cmfn, &t)); - if ( t != p2m_ram_rw || !mfn_valid(cli_mfn) ) + page = get_page_from_gfn(current->domain, cmfn, &t, P2M_ALLOC); + if ( !page || t != p2m_ram_rw ) { - put_gfn(current->domain, (unsigned long) cmfn); - return NULL; + if ( page ) + put_page(page); + return NULL; } - page = mfn_to_page(cli_mfn); - if ( cli_write ) - ret = get_page_and_type(page, current->domain, PGT_writable_page); - else - ret = get_page(page, current->domain); - if ( !ret ) + + if ( cli_write && !get_page_type(page, PGT_writable_page) ) { - put_gfn(current->domain, (unsigned long) cmfn); + put_page(page); return NULL; } - *pcli_mfn = cli_mfn; + + *pcli_mfn = page_to_mfn(page); *pcli_pfp = (pfp_t *)page; - return map_domain_page(cli_mfn); + return map_domain_page(*pcli_mfn); } static 
inline void cli_put_page(tmem_cli_mfn_t cmfn, void *cli_va, pfp_t *cli_pfp, @@ -144,7 +139,6 @@ static inline void cli_put_page(tmem_cli else put_page((struct page_info *)cli_pfp); unmap_domain_page(cli_va); - put_gfn(current->domain, (unsigned long) cmfn); } #endif diff -r 07fda1825c29 -r 8674ecae829c xen/include/asm-x86/p2m.h --- a/xen/include/asm-x86/p2m.h +++ b/xen/include/asm-x86/p2m.h @@ -350,17 +350,6 @@ static inline mfn_t get_gfn_type(struct #define get_gfn_unshare(d, g, t) get_gfn_type((d), (g), (t), \ P2M_ALLOC | P2M_UNSHARE) -/* Compatibility function exporting the old untyped interface */ -static inline unsigned long get_gfn_untyped(struct domain *d, unsigned long gpfn) -{ - mfn_t mfn; - p2m_type_t t; - mfn = get_gfn(d, gpfn, &t); - if ( p2m_is_valid(t) ) - return mfn_x(mfn); - return INVALID_MFN; -} - /* Will release the p2m_lock for this gfn entry. */ void __put_gfn(struct p2m_domain *p2m, unsigned long gfn); diff -r 07fda1825c29 -r 8674ecae829c xen/xsm/flask/hooks.c --- a/xen/xsm/flask/hooks.c +++ b/xen/xsm/flask/hooks.c @@ -1318,7 +1318,7 @@ static int flask_mmu_normal_update(struc struct domain_security_struct *dsec; u32 fsid; struct avc_audit_data ad; - struct page_info *page; + struct page_info *page = NULL; if (d != t) rc = domain_has_perm(d, t, SECCLASS_MMU, MMU__REMOTE_REMAP); @@ -1334,9 +1334,12 @@ static int flask_mmu_normal_update(struc map_perms |= MMU__MAP_WRITE; AVC_AUDIT_DATA_INIT(&ad, MEMORY); - page = get_page_from_gfn(f, l1e_get_pfn(l1e_from_intpte(fpte)), P2M_ALLOC); +#ifdef CONFIG_X86 + page = get_page_from_gfn(f, l1e_get_pfn(l1e_from_intpte(fpte)), NULL, P2M_ALLOC); mfn = page ? 
page_to_mfn(page) : INVALID_MFN; - +#else + mfn = gmfn_to_mfn(f, l1e_get_pfn(l1e_from_intpte(fpte))); +#endif ad.sdom = d; ad.tdom = f; ad.memory.pte = fpte; @@ -1373,6 +1376,7 @@ static int flask_update_va_mapping(struc int rc = 0; u32 psid; u32 map_perms = MMU__MAP_READ; + struct page_info *page = NULL; unsigned long mfn; struct domain_security_struct *dsec; @@ -1384,8 +1388,15 @@ static int flask_update_va_mapping(struc dsec = d->ssid; - mfn = get_gfn_untyped(f, l1e_get_pfn(pte)); +#ifdef CONFIG_X86 + page = get_page_from_gfn(f, l1e_get_pfn(pte), NULL, P2M_ALLOC); + mfn = (page) ? page_to_mfn(page) : INVALID_MFN; +#else + mfn = gmfn_to_mfn(f, l1e_get_pfn(pte)); +#endif rc = get_mfn_sid(mfn, &psid); + if ( page ) + put_page(page); if ( rc ) return rc; _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |