Re: [Xen-devel] [RFC PATCH 58/84] x86/mm: fix leaks in map_xen_pagetable.
Hi,

NIT: we don't usually add a full stop at the end of the title. This
applies for the rest of the series.

On 9/26/19 10:46 AM, hongyax@xxxxxxxxxx wrote:
> From: Hongyan Xia <hongyax@xxxxxxxxxx>
>
> Not unmapping pages after map_xen_pagetable can leak the virtual
> address space over time. Also this fix makes vmap_to_mfn non-trivial
> to be a macro. There might be better options but move it into vmap.c
> for now.
>
> Signed-off-by: Hongyan Xia <hongyax@xxxxxxxxxx>
> ---
>  xen/arch/x86/mm.c          |  5 +----
>  xen/common/vmap.c          | 13 +++++++++++++
>  xen/include/asm-arm/mm.h   |  2 --
>  xen/include/asm-x86/page.h |  2 --
>  xen/include/xen/vmap.h     |  3 +++
>  5 files changed, 17 insertions(+), 8 deletions(-)
>
> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> index b2b2edbed1..145c5ab47c 100644
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -5160,6 +5160,7 @@ int map_pages_to_xen(
>                           !(l2e_get_flags(ol2e) & _PAGE_PSE) )
>                          free_xen_pagetable(l2e_get_mfn(ol2e));
>                  }
> +                UNMAP_XEN_PAGETABLE(l2t);
>                  free_xen_pagetable(l2t_mfn);
>              }
>          }
> @@ -5225,7 +5226,6 @@ int map_pages_to_xen(
>                  l3e_write_atomic(pl3e,
>                                   l3e_from_mfn(l2t_mfn, __PAGE_HYPERVISOR));
>                  UNMAP_XEN_PAGETABLE(l2t);
> -                l2t = NULL;
>              }
>              if ( locking )
>                  spin_unlock(&map_pgdir_lock);
> @@ -5346,7 +5346,6 @@ int map_pages_to_xen(
>                  l2e_write_atomic(pl2e,
>                                   l2e_from_mfn(l1t_mfn, __PAGE_HYPERVISOR));
>                  UNMAP_XEN_PAGETABLE(l1t);
> -                l1t = NULL;
>              }
>              if ( locking )
>                  spin_unlock(&map_pgdir_lock);
> @@ -5589,7 +5588,6 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
>              {
>                  l3e_write_atomic(pl3e, l3e_from_mfn(mfn, __PAGE_HYPERVISOR));
>                  UNMAP_XEN_PAGETABLE(l2t);
> -                l2t = NULL;
>              }
>              if ( locking )
>                  spin_unlock(&map_pgdir_lock);
> @@ -5657,7 +5655,6 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
>                  l2e_write_atomic(pl2e,
>                                   l2e_from_mfn(mfn, __PAGE_HYPERVISOR));
>                  UNMAP_XEN_PAGETABLE(l1t);
> -                l1t = NULL;
>              }
>              if ( locking )
>                  spin_unlock(&map_pgdir_lock);
> diff --git a/xen/common/vmap.c b/xen/common/vmap.c
> index faebc1ddf1..fcdb8495c8 100644
> --- a/xen/common/vmap.c
> +++ b/xen/common/vmap.c
> @@ -19,6 +19,19 @@ static unsigned int __read_mostly vm_end[VMAP_REGION_NR];
>  /* lowest known clear bit in the bitmap */
>  static unsigned int vm_low[VMAP_REGION_NR];
>
> +mfn_t vmap_to_mfn(void *va)
> +{
> +    l1_pgentry_t *pl1e = virt_to_xen_l1e((unsigned long)(va));
> +    mfn_t ret = _mfn(l1e_get_pfn(*pl1e));

We already

> +    unmap_xen_pagetable(pl1e);

My knowledge of the x86 port is quite limited, but this feels
suspicious to see an unmap in a function called vmap_to_mfn(). Where is
the map done?

Furthermore, this is not going to compile on Arm. You probably want to
implement this function in x86 code and keep the current implementation
for Arm.

> +    return ret;
> +}
> +
> +struct page_info *vmap_to_page(void *va)
> +{
> +    return mfn_to_page(vmap_to_mfn(va));
> +}

This is the exact same implementation as in mm.h. So what's the
advantage of this?

> +
>  void __init vm_init_type(enum vmap_region type, void *start, void *end)
>  {
>      unsigned int i, nr;
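Not part of the original mail, for illustration only: a minimal sketch of the
x86-only split suggested above, assuming vmap_to_mfn() moves into x86-specific
code (e.g. xen/arch/x86/mm.c) and reuses the virt_to_xen_l1e() and
unmap_xen_pagetable() helpers this series relies on, while asm-arm/mm.h keeps
its existing macros. The placement and local variable names are assumptions,
not taken from the patch:

    /* Sketch of an x86-only vmap_to_mfn(); Arm would keep its current macros. */
    mfn_t vmap_to_mfn(void *va)
    {
        /*
         * In this series virt_to_xen_l1e() returns a pointer into a mapped
         * page table, which is where the mapping being released below
         * presumably comes from.
         */
        l1_pgentry_t *pl1e = virt_to_xen_l1e((unsigned long)(va));
        mfn_t mfn = _mfn(l1e_get_pfn(*pl1e));

        /* Drop that mapping again before handing the MFN back. */
        unmap_xen_pagetable(pl1e);

        return mfn;
    }

Keeping the function in x86 code would avoid the Arm build breakage while
leaving the map/unmap pair visible in one place.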
> diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
> index 262d92f18d..1b53429255 100644
> --- a/xen/include/asm-arm/mm.h
> +++ b/xen/include/asm-arm/mm.h
> @@ -231,8 +231,6 @@ static inline void __iomem *ioremap_wc(paddr_t start, size_t len)
>  #define gaddr_to_gfn(ga)    _gfn(paddr_to_pfn(ga))
>  #define mfn_to_maddr(mfn)   pfn_to_paddr(mfn_x(mfn))
>  #define maddr_to_mfn(ma)    _mfn(paddr_to_pfn(ma))
> -#define vmap_to_mfn(va)     maddr_to_mfn(virt_to_maddr((vaddr_t)va))
> -#define vmap_to_page(va)    mfn_to_page(vmap_to_mfn(va))
>
>  /* Page-align address and convert to frame number format */
>  #define paddr_to_pfn_aligned(paddr)    paddr_to_pfn(PAGE_ALIGN(paddr))
> diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
> index 906ec701a3..191de86bff 100644
> --- a/xen/include/asm-x86/page.h
> +++ b/xen/include/asm-x86/page.h
> @@ -266,8 +266,6 @@ void copy_page_sse2(void *, const void *);
>  #define pfn_to_paddr(pfn)   __pfn_to_paddr(pfn)
>  #define paddr_to_pfn(pa)    __paddr_to_pfn(pa)
>  #define paddr_to_pdx(pa)    pfn_to_pdx(paddr_to_pfn(pa))
> -#define vmap_to_mfn(va)     _mfn(l1e_get_pfn(*virt_to_xen_l1e((unsigned long)(va))))
> -#define vmap_to_page(va)    mfn_to_page(vmap_to_mfn(va))
>
>  #endif /* !defined(__ASSEMBLY__) */
> diff --git a/xen/include/xen/vmap.h b/xen/include/xen/vmap.h
> index 369560e620..3d69727a9d 100644
> --- a/xen/include/xen/vmap.h
> +++ b/xen/include/xen/vmap.h
> @@ -23,6 +23,9 @@ void *vmalloc_xen(size_t size);
>  void *vzalloc(size_t size);
>  void vfree(void *va);
>
> +mfn_t vmap_to_mfn(void *va);
> +struct page_info *vmap_to_page(void *va);
> +
>  void __iomem *ioremap(paddr_t, size_t);
>
>  static inline void iounmap(void __iomem *va)

Cheers,

-- 
Julien Grall

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel