[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v5] boot allocator: Use arch helper for virt_to_mfn on DIRECTMAP_VIRT region
From: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxx> On ARM platforms with NUMA, while initializing the second memory node, panic is triggered from init_node_heap() when virt_to_mfn() is called for DIRECTMAP_VIRT region address because DIRECTMAP_VIRT region is not mapped to any virtual address. The check virt_to_mfn() here is used to know whether the max MFN is part of the direct mapping. The max MFN is found by calling virt_to_mfn on end address of DIRECTMAP_VIRT region, which is DIRECTMAP_VIRT_END. On ARM64, all RAM is currently direct mapped in Xen and virt_to_mfn uses the hardware for address translation. So if the virtual address is not mapped, a translation fault is raised. In this patch, instead of calling virt_to_mfn(), arch helper arch_mfn_in_directmap() is introduced. On ARM64 this arch helper will return true, because currently all RAM is direct mapped in Xen. On ARM32, only a limited amount of RAM, called xenheap, is always mapped and DIRECTMAP_VIRT region is not mapped. Hence return false. For x86 this helper does virt_to_mfn. Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxx> Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx> --- v5: - Rewritten commit message. 
- Update comments - Dropped extra brackets --- xen/common/page_alloc.c | 9 ++------- xen/include/asm-arm/arm32/mm.h | 23 +++++++++++++++++++++++ xen/include/asm-arm/arm64/mm.h | 23 +++++++++++++++++++++++ xen/include/asm-arm/mm.h | 8 ++++++++ xen/include/asm-x86/mm.h | 11 +++++++++++ 5 files changed, 67 insertions(+), 7 deletions(-) diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c index 68dba19..9e41fb4 100644 --- a/xen/common/page_alloc.c +++ b/xen/common/page_alloc.c @@ -520,9 +520,6 @@ static unsigned long init_node_heap(int node, unsigned long mfn, unsigned long needed = (sizeof(**_heap) + sizeof(**avail) * NR_ZONES + PAGE_SIZE - 1) >> PAGE_SHIFT; -#ifdef DIRECTMAP_VIRT_END - unsigned long eva = min(DIRECTMAP_VIRT_END, HYPERVISOR_VIRT_END); -#endif int i, j; if ( !first_node_initialised ) @@ -532,9 +529,8 @@ static unsigned long init_node_heap(int node, unsigned long mfn, first_node_initialised = 1; needed = 0; } -#ifdef DIRECTMAP_VIRT_END else if ( *use_tail && nr >= needed && - (mfn + nr) <= (virt_to_mfn(eva - 1) + 1) && + arch_mfn_in_directmap(mfn + nr) && (!xenheap_bits || !((mfn + nr - 1) >> (xenheap_bits - PAGE_SHIFT))) ) { @@ -543,7 +539,7 @@ static unsigned long init_node_heap(int node, unsigned long mfn, PAGE_SIZE - sizeof(**avail) * NR_ZONES; } else if ( nr >= needed && - (mfn + needed) <= (virt_to_mfn(eva - 1) + 1) && + arch_mfn_in_directmap(mfn + needed) && (!xenheap_bits || !((mfn + needed - 1) >> (xenheap_bits - PAGE_SHIFT))) ) { @@ -552,7 +548,6 @@ static unsigned long init_node_heap(int node, unsigned long mfn, PAGE_SIZE - sizeof(**avail) * NR_ZONES; *use_tail = 0; } -#endif else if ( get_order_from_bytes(sizeof(**_heap)) == get_order_from_pages(needed) ) { diff --git a/xen/include/asm-arm/arm32/mm.h b/xen/include/asm-arm/arm32/mm.h new file mode 100644 index 0000000..6861249 --- /dev/null +++ b/xen/include/asm-arm/arm32/mm.h @@ -0,0 +1,23 @@ +#ifndef __ARM_ARM32_MM_H__ +#define __ARM_ARM32_MM_H__ + +/* + * Only a limited amount of 
RAM, called xenheap, is always mapped on ARM32. + * For convenience always return false. + */ +static inline bool arch_mfn_in_directmap(unsigned long mfn) +{ + return false; +} + +#endif /* __ARM_ARM32_MM_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/include/asm-arm/arm64/mm.h b/xen/include/asm-arm/arm64/mm.h new file mode 100644 index 0000000..d0a3be7 --- /dev/null +++ b/xen/include/asm-arm/arm64/mm.h @@ -0,0 +1,23 @@ +#ifndef __ARM_ARM64_MM_H__ +#define __ARM_ARM64_MM_H__ + +/* + * On ARM64, all the RAM is currently direct mapped in Xen. + * Hence return always true. + */ +static inline bool arch_mfn_in_directmap(unsigned long mfn) +{ + return true; +} + +#endif /* __ARM_ARM64_MM_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h index 4892155..0fef612 100644 --- a/xen/include/asm-arm/mm.h +++ b/xen/include/asm-arm/mm.h @@ -6,6 +6,14 @@ #include <public/xen.h> #include <xen/pdx.h> +#if defined(CONFIG_ARM_32) +# include <asm/arm32/mm.h> +#elif defined(CONFIG_ARM_64) +# include <asm/arm64/mm.h> +#else +# error "unknown ARM variant" +#endif + /* Align Xen to a 2 MiB boundary. */ #define XEN_PADDR_ALIGN (1 << 21) diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h index e22603c..119d7de 100644 --- a/xen/include/asm-x86/mm.h +++ b/xen/include/asm-x86/mm.h @@ -602,4 +602,15 @@ extern const char zero_page[]; /* Build a 32bit PSE page table using 4MB pages. */ void write_32bit_pse_identmap(uint32_t *l2); +/* + * x86 maps part of physical memory via the directmap region. + * Return whether the input MFN falls in that range. 
+ */ +static inline bool arch_mfn_in_directmap(unsigned long mfn) +{ + unsigned long eva = min(DIRECTMAP_VIRT_END, HYPERVISOR_VIRT_END); + + return mfn <= (virt_to_mfn(eva - 1) + 1); +} + #endif /* __ASM_X86_MM_H__ */ -- 2.7.4 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx https://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |