x86: frame table related improvements

- fix super page frame table setup for the memory hotplug case (should
  create the full table, or else the hotplug code would need to do the
  necessary table population)
- simplify super page frame table setup (can re-use the frame table
  setup code)
- slightly streamline the frame table setup code
- fix (tighten) a BUILD_BUG_ON() and an ASSERT() condition
- fix the spage <-> pdx conversion macros (they had no users so far,
  and hence no-one noticed how broken they were; a small usage sketch
  follows the sign-off)

Signed-off-by: Jan Beulich
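As a quick illustration of the corrected conversion macros, here is a small,
self-contained sketch, not Xen code: spage_table is a local stand-in array,
struct spage_info is reduced to a dummy field, and PAGE_SHIFT/SUPERPAGE_SHIFT
use the usual x86 values of 12 and 21. It only shows that, with the fixed
macros, pdx_to_spage() selects the superpage entry covering a page index and
spage_to_pdx() maps back to that superpage's first page index.

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT       12                  /* 4k pages */
#define SUPERPAGE_SHIFT  21                  /* 2M superpages */

struct spage_info { unsigned long type_info; };

static struct spage_info spage_table[16];    /* stand-in for the real table */

/* The fixed macros: index scaling, with the shifts in the correct directions. */
#define spage_to_pdx(spg) (((spg) - spage_table) << (SUPERPAGE_SHIFT - PAGE_SHIFT))
#define pdx_to_spage(pdx) (spage_table + ((pdx) >> (SUPERPAGE_SHIFT - PAGE_SHIFT)))

int main(void)
{
    /* A page index somewhere inside the 4th superpage (hypothetical value). */
    unsigned long pdx = 3 * (1UL << (SUPERPAGE_SHIFT - PAGE_SHIFT)) + 17;
    struct spage_info *spg = pdx_to_spage(pdx);

    assert(spg == &spage_table[3]);                     /* covering entry */
    assert(spage_to_pdx(spg) ==
           3UL << (SUPERPAGE_SHIFT - PAGE_SHIFT));      /* its first pdx */

    printf("pdx %lu -> spage %td -> first pdx %lu\n",
           pdx, spg - spage_table, (unsigned long)spage_to_pdx(spg));
    return 0;
}

With the pre-patch macros the shifts went in the opposite directions (and
spage_to_pdx() even shifted the pointer value itself before subtracting
spage_table), so a round trip like the one above could not have yielded a
meaningful table entry.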
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -182,28 +182,6 @@ static uint32_t base_disallow_mask;
      !is_hvm_domain(d)) ?                                       \
      L1_DISALLOW_MASK : (L1_DISALLOW_MASK & ~PAGE_CACHE_ATTRS))
 
-static void __init init_spagetable(void)
-{
-    unsigned long s, start = SPAGETABLE_VIRT_START;
-    unsigned long end = SPAGETABLE_VIRT_END;
-    unsigned long step, mfn;
-    unsigned int max_entries;
-
-    step = 1UL << PAGETABLE_ORDER;
-    max_entries = (max_pdx + ((1UL<<SUPERPAGE_ORDER)-1)) >> SUPERPAGE_ORDER;
-    end = start + (((max_entries * sizeof(*spage_table)) +
[...]
+static void __init init_spagetable(void)
+{
+    BUILD_BUG_ON(XEN_VIRT_END > SPAGETABLE_VIRT_START);
+
+    init_frametable_chunk(spage_table,
+                          mem_hotplug ? (void *)SPAGETABLE_VIRT_END
+                                      : pdx_to_spage(max_pdx - 1) + 1);
+}
+
 void __init init_frametable(void)
 {
     unsigned int sidx, eidx, nidx;
     unsigned int max_idx = (max_pdx + PDX_GROUP_COUNT - 1) / PDX_GROUP_COUNT;
+    struct page_info *end_pg, *top_pg;
 
-    BUILD_BUG_ON(XEN_VIRT_END > FRAMETABLE_VIRT_END);
+    BUILD_BUG_ON(XEN_VIRT_END > FRAMETABLE_VIRT_START);
     BUILD_BUG_ON(FRAMETABLE_VIRT_START & ((1UL << L2_PAGETABLE_SHIFT) - 1));
 
     for ( sidx = 0; ; sidx = nidx )
@@ -252,17 +240,13 @@ void __init init_frametable(void)
         init_frametable_chunk(pdx_to_page(sidx * PDX_GROUP_COUNT),
                               pdx_to_page(eidx * PDX_GROUP_COUNT));
     }
-    if ( !mem_hotplug )
-        init_frametable_chunk(pdx_to_page(sidx * PDX_GROUP_COUNT),
-                              pdx_to_page(max_pdx - 1) + 1);
-    else
-    {
-        init_frametable_chunk(pdx_to_page(sidx * PDX_GROUP_COUNT),
-                              pdx_to_page(max_idx * PDX_GROUP_COUNT - 1) + 1);
-        memset(pdx_to_page(max_pdx), -1,
-               (unsigned long)pdx_to_page(max_idx * PDX_GROUP_COUNT) -
-               (unsigned long)pdx_to_page(max_pdx));
-    }
+
+    end_pg = pdx_to_page(max_pdx - 1) + 1;
+    top_pg = mem_hotplug ? pdx_to_page(max_idx * PDX_GROUP_COUNT - 1) + 1
+                         : end_pg;
+    init_frametable_chunk(pdx_to_page(sidx * PDX_GROUP_COUNT), top_pg);
+    memset(end_pg, -1, (unsigned long)top_pg - (unsigned long)end_pg);
+
     if (opt_allow_superpage)
         init_spagetable();
 }
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -301,7 +301,7 @@ static inline struct page_info *__virt_t
 
 static inline void *__page_to_virt(const struct page_info *pg)
 {
-    ASSERT((unsigned long)pg - FRAMETABLE_VIRT_START < FRAMETABLE_VIRT_END);
+    ASSERT((unsigned long)pg - FRAMETABLE_VIRT_START < FRAMETABLE_SIZE);
     /*
      * (sizeof(*pg) & -sizeof(*pg)) selects the LS bit of sizeof(*pg). The
      * division and re-multiplication avoids one shift when sizeof(*pg) is a
--- a/xen/include/asm-x86/x86_64/page.h
+++ b/xen/include/asm-x86/x86_64/page.h
@@ -46,8 +46,8 @@ extern void pfn_pdx_hole_setup(unsigned
 
 #define page_to_pdx(pg)  ((pg) - frame_table)
 #define pdx_to_page(pdx) (frame_table + (pdx))
-#define spage_to_pdx(spg) ((spg>>(SUPERPAGE_SHIFT-PAGE_SHIFT)) - spage_table)
-#define pdx_to_spage(pdx) (spage_table + ((pdx)<<(SUPERPAGE_SHIFT-PAGE_SHIFT)))
+#define spage_to_pdx(spg) (((spg) - spage_table)<<(SUPERPAGE_SHIFT-PAGE_SHIFT))
+#define pdx_to_spage(pdx) (spage_table + ((pdx)>>(SUPERPAGE_SHIFT-PAGE_SHIFT)))
 /*
  * Note: These are solely for the use by page_{get,set}_owner(), and
  * therefore don't need to handle the XEN_VIRT_{START,END} range.
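For the init_frametable() change, the following standalone sketch (again not
Xen code; PDX_GROUP_COUNT, max_pdx, the mem_hotplug flag and the stand-in
frame_table array are made-up values for illustration) mimics the new
end_pg/top_pg arithmetic: the table is populated up to the last valid page
index, and in the hotplug case the unused tail of the last PDX group is
filled with all-ones, as both the old and the new code do.

#include <stdio.h>
#include <string.h>

#define PDX_GROUP_COUNT  512                 /* stand-in for Xen's value */

struct page_info { unsigned long count_info, type_info; };

static struct page_info frame_table[4 * PDX_GROUP_COUNT];

#define pdx_to_page(pdx) (frame_table + (pdx))

int main(void)
{
    unsigned long max_pdx = 3 * PDX_GROUP_COUNT + 100;  /* one past last pdx */
    unsigned int max_idx = (max_pdx + PDX_GROUP_COUNT - 1) / PDX_GROUP_COUNT;
    int mem_hotplug = 1;
    struct page_info *end_pg, *top_pg;

    end_pg = pdx_to_page(max_pdx - 1) + 1;
    top_pg = mem_hotplug ? pdx_to_page(max_idx * PDX_GROUP_COUNT - 1) + 1
                         : end_pg;

    /* Stand-in for the init_frametable_chunk() calls: zero up to top_pg. */
    memset(frame_table, 0, (unsigned long)top_pg - (unsigned long)frame_table);
    /* Fill the unused tail of the last PDX group, as the patch's memset does. */
    memset(end_pg, -1, (unsigned long)top_pg - (unsigned long)end_pg);

    printf("entries zeroed: %td, entries poisoned: %td\n",
           end_pg - frame_table, top_pg - end_pg);
    return 0;
}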