
[Xen-devel] [PATCH 6/7] xen/mmu: Copy and revector the P2M tree.



Please first read the description in the "xen/p2m: Add logic to revector a
P2M tree to use __va leafs" patch.

The 'xen_revector_p2m_tree()' function allocates a new P2M tree, copies the
contents of the old one into it, and returns the new one.

At this stage, the __ka address space (which is what the old
P2M tree was using) is partially disassembled. cleanup_highmap()
has removed the PMD entries from 0-16MB and for anything past _brk_end
up to max_pfn_mapped (which is the end of the ramdisk).
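
(For reference, the generic cleanup_highmap() referred to above looks roughly
like the sketch below - paraphrased from arch/x86/mm/init_64.c of this era,
so check the tree for the exact version. The xen_cleanhighmap() added in the
hunk below is deliberately more aggressive about the range it clears.)

    /* Rough sketch of the generic variant: clear the kernel-image (__ka)
     * PMDs that fall outside [_text, roundup(_brk_end, PMD_SIZE)), up to
     * the end of the initially mapped area (max_pfn_mapped). */
    void __init cleanup_highmap(void)
    {
            unsigned long vaddr = __START_KERNEL_map;
            unsigned long vaddr_end = __START_KERNEL_map +
                                      (max_pfn_mapped << PAGE_SHIFT);
            unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
            pmd_t *pmd = level2_kernel_pgt;

            for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
                    if (pmd_none(*pmd))
                            continue;
                    if (vaddr < (unsigned long)_text || vaddr > end)
                            set_pmd(pmd, __pmd(0));
            }
    }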

We have revectored the P2M tree (and the one for save/restore as well)
to use the new __va addresses pointing to the new MFNs. The xen_start_info
has already been taken care of in 'xen_setup_kernel_pagetable()' and
xen_start_info->shared_info in 'xen_setup_shared_info()', so
we are free to roam and delete PMD entries - which is exactly what
we are going to do: we rip out the __ka mapping for the old P2M array.
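
(To be concrete, "revectoring" here just means switching a pointer from its
__ka alias to its __va alias of the same machine pages; an illustrative
one-liner - the last hunk below does exactly this for xen_start_info:)

    /* Both windows name the same physical pages, but only the direct-map
     * (__va) one survives the PMD teardown done in xen_cleanhighmap(). */
    xen_start_info = (struct start_info *)__va(__pa(xen_start_info));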

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
 arch/x86/xen/mmu.c |   57 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 57 insertions(+), 0 deletions(-)

diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 7f54b75..05e8492 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1183,9 +1183,64 @@ static __init void xen_mapping_pagetable_reserve(u64 start, u64 end)
 
 static void xen_post_allocator_init(void);
 
+#ifdef CONFIG_X86_64
+void __init xen_cleanhighmap(unsigned long vaddr, unsigned long vaddr_end)
+{
+       unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
+       pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
+
+       /* NOTE: The loop is more greedy than the cleanup_highmap variant.
+        * We include the PMD passed in on _both_ boundaries. */
+       for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
+                       pmd++, vaddr += PMD_SIZE) {
+               if (pmd_none(*pmd))
+                       continue;
+               if (vaddr < (unsigned long) _text || vaddr > kernel_end)
+                       set_pmd(pmd, __pmd(0));
+       }
+       /* In case we did something silly, we should crash in this function
+        * instead of somewhere later and be confusing. */
+       xen_mc_flush();
+}
+#else
+void __init xen_cleanhighmap(unsigned long vaddr, unsigned long vaddr_end)
+{
+}
+#endif
 static void __init xen_pagetable_setup_done(pgd_t *base)
 {
+       unsigned long size;
+       unsigned long addr;
+
        xen_setup_shared_info();
+       if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+               unsigned long new_mfn_list;
+
+               size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
+
+               new_mfn_list = xen_revector_p2m_tree();
+
+               /* On 32-bit, we get zero so this never gets executed. */
+               if (new_mfn_list && new_mfn_list != xen_start_info->mfn_list) {
+                       /* using __ka address! */
+                       memset((void *)xen_start_info->mfn_list, 0, size);
+
+                       /* We should be in __ka space. */
+                       BUG_ON(xen_start_info->mfn_list < __START_KERNEL_map);
+                       addr = xen_start_info->mfn_list;
+                       size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
+                       /* We roundup to the PMD, which means that if anybody at this stage is
+                        * using the __ka address of xen_start_info or xen_start_info->shared_info
+                        * they are going to crash. Fortunately we have already revectored
+                        * in xen_setup_kernel_pagetable and in xen_setup_shared_info. */
+                       size = roundup(size, PMD_SIZE);
+                       xen_cleanhighmap(addr, addr + size);
+
+                       memblock_free(__pa(xen_start_info->mfn_list), size);
+                       /* And revector! Bye bye old array */
+                       xen_start_info->mfn_list = new_mfn_list;
+               }
+       }
        xen_post_allocator_init();
 }
 
@@ -1823,6 +1878,8 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
        }
        /* Our (by three pages) smaller Xen pagetable that we are using */
        memblock_reserve(PFN_PHYS(pt_base), (pt_end - pt_base) * PAGE_SIZE);
+       /* Revector the xen_start_info */
+       xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
 }
 #else  /* !CONFIG_X86_64 */
 static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
-- 
1.7.7.6

