
[Xen-devel] [PATCH 1/2] x86: construct static parts of 1:1 mapping at build time


  • To: "xen-devel" <xen-devel@xxxxxxxxxxxxx>
  • From: "Jan Beulich" <JBeulich@xxxxxxxx>
  • Date: Tue, 11 Sep 2012 12:36:41 +0100
  • Delivery-date: Tue, 11 Sep 2012 11:37:10 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xen.org>

... rather than at boot time, removing unnecessary redundancy between
EFI and legacy boot code.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
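
For reference, a minimal standalone sketch (not part of the patch) of the
assembler idiom the new tables rely on: gas evaluates symbol assignments
and .rept/.if at assembly time, so the finished table is emitted directly
into the image instead of being filled in by boot code. The label and the
raw flag values are illustrative only -- 0x63 and 0x73 correspond to
__PAGE_HYPERVISOR and __PAGE_HYPERVISOR_NOCACHE, leaving MAP_SMALL_PAGES
aside:

        .data
        .p2align 12                    /* page-align the table */
example_l1:                            /* hypothetical label */
        pfn = 0
        .rept 512                      /* 512 8-byte entries = one 4kB table */
        /* VGA hole (0xa0000-0xc0000) gets a cache-disabled mapping. */
        .if pfn >= 0xa0 && pfn < 0xc0
        .long (pfn << 12) | 0x73       /* PRESENT|RW|PCD|ACCESSED|DIRTY */
        .else
        .long (pfn << 12) | 0x63       /* PRESENT|RW|ACCESSED|DIRTY */
        .endif
        .long 0                        /* upper half of the 64-bit entry */
        pfn = pfn + 1
        .endr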

--- a/xen/arch/x86/boot/head.S
+++ b/xen/arch/x86/boot/head.S
@@ -124,14 +124,11 @@ __start:
         bt      $29,%edx
         jnc     bad_cpu
         /* Initialise L2 identity-map and xen page table entries (16MB). */
-        mov     $sym_phys(l2_identmap),%edi
         mov     $sym_phys(l2_xenmap),%esi
         mov     $sym_phys(l2_bootmap),%edx
         mov     $0x1e3,%eax                  /* PRESENT+RW+A+D+2MB+GLOBAL */
         mov     $8,%ecx
-1:      mov     %eax,(%edi)
-        add     $8,%edi
-        mov     %eax,(%esi)
+1:      mov     %eax,(%esi)
         add     $8,%esi
         mov     %eax,(%edx)
         add     $8,%edx
@@ -163,54 +160,11 @@ __start:
         mov     %eax,sym_phys(idle_pg_table) + l4_table_offset(DIRECTMAP_VIRT_START)*8
         mov     $(sym_phys(l3_xenmap)+7),%eax
         mov     %eax,sym_phys(idle_pg_table) + l4_table_offset(XEN_VIRT_START)*8
-#else
-        /* Initialize low and high mappings of memory with 2MB pages */
-        mov     $sym_phys(idle_pg_table_l2),%edi
-        mov     $0xe3,%eax                   /* PRESENT+RW+A+D+2MB */
-1:      mov     %eax,__PAGE_OFFSET>>18(%edi) /* high mapping */
-        stosl                                /* low mapping */
-        add     $4,%edi
-        add     $(1<<L2_PAGETABLE_SHIFT),%eax
-        cmp     $DIRECTMAP_PHYS_END+0xe3,%eax
-        jne     1b
-1:      stosl   /* low mappings cover up to 16MB */
-        add     $4,%edi
-        add     $(1<<L2_PAGETABLE_SHIFT),%eax
-        cmp     $(16<<20)+0xe3,%eax
-        jne     1b
-        /* Initialise L2 fixmap page directory entry. */
-        mov     $(sym_phys(l1_fixmap)+7),%eax
-        mov     %eax,sym_phys(idle_pg_table_l2) + l2_table_offset(FIXADDR_TOP-1)*8
-#endif
-
-        /* Initialize 4kB mappings of first 2MB or 4MB of memory. */
-        mov     $sym_phys(l1_identmap),%edi
-        mov     $0x263,%eax                  /* PRESENT+RW+A+D+SMALL_PAGES */
-#if defined(__x86_64__)
-        or      $0x100,%eax                  /* GLOBAL */
-#endif
-        xor     %ecx,%ecx
-1:      stosl
-        add     $4,%edi
-        add     $PAGE_SIZE,%eax
-        inc     %ecx
-        /* VGA hole (0xa0000-0xc0000) should be mapped UC. */
-        cmp     $0xa0,%ecx
-        jne     2f
-        or      $0x10,%eax                   /* +PCD */
-2:      cmp     $0xc0,%ecx
-        jne     2f
-        and     $~0x10,%eax                  /* -PCD */
-2:      cmp     $L1_PAGETABLE_ENTRIES,%ecx
-        jne     1b
-        sub     $(PAGE_SIZE-0x63),%edi
-#if defined(__x86_64__)
+        /* Hook 4kB mappings of first 2MB of memory into L2. */
+        mov     $sym_phys(l1_identmap)+__PAGE_HYPERVISOR,%edi
         mov     %edi,sym_phys(l2_identmap)
         mov     %edi,sym_phys(l2_xenmap)
         mov     %edi,sym_phys(l2_bootmap)
-#else
-        mov     %edi,sym_phys(idle_pg_table_l2)
-        mov     %edi,sym_phys(idle_pg_table_l2) + (__PAGE_OFFSET>>18)
 #endif
 
         /* Apply relocations to bootstrap trampoline. */
@@ -269,3 +223,25 @@ __high_start:
 #else
 #include "x86_32.S"
 #endif
+
+        .section .data.page_aligned, "aw", @progbits
+        .p2align PAGE_SHIFT
+/*
+ * Mapping of first 2 megabytes of memory. This is mapped with 4kB mappings
+ * to avoid type conflicts with fixed-range MTRRs covering the lowest megabyte
+ * of physical memory. In any case the VGA hole should be mapped with type UC.
+ */
+        .globl l1_identmap
+l1_identmap:
+        pfn = 0
+        .rept L1_PAGETABLE_ENTRIES
+        /* VGA hole (0xa0000-0xc0000) should be mapped UC. */
+        .if pfn >= 0xa0 && pfn < 0xc0
+        .long (pfn << PAGE_SHIFT) | PAGE_HYPERVISOR_NOCACHE | MAP_SMALL_PAGES
+        .else
+        .long (pfn << PAGE_SHIFT) | PAGE_HYPERVISOR | MAP_SMALL_PAGES
+        .endif
+        .long 0
+        pfn = pfn + 1
+        .endr
+        .size l1_identmap, . - l1_identmap
--- a/xen/arch/x86/boot/x86_32.S
+++ b/xen/arch/x86/boot/x86_32.S
@@ -108,3 +108,24 @@ ENTRY(boot_cpu_gdt_table)
         .fill (PER_CPU_GDT_ENTRY - FLAT_RING3_DS / 8 - 1), 8, 0
         .quad 0x0000910000000000     /* per-CPU entry (limit == cpu) */
         .align PAGE_SIZE,0
+
+#define PAGE_HYPERVISOR         __PAGE_HYPERVISOR
+#define PAGE_HYPERVISOR_NOCACHE __PAGE_HYPERVISOR_NOCACHE
+
+/* Mapping of first 16 megabytes of memory. */
+        .globl idle_pg_table_l2
+idle_pg_table_l2:
+        range = 8
+        .irp count, l2_linear_offset(__PAGE_OFFSET), \
+                    (4 * L2_PAGETABLE_ENTRIES - l2_linear_offset(__PAGE_OFFSET) - 1)
+        .long sym_phys(l1_identmap) + PAGE_HYPERVISOR, 0
+        pfn = 1 << PAGETABLE_ORDER
+        .rept range - 1
+        .long (pfn << PAGE_SHIFT) | PAGE_HYPERVISOR | _PAGE_PSE, 0
+        pfn = pfn + (1 << PAGETABLE_ORDER)
+        .endr
+        .fill \count - range, 8, 0
+        range = DIRECTMAP_MBYTES / 2
+        .endr
+        .long sym_phys(l1_fixmap) + PAGE_HYPERVISOR, 0
+        .size idle_pg_table_l2, . - idle_pg_table_l2
--- a/xen/arch/x86/boot/x86_64.S
+++ b/xen/arch/x86/boot/x86_64.S
@@ -127,3 +127,15 @@ ENTRY(boot_cpu_compat_gdt_table)
         .fill (PER_CPU_GDT_ENTRY - __HYPERVISOR_CS32 / 8 - 1), 8, 0
         .quad 0x0000910000000000     /* per-CPU entry (limit == cpu)      */
         .align PAGE_SIZE, 0
+
+/* Mapping of first 16 megabytes of memory. */
+        .globl l2_identmap
+l2_identmap:
+        .quad 0
+        pfn = 0
+        .rept 7
+        pfn = pfn + (1 << PAGETABLE_ORDER)
+        .quad (pfn << PAGE_SHIFT) | PAGE_HYPERVISOR | _PAGE_PSE
+        .endr
+        .fill 4 * L2_PAGETABLE_ENTRIES - 8, 8, 0
+        .size l2_identmap, . - l2_identmap
--- a/xen/arch/x86/efi/boot.c
+++ b/xen/arch/x86/efi/boot.c
@@ -1119,8 +1119,6 @@ efi_start(EFI_HANDLE ImageHandle, EFI_SY
         unsigned int slot = (xen_phys_start >> L2_PAGETABLE_SHIFT) + i;
         paddr_t addr = slot << L2_PAGETABLE_SHIFT;
 
-        l2_identmap[i] = l2e_from_paddr(i << L2_PAGETABLE_SHIFT,
-                                        PAGE_HYPERVISOR|_PAGE_PSE);
         l2_identmap[slot] = l2e_from_paddr(addr, PAGE_HYPERVISOR|_PAGE_PSE);
         l2_xenmap[i] = l2e_from_paddr(addr, PAGE_HYPERVISOR|_PAGE_PSE);
         slot &= L2_PAGETABLE_ENTRIES - 1;
@@ -1150,16 +1148,7 @@ efi_start(EFI_HANDLE ImageHandle, EFI_SY
         l4e_from_paddr((UINTN)l3_identmap, __PAGE_HYPERVISOR);
     idle_pg_table[l4_table_offset(XEN_VIRT_START)] =
         l4e_from_paddr((UINTN)l3_xenmap, __PAGE_HYPERVISOR);
-    /* Initialize 4kB mappings of first 2MB of memory. */
-    for ( i = 0; i < L1_PAGETABLE_ENTRIES; ++i )
-    {
-        unsigned int attr = PAGE_HYPERVISOR|MAP_SMALL_PAGES;
-
-        /* VGA hole (0xa0000-0xc0000) should be mapped UC. */
-        if ( i >= 0xa0 && i < 0xc0 )
-            attr |= _PAGE_PCD;
-        l1_identmap[i] = l1e_from_pfn(i, attr);
-    }
+    /* Hook 4kB mappings of first 2MB of memory into L2. */
     l2_identmap[0] = l2e_from_paddr((UINTN)l1_identmap, __PAGE_HYPERVISOR);
 
     if ( gop )
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -121,15 +121,6 @@
 #include <asm/setup.h>
 #include <asm/fixmap.h>
 
-/*
- * Mapping of first 2 or 4 megabytes of memory. This is mapped with 4kB
- * mappings to avoid type conflicts with fixed-range MTRRs covering the
- * lowest megabyte of physical memory. In any case the VGA hole should be
- * mapped with type UC.
- */
-l1_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
-    l1_identmap[L1_PAGETABLE_ENTRIES];
-
 /* Mapping of the fixmap space needed early. */
 l1_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
     l1_fixmap[L1_PAGETABLE_ENTRIES];
--- a/xen/arch/x86/x86_32/mm.c
+++ b/xen/arch/x86/x86_32/mm.c
@@ -31,9 +31,6 @@
 #include <asm/setup.h>
 #include <public/memory.h>
 
-l2_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
-    idle_pg_table_l2[4 * L2_PAGETABLE_ENTRIES];
-
 unsigned int __read_mostly PAGE_HYPERVISOR         = __PAGE_HYPERVISOR;
 unsigned int __read_mostly PAGE_HYPERVISOR_NOCACHE = __PAGE_HYPERVISOR_NOCACHE;
 
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -56,8 +56,6 @@ l4_pgentry_t __attribute__ ((__section__
 /* Enough page directories to map bottom 4GB of the memory map. */
 l3_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
     l3_identmap[L3_PAGETABLE_ENTRIES];
-l2_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
-    l2_identmap[4*L2_PAGETABLE_ENTRIES];
 
 /* Enough page directories to map the Xen text and static data. */
 l3_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -1,15 +1,13 @@
 #ifndef __X86_PAGE_H__
 #define __X86_PAGE_H__
 
+#include <xen/const.h>
+
 /*
  * It is important that the masks are signed quantities. This ensures that
  * the compiler sign-extends a 32-bit mask to 64 bits if that is required.
  */
-#ifndef __ASSEMBLY__
-#define PAGE_SIZE           (1L << PAGE_SHIFT)
-#else
-#define PAGE_SIZE           (1 << PAGE_SHIFT)
-#endif
+#define PAGE_SIZE           (_AC(1,L) << PAGE_SHIFT)
 #define PAGE_MASK           (~(PAGE_SIZE-1))
 #define PAGE_FLAG_MASK      (~0)
 
@@ -319,21 +317,22 @@ void paging_init(void);
 void setup_idle_pagetable(void);
 #endif /* !defined(__ASSEMBLY__) */
 
-#define _PAGE_PRESENT  0x001U
-#define _PAGE_RW       0x002U
-#define _PAGE_USER     0x004U
-#define _PAGE_PWT      0x008U
-#define _PAGE_PCD      0x010U
-#define _PAGE_ACCESSED 0x020U
-#define _PAGE_DIRTY    0x040U
-#define _PAGE_PAT      0x080U
-#define _PAGE_PSE      0x080U
-#define _PAGE_GLOBAL   0x100U
-#define _PAGE_AVAIL0   0x200U
-#define _PAGE_AVAIL1   0x400U
-#define _PAGE_AVAIL2   0x800U
-#define _PAGE_AVAIL    0xE00U
-#define _PAGE_PSE_PAT 0x1000U
+#define _PAGE_PRESENT  _AC(0x001,U)
+#define _PAGE_RW       _AC(0x002,U)
+#define _PAGE_USER     _AC(0x004,U)
+#define _PAGE_PWT      _AC(0x008,U)
+#define _PAGE_PCD      _AC(0x010,U)
+#define _PAGE_ACCESSED _AC(0x020,U)
+#define _PAGE_DIRTY    _AC(0x040,U)
+#define _PAGE_PAT      _AC(0x080,U)
+#define _PAGE_PSE      _AC(0x080,U)
+#define _PAGE_GLOBAL   _AC(0x100,U)
+#define _PAGE_AVAIL0   _AC(0x200,U)
+#define _PAGE_AVAIL1   _AC(0x400,U)
+#define _PAGE_AVAIL2   _AC(0x800,U)
+#define _PAGE_AVAIL    _AC(0xE00,U)
+#define _PAGE_PSE_PAT _AC(0x1000,U)
+/* non-architectural flags */
 #define _PAGE_PAGED   0x2000U
 #define _PAGE_SHARED  0x4000U
 
@@ -354,6 +353,8 @@ void setup_idle_pagetable(void);
 #define __PAGE_HYPERVISOR_NOCACHE \
     (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED)
 
+#define MAP_SMALL_PAGES _PAGE_AVAIL0 /* don't use superpage mappings */
+
 #ifndef __ASSEMBLY__
 
 /* Allocator functions for Xen pagetables. */
@@ -367,7 +368,6 @@ l3_pgentry_t *virt_to_xen_l3e(unsigned l
 extern void set_pdx_range(unsigned long smfn, unsigned long emfn);
 
 /* Map machine page range in Xen virtual address space. */
-#define MAP_SMALL_PAGES _PAGE_AVAIL0 /* don't use superpages for the mapping */
 int map_pages_to_xen(
     unsigned long virt,
     unsigned long mfn,
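
Two notes for readers, not part of the patch itself:

The x86_32.S hunk uses gas's .irp directive, which expands its body once
per list element, substituting \count; the plain assignment to "range" at
the end of the body only takes effect for the following iteration. A tiny
standalone illustration with made-up values:

        limit = 4
        .irp count, 6, 10              /* body expands once per element */
        .long \count                   /* \count is the current element */
        .fill \count - limit, 4, 0     /* zero-fill: 2 slots both times */
        limit = 8                      /* applies to the next iteration */
        .endr

The _AC() macro used in page.h comes from xen/const.h (mirroring Linux's
const.h): it pastes the type suffix onto the literal when compiling C and
drops it for assembly, which is what allows the _PAGE_* flags and the new
static tables to share one set of definitions.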


Attachment: x86-build-ident-map.patch
Description: Text document

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel

 

