
[Xen-devel] [PATCH v3 05/46] xen: arm64: initial build + config changes, start of day code



Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
---
v3: - s/hyp/el2/
    - remove dead code
    - fix comment formatting
    - Fix xen.lds.S constructor definitions, s/4/BYTES_PER_LONG/g
v2: - Add PSR_MODE definitions for 64-bit to arch-arm.h and use instead of
      defining in head.S
    - Nuke hard tabs in head.S and mode_switch.S with expand(1)

---
 Config.mk                        |    2 +-
 config/arm64.mk                  |   12 ++
 xen/arch/arm/Makefile            |    1 +
 xen/arch/arm/Rules.mk            |    6 +
 xen/arch/arm/arm64/Makefile      |    1 +
 xen/arch/arm/arm64/head.S        |  393 ++++++++++++++++++++++++++++++++++++++
 xen/arch/arm/arm64/mode_switch.S |   83 ++++++++
 xen/arch/arm/xen.lds.S           |   12 +-
 xen/include/asm-arm/page.h       |    1 +
 xen/include/public/arch-arm.h    |   14 ++
 xen/include/public/hvm/save.h    |    2 +-
 xen/include/public/xen.h         |    2 +-
 xen/include/xen/libelf.h         |    2 +-
 13 files changed, 524 insertions(+), 7 deletions(-)
 create mode 100644 config/arm64.mk
 create mode 100644 xen/arch/arm/arm64/Makefile
 create mode 100644 xen/arch/arm/arm64/head.S
 create mode 100644 xen/arch/arm/arm64/mode_switch.S

diff --git a/Config.mk b/Config.mk
index 07b30ba..a61d7e1 100644
--- a/Config.mk
+++ b/Config.mk
@@ -18,7 +18,7 @@ coverage ?= n
 
 XEN_COMPILE_ARCH    ?= $(shell uname -m | sed -e s/i.86/x86_32/ \
                          -e s/i86pc/x86_32/ -e s/amd64/x86_64/ \
-                         -e s/armv7.*/arm32/)
+                         -e s/armv7.*/arm32/ -e s/armv8.*/arm64/)
 
 XEN_TARGET_ARCH     ?= $(XEN_COMPILE_ARCH)
 XEN_OS              ?= $(shell uname -s)
diff --git a/config/arm64.mk b/config/arm64.mk
new file mode 100644
index 0000000..b2457eb
--- /dev/null
+++ b/config/arm64.mk
@@ -0,0 +1,12 @@
+CONFIG_ARM := y
+CONFIG_ARM_64 := y
+CONFIG_ARM_$(XEN_OS) := y
+
+CFLAGS += #-marm -march= -mcpu= etc
+
+HAS_PL011 := y
+
+# Use only if calling $(LD) directly.
+LDFLAGS_DIRECT += -maarch64elf
+
+CONFIG_LOAD_ADDRESS ?= 0x80000000
diff --git a/xen/arch/arm/Makefile b/xen/arch/arm/Makefile
index 3954dbb..677e232 100644
--- a/xen/arch/arm/Makefile
+++ b/xen/arch/arm/Makefile
@@ -1,4 +1,5 @@
 subdir-$(arm32) += arm32
+subdir-$(arm64) += arm64
 subdir-y += platforms
 
 obj-y += early_printk.o
diff --git a/xen/arch/arm/Rules.mk b/xen/arch/arm/Rules.mk
index 0843e50..a0a14e0 100644
--- a/xen/arch/arm/Rules.mk
+++ b/xen/arch/arm/Rules.mk
@@ -27,6 +27,12 @@ arm32 := y
 arm64 := n
 endif
 
+ifeq ($(TARGET_SUBARCH),arm64)
+CFLAGS += -mcpu=generic
+arm32 := n
+arm64 := y
+endif
+
 ifneq ($(call cc-option,$(CC),-fvisibility=hidden,n),n)
 CFLAGS += -DGCC_HAS_VISIBILITY_ATTRIBUTE
 endif
diff --git a/xen/arch/arm/arm64/Makefile b/xen/arch/arm/arm64/Makefile
new file mode 100644
index 0000000..dffbeb1
--- /dev/null
+++ b/xen/arch/arm/arm64/Makefile
@@ -0,0 +1 @@
+obj-y += mode_switch.o
diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
new file mode 100644
index 0000000..b7ab251
--- /dev/null
+++ b/xen/arch/arm/arm64/head.S
@@ -0,0 +1,393 @@
+/*
+ * xen/arch/arm/arm64/head.S
+ *
+ * Start-of-day code for an ARMv8.
+ *
+ * Ian Campbell <ian.campbell@xxxxxxxxxx>
+ * Copyright (c) 2012 Citrix Systems.
+ *
+ * Based on ARMv7-A head.S by
+ * Tim Deegan <tim@xxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/config.h>
+#include <asm/page.h>
+#include <asm/asm_defns.h>
+
+#define PT_PT     0xe7f /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=111 T=1 P=1 */
+#define PT_MEM    0xe7d /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=111 T=0 P=1 */
+#define PT_DEV    0xe71 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=0 P=1 */
+#define PT_DEV_L3 0xe73 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=1 P=1 */
+
+/* Macro to print a string to the UART, if there is one.
+ * Clobbers x0-x3. */
+#ifdef EARLY_UART_ADDRESS
+#define PRINT(_s)       \
+        adr   x0, 98f ; \
+        bl    puts    ; \
+        b     99f     ; \
+98:     .asciz _s     ; \
+        .align 2      ; \
+99:
+#else
+#define PRINT(_s)
+#endif
+
+        /*.aarch64*/
+
+        /*
+         * Kernel startup entry point.
+         * ---------------------------
+         *
+         * The requirements are:
+         *   MMU = off, D-cache = off, I-cache = on or off,
+         *   x0 = physical address to the FDT blob.
+         *
+         * This must be the very first address in the loaded image.
+         * It should be linked at XEN_VIRT_START, and loaded at any
+         * 2MB-aligned address.  All of text+data+bss must fit in 2MB,
+         * or the initial pagetable code below will need adjustment.
+         */
+
+        .global start
+start:
+        /*
+         * DO NOT MODIFY. Image header expected by Linux boot-loaders.
+         */
+        b       real_start           /* branch to kernel start, magic */
+        .long   0                    /* reserved */
+        .quad   0                    /* Image load offset from start of RAM */
+        .quad   0                    /* reserved */
+        .quad   0                    /* reserved */
+
+real_start:
+        msr   DAIFSet, 0xf           /* Disable all interrupts */
+
+        /* Save the bootloader arguments in less-clobberable registers */
+        mov   x21, x0                /* x21 := DTB, physical address  */
+
+        /* Find out where we are */
+        ldr   x0, =start
+        adr   x19, start             /* x19 := paddr (start) */
+        sub   x20, x19, x0           /* x20 := phys-offset */
+
+        /* Using the DTB in the .dtb section? */
+#ifdef CONFIG_DTB_FILE
+        ldr   x21, =_sdtb
+        add   x21, x21, x20          /* x21 := paddr(DTB) */
+#endif
+
+        /* Are we the boot CPU? */
+        mov   x22, #0                /* x22 := CPU ID */
+        mrs   x0, mpidr_el1
+        tbz   x0, 31, boot_cpu       /* Multiprocessor extension supported? */
+        tbnz  x0, 30, boot_cpu       /* Uniprocessor system? */
+
+        mov   x13, #(0xff << 24)
+        bics  x22, x0, x13           /* Mask out flags to get CPU ID */
+        b.eq  boot_cpu               /* If we're CPU 0, boot now */
+
+        /* Non-boot CPUs wait here to be woken up one at a time. */
+1:      dsb   sy
+        ldr   x0, =smp_up_cpu        /* VA of gate */
+        add   x0, x0, x20            /* PA of gate */
+        ldr   x1, [x0]               /* Which CPU is being booted? */
+        cmp   x1, x22                /* Is it us? */
+        b.eq  2f
+        wfe
+        b     1b
+2:
+
+boot_cpu:
+#ifdef EARLY_UART_ADDRESS
+        ldr   x23, =EARLY_UART_ADDRESS  /* x23 := UART base address */
+        cbnz  x22, 1f
+        bl    init_uart                 /* CPU 0 sets up the UART too */
+1:      PRINT("- CPU ")
+        mov   x0, x22
+        bl    putn
+        PRINT(" booting -\r\n")
+#endif
+
+        PRINT("- Current EL ")
+        mrs   x0, CurrentEL
+        bl    putn
+        PRINT(" -\r\n")
+
+        /* Are we in EL3 */
+        mrs   x0, CurrentEL
+        cmp   x0, #PSR_MODE_EL3t
+        ccmp  x0, #PSR_MODE_EL3h, #0x4, ne
+        b.eq  1f /* Yes */
+
+        /* Are we in EL2 */
+        cmp   x0, #PSR_MODE_EL2t
+        ccmp  x0, #PSR_MODE_EL2h, #0x4, ne
+        b.eq  2f /* Yes */
+
+        /* Otherwise, it must have been EL0 or EL1 */
+        PRINT("- CPU is not in EL3 or EL2 -\r\n")
+        b     fail
+
+1:      PRINT("- Started in EL3 -\r\n- Entering EL2 -\r\n")
+        ldr   x1, =enter_el2_mode    /* VA of function */
+        add   x1, x1, x20            /* PA of function */
+        adr   x30, el2               /* Set return address for call */
+        br    x1                     /* Call function */
+
+2:      PRINT("- Started in EL2 mode -\r\n")
+
+el2:
+        /* Zero BSS on the boot CPU to avoid nasty surprises */
+        cbnz  x22, skip_bss
+
+        PRINT("- Zero BSS -\r\n")
+        ldr   x0, =__bss_start       /* Load start & end of bss */
+        ldr   x1, =__bss_end
+        add   x0, x0, x20            /* Apply physical offset */
+        add   x1, x1, x20
+
+1:      str   xzr, [x0], #8
+        cmp   x0, x1
+        b.lo  1b
+
+skip_bss:
+
+        PRINT("- Setting up control registers -\r\n")
+
+        /* Set up memory attribute type tables */
+        ldr   x0, =MAIRVAL
+        msr   mair_el2, x0
+
+        /* Set up the HTCR:
+         * PASize -- 4G
+         * Top byte is used
+         * PT walks use Outer-Shareable accesses,
+         * PT walks are write-back, no-write-allocate in both cache levels,
+         * Full 64-bit address space goes through this table. */
+        ldr   x0, =0x80802500
+        msr   tcr_el2, x0
+
+        /* Set up the HSCTLR:
+         * Exceptions in LE ARM,
+         * Low-latency IRQs disabled,
+         * Write-implies-XN disabled (for now),
+         * D-cache disabled (for now),
+         * I-cache enabled,
+         * Alignment checking enabled,
+         * MMU translation disabled (for now). */
+        ldr   x0, =(HSCTLR_BASE|SCTLR_A)
+        msr   SCTLR_EL2, x0
+
+        /* Write Xen's PT's paddr into the HTTBR */
+        ldr   x4, =xen_pgtable
+        add   x4, x4, x20            /* x4 := paddr (xen_pgtable) */
+        msr   TTBR0_EL2, x4
+
+        /* Non-boot CPUs don't need to rebuild the pagetable */
+        cbnz  x22, pt_ready
+
+        ldr   x1, =xen_first
+        add   x1, x1, x20            /* x1 := paddr (xen_first) */
+        mov   x3, #PT_PT             /* x2 := table map of xen_first */
+        orr   x2, x1, x3             /* (+ rights for linear PT) */
+        str   x2, [x4, #0]           /* Map it in slot 0 */
+
+        mov   x4, x1                 /* Next level into xen_first */
+
+       /* console fixmap */
+#ifdef EARLY_UART_ADDRESS
+        ldr   x1, =xen_fixmap
+        add   x1, x1, x20            /* x1 := paddr (xen_fixmap) */
+        lsr   x2, x23, #12
+        lsl   x2, x2, #12            /* 4K aligned paddr of UART */
+        mov   x3, #PT_DEV_L3
+        orr   x2, x2, x3             /* x2 := 4K dev map including UART */
+        str   x2, [x1, #(FIXMAP_CONSOLE*8)] /* Map it in the first fixmap's slot */
+#endif
+
+        /* Build the baseline idle pagetable's first-level entries */
+        ldr   x1, =xen_second
+        add   x1, x1, x20            /* x1 := paddr (xen_second) */
+        mov   x3, #PT_PT             /* x2 := table map of xen_second */
+        orr   x2, x1, x3             /* (+ rights for linear PT) */
+        str   x2, [x4, #0]           /* Map it in slot 0 */
+        add   x2, x2, #0x1000
+        str   x2, [x4, #8]           /* Map 2nd page in slot 1 */
+        add   x2, x2, #0x1000
+        str   x2, [x4, #16]          /* Map 3rd page in slot 2 */
+        add   x2, x2, #0x1000
+        str   x2, [x4, #24]          /* Map 4th page in slot 3 */
+
+        /* Now set up the second-level entries */
+        mov   x3, #PT_MEM
+        orr   x2, x19, x3            /* x2 := 2MB normal map of Xen */
+        orr   x4, xzr, x19, lsr #18
+        str   x2, [x1, x4]           /* Map Xen there */
+        ldr   x4, =start
+        lsr   x4, x4, #18            /* Slot for vaddr(start) */
+        str   x2, [x1, x4]           /* Map Xen there too */
+
+        /* xen_fixmap pagetable */
+        ldr   x2, =xen_fixmap
+        add   x2, x2, x20            /* x2 := paddr (xen_fixmap) */
+        mov   x3, #PT_PT
+        orr   x2, x2, x3             /* x2 := table map of xen_fixmap */
+        add   x4, x4, #8
+        str   x2, [x1, x4]           /* Map it in the fixmap's slot */
+
+        lsr   x2, x21, #21
+        lsl   x2, x2, #21            /* 2MB-aligned paddr of DTB */
+        mov   x3, #PT_MEM            /* x2 := 2MB RAM incl. DTB */
+        orr   x2, x2, x3
+        add   x4, x4, #8
+        str   x2, [x1, x4]           /* Map it in the early boot slot */
+
+pt_ready:
+        PRINT("- Turning on paging -\r\n")
+
+        ldr   x1, =paging            /* Explicit vaddr, not RIP-relative */
+        mrs   x0, SCTLR_EL2
+        orr   x0, x0, #SCTLR_M       /* Enable MMU */
+        orr   x0, x0, #SCTLR_C       /* Enable D-cache */
+        dsb   sy                     /* Flush PTE writes and finish reads */
+        msr   SCTLR_EL2, x0          /* now paging is enabled */
+        isb                          /* Now, flush the icache */
+        br    x1                     /* Get a proper vaddr into PC */
+paging:
+
+#ifdef EARLY_UART_ADDRESS
+        /* Use a virtual address to access the UART. */
+        ldr   x23, =FIXMAP_ADDR(FIXMAP_CONSOLE)
+#endif
+
+        PRINT("- Ready -\r\n")
+
+        /* The boot CPU should go straight into C now */
+        cbz   x22, launch
+
+        /* Non-boot CPUs need to move on to the relocated pagetables */
+        ldr   x4, =boot_ttbr         /* VA of TTBR0_EL2 stashed by CPU 0 */
+        add   x4, x4, x20            /* PA of it */
+        ldr   x4, [x4]               /* Actual value */
+        dsb   sy
+        msr   TTBR0_EL2, x4
+        dsb   sy
+        isb
+        tlbi  alle2
+        dsb   sy                     /* Ensure completion of TLB flush */
+        isb
+
+        /* Non-boot CPUs report that they've got this far */
+        ldr   x0, =ready_cpus
+1:      ldaxr x1, [x0]               /*            { read # of ready CPUs } */
+        add   x1, x1, #1             /* Atomically { ++                   } */
+        stlxr w2, x1, [x0]           /*            { writeback            } */
+        cbnz  w2, 1b
+        dsb   sy
+        dc    cvac, x0               /* Flush D-Cache */
+        dsb   sy
+
+        /* Here, the non-boot CPUs must wait again -- they're now running on
+         * the boot CPU's pagetables so it's safe for the boot CPU to
+         * overwrite the non-relocated copy of Xen.  Once it's done that,
+         * and brought up the memory allocator, non-boot CPUs can get their
+         * own stacks and enter C. */
+1:      wfe
+        dsb   sy
+        ldr   x0, =smp_up_cpu
+        ldr   x1, [x0]               /* Which CPU is being booted? */
+        cmp   x1, x22                /* Is it us? */
+        b.ne  1b
+
+launch:
+        ldr   x0, =init_stack        /* Find the boot-time stack */
+        ldr   x0, [x0]
+        add   x0, x0, #STACK_SIZE    /* (which grows down from the top). */
+        sub   x0, x0, #CPUINFO_sizeof /* Make room for CPU save record */
+        mov   sp, x0
+
+        mov   x0, x20                /* Marshal args: - phys_offset */
+        mov   x1, x21                /*               - FDT */
+        mov   x2, x22                /*               - CPU ID */
+        cbz   x22, start_xen         /* and disappear into the land of C */
+        b     start_secondary        /* (to the appropriate entry point) */
+
+/* Fail-stop
+ * x0: string explaining why */
+fail:   PRINT("- Boot failed -\r\n")
+1:      wfe
+        b     1b
+
+#ifdef EARLY_UART_ADDRESS
+
+/* Bring up the UART. Specific to the PL011 UART.
+ * Clobbers x0-x2 */
+init_uart:
+        mov   x1, #0x0
+        strh  w1, [x23, #0x28]       /* -> UARTFBRD (Baud divisor fraction) */
+        mov   x1, #0x4               /* 7.3728MHz / 0x4 == 16 * 115200 */
+        strh  w1, [x23, #0x24]       /* -> UARTIBRD (Baud divisor integer) */
+        mov   x1, #0x60              /* 8n1 */
+        strh  w1, [x23, #0x2c]       /* -> UARTLCR_H (Line control) */
+        ldr   x1, =0x00000301        /* RXE | TXE | UARTEN */
+        strh  w1, [x23, #0x30]       /* -> UARTCR (Control Register) */
+        adr   x0, 1f
+        b     puts
+1:      .asciz "- UART enabled -\r\n"
+        .align 4
+
+/* Print early debug messages.  Specific to the PL011 UART.
+ * x0: Nul-terminated string to print.
+ * Clobbers x0-x2 */
+puts:
+        ldrh  w2, [x23, #0x18]       /* <- UARTFR (Flag register) */
+        tst   w2, #0x8               /* Check BUSY bit */
+        b.ne  puts                   /* Wait for the UART to be ready */
+        ldrb  w2, [x0], #1           /* Load next char */
+        cbz   w2, 1f                 /* Exit on nul */
+        str   w2, [x23]              /* -> UARTDR (Data Register) */
+        b     puts
+1:
+        ret
+
+/* Print a 32-bit number in hex.  Specific to the PL011 UART.
+ * x0: Number to print.
+ * Clobbers x0-x3 */
+putn:
+        adr   x1, hex
+        mov   x3, #8
+1:      ldrh  w2, [x23, #0x18]       /* <- UARTFR (Flag register) */
+        tst   w2, #0x8               /* Check BUSY bit */
+        b.ne  1b                     /* Wait for the UART to be ready */
+        and   x2, x0, #0xf0000000    /* Mask off the top nybble */
+        lsr   x2, x2, #28
+        ldrb  w2, [x1, x2]           /* Convert to a char */
+        strb  w2, [x23]              /* -> UARTDR (Data Register) */
+        lsl   x0, x0, #4             /* Roll it through one nybble at a time */
+        subs  x3, x3, #1
+        b.ne  1b
+        ret
+
+hex:    .ascii "0123456789abcdef"
+        .align 2
+
+#else  /* EARLY_UART_ADDRESS */
+
+init_uart:
+.global early_puts
+early_puts:
+puts:
+putn:   ret
+
+#endif /* EARLY_UART_ADDRESS */
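
As a reading aid (not part of the patch), the PT_PT/PT_MEM/PT_DEV constants at the
top of head.S can be reconstructed from the LPAE lower-attribute bit positions,
which makes the "nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=..." annotations easy to check.
The field names below are descriptive only, not identifiers from the Xen tree:

/* Minimal sketch: rebuild the head.S PT_* descriptor constants from fields. */
#include <assert.h>
#include <stdint.h>

#define LPAE_VALID    (UINT64_C(1) << 0)   /* P: descriptor is valid */
#define LPAE_TABLE    (UINT64_C(1) << 1)   /* T: table/page (vs. block) */
#define LPAE_AI(x)    ((uint64_t)(x) << 2) /* ATTR: index into MAIR */
#define LPAE_NS       (UINT64_C(1) << 5)   /* NS: non-secure */
#define LPAE_AP01     (UINT64_C(1) << 6)   /* AP[2:1] = 01, as in the comments */
#define LPAE_SH_OUTER (UINT64_C(2) << 8)   /* SH = 10: outer shareable */
#define LPAE_AF       (UINT64_C(1) << 10)  /* AF: access flag */
#define LPAE_NG       (UINT64_C(1) << 11)  /* nG: not global */

int main(void)
{
    uint64_t common = LPAE_NG | LPAE_AF | LPAE_SH_OUTER | LPAE_AP01 |
                      LPAE_NS | LPAE_VALID;

    assert((common | LPAE_AI(7) | LPAE_TABLE) == 0xe7f);  /* PT_PT     */
    assert((common | LPAE_AI(7))              == 0xe7d);  /* PT_MEM    */
    assert((common | LPAE_AI(4))              == 0xe71);  /* PT_DEV    */
    assert((common | LPAE_AI(4) | LPAE_TABLE) == 0xe73);  /* PT_DEV_L3 */
    return 0;
}
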
diff --git a/xen/arch/arm/arm64/mode_switch.S b/xen/arch/arm/arm64/mode_switch.S
new file mode 100644
index 0000000..4c38181
--- /dev/null
+++ b/xen/arch/arm/arm64/mode_switch.S
@@ -0,0 +1,83 @@
+/*
+ * xen/arch/arm/arm64/mode_switch.S
+ *
+ * Start-of day code to take a CPU from EL3 to EL2. Largely taken from
+ *       bootwrapper.
+ *
+ * Ian Campbell <ian.campbell@xxxxxxxxxx>
+ * Copyright (c) 2012 Citrix Systems.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/config.h>
+#include <asm/page.h>
+#include <asm/asm_defns.h>
+
+/* Get up a CPU into EL2.  Clobbers x0-x3.
+ *
+ * Expects x22 == CPU number
+ * Expects x30  == EL2 entry point
+ *
+ * This code is specific to the VE model, and not intended to be used
+ * on production systems.  As such it's a bit hackier than the main
+ * boot code in head.S.  In future it will be replaced by better
+ * integration with the bootloader/firmware so that Xen always starts
+ * at EL2.
+ */
+
+.globl enter_el2_mode
+enter_el2_mode:
+        mov     x0, #0x30                       // RES1
+        orr     x0, x0, #(1 << 0)               // Non-secure EL1
+        orr     x0, x0, #(1 << 8)               // HVC enable
+        orr     x0, x0, #(1 << 10)              // 64-bit EL2
+        msr     scr_el3, x0
+
+        msr     cptr_el3, xzr                   // Disable copro. traps to EL3
+
+        ldr     x0, =0x01800000                 // 24Mhz
+        msr     cntfrq_el0, x0
+
+        /*
+         * Check for the primary CPU to avoid a race on the distributor
+         * registers.
+         */
+        cbnz    x22, 1f
+
+        ldr     x1, =(GIC_BASE_ADDRESS+GIC_DR_OFFSET) // GICD_CTLR
+        mov     w0, #3                          // EnableGrp0 | EnableGrp1
+        str     w0, [x1]
+
+1:      ldr     x1, =(GIC_BASE_ADDRESS+GIC_DR_OFFSET+0x80) // GICD_IGROUPR
+        mov     w0, #~0                         // Grp1 interrupts
+        str     w0, [x1], #4
+        b.ne    2f                              // Only local interrupts for secondary CPUs
+        str     w0, [x1], #4
+        str     w0, [x1], #4
+
+2:      ldr     x1, =(GIC_BASE_ADDRESS+GIC_CR_OFFSET) // GICC_CTLR
+        ldr     w0, [x1]
+        mov     w0, #3                          // EnableGrp0 | EnableGrp1
+        str     w0, [x1]
+
+        mov     w0, #1 << 7                     // allow NS access to GICC_PMR
+        str     w0, [x1, #4]                    // GICC_PMR
+
+        msr     sctlr_el2, xzr
+
+        /*
+         * Prepare the switch to the EL2_SP1 mode from EL3
+         */
+        msr     elr_el3, x30                    // Return to desired function
+        mov     x1, #0x3c9                      // EL2_SP1 | D | A | I | F
+        msr     spsr_el3, x1
+        eret
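
For reference (not part of the patch): the two literal values enter_el2_mode
programs before the eret decompose as below — the SCR_EL3 word built with the
orr sequence, and the 0x3c9 written to SPSR_EL3. The SCR_* names are descriptive
only; the mode nibble 0x9 is PSR_MODE_EL2h as defined in arch-arm.h later in
this patch (the in-line comment calls it EL2_SP1):

/* Minimal sketch of the SCR_EL3 and SPSR_EL3 values used above. */
#include <assert.h>

#define SCR_RES1        0x30u       /* bits 5:4, reserved-as-one */
#define SCR_NS          (1u << 0)   /* lower ELs are non-secure */
#define SCR_HCE         (1u << 8)   /* HVC instruction enabled */
#define SCR_RW          (1u << 10)  /* EL2 is AArch64 */

#define PSR_MODE_EL2h   0x09u       /* EL2, using SP_EL2 */
#define PSR_DAIF_MASKED (0xfu << 6) /* D, A, I, F all masked */

int main(void)
{
    assert((SCR_RES1 | SCR_NS | SCR_HCE | SCR_RW) == 0x531);
    assert((PSR_DAIF_MASKED | PSR_MODE_EL2h) == 0x3c9);
    return 0;
}
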
diff --git a/xen/arch/arm/xen.lds.S b/xen/arch/arm/xen.lds.S
index 50e0c4b..5136b79 100644
--- a/xen/arch/arm/xen.lds.S
+++ b/xen/arch/arm/xen.lds.S
@@ -11,7 +11,13 @@
 
 ENTRY(start)
 
-OUTPUT_ARCH(arm)
+#if defined(__arm__)
+#define FORMAT arm
+#elif defined(__aarch64__)
+#define FORMAT aarch64
+#endif
+
+OUTPUT_ARCH(FORMAT)
 
 PHDRS
 {
@@ -85,9 +91,9 @@ SECTIONS
        *(.init.data.rel)
        *(.init.data.rel.*)
 
-       . = ALIGN(4);
+       . = ALIGN(BYTES_PER_LONG);
        __CTOR_LIST__ = .;
-       LONG((__CTOR_END__ - __CTOR_LIST__) / 4 - 2)
+       LONG((__CTOR_END__ - __CTOR_LIST__) / BYTES_PER_LONG - 2)
        *(.ctors)
        LONG(0)
        __CTOR_END__ = .;
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index cd74950..45ded77 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -38,6 +38,7 @@
  */
 #define MAIR0VAL 0xeeaa4400
 #define MAIR1VAL 0xff000004
+#define MAIRVAL (MAIR0VAL|MAIR1VAL<<32)
 
 /*
  * Attribute Indexes.
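
For reference (not part of the patch): MAIRVAL ties back to the ATTR indexes
used by the PT_* constants in head.S — attribute index n selects byte n of the
combined 64-bit value, so index 4 (PT_DEV/PT_DEV_L3) resolves to 0x04
(Device-nGnRE) and index 7 (PT_MEM/PT_PT) to 0xff (Normal, write-back
cacheable). A minimal sketch:

/* Minimal sketch: MAIR attribute-index lookup for the values added here. */
#include <assert.h>
#include <stdint.h>

#define MAIR0VAL 0xeeaa4400u
#define MAIR1VAL 0xff000004u
#define MAIRVAL  (MAIR0VAL | ((uint64_t)MAIR1VAL << 32))

static uint8_t mair_attr(uint64_t mair, unsigned int index)
{
    return (uint8_t)(mair >> (index * 8));   /* one byte per attribute index */
}

int main(void)
{
    assert(mair_attr(MAIRVAL, 4) == 0x04);   /* PT_DEV*: device memory */
    assert(mair_attr(MAIRVAL, 7) == 0xff);   /* PT_MEM/PT_PT: normal WB */
    return 0;
}
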
diff --git a/xen/include/public/arch-arm.h b/xen/include/public/arch-arm.h
index 6ae1009..5306f79 100644
--- a/xen/include/public/arch-arm.h
+++ b/xen/include/public/arch-arm.h
@@ -172,6 +172,8 @@ typedef uint64_t xen_callback_t;
 
 /* 0-4: Mode */
 #define PSR_MODE_MASK 0x1f
+
+/* 32 bit modes */
 #define PSR_MODE_USR 0x10
 #define PSR_MODE_FIQ 0x11
 #define PSR_MODE_IRQ 0x12
@@ -182,6 +184,18 @@ typedef uint64_t xen_callback_t;
 #define PSR_MODE_UND 0x1b
 #define PSR_MODE_SYS 0x1f
 
+/* 64 bit modes */
+#ifdef CONFIG_ARM_64
+#define PSR_MODE_BIT  0x10 /* Set iff AArch32 */
+#define PSR_MODE_EL3h 0x0d
+#define PSR_MODE_EL3t 0x0c
+#define PSR_MODE_EL2h 0x09
+#define PSR_MODE_EL2t 0x08
+#define PSR_MODE_EL1h 0x05
+#define PSR_MODE_EL1t 0x04
+#define PSR_MODE_EL0t 0x00
+#endif
+
 #define PSR_THUMB       (1<<5)        /* Thumb Mode enable */
 #define PSR_FIQ_MASK    (1<<6)        /* Fast Interrupt mask */
 #define PSR_IRQ_MASK    (1<<7)        /* Interrupt mask */
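
For reference (not part of the patch): the new 64-bit PSR_MODE_* values follow
a simple pattern — bits [3:2] hold the exception level, bit 0 picks the stack
pointer (0 for the "t"/SP_EL0 forms, 1 for the "h"/SP_ELx forms), and bit 4
(PSR_MODE_BIT) is set only for AArch32 modes. A sketch that checks the
encodings:

/* Minimal sketch of the AArch64 mode-field encoding behind PSR_MODE_ELxy. */
#include <assert.h>

#define AARCH64_MODE(el, spx) (((el) << 2) | (spx))

int main(void)
{
    assert(AARCH64_MODE(3, 1) == 0x0d);  /* PSR_MODE_EL3h */
    assert(AARCH64_MODE(3, 0) == 0x0c);  /* PSR_MODE_EL3t */
    assert(AARCH64_MODE(2, 1) == 0x09);  /* PSR_MODE_EL2h */
    assert(AARCH64_MODE(2, 0) == 0x08);  /* PSR_MODE_EL2t */
    assert(AARCH64_MODE(1, 1) == 0x05);  /* PSR_MODE_EL1h */
    assert(AARCH64_MODE(1, 0) == 0x04);  /* PSR_MODE_EL1t */
    assert(AARCH64_MODE(0, 0) == 0x00);  /* PSR_MODE_EL0t */
    return 0;
}
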
diff --git a/xen/include/public/hvm/save.h b/xen/include/public/hvm/save.h
index 5538d8e..cc8b5fd 100644
--- a/xen/include/public/hvm/save.h
+++ b/xen/include/public/hvm/save.h
@@ -102,7 +102,7 @@ DECLARE_HVM_SAVE_TYPE(END, 0, struct hvm_save_end);
 
 #if defined(__i386__) || defined(__x86_64__)
 #include "../arch-x86/hvm/save.h"
-#elif defined(__arm__)
+#elif defined(__arm__) || defined(__aarch64__)
 #include "../arch-arm/hvm/save.h"
 #else
 #error "unsupported architecture"
diff --git a/xen/include/public/xen.h b/xen/include/public/xen.h
index c4d08d6..e9431e2 100644
--- a/xen/include/public/xen.h
+++ b/xen/include/public/xen.h
@@ -31,7 +31,7 @@
 
 #if defined(__i386__) || defined(__x86_64__)
 #include "arch-x86/xen.h"
-#elif defined(__arm__)
+#elif defined(__arm__) || defined(__aarch64__)
 #include "arch-arm.h"
 #else
 #error "Unsupported architecture"
diff --git a/xen/include/xen/libelf.h b/xen/include/xen/libelf.h
index e8f6508..218bb18 100644
--- a/xen/include/xen/libelf.h
+++ b/xen/include/xen/libelf.h
@@ -23,7 +23,7 @@
 #ifndef __XEN_LIBELF_H__
 #define __XEN_LIBELF_H__
 
-#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
 #define XEN_ELF_LITTLE_ENDIAN
 #else
 #error define architectural endianness
-- 
1.7.2.5

