Re: [Minios-devel] [UNIKRAFT PATCHv4 29/43] plat/kvm: Enable MMU for Arm64
Hi Wei,

On 06/07/18 10:03, Wei Chen wrote:
> QEMU/KVM provide a 1TB physical address for Arm64. In this case,

NIT: s/provide/provides/

> we should use 40-bit virtual address to map physical address.
> In this patch, we enable the MMU to access memory with virtual address.
>
> Signed-off-by: Wei Chen <Wei.Chen@xxxxxxx>
> ---
>  plat/common/include/arm/arm64/cpu_defs.h | 109 +++++++++++++++++++++++
>  plat/kvm/arm/entry64.S                   |  21 +++++
>  plat/kvm/arm/pagetable.S                 |  37 ++++++++
>  3 files changed, 167 insertions(+)
>
> diff --git a/plat/common/include/arm/arm64/cpu_defs.h b/plat/common/include/arm/arm64/cpu_defs.h
> index f33ce35..591d632 100644
> --- a/plat/common/include/arm/arm64/cpu_defs.h
> +++ b/plat/common/include/arm/arm64/cpu_defs.h
> @@ -105,6 +105,14 @@ END(name)
>  #define PSR_N		0x80000000
>  #define PSR_FLAGS	0xf0000000
> +/*
> + * The supported virtual address bits.
> + * We will do 1:1 VA to PA Mapping, so we define the same address size
> + * for VA and PA. 1TB size for Virtual and Physical Address Space.
> + */
> +#define VIRT_BITS	40
> +
> +/*
>   * CPACR_EL1 Architectural Feature Access Control Register
>   * FPEN, bits [21:20] control traps of EL0 and EL1 accesses to the
>   * SIMD and floating-point registers to EL1, from both Execution
> @@ -145,6 +153,107 @@ END(name)
>  #define NORMAL_WT	4
>  #define NORMAL_WB	5
> +#define MAIR_INIT_ATTR	\
> +	(MAIR_ATTR(MAIR_DEVICE_nGnRnE, DEVICE_nGnRnE) |	\
> +	MAIR_ATTR(MAIR_DEVICE_nGnRE, DEVICE_nGnRE) |	\
> +	MAIR_ATTR(MAIR_DEVICE_GRE, DEVICE_GRE) |	\
> +	MAIR_ATTR(MAIR_NORMAL_NC, NORMAL_NC) |	\
> +	MAIR_ATTR(MAIR_NORMAL_WB, NORMAL_WT) |	\
> +	MAIR_ATTR(MAIR_NORMAL_WT, NORMAL_WB))
> +
> +
> +/* TCR_EL1 - Translation Control Register */
> +#define TCR_ASID_16	(1 << 36)
> +
> +#define TCR_IPS_SHIFT	32
> +#define TCR_IPS_32BIT	(0 << TCR_IPS_SHIFT)
> +#define TCR_IPS_36BIT	(1 << TCR_IPS_SHIFT)
> +#define TCR_IPS_40BIT	(2 << TCR_IPS_SHIFT)
> +#define TCR_IPS_42BIT	(3 << TCR_IPS_SHIFT)
> +#define TCR_IPS_44BIT	(4 << TCR_IPS_SHIFT)
> +#define TCR_IPS_48BIT	(5 << TCR_IPS_SHIFT)
> +
> +#define TCR_TG1_SHIFT	30
> +#define TCR_TG1_16K	(1 << TCR_TG1_SHIFT)
> +#define TCR_TG1_4K	(2 << TCR_TG1_SHIFT)
> +#define TCR_TG1_64K	(3 << TCR_TG1_SHIFT)
> +
> +#define TCR_TG0_SHIFT	14
> +#define TCR_TG0_4K	(0 << TCR_TG0_SHIFT)
> +#define TCR_TG0_64K	(1 << TCR_TG0_SHIFT)
> +#define TCR_TG0_16K	(2 << TCR_TG0_SHIFT)
> +
> +#define TCR_SH1_SHIFT	28
> +#define TCR_SH1_IS	(0x3 << TCR_SH1_SHIFT)
> +#define TCR_ORGN1_SHIFT	26
> +#define TCR_ORGN1_WBWA	(0x1 << TCR_ORGN1_SHIFT)
> +#define TCR_IRGN1_SHIFT	24
> +#define TCR_IRGN1_WBWA	(0x1 << TCR_IRGN1_SHIFT)
> +#define TCR_SH0_SHIFT	12
> +#define TCR_SH0_IS	(0x3 << TCR_SH0_SHIFT)
> +#define TCR_ORGN0_SHIFT	10
> +#define TCR_ORGN0_WBWA	(0x1 << TCR_ORGN0_SHIFT)
> +#define TCR_IRGN0_SHIFT	8
> +#define TCR_IRGN0_WBWA	(0x1 << TCR_IRGN0_SHIFT)
> +
> +#define TCR_CACHE_ATTRS	((TCR_IRGN0_WBWA | TCR_IRGN1_WBWA) |	\
> +			(TCR_ORGN0_WBWA | TCR_ORGN1_WBWA))
> +
> +#ifdef SMP
> +#define TCR_SMP_ATTRS	(TCR_SH0_IS | TCR_SH1_IS)
> +#else
> +#define TCR_SMP_ATTRS	0
> +#endif

When running in a virt environment you will end up using inner-shareable
attributes anyway, so I would not bother having a separate attribute for
non-SMP.
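To sketch what that would look like (just an illustration of the remark, not
a request for this exact code; TCR_SHARE_ATTRS is a made-up name):

/*
 * Hypothetical simplification: always request inner-shareable translation
 * table walks for TTBR0 and TTBR1 instead of making the shareability depend
 * on SMP. With the shifts defined above this evaluates to 0x30003000
 * (SH0 = SH1 = Inner Shareable), which is also architecturally valid on a
 * single CPU.
 */
#define TCR_SHARE_ATTRS	(TCR_SH0_IS | TCR_SH1_IS)

TCR_INIT_FLAGS below would then use TCR_SHARE_ATTRS unconditionally and the
#ifdef SMP block could go away.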
> +
> +#define TCR_T1SZ_SHIFT	16
> +#define TCR_T0SZ_SHIFT	0
> +#define TCR_T1SZ(x)	((x) << TCR_T1SZ_SHIFT)
> +#define TCR_T0SZ(x)	((x) << TCR_T0SZ_SHIFT)
> +#define TCR_TxSZ(x)	(TCR_T1SZ(x) | TCR_T0SZ(x))
> +
> +#define TCR_INIT_FLAGS	(TCR_TxSZ(64 - VIRT_BITS) | TCR_ASID_16 |	\
> +			TCR_TG0_4K | TCR_CACHE_ATTRS | TCR_SMP_ATTRS)
> +
> +/* SCTLR_EL1 - System Control Register */
> +#define SCTLR_RES0	0xc8222400	/* Reserved ARMv8.0, write 0 */
> +#define SCTLR_RES1	0x30d00800	/* Reserved ARMv8.0, write 1 */

You don't seem to use those two defines, so I would drop them.

> +
> +#define SCTLR_M		(_AC(1, UL) << 0)
> +#define SCTLR_A		(_AC(1, UL) << 1)
> +#define SCTLR_C		(_AC(1, UL) << 2)
> +#define SCTLR_SA	(_AC(1, UL) << 3)
> +#define SCTLR_SA0	(_AC(1, UL) << 4)
> +#define SCTLR_CP15BEN	(_AC(1, UL) << 5)
> +#define SCTLR_THEE	(_AC(1, UL) << 6)

I can't find this bit in the latest ARM ARM (0487C.a).

> +#define SCTLR_ITD	(_AC(1, UL) << 7)
> +#define SCTLR_SED	(_AC(1, UL) << 8)
> +#define SCTLR_UMA	(_AC(1, UL) << 9)
> +#define SCTLR_I		(_AC(1, UL) << 12)
> +#define SCTLR_DZE	(_AC(1, UL) << 14)
> +#define SCTLR_UCT	(_AC(1, UL) << 15)
> +#define SCTLR_nTWI	(_AC(1, UL) << 16)
> +#define SCTLR_nTWE	(_AC(1, UL) << 18)
> +#define SCTLR_WXN	(_AC(1, UL) << 19)
> +#define SCTLR_IESB	(_AC(1, UL) << 21)
> +#define SCTLR_SPAN	(_AC(1, UL) << 23)
> +#define SCTLR_EOE	(_AC(1, UL) << 24)
> +#define SCTLR_EE	(_AC(1, UL) << 25)
> +#define SCTLR_UCI	(_AC(1, UL) << 26)
> +#define SCTLR_nTLSMD	(_AC(1, UL) << 28)
> +#define SCTLR_LSMAOE	(_AC(1, UL) << 29)
> +
> +/* Bits to set */
> +#define SCTLR_SET_BITS	\
> +	(SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_UCI | SCTLR_SPAN |	\
> +	SCTLR_nTWE | SCTLR_nTWI | SCTLR_UCT | SCTLR_DZE |	\
> +	SCTLR_I | SCTLR_SED | SCTLR_SA0 | SCTLR_SA | SCTLR_C |	\
> +	SCTLR_M | SCTLR_CP15BEN)
> +
> +/* Bits to clear */
> +#define SCTLR_CLEAR_BITS	\
> +	(SCTLR_EE | SCTLR_EOE | SCTLR_IESB | SCTLR_WXN |	\
> +	SCTLR_UMA | SCTLR_ITD | SCTLR_THEE | SCTLR_A)

It would be nice to have a comment explaining what the expected state of
SCTLR is at the end, i.e. briefly describing each field.
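Something along the lines of the block below, placed above SCTLR_SET_BITS,
would do. This is only my reading of the bits from the definitions above, so
please double-check it against the ARM ARM rather than taking it verbatim:

/*
 * Resulting SCTLR_EL1 configuration once SCTLR_SET_BITS have been set and
 * SCTLR_CLEAR_BITS have been cleared:
 *  - MMU, data cache and instruction cache enabled (M, C, I)
 *  - SP alignment checking enabled at EL1 and EL0 (SA, SA0), but general
 *    alignment fault checking disabled (A cleared)
 *  - Little-endian data accesses and translation table walks at EL1, and
 *    little-endian data accesses at EL0 (EE, EOE cleared)
 *  - Writable memory is not forced to be execute-never (WXN cleared)
 *  - EL0 may use WFI/WFE, CTR_EL0, DC ZVA and the cache maintenance
 *    instructions (nTWI, nTWE, UCT, DZE, UCI), but may not access the
 *    DAIF mask bits (UMA cleared)
 *  - AArch32 EL0: CP15 barriers enabled, SETEND disabled, IT instructions
 *    still allowed (CP15BEN, SED set; ITD cleared)
 *  - PSTATE.PAN is left unchanged on exceptions taken to EL1 (SPAN set)
 *  - IESB cleared, nTLSMD/LSMAOE set (only relevant with the ARMv8.2 RAS
 *    and LSMAOC extensions); THEE cleared (see the question about that
 *    bit above)
 */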
> +
>  /*
>   * Definitions for Block and Page descriptor attributes
>   */
> diff --git a/plat/kvm/arm/entry64.S b/plat/kvm/arm/entry64.S
> index c031b79..2ef7e2d 100644
> --- a/plat/kvm/arm/entry64.S
> +++ b/plat/kvm/arm/entry64.S
> @@ -39,10 +39,31 @@ ENTRY(_libkvmplat_entry)
>  	orr	x0, x0, #CPACR_FPEN_TRAP_NONE
>  	msr	cpacr_el1, x0
> +	/*
> +	 * Disable the MMU. We may have entered the kernel with it on and
> +	 * will need to update the tables later. If this has been set up
> +	 * with anything other than a VA == PA map then this will fail,
> +	 * but in this case the code to find where we are running from
> +	 * would have also failed.
> +	 */
> +	dsb	sy
> +	mrs	x2, sctlr_el1
> +	bic	x2, x2, #SCTLR_M
> +	msr	sctlr_el1, x2
> +	isb
> +
> +	/* Set the context id */
> +	msr	contextidr_el1, xzr
> +
> +	/* Create a pagetable to do PA == VA mapping */
> +	bl	create_pagetables
> +
>  	/* Setup excetpion vector table address before enable MMU */

s/excetpion/exception/

>  	ldr	x29, =vector_table
>  	msr	VBAR_EL1, x29
> +	/* Enable the mmu */
> +	bl	start_mmu
>
>  	/* Load dtb address to x0 as a parameter */
>  	ldr	x0, =_dtb
> diff --git a/plat/kvm/arm/pagetable.S b/plat/kvm/arm/pagetable.S
> index 8de6305..c3bb85b 100644
> --- a/plat/kvm/arm/pagetable.S
> +++ b/plat/kvm/arm/pagetable.S
> @@ -181,6 +181,43 @@ ENTRY(create_pagetables)
>  	ret
>  END(create_pagetables)
> +ENTRY(start_mmu)
> +	dsb	sy

What's this DSB for?

> +
> +	/* Load ttbr0, pagetable starts from _end */
> +	ldr	x27, =_end
> +	msr	ttbr0_el1, x27
> +	isb
> +
> +	/* Clear the Monitor Debug System control register */
> +	msr	mdscr_el1, xzr
> +
> +	/* Invalidate the TLB */

I would say "Invalidate the TLB to avoid stale entries" to make the purpose
clear here.

> +	tlbi	vmalle1is

Why inner-shareable? You are only turning the MMU on for that CPU, so
flushing the local TLBs should be enough. You also want a "dsb nsh" to
ensure the TLB maintenance instruction has completed (a small sketch follows
after the hunk).

> +
> +	ldr	x2, =MAIR_INIT_ATTR
> +	msr	mair_el1, x2
> +
> +	/*
> +	 * Setup TCR according to PARange bits from ID_AA64MMFR0_EL1.
> +	 */
> +	ldr	x2, =TCR_INIT_FLAGS
> +	mrs	x3, id_aa64mmfr0_el1
> +	bfi	x2, x3, #32, #3
> +	msr	tcr_el1, x2
> +
> +	/* Setup SCTLR */
> +	ldr	x2, =SCTLR_SET_BITS
> +	ldr	x3, =SCTLR_CLEAR_BITS
> +	mrs	x1, sctlr_el1
> +	bic	x1, x1, x3	/* Clear the required bits */
> +	orr	x1, x1, x2	/* Set the required bits */
> +	msr	sctlr_el1, x1
> +	isb
> +
> +	ret
> +END(start_mmu)
> +
>  /*
>   * Builds an L0 -> L1 table descriptor
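To make the TLB remark above concrete: in start_mmu the local-only flush is
simply "tlbi vmalle1; dsb nsh; isb" in place of the inner-shareable variant.
As a rough C sketch of the same idea (only an illustration of the suggestion,
not code from the patch; the helper name is made up and the trailing isb is
the usual instruction-stream synchronization step):

/* Invalidate all stage 1 EL1&0 TLB entries on the local PE only. */
static inline void local_tlb_flush_all(void)
{
	__asm__ __volatile__(
		"tlbi	vmalle1\n"	/* local PE only, all ASIDs, EL1&0 stage 1 */
		"dsb	nsh\n"		/* wait for the invalidation to complete */
		"isb\n"			/* synchronize the instruction stream */
		: : : "memory");
}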
Cheers,

--
Julien Grall

_______________________________________________
Minios-devel mailing list
Minios-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/minios-devel