Re: [Xen-devel] [RFC PATCH] xen/arm: Add 4-level page table for stage 2 translation
Hello Vijaya,
On 04/30/2014 01:15 PM, vijay.kilari@xxxxxxxxx wrote:
> diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
> index d151724..c0e0362 100644
> --- a/xen/arch/arm/arm64/head.S
> +++ b/xen/arch/arm/arm64/head.S
> @@ -224,13 +224,16 @@ skip_bss:
> ldr x0, =MAIRVAL
> msr mair_el2, x0
>
> + mrs x1, ID_AA64MMFR0_EL1
> +
> /* Set up the HTCR:
> - * PASize -- 40 bits / 1TB
> + * PASize -- based on ID_AA64MMFR0_EL1.PARange value
> * Top byte is used
> * PT walks use Outer-Shareable accesses,
> * PT walks are write-back, write-allocate in both cache levels,
> * Full 64-bit address space goes through this table. */
> - ldr x0, =0x80822500
> + ldr x0, =TCR_VAL_40PA
If you define TCR_VAL_40PA in another header, I would also move the
comments explaining the different bits into that header.
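For instance (only a sketch; the header location is just a suggestion,
the value is the one currently open-coded in head.S and the field
meanings are the ones from the existing comment):

    /* e.g. in include/asm-arm/arm64/processor.h:
     *
     * TCR_EL2 value for the 40-bit PA setup:
     *  - Top byte is used
     *  - PS: 40 bits, patched at boot from ID_AA64MMFR0_EL1.PARange
     *  - SH0: PT walks use Outer-Shareable accesses
     *  - ORGN0/IRGN0: PT walks are write-back, write-allocate in both
     *    cache levels
     *  - T0SZ: full 64-bit address space goes through this table
     */
    #define TCR_VAL_40PA 0x80822500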
> + bfi x0, x1, #16, #3
> msr tcr_el2, x0
>
> /* Set up the SCTLR_EL2:
> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> index 305879f..d577b23 100644
> --- a/xen/arch/arm/mm.c
> +++ b/xen/arch/arm/mm.c
> @@ -382,13 +382,18 @@ void __cpuinit setup_virt_paging(void)
> /* SH0=00, ORGN0=IRGN0=01
> * SL0=01 (Level-1)
> * ARMv7: T0SZ=(1)1000 = -8 (32-(-8) = 40 bit physical addresses)
> * ARMv8: T0SZ=01 1000 = 24 (64-24 = 40 bit physical addresses)
> * PS=010 == 40 bits
> */
> #ifdef CONFIG_ARM_32
> - WRITE_SYSREG32(0x80002558, VTCR_EL2);
> + WRITE_SYSREG32(VTCR_VAL, VTCR_EL2);
> #else
> - WRITE_SYSREG32(0x80022558, VTCR_EL2);
> + /* Change PS to 48 and T0SZ = 16 SL0 - 2 to take VA 48 bit */
> + if ( current_cpu_data.mm64.pa_range == VTCR_PS_48BIT )
> + WRITE_SYSREG32(VTCR_VAL_48PA, VTCR_EL2);
> + else
> + /* Consider by default 40 PA support for ARM64 */
> + WRITE_SYSREG32(VTCR_VAL_40PA, VTCR_EL2);
> #endif
Same remark here. Also, please update the ARMv8 part of the comment for
the 48-bit PA case.
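Something like this next to the definitions would keep the information
from the old comment (sketch only; the values are taken from the
existing comment and from your own description of the 48-bit case):

    /* VTCR_VAL_40PA: PS=010 (40-bit PA), T0SZ=24 (40-bit IPA),
     *                SL0=01 (walk starts at level 1)
     * VTCR_VAL_48PA: PS=101 (48-bit PA), T0SZ=16 (48-bit IPA),
     *                SL0=10 (walk starts at level 0)
     */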
> isb();
> }
> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
> index d00c882..bdaab46 100644
> --- a/xen/arch/arm/p2m.c
> +++ b/xen/arch/arm/p2m.c
> @@ -10,29 +10,38 @@
> #include <asm/hardirq.h>
> #include <asm/page.h>
>
> +#ifdef CONFIG_ARM_64
> +/* Zeroeth level is of 1 page size */
> +#define P2M_FIRST_ORDER 0
Given the comment, shouldn't this be called P2M_ZEROETH_ORDER?
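I.e. something like:

    /* Zeroeth level P2M is a single page */
    #define P2M_ZEROETH_ORDER 0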
> +#else
> /* First level P2M is 2 consecutive pages */
> #define P2M_FIRST_ORDER 1
> +#endif
> #define P2M_FIRST_ENTRIES (LPAE_ENTRIES<<P2M_FIRST_ORDER)
>
> void dump_p2m_lookup(struct domain *d, paddr_t addr)
> {
> struct p2m_domain *p2m = &d->arch.p2m;
> - lpae_t *first;
> + lpae_t *lookup;
>
> printk("dom%d IPA 0x%"PRIpaddr"\n", d->domain_id, addr);
>
> +#ifdef CONFIG_ARM_64
> + if ( zeroeth_linear_offset(addr) > LPAE_ENTRIES )
> +#else
> if ( first_linear_offset(addr) > LPAE_ENTRIES )
> +#endif
> {
> - printk("Cannot dump addresses in second of first level pages...\n");
> + printk("Cannot dump addresses in second of
> first(ARM32)/zeroeth(ARM64) level pages...\n");
> return;
> }
>
> printk("P2M @ %p mfn:0x%lx\n",
> - p2m->first_level, page_to_mfn(p2m->first_level));
> + p2m->lookup_level, page_to_mfn(p2m->lookup_level));
>
> - first = __map_domain_page(p2m->first_level);
> - dump_pt_walk(first, addr);
> - unmap_domain_page(first);
> + lookup = __map_domain_page(p2m->lookup_level);
> + dump_pt_walk(lookup, addr);
dump_pt_walk assumes the table passed in is the first level. I think you
should update dump_pt_walk to handle the zeroeth level.
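Something along these lines, maybe (a rough sketch: the "0TH" print
format is only a suggestion, zeroeth_table_offset() is the helper
introduced by this patch, the lpae_walk_t fields are the existing ones,
and the rest of the walk stays as it is today):

void dump_pt_walk(lpae_t *lookup, paddr_t addr)
{
    lpae_t *first = lookup;

#ifdef CONFIG_ARM_64
    lpae_t pte = lookup[zeroeth_table_offset(addr)];

    printk("0TH[0x%x] = 0x%"PRIpaddr"\n",
           zeroeth_table_offset(addr), pte.bits);
    if ( !pte.walk.valid || !pte.walk.table )
        return;

    /* A zeroeth entry can only point to a first level table */
    first = map_domain_page(pte.walk.base);
#endif

    /* ... existing first/second/third level dump on 'first' ... */

#ifdef CONFIG_ARM_64
    unmap_domain_page(first);
#endif
}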
> + unmap_domain_page(lookup);
> }
>
> void p2m_load_VTTBR(struct domain *d)
> @@ -44,6 +53,20 @@ void p2m_load_VTTBR(struct domain *d)
> isb(); /* Ensure update is visible */
> }
>
> +#ifdef CONFIG_ARM_64
> +/*
> + * Map zeroeth level page that addr contains.
> + */
> +static lpae_t *p2m_map_zeroeth(struct p2m_domain *p2m, paddr_t addr)
> +{
> + if ( zeroeth_linear_offset(addr) >= LPAE_ENTRIES )
> + return NULL;
> +
> + return __map_domain_page(p2m->lookup_level);
> +}
> +
> +#else
> +
> static int p2m_first_level_index(paddr_t addr)
> {
> /*
> @@ -64,10 +87,11 @@ static lpae_t *p2m_map_first(struct p2m_domain *p2m, paddr_t addr)
> if ( first_linear_offset(addr) >= P2M_FIRST_ENTRIES )
> return NULL;
>
> - page = p2m->first_level + p2m_first_level_index(addr);
> + page = p2m->lookup_level + p2m_first_level_index(addr);
>
> return __map_domain_page(page);
> }
> +#endif
>
> /*
> * Lookup the MFN corresponding to a domain's PFN.
> @@ -79,6 +103,9 @@ paddr_t p2m_lookup(struct domain *d, paddr_t paddr, p2m_type_t *t)
> {
> struct p2m_domain *p2m = &d->arch.p2m;
> lpae_t pte, *first = NULL, *second = NULL, *third = NULL;
> +#ifdef CONFIG_ARM_64
> + lpae_t *zeroeth = NULL;
> +#endif
> paddr_t maddr = INVALID_PADDR;
> p2m_type_t _t;
>
> @@ -89,9 +116,29 @@ paddr_t p2m_lookup(struct domain *d, paddr_t paddr, p2m_type_t *t)
>
> spin_lock(&p2m->lock);
>
> +#ifdef CONFIG_ARM_64
> + zeroeth = p2m_map_zeroeth(p2m, paddr);
> + if ( !zeroeth )
> + goto err;
> +
> + pte = zeroeth[zeroeth_table_offset(paddr)];
> + /* Zeroeth level does not support block translation
> + * so pte.p2m.table should be always set.
> + * Just check for valid bit
> + */
> + if ( !pte.p2m.valid )
> + goto done;
> +
> +#else
> first = p2m_map_first(p2m, paddr);
> if ( !first )
> goto err;
> +#endif
> +
> +#ifdef CONFIG_ARM_64
> + /* Map first level table */
> + first = map_domain_page(pte.p2m.base);
> +#endif
Can you move the code in this #ifdef into the first CONFIG_ARM_64 block?
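I.e. something like (just rearranging the code you already have):

#ifdef CONFIG_ARM_64
    zeroeth = p2m_map_zeroeth(p2m, paddr);
    if ( !zeroeth )
        goto err;

    pte = zeroeth[zeroeth_table_offset(paddr)];
    /* The zeroeth level does not support block translation, so
     * pte.p2m.table is always set for a valid entry: only check the
     * valid bit. */
    if ( !pte.p2m.valid )
        goto done;

    /* Map the first level table the zeroeth entry points to */
    first = map_domain_page(pte.p2m.base);
#else
    first = p2m_map_first(p2m, paddr);
    if ( !first )
        goto err;
#endif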
[..]
> @@ -244,8 +294,14 @@ static int apply_p2m_changes(struct domain *d,
> struct p2m_domain *p2m = &d->arch.p2m;
> lpae_t *first = NULL, *second = NULL, *third = NULL;
> paddr_t addr;
> - unsigned long cur_first_page = ~0,
> - cur_first_offset = ~0,
> +#ifdef CONFIG_ARM_64
> + lpae_t *zeroeth = NULL;
> + unsigned long cur_zeroeth_page = ~0,
> + cur_zeroeth_offset = ~0;
> +#else
> + unsigned long cur_first_page = ~0;
> +#endif
> + unsigned long cur_first_offset = ~0,
> cur_second_offset = ~0;
> unsigned long count = 0;
> unsigned int flush = 0;
> @@ -260,6 +316,37 @@ static int apply_p2m_changes(struct domain *d,
> addr = start_gpaddr;
> while ( addr < end_gpaddr )
> {
> +#ifdef CONFIG_ARM_64
> + /* Find zeroeth offset and map zeroeth page */
> + if ( cur_zeroeth_page != zeroeth_table_offset(addr) )
> + {
> + if ( zeroeth ) unmap_domain_page(zeroeth);
> + zeroeth = p2m_map_zeroeth(p2m, addr);
> + if ( !zeroeth )
> + {
> + rc = -EINVAL;
> + goto out;
> + }
> + cur_zeroeth_page = zeroeth_table_offset(addr);
> + }
> +
> + if ( !zeroeth[zeroeth_table_offset(addr)].p2m.valid )
> + {
> + if ( !populate )
> + {
> + addr = (addr + ZEROETH_SIZE) & ZEROETH_MASK;
> + continue;
> + }
> + rc = p2m_create_table(d, &zeroeth[zeroeth_table_offset(addr)]);
> + if ( rc < 0 )
> + {
> + printk("p2m_populate_ram: L0 failed\n");
> + goto out;
> + }
> + }
> +
> + BUG_ON(!zeroeth[zeroeth_table_offset(addr)].p2m.valid);
> +#else
> if ( cur_first_page != p2m_first_level_index(addr) )
> {
> if ( first ) unmap_domain_page(first);
> @@ -271,7 +358,16 @@ static int apply_p2m_changes(struct domain *d,
> }
> cur_first_page = p2m_first_level_index(addr);
> }
> +#endif
>
> +#ifdef CONFIG_ARM_64
> + if ( cur_zeroeth_offset != zeroeth_table_offset(addr) )
> + {
> + if ( first ) unmap_domain_page(first);
> + first = map_domain_page(zeroeth[zeroeth_table_offset(addr)].p2m.base);
> + cur_zeroeth_offset = zeroeth_table_offset(addr);
> + }
> +#endif
Same remark here.
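I.e. fold it into the CONFIG_ARM_64 block above, roughly (sketch):

#ifdef CONFIG_ARM_64
        /* ... zeroeth level mapping/creation as above ... */

        if ( cur_zeroeth_offset != zeroeth_table_offset(addr) )
        {
            if ( first ) unmap_domain_page(first);
            first = map_domain_page(zeroeth[zeroeth_table_offset(addr)].p2m.base);
            cur_zeroeth_offset = zeroeth_table_offset(addr);
        }
#else
        /* ... existing first level handling for arm32 ... */
#endif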
> if ( !first[first_table_offset(addr)].p2m.valid )
> {
> if ( !populate )
> @@ -279,7 +375,6 @@ static int apply_p2m_changes(struct domain *d,
> addr = (addr + FIRST_SIZE) & FIRST_MASK;
> continue;
> }
> -
Why did you remove this empty line?
> rc = p2m_create_table(d, &first[first_table_offset(addr)]);
> if ( rc < 0 )
> {
> @@ -287,7 +382,6 @@ static int apply_p2m_changes(struct domain *d,
> goto out;
> }
> }
> -
Same here.
> BUG_ON(!first[first_table_offset(addr)].p2m.valid);
>
> if ( cur_first_offset != first_table_offset(addr) )
> @@ -305,7 +399,6 @@ static int apply_p2m_changes(struct domain *d,
> addr = (addr + SECOND_SIZE) & SECOND_MASK;
> continue;
> }
> -
Same here.
[..]
> /* Current VMID in use */
> uint8_t vmid;
> diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
> index 905beb8..8477206 100644
> --- a/xen/include/asm-arm/page.h
> +++ b/xen/include/asm-arm/page.h
> @@ -6,12 +6,13 @@
> #include <public/xen.h>
> #include <asm/processor.h>
>
> +#ifdef CONFIG_ARM_64
> +#define PADDR_BITS 48
> +#else
> #define PADDR_BITS 40
> +#endif
> #define PADDR_MASK ((1ULL << PADDR_BITS)-1)
>
> -#define VADDR_BITS 32
> -#define VADDR_MASK (~0UL)
> -
Why did you remove VADDR_{BITS,MASK}?
> +#ifdef CONFIG_ARM_64
> +/* 48 bit VA to 48 bit PA */
[..]
> +/* 40 bit VA to 40 bit PA */
[..]
> +/* 40 bit VA to 32 bit PA */
Did you mean IPA instead of VA?
Also, on ARM, the PA is encoded with 40 bits.
[..]
>
> - unsigned long pad1:24;
> + //unsigned long pad1:24;
This line should be removed rather than commented out.
> + unsigned long pad1:16;
> } lpae_walk_t;
>
> typedef union {
> diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h
> index 06e638f..1355d81 100644
> --- a/xen/include/asm-arm/processor.h
> +++ b/xen/include/asm-arm/processor.h
> @@ -173,9 +173,21 @@ struct cpuinfo_arm {
> uint64_t bits[2];
> } aux64;
>
> - struct {
> + union {
> uint64_t bits[2];
> + struct {
> + unsigned long pa_range:4;
> + unsigned long asid_bits:4;
> + unsigned long bigend:4;
> + unsigned long secure_ns:4;
> + unsigned long bigend_el0:4;
> + unsigned long tgranule_16K:4;
> + unsigned long tgranule_64K:4;
> + unsigned long tgranule_4K:4;
> + unsigned long __res0:32;
> + };
> } mm64;
> +
Spurious line.
>
> struct {
> uint64_t bits[2];
>
Regards,
--
Julien Grall
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel