
Re: [Xen-devel] [PATCH v3 07/13] xen/arm: compile and initialize vmap



On Wed, 2013-04-24 at 20:07 +0100, Stefano Stabellini wrote:
> Rename EARLY_VMAP_VIRT_END and EARLY_VMAP_VIRT_START to
> VMAP_VIRT_END and VMAP_VIRT_START.
> 
> Defining VMAP_VIRT_START triggers the compilation of common/vmap.c.
> 
> Define PAGE_HYPERVISOR and MAP_SMALL_PAGES (unused on ARM).

So our vmap uses 2MB mappings only? I suppose that's OK, at least for now.

> Implement map_pages_to_xen and destroy_xen_mappings.

This involves moving the prototypes from x86 to common code, so it needs
acks from Jan and Keir, whom I've CC'd.

> 
> Call vm_init from start_xen.
> 
> Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>

> +static int create_xen_table(lpae_t *entry)
> +{
> +    void *p;
> +    lpae_t pte;
> +
> +    p = alloc_xenheap_pages(0, 0);
> +    if ( p == NULL )
> +        return -ENOMEM;
> +    clear_page(p);
> +    pte = mfn_to_xen_entry(virt_to_mfn(p));
> +    pte.pt.table = 1;
> +    write_pte(entry, pte);
> +    return 0;
> +}
> +
> +enum xenmap_operation {
> +    INSERT,
> +    REMOVE
> +};
> +
> +static int create_xen_entries(enum xenmap_operation op,
> +                              unsigned long virt,
> +                              unsigned long mfn,
> +                              unsigned long nr_mfns)

Shame this can't be combined with create_p2m_entries, but that uses
domain pages and this uses xenheap pages.
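
(Idle thought, not something for this patch: the walker could perhaps be
shared by parameterising the table allocation. A rough sketch only, with
a made-up callback type, and hand-waving over the fact that domheap
pages may need mapping before they can be cleared:)

    typedef void *(*table_alloc_fn)(void *cookie);

    static int create_table(lpae_t *entry, table_alloc_fn alloc, void *cookie)
    {
        void *p;
        lpae_t pte;

        p = alloc(cookie);      /* xenheap or domheap behind this */
        if ( p == NULL )
            return -ENOMEM;
        clear_page(p);
        pte = mfn_to_xen_entry(virt_to_mfn(p));
        pte.pt.table = 1;
        write_pte(entry, pte);
        return 0;
    }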

> +{
> +    int rc;
> +    unsigned long addr = virt, addr_end = addr + nr_mfns * PAGE_SIZE;
> +    lpae_t pte;
> +    lpae_t *third = NULL;
> +
> +    for(; addr < addr_end; addr += PAGE_SIZE, mfn++)
> +    {
> +        if ( !xen_second[second_linear_offset(addr)].pt.valid ||
> +             !xen_second[second_linear_offset(addr)].pt.table )
> +        {
> +            rc = create_xen_table(&xen_second[second_linear_offset(addr)]);
> +            if ( rc < 0 ) {
> +                printk("create_xen_entries: L2 failed\n");
> +                goto out;
> +            }
> +        }
> +
> +        BUG_ON(!xen_second[second_linear_offset(addr)].pt.valid);
> +
> +        third = __va((paddr_t)xen_second[second_linear_offset(addr)].pt.base
> +                << PAGE_SHIFT);
> +        if ( third[third_table_offset(addr)].pt.valid )
> +            flush_tlb_local();

Why this flush? (I notice create_p2m_entries does the same, but with the
_all_local() variant.)

Isn't it a bug for the third-level entry to already be valid here? That
suggests something is overwriting an existing mapping. Does vmap do that?
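
(If an already-valid entry really can't legitimately happen on insert,
I'd rather see that asserted than silently flushed over. Sketch only:)

    BUG_ON( op == INSERT && third[third_table_offset(addr)].pt.valid );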

> +
> +        switch ( op ) {
> +            case INSERT:
> +                pte = mfn_to_xen_entry(mfn);
> +                pte.pt.table = 1;
> +                write_pte(&third[third_table_offset(addr)], pte);
> +                break;
> +            case REMOVE:
> +                memset(&pte, 0x00, sizeof(pte));

            AKA:  pte.bits = 0;
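
(i.e., keeping the lpae_t union usage, the case would read:)

    case REMOVE:
        pte.bits = 0;
        write_pte(&third[third_table_offset(addr)], pte);
        break;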

> +                write_pte(&third[third_table_offset(addr)], pte);
> +                break;
> +            default:
> +                printk("create_xen_entries: invalid op\n");

ASSERT? This really can never happen.
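
(e.g., something like, sketch only:)

    default:
        ASSERT(0); /* unreachable: op is always INSERT or REMOVE */
        break;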

> +                break;
> +        }
> +    }
> +    flush_xen_data_tlb_range_va(virt, PAGE_SIZE * nr_mfns);
> +
> +    rc = 0;
> +
> +out:
> +    return rc;
> +}
> +
[...]

> diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
> index 14e63eb..5287a92 100644
> --- a/xen/include/asm-arm/page.h
> +++ b/xen/include/asm-arm/page.h
> @@ -58,6 +58,9 @@
>  #define DEV_WC        BUFFERABLE
>  #define DEV_CACHED    WRITEBACK
>  
> +#define PAGE_HYPERVISOR         (MATTR_MEM)
> +#define MAP_SMALL_PAGES         PAGE_HYPERVISOR
> +
>  /*
>   * Stage 2 Memory Type.
>   *
> diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
> index b2f3859..e53e1e5 100644
> --- a/xen/include/asm-x86/page.h
> +++ b/xen/include/asm-x86/page.h
> @@ -338,14 +338,6 @@ l3_pgentry_t *virt_to_xen_l3e(unsigned long v);
>  
>  extern void set_pdx_range(unsigned long smfn, unsigned long emfn);
>  
> -/* Map machine page range in Xen virtual address space. */
> -int map_pages_to_xen(
> -    unsigned long virt,
> -    unsigned long mfn,
> -    unsigned long nr_mfns,
> -    unsigned int flags);
> -void destroy_xen_mappings(unsigned long v, unsigned long e);
> -
>  /* Convert between PAT/PCD/PWT embedded in PTE flags and 3-bit cacheattr. */
>  static inline uint32_t pte_flags_to_cacheattr(uint32_t flags)
>  {
> diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
> index 28512fb..efc45c7 100644
> --- a/xen/include/xen/mm.h
> +++ b/xen/include/xen/mm.h
> @@ -48,6 +48,13 @@ void *alloc_xenheap_pages(unsigned int order, unsigned int memflags);
>  void free_xenheap_pages(void *v, unsigned int order);
>  #define alloc_xenheap_page() (alloc_xenheap_pages(0,0))
>  #define free_xenheap_page(v) (free_xenheap_pages(v,0))
> +/* Map machine page range in Xen virtual address space. */
> +int map_pages_to_xen(
> +    unsigned long virt,
> +    unsigned long mfn,
> +    unsigned long nr_mfns,
> +    unsigned int flags);
> +void destroy_xen_mappings(unsigned long v, unsigned long e);
>  
>  /* Claim handling */
>  unsigned long domain_adjust_tot_pages(struct domain *d, long pages);
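
FWIW, with the declarations in xen/mm.h a common-code caller can now do
roughly the following (illustrative only; va and mfn here are
placeholders, not names from this patch):

    /* Map a single frame at va, use it, then tear the mapping down. */
    if ( map_pages_to_xen(va, mfn, 1, PAGE_HYPERVISOR) )
        return -ENOMEM;
    /* ... use the mapping ... */
    destroy_xen_mappings(va, va + PAGE_SIZE);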


