[Xen-devel] [PATCH v7 07/15] xen/arm: compile and initialize vmap

Rename EARLY_VMAP_VIRT_END and EARLY_VMAP_VIRT_START to VMAP_VIRT_END and
VMAP_VIRT_START. Defining VMAP_VIRT_START triggers the compilation of
common/vmap.c.

Define PAGE_HYPERVISOR and MAP_SMALL_PAGES (unused on ARM, because we only
support 4K pages, so in effect it is always set).

Implement map_pages_to_xen and destroy_xen_mappings.

Call vm_init from start_xen.

Changes in v5:
- use alloc_xenheap_page;
- use pfn_to_paddr.

Changes in v4:
- remove flush_tlb_local() from create_xen_entries;
- return error if a mapping is already present.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
 xen/arch/arm/mm.c            |  100 ++++++++++++++++++++++++++++++++++++++++-
 xen/arch/arm/setup.c         |    3 +
 xen/include/asm-arm/config.h |    4 +-
 xen/include/asm-arm/page.h   |    3 +
 xen/include/asm-x86/page.h   |    8 ---
 xen/include/xen/mm.h         |    7 +++
 6 files changed, 112 insertions(+), 13 deletions(-)

diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 5c2fadc..e40798f 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -33,6 +33,7 @@
 #include <xen/err.h>
 #include <asm/page.h>
 #include <asm/current.h>
+#include <asm/flushtlb.h>
 #include <public/memory.h>
 #include <xen/sched.h>
 #include <xsm/xsm.h>
@@ -558,9 +559,9 @@ void __init setup_frametable_mappings(paddr_t ps, paddr_t pe)
 /* Map the physical memory range start - start + len into virtual
  * memory and return the virtual address of the mapping.
  * start has to be 2MB aligned.
- * len has to be < EARLY_VMAP_VIRT_END - EARLY_VMAP_VIRT_START.
+ * len has to be < VMAP_VIRT_END - VMAP_VIRT_START.
  */
-static __initdata unsigned long early_vmap_start = EARLY_VMAP_VIRT_END;
+static __initdata unsigned long early_vmap_start = VMAP_VIRT_END;
 void* __init early_ioremap(paddr_t start, size_t len, unsigned attributes)
 {
     paddr_t end = start + len;
@@ -573,7 +574,7 @@ void* __init early_ioremap(paddr_t start, size_t len, unsigned attributes)
     ASSERT(!(early_vmap_start & (~SECOND_MASK)));
 
     /* The range we need to map is too big */
-    if ( early_vmap_start >= EARLY_VMAP_VIRT_START )
+    if ( early_vmap_start >= VMAP_VIRT_START )
         return NULL;
 
     map_start = early_vmap_start;
@@ -596,6 +597,99 @@ void *__init arch_vmap_virt_end(void)
     return (void *)early_vmap_start;
 }
 
+static int create_xen_table(lpae_t *entry)
+{
+    void *p;
+    lpae_t pte;
+
+    p = alloc_xenheap_page();
+    if ( p == NULL )
+        return -ENOMEM;
+    clear_page(p);
+    pte = mfn_to_xen_entry(virt_to_mfn(p));
+    pte.pt.table = 1;
+    write_pte(entry, pte);
+    return 0;
+}
+
+enum xenmap_operation {
+    INSERT,
+    REMOVE
+};
+
+static int create_xen_entries(enum xenmap_operation op,
+                              unsigned long virt,
+                              unsigned long mfn,
+                              unsigned long nr_mfns)
+{
+    int rc;
+    unsigned long addr = virt, addr_end = addr + nr_mfns * PAGE_SIZE;
+    lpae_t pte;
+    lpae_t *third = NULL;
+
+    for(; addr < addr_end; addr += PAGE_SIZE, mfn++)
+    {
+        if ( !xen_second[second_linear_offset(addr)].pt.valid ||
+             !xen_second[second_linear_offset(addr)].pt.table )
+        {
+            rc = create_xen_table(&xen_second[second_linear_offset(addr)]);
+            if ( rc < 0 ) {
+                printk("create_xen_entries: L2 failed\n");
+                goto out;
+            }
+        }
+
+        BUG_ON(!xen_second[second_linear_offset(addr)].pt.valid);
+
+        third = __va(pfn_to_paddr(xen_second[second_linear_offset(addr)].pt.base));
+
+        switch ( op ) {
+            case INSERT:
+                if ( third[third_table_offset(addr)].pt.valid )
+                {
+                    printk("create_xen_entries: trying to replace an existing mapping addr=%lx mfn=%lx\n",
+                           addr, mfn);
+                    return -EINVAL;
+                }
+                pte = mfn_to_xen_entry(mfn);
+                pte.pt.table = 1;
+                write_pte(&third[third_table_offset(addr)], pte);
+                break;
+            case REMOVE:
+                if ( !third[third_table_offset(addr)].pt.valid )
+                {
+                    printk("create_xen_entries: trying to remove a non-existing mapping addr=%lx\n",
+                           addr);
+                    return -EINVAL;
+                }
+                pte.bits = 0;
+                write_pte(&third[third_table_offset(addr)], pte);
+                break;
+            default:
+                BUG();
+        }
+    }
+    flush_xen_data_tlb_range_va(virt, PAGE_SIZE * nr_mfns);
+
+    rc = 0;
+
+out:
+    return rc;
+}
+
+int map_pages_to_xen(unsigned long virt,
+                     unsigned long mfn,
+                     unsigned long nr_mfns,
+                     unsigned int flags)
+{
+    ASSERT(flags == PAGE_HYPERVISOR);
+    return create_xen_entries(INSERT, virt, mfn, nr_mfns);
+}
+void destroy_xen_mappings(unsigned long v, unsigned long e)
+{
+    create_xen_entries(REMOVE, v, 0, (e - v) >> PAGE_SHIFT);
+}
+
 enum mg { mg_clear, mg_ro, mg_rw, mg_rx };
 static void set_pte_flags_on_range(const char *p, unsigned long l, enum mg mg)
 {
diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index cfe3d94..59646d6 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -34,6 +34,7 @@
 #include <xen/keyhandler.h>
 #include <xen/cpu.h>
 #include <xen/pfn.h>
+#include <xen/vmap.h>
 #include <asm/page.h>
 #include <asm/current.h>
 #include <asm/setup.h>
@@ -480,6 +481,8 @@ void __init start_xen(unsigned long boot_phys_offset,
 
     console_init_postirq();
 
+    vm_init();
+
     do_presmp_initcalls();
 
     for_each_present_cpu ( i )
diff --git a/xen/include/asm-arm/config.h b/xen/include/asm-arm/config.h
index e49aac1..98a3a43 100644
--- a/xen/include/asm-arm/config.h
+++ b/xen/include/asm-arm/config.h
@@ -95,12 +95,12 @@
 #define FIXMAP_ADDR(n)        (mk_unsigned_long(0x00400000) + (n) * PAGE_SIZE)
 #define BOOT_MISC_VIRT_START   mk_unsigned_long(0x00600000)
 #define FRAMETABLE_VIRT_START  mk_unsigned_long(0x02000000)
-#define EARLY_VMAP_VIRT_START  mk_unsigned_long(0x10000000)
+#define VMAP_VIRT_START        mk_unsigned_long(0x10000000)
 #define XENHEAP_VIRT_START     mk_unsigned_long(0x40000000)
 #define DOMHEAP_VIRT_START     mk_unsigned_long(0x80000000)
 #define DOMHEAP_VIRT_END       mk_unsigned_long(0xffffffff)
 
-#define EARLY_VMAP_VIRT_END    XENHEAP_VIRT_START
+#define VMAP_VIRT_END          XENHEAP_VIRT_START
 #define HYPERVISOR_VIRT_START  XEN_VIRT_START
 
 #define DOMHEAP_ENTRIES        1024  /* 1024 2MB mapping slots */
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index a6a312f..fd6946e 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -58,6 +58,9 @@
 #define DEV_WC        BUFFERABLE
 #define DEV_CACHED    WRITEBACK
 
+#define PAGE_HYPERVISOR (MATTR_MEM)
+#define MAP_SMALL_PAGES PAGE_HYPERVISOR
+
 /*
  * Stage 2 Memory Type.
  *
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index b2f3859..e53e1e5 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -338,14 +338,6 @@ l3_pgentry_t *virt_to_xen_l3e(unsigned long v);
 
 extern void set_pdx_range(unsigned long smfn, unsigned long emfn);
 
-/* Map machine page range in Xen virtual address space. */
-int map_pages_to_xen(
-    unsigned long virt,
-    unsigned long mfn,
-    unsigned long nr_mfns,
-    unsigned int flags);
-void destroy_xen_mappings(unsigned long v, unsigned long e);
-
 /* Convert between PAT/PCD/PWT embedded in PTE flags and 3-bit cacheattr. */
 static inline uint32_t pte_flags_to_cacheattr(uint32_t flags)
 {
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 28512fb..efc45c7 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -48,6 +48,13 @@ void *alloc_xenheap_pages(unsigned int order, unsigned int memflags);
 void free_xenheap_pages(void *v, unsigned int order);
 #define alloc_xenheap_page() (alloc_xenheap_pages(0,0))
 #define free_xenheap_page(v) (free_xenheap_pages(v,0))
+/* Map machine page range in Xen virtual address space. */
+int map_pages_to_xen(
+    unsigned long virt,
+    unsigned long mfn,
+    unsigned long nr_mfns,
+    unsigned int flags);
+void destroy_xen_mappings(unsigned long v, unsigned long e);
 
 /* Claim handling */
 unsigned long domain_adjust_tot_pages(struct domain *d, long pages);
-- 
1.7.2.5
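
As a rough illustration of how the interface added by this patch is meant to be
called, a minimal sketch follows. It is not part of the patch: it only uses the
two entry points declared above in xen/include/xen/mm.h, and the function name,
virtual address and machine frame number in it are made-up placeholders. On ARM
the flags argument has to be PAGE_HYPERVISOR, as enforced by the ASSERT in
map_pages_to_xen.

    /* Hypothetical caller of the new interface: map nr_mfns machine frames
     * at a chosen virtual address, use them, then unmap the range.
     * example_va and example_mfn are placeholders, not real values. */
    static int __init example_map_unmap(void)
    {
        unsigned long example_va  = VMAP_VIRT_START;  /* any free VA below VMAP_VIRT_END */
        unsigned long example_mfn = 0x80000;          /* placeholder machine frame number */
        unsigned long nr_mfns     = 2;
        int rc;

        /* On ARM the only accepted flags value is PAGE_HYPERVISOR. */
        rc = map_pages_to_xen(example_va, example_mfn, nr_mfns, PAGE_HYPERVISOR);
        if ( rc )
            return rc;   /* -EINVAL if one of the pages was already mapped */

        /* ... access the frames through example_va ... */

        destroy_xen_mappings(example_va, example_va + nr_mfns * PAGE_SIZE);
        return 0;
    }

Note that when create_xen_entries rejects a request with -EINVAL it returns
before reaching flush_xen_data_tlb_range_va(), so no TLB maintenance is done
for a rejected mapping; the error return itself is the behaviour added in v4
of the series.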
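The page-table walk in create_xen_entries can also be followed with a quick
index calculation. The snippet below is a standalone illustration only: the
shift and mask values (one second-level entry per 2MB, 512 four-kilobyte
entries per third-level table) are assumptions based on the 4K-page LPAE
layout, since the second_linear_offset()/third_table_offset() macros are
defined elsewhere and not shown in this patch.

    #include <stdio.h>

    /* Standalone illustration of the index arithmetic assumed above; the
     * 21/12-bit shifts and the 0x1ff mask are LPAE 4K-page assumptions. */
    int main(void)
    {
        unsigned long va = 0x10305000UL;        /* an address inside the vmap range */
        unsigned long l2 = va >> 21;            /* which 2MB slot of xen_second */
        unsigned long l3 = (va >> 12) & 0x1ff;  /* which 4K entry of that L3 table */

        printf("VA %#lx -> second-level slot %lu, third-level slot %lu\n", va, l2, l3);
        return 0;
    }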