[PATCH v2 10/18] x86/mm: switch {create,destroy}_perdomain_mapping() domain parameter to vCPU
In preparation for the per-domain area being per-vCPU.  This requires
moving some of the {create,destroy}_perdomain_mapping() calls from the
domain initialization and teardown paths into vCPU initialization and
teardown.

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
 xen/arch/x86/domain.c             | 12 ++++++++----
 xen/arch/x86/domain_page.c        | 13 +++++--------
 xen/arch/x86/hvm/hvm.c            |  5 -----
 xen/arch/x86/include/asm/domain.h |  2 +-
 xen/arch/x86/include/asm/mm.h     |  4 ++--
 xen/arch/x86/mm.c                 |  6 ++++--
 xen/arch/x86/pv/domain.c          |  2 +-
 xen/arch/x86/x86_64/mm.c          |  2 +-
 8 files changed, 22 insertions(+), 24 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 0481164f3727..6e1f622f7385 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -559,6 +559,10 @@ int arch_vcpu_create(struct vcpu *v)
 
     v->arch.flags = TF_kernel_mode;
 
+    rc = create_perdomain_mapping(v, PERDOMAIN_VIRT_START, 0, false);
+    if ( rc )
+        return rc;
+
     rc = mapcache_vcpu_init(v);
     if ( rc )
         return rc;
@@ -607,6 +611,7 @@ int arch_vcpu_create(struct vcpu *v)
     return rc;
 
  fail:
+    free_perdomain_mappings(v);
     paging_vcpu_teardown(v);
     vcpu_destroy_fpu(v);
     xfree(v->arch.msrs);
@@ -629,6 +634,8 @@ void arch_vcpu_destroy(struct vcpu *v)
         hvm_vcpu_destroy(v);
     else
         pv_vcpu_destroy(v);
+
+    free_perdomain_mappings(v);
 }
 
 int arch_sanitise_domain_config(struct xen_domctl_createdomain *config)
@@ -870,8 +877,7 @@ int arch_domain_create(struct domain *d,
     }
     else if ( is_pv_domain(d) )
     {
-        if ( (rc = mapcache_domain_init(d)) != 0 )
-            goto fail;
+        mapcache_domain_init(d);
 
         if ( (rc = pv_domain_initialise(d)) != 0 )
             goto fail;
@@ -909,7 +915,6 @@ int arch_domain_create(struct domain *d,
     XFREE(d->arch.cpu_policy);
     if ( paging_initialised )
         paging_final_teardown(d);
-    free_perdomain_mappings(d);
     return rc;
 }
 
@@ -935,7 +940,6 @@ void arch_domain_destroy(struct domain *d)
     if ( is_pv_domain(d) )
         pv_domain_destroy(d);
 
-    free_perdomain_mappings(d);
     free_xenheap_page(d->shared_info);
     cleanup_domain_irq_mapping(d);
 
diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
index ad6d86be6918..1372be20224e 100644
--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -231,7 +231,7 @@ void unmap_domain_page(const void *ptr)
     local_irq_restore(flags);
 }
 
-int mapcache_domain_init(struct domain *d)
+void mapcache_domain_init(struct domain *d)
 {
     struct mapcache_domain *dcache = &d->arch.pv.mapcache;
     unsigned int bitmap_pages;
@@ -240,7 +240,7 @@ int mapcache_domain_init(struct domain *d)
 
 #ifdef NDEBUG
     if ( !mem_hotplug && max_page <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
-        return 0;
+        return;
 #endif
 
     BUILD_BUG_ON(MAPCACHE_VIRT_END + PAGE_SIZE * (3 +
@@ -252,9 +252,6 @@ int mapcache_domain_init(struct domain *d)
                  (bitmap_pages + 1) * PAGE_SIZE / sizeof(long);
 
     spin_lock_init(&dcache->lock);
-
-    return create_perdomain_mapping(d, (unsigned long)dcache->inuse,
-                                    2 * bitmap_pages + 1, false);
 }
 
 int mapcache_vcpu_init(struct vcpu *v)
@@ -271,14 +268,14 @@ int mapcache_vcpu_init(struct vcpu *v)
     if ( ents > dcache->entries )
     {
         /* Populate page tables. */
-        int rc = create_perdomain_mapping(d, MAPCACHE_VIRT_START, ents, false);
+        int rc = create_perdomain_mapping(v, MAPCACHE_VIRT_START, ents, false);
 
         /* Populate bit maps. */
         if ( !rc )
-            rc = create_perdomain_mapping(d, (unsigned long)dcache->inuse,
+            rc = create_perdomain_mapping(v, (unsigned long)dcache->inuse,
                                           nr, true);
         if ( !rc )
-            rc = create_perdomain_mapping(d, (unsigned long)dcache->garbage,
+            rc = create_perdomain_mapping(v, (unsigned long)dcache->garbage,
                                           nr, true);
 
         if ( rc )
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index e7817144059e..0dc693818349 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -601,10 +601,6 @@ int hvm_domain_initialise(struct domain *d,
     INIT_LIST_HEAD(&d->arch.hvm.mmcfg_regions);
     INIT_LIST_HEAD(&d->arch.hvm.msix_tables);
 
-    rc = create_perdomain_mapping(d, PERDOMAIN_VIRT_START, 0, false);
-    if ( rc )
-        goto fail;
-
     hvm_init_cacheattr_region_list(d);
 
     rc = paging_enable(d, PG_refcounts|PG_translate|PG_external);
@@ -708,7 +704,6 @@ int hvm_domain_initialise(struct domain *d,
     XFREE(d->arch.hvm.irq);
  fail0:
     hvm_destroy_cacheattr_region_list(d);
- fail:
     hvm_domain_relinquish_resources(d);
     XFREE(d->arch.hvm.io_handler);
     XFREE(d->arch.hvm.pl_time);
diff --git a/xen/arch/x86/include/asm/domain.h b/xen/arch/x86/include/asm/domain.h
index fbe59baa82ec..7c143d2a6c46 100644
--- a/xen/arch/x86/include/asm/domain.h
+++ b/xen/arch/x86/include/asm/domain.h
@@ -73,7 +73,7 @@ struct mapcache_domain {
     unsigned long *garbage;
 };
 
-int mapcache_domain_init(struct domain *d);
+void mapcache_domain_init(struct domain *d);
 int mapcache_vcpu_init(struct vcpu *v);
 void mapcache_override_current(struct vcpu *v);
 
diff --git a/xen/arch/x86/include/asm/mm.h b/xen/arch/x86/include/asm/mm.h
index 0c57442c9593..f501e5e115ff 100644
--- a/xen/arch/x86/include/asm/mm.h
+++ b/xen/arch/x86/include/asm/mm.h
@@ -600,13 +600,13 @@ int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg);
 #define NIL(type) ((type *)-sizeof(type))
 #define IS_NIL(ptr) (!((uintptr_t)(ptr) + sizeof(*(ptr))))
 
-int create_perdomain_mapping(struct domain *d, unsigned long va,
+int create_perdomain_mapping(struct vcpu *v, unsigned long va,
                              unsigned int nr, bool populate);
 void populate_perdomain_mapping(const struct vcpu *v, unsigned long va,
                                 mfn_t *mfn, unsigned long nr);
 void destroy_perdomain_mapping(const struct vcpu *v, unsigned long va,
                                unsigned int nr);
-void free_perdomain_mappings(struct domain *d);
+void free_perdomain_mappings(struct vcpu *v);
 
 void __iomem *ioremap_wc(paddr_t pa, size_t len);
 
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 45664c56cb8f..c321f5723b04 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -6300,9 +6300,10 @@ static bool perdomain_l1e_needs_freeing(l1_pgentry_t l1e)
            (_PAGE_PRESENT | _PAGE_AVAIL0);
 }
 
-int create_perdomain_mapping(struct domain *d, unsigned long va,
+int create_perdomain_mapping(struct vcpu *v, unsigned long va,
                              unsigned int nr, bool populate)
 {
+    struct domain *d = v->domain;
     struct page_info *pg;
     l3_pgentry_t *l3tab;
     l2_pgentry_t *l2tab;
@@ -6560,8 +6561,9 @@ void destroy_perdomain_mapping(const struct vcpu *v, unsigned long va,
     unmap_domain_page(l3tab);
 }
 
-void free_perdomain_mappings(struct domain *d)
+void free_perdomain_mappings(struct vcpu *v)
 {
+    struct domain *d = v->domain;
     l3_pgentry_t *l3tab;
     unsigned int i;
 
diff --git a/xen/arch/x86/pv/domain.c b/xen/arch/x86/pv/domain.c
index ca32e7b5d686..534d2899100f 100644
--- a/xen/arch/x86/pv/domain.c
+++ b/xen/arch/x86/pv/domain.c
@@ -277,7 +277,7 @@ int switch_compat(struct domain *d)
 
 static int pv_create_gdt_ldt_l1tab(struct vcpu *v)
 {
-    return create_perdomain_mapping(v->domain, GDT_VIRT_START(v),
+    return create_perdomain_mapping(v, GDT_VIRT_START(v),
                                     1U << GDT_LDT_VCPU_SHIFT, false);
 }
 
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 55bba7e473ae..3b421d218e0b 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -730,7 +730,7 @@ void __init zap_low_mappings(void)
 
 int setup_compat_arg_xlat(struct vcpu *v)
 {
-    return create_perdomain_mapping(v->domain, ARG_XLAT_START(v),
+    return create_perdomain_mapping(v, ARG_XLAT_START(v),
                                     PFN_UP(COMPAT_ARG_XLAT_SIZE), true);
 }
 
-- 
2.46.0
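[ Illustrative sketch, not part of the submitted patch: a minimal example of
  how a caller uses the reworked interface, where per-domain mappings are
  created and freed against a vCPU rather than a domain. The helper names and
  the exact include list below are assumptions for illustration only; the two
  interface functions and their signatures are taken from the diff above. ]

    /* Sketch only; example_* helpers are hypothetical. */
    #include <xen/sched.h>   /* struct vcpu */
    #include <asm/mm.h>      /* create_perdomain_mapping(), free_perdomain_mappings() */

    static int example_vcpu_perdomain_init(struct vcpu *v)
    {
        /*
         * Create the top-level per-domain page-table structures for this
         * vCPU (nr = 0, no population), as arch_vcpu_create() now does.
         */
        int rc = create_perdomain_mapping(v, PERDOMAIN_VIRT_START, 0, false);

        if ( rc )
            return rc;

        /* Further per-vCPU population would go here. */
        return 0;
    }

    static void example_vcpu_perdomain_fini(struct vcpu *v)
    {
        /* Teardown is likewise keyed on the vCPU, as in arch_vcpu_destroy(). */
        free_perdomain_mappings(v);
    }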