diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index b8a64c3..d0a5e4e 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -598,6 +598,11 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags)
 
         /* For Guest vMCE MSRs virtualization */
         vmce_init_msr(d);
+
+        d->arch.nested_p2m = xzalloc(struct nested_p2m_per_domain);
+        rc = -ENOMEM;
+        if ( d->arch.nested_p2m == NULL )
+            goto fail;
     }
 
     if ( is_hvm_domain(d) )
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 9f6b990..03d31b9 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -607,7 +607,7 @@ int hap_enable(struct domain *d, u32 mode)
     }
 
     for (i = 0; i < MAX_NESTEDP2M; i++) {
-        rv = p2m_alloc_table(d->arch.nested_p2m[i]);
+        rv = p2m_alloc_table(d->arch.nested_p2m->nested_p2m[i]);
         if ( rv != 0 )
            goto out;
     }
@@ -626,7 +626,7 @@ void hap_final_teardown(struct domain *d)
 
     /* Destroy nestedp2m's first */
     for (i = 0; i < MAX_NESTEDP2M; i++) {
-        p2m_teardown(d->arch.nested_p2m[i]);
+        p2m_teardown(d->arch.nested_p2m->nested_p2m[i]);
     }
 
     if ( d->arch.paging.hap.total_pages != 0 )
diff --git a/xen/arch/x86/mm/mm-locks.h b/xen/arch/x86/mm/mm-locks.h
index 738b27c..f545984 100644
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -160,8 +160,8 @@ declare_mm_lock(shr)
  * (i.e. 
 assigning a p2m table to be the shadow of that cr3 */
 declare_mm_lock(nestedp2m)
-#define nestedp2m_lock(d)   mm_lock(nestedp2m, &(d)->arch.nested_p2m_lock)
-#define nestedp2m_unlock(d) mm_unlock(&(d)->arch.nested_p2m_lock)
+#define nestedp2m_lock(d)   mm_lock(nestedp2m, &(d)->arch.nested_p2m->nested_p2m_lock)
+#define nestedp2m_unlock(d) mm_unlock(&(d)->arch.nested_p2m->nested_p2m_lock)
 
 /* P2M lock (per-p2m-table)
  *
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 51a0096..4eb1fd9 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -96,9 +96,9 @@ p2m_init_nestedp2m(struct domain *d)
     uint8_t i;
     struct p2m_domain *p2m;
 
-    mm_lock_init(&d->arch.nested_p2m_lock);
+    mm_lock_init(&d->arch.nested_p2m->nested_p2m_lock);
     for (i = 0; i < MAX_NESTEDP2M; i++) {
-        d->arch.nested_p2m[i] = p2m = xzalloc(struct p2m_domain);
+        d->arch.nested_p2m->nested_p2m[i] = p2m = xzalloc(struct p2m_domain);
         if (p2m == NULL)
             return -ENOMEM;
         if ( !zalloc_cpumask_var(&p2m->dirty_cpumask) )
@@ -376,11 +376,11 @@ static void p2m_teardown_nestedp2m(struct domain *d)
     uint8_t i;
 
     for (i = 0; i < MAX_NESTEDP2M; i++) {
-        if ( !d->arch.nested_p2m[i] )
+        if ( !d->arch.nested_p2m->nested_p2m[i] )
             continue;
-        free_cpumask_var(d->arch.nested_p2m[i]->dirty_cpumask);
-        xfree(d->arch.nested_p2m[i]);
-        d->arch.nested_p2m[i] = NULL;
+        free_cpumask_var(d->arch.nested_p2m->nested_p2m[i]->dirty_cpumask);
+        xfree(d->arch.nested_p2m->nested_p2m[i]);
+        d->arch.nested_p2m->nested_p2m[i] = NULL;
     }
 }
 
@@ -1328,7 +1328,7 @@ p2m_flush_nestedp2m(struct domain *d)
 {
     int i;
     for ( i = 0; i < MAX_NESTEDP2M; i++ )
-        p2m_flush_table(d->arch.nested_p2m[i]);
+        p2m_flush_table(d->arch.nested_p2m->nested_p2m[i]);
 }
 
 struct p2m_domain *
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 00bbaeb..a543f6a 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -238,6 +238,12 @@ struct pv_domain
     unsigned int nr_e820;
 };
 
+struct nested_p2m_per_domain
+{
+    struct p2m_domain *nested_p2m[MAX_NESTEDP2M];
+    mm_lock_t nested_p2m_lock;
+};
+
 struct arch_domain
 {
 #ifdef CONFIG_X86_64
@@ -274,8 +280,7 @@ struct arch_domain
     int page_alloc_unlock_level;
 
     /* nestedhvm: translate l2 guest physical to host physical */
-    struct p2m_domain *nested_p2m[MAX_NESTEDP2M];
-    mm_lock_t nested_p2m_lock;
+    struct nested_p2m_per_domain *nested_p2m;
 
     /* NB. protected by d->event_lock and by irq_desc[irq].lock */
     struct radix_tree_root irq_pirq;