[PATCH v4 2/9] domain: GADDR based shared guest area registration alternative - teardown
In preparation of the introduction of new vCPU operations allowing to
register the respective areas (one of the two is x86-specific) by
guest-physical address, add the necessary domain cleanup hooks.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Julien Grall <jgrall@xxxxxxxxxx>
---
v4: Re-base.
v2: Add assertion in unmap_guest_area().

--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1024,7 +1024,10 @@ int arch_domain_soft_reset(struct domain
     }

     for_each_vcpu ( d, v )
+    {
         set_xen_guest_handle(v->arch.time_info_guest, NULL);
+        unmap_guest_area(v, &v->arch.time_guest_area);
+    }

  exit_put_gfn:
     put_gfn(d, gfn_x(gfn));
@@ -2381,6 +2384,8 @@ int domain_relinquish_resources(struct d
             if ( ret )
                 return ret;

+            unmap_guest_area(v, &v->arch.time_guest_area);
+
             vpmu_destroy(v);
         }

--- a/xen/arch/x86/include/asm/domain.h
+++ b/xen/arch/x86/include/asm/domain.h
@@ -669,6 +669,7 @@ struct arch_vcpu

     /* A secondary copy of the vcpu time info. */
     XEN_GUEST_HANDLE(vcpu_time_info_t) time_info_guest;
+    struct guest_area time_guest_area;

     struct arch_vm_event *vm_event;

--- a/xen/arch/x86/pv/shim.c
+++ b/xen/arch/x86/pv/shim.c
@@ -382,8 +382,10 @@ int pv_shim_shutdown(uint8_t reason)

     for_each_vcpu ( d, v )
     {
-        /* Unmap guest vcpu_info pages. */
+        /* Unmap guest vcpu_info page and runstate/time areas. */
         unmap_vcpu_info(v);
+        unmap_guest_area(v, &v->runstate_guest_area);
+        unmap_guest_area(v, &v->arch.time_guest_area);

         /* Zap runstate and time area handles. */
         set_xen_guest_handle(runstate_guest(v), NULL);
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -992,7 +992,10 @@ int domain_kill(struct domain *d)
         if ( cpupool_move_domain(d, cpupool0) )
             return -ERESTART;
         for_each_vcpu ( d, v )
+        {
             unmap_vcpu_info(v);
+            unmap_guest_area(v, &v->runstate_guest_area);
+        }
         d->is_dying = DOMDYING_dead;
         /* Mem event cleanup has to go here because the rings
          * have to be put before we call put_domain. */
@@ -1446,6 +1449,7 @@ int domain_soft_reset(struct domain *d,
     {
         set_xen_guest_handle(runstate_guest(v), NULL);
         unmap_vcpu_info(v);
+        unmap_guest_area(v, &v->runstate_guest_area);
     }

     rc = arch_domain_soft_reset(d);
@@ -1597,6 +1601,19 @@ void unmap_vcpu_info(struct vcpu *v)
     put_page_and_type(mfn_to_page(mfn));
 }

+/*
+ * This is only intended to be used for domain cleanup (or more generally only
+ * with at least the respective vCPU, if it's not the current one, reliably
+ * paused).
+ */
+void unmap_guest_area(struct vcpu *v, struct guest_area *area)
+{
+    struct domain *d = v->domain;
+
+    if ( v != current )
+        ASSERT(atomic_read(&v->pause_count) | atomic_read(&d->pause_count));
+}
+
 int default_initialise_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
     struct vcpu_guest_context *ctxt;
--- a/xen/include/xen/domain.h
+++ b/xen/include/xen/domain.h
@@ -5,6 +5,12 @@
 #include <xen/types.h>

 #include <public/xen.h>
+
+struct guest_area {
+    struct page_info *pg;
+    void *map;
+};
+
 #include <asm/domain.h>
 #include <asm/numa.h>

@@ -77,6 +83,11 @@ void arch_vcpu_destroy(struct vcpu *v);
 int map_vcpu_info(struct vcpu *v, unsigned long gfn, unsigned int offset);
 void unmap_vcpu_info(struct vcpu *v);

+int map_guest_area(struct vcpu *v, paddr_t gaddr, unsigned int size,
+                   struct guest_area *area,
+                   void (*populate)(void *dst, struct vcpu *v));
+void unmap_guest_area(struct vcpu *v, struct guest_area *area);
+
 struct xen_domctl_createdomain;
 int arch_domain_create(struct domain *d,
                        struct xen_domctl_createdomain *config,
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -203,6 +203,7 @@ struct vcpu
         XEN_GUEST_HANDLE(vcpu_runstate_info_compat_t) compat;
     } runstate_guest; /* guest address */
 #endif
+    struct guest_area runstate_guest_area;

     unsigned int     new_state;

     /* Has the FPU been initialised? */
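
Note (not part of the patch): unmap_guest_area() is intentionally only a stub
at this stage - nothing populates a struct guest_area until the mapping side
is introduced later in the series, so teardown only asserts that a foreign
vCPU is reliably paused. For orientation, below is a minimal sketch of what
the full teardown might look like once map_guest_area() takes a page
reference and a global mapping; the use of the domain lock and of
unmap_domain_page_global()/put_page_and_type() are assumptions here, not
taken from this patch, and would have to match whatever the mapping path
actually does.

    /* Illustrative sketch only - assumes the context of xen/common/domain.c. */
    void unmap_guest_area(struct vcpu *v, struct guest_area *area)
    {
        struct domain *d = v->domain;
        void *map;
        struct page_info *pg;

        if ( v != current )
            ASSERT(atomic_read(&v->pause_count) | atomic_read(&d->pause_count));

        /*
         * Detach the area under the domain lock, so a (hypothetical)
         * concurrent re-registration can never observe a half-torn-down state.
         */
        domain_lock(d);
        map = area->map;
        area->map = NULL;
        pg = area->pg;
        area->pg = NULL;
        domain_unlock(d);

        /* Drop the global mapping and the page reference, if one was taken. */
        if ( pg )
        {
            unmap_domain_page_global(map);
            put_page_and_type(pg);
        }
    }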