[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v9 08/11] xen: arch-specific hooks for domain_soft_reset()
x86-specific hook cleans up the pirq-emuirq mappings, destroys all ioreq servers and replaces the shared_info frame with an empty page to support subsequent XENMAPSPACE_shared_info call. ARM-specific hook is -ENOSYS for now. Signed-off-by: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx> --- Changes since v8: - Comments fixes [Konrad Rzeszutek Wilk] - pirq 0 is a valid pirq [Konrad Rzeszutek Wilk] - s/0/PAGE_ORDER_4K/ for guest_physmap_{add,remove}_page [Konrad Rzeszutek Wilk] - Free new page in case of guest_physmap_add_page() failure [Konrad Rzeszutek Wilk] - Make ARM-specific hook return -ENOSYS [Julien Grall] Changes since 'PATCH RFC' of the 'reset everything' approach to PVHVM guest kexec: - Coding style, check get_gfn_query() return value, various minor fixes [Jan Beulich] - Do not unpause VCPUs on arch hook failure [Jan Beulich] --- xen/arch/arm/domain.c | 5 +++ xen/arch/x86/domain.c | 84 +++++++++++++++++++++++++++++++++++++++++++ xen/arch/x86/hvm/hvm.c | 2 +- xen/common/domain.c | 4 +++ xen/include/asm-x86/hvm/hvm.h | 2 ++ xen/include/xen/domain.h | 2 ++ 6 files changed, 98 insertions(+), 1 deletion(-) diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c index b2bfc7d..5bdc2e9 100644 --- a/xen/arch/arm/domain.c +++ b/xen/arch/arm/domain.c @@ -655,6 +655,11 @@ void arch_domain_unpause(struct domain *d) { } +int arch_domain_soft_reset(struct domain *d) +{ + return -ENOSYS; +} + static int is_guest_pv32_psr(uint32_t psr) { switch (psr & PSR_MODE_MASK) diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c index 34ecd7c..1829535 100644 --- a/xen/arch/x86/domain.c +++ b/xen/arch/x86/domain.c @@ -704,6 +704,90 @@ void arch_domain_unpause(struct domain *d) viridian_time_ref_count_thaw(d); } +int arch_domain_soft_reset(struct domain *d) +{ + struct page_info *page = virt_to_page(d->shared_info), *new_page; + int ret = 0; + struct domain *owner; + unsigned long mfn, mfn_new, gfn; + p2m_type_t p2mt; + unsigned int i; + + /* Soft reset is supported for HVM 
domains only. */ + if ( !is_hvm_domain(d) ) + return -EINVAL; + + hvm_destroy_all_ioreq_servers(d); + + spin_lock(&d->event_lock); + for ( i = 0; i < d->nr_pirqs ; i++ ) + { + if ( domain_pirq_to_emuirq(d, i) != IRQ_UNBOUND ) + { + ret = unmap_domain_pirq_emuirq(d, i); + if ( ret ) + break; + } + } + spin_unlock(&d->event_lock); + + if ( ret ) + return ret; + + /* + * shared_info page needs to be replaced with a new page, otherwise we + * will get a hole if the domain does XENMAPSPACE_shared_info. + */ + + owner = page_get_owner_and_reference(page); + /* If shared_info page wasn't used, we do not need to replace it. */ + if ( owner != d ) + goto exit_put_page; + + mfn = page_to_mfn(page); + if ( !mfn_valid(mfn) ) + { + printk(XENLOG_G_ERR "Dom%d's shared_info page points to invalid MFN\n", + d->domain_id); + ret = -EINVAL; + goto exit_put_page; + } + + gfn = mfn_to_gmfn(d, mfn); + if ( mfn_x(get_gfn_query(d, gfn, &p2mt)) == INVALID_MFN ) + { + printk(XENLOG_G_ERR "Failed to get Dom%d's shared_info GFN (%lx)\n", + d->domain_id, gfn); + ret = -EINVAL; + goto exit_put_page; + } + + new_page = alloc_domheap_page(d, 0); + if ( !new_page ) + { + printk(XENLOG_G_ERR "Failed to alloc a page to replace" + " Dom%d's shared_info frame %lx\n", d->domain_id, gfn); + ret = -ENOMEM; + goto exit_put_gfn; + } + mfn_new = page_to_mfn(new_page); + guest_physmap_remove_page(d, gfn, mfn, PAGE_ORDER_4K); + + ret = guest_physmap_add_page(d, gfn, mfn_new, PAGE_ORDER_4K); + if ( ret ) + { + printk(XENLOG_G_ERR "Failed to add a page to replace" + " Dom%d's shared_info frame %lx\n", d->domain_id, gfn); + free_domheap_page(new_page); + } + exit_put_gfn: + put_gfn(d, gfn); + exit_put_page: + put_page(page); + + return ret; +} + /* * These are the masks of CR4 bits (subject to hardware availability) which a * PV guest may not legitimiately attempt to modify. 
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index 545aa91..08a7a10 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -1370,7 +1370,7 @@ static void hvm_all_ioreq_servers_remove_vcpu(struct domain *d, struct vcpu *v) spin_unlock(&d->arch.hvm_domain.ioreq_server.lock); } -static void hvm_destroy_all_ioreq_servers(struct domain *d) +void hvm_destroy_all_ioreq_servers(struct domain *d) { struct hvm_ioreq_server *s, *next; diff --git a/xen/common/domain.c b/xen/common/domain.c index 4f805d5..8401b42 100644 --- a/xen/common/domain.c +++ b/xen/common/domain.c @@ -1035,6 +1035,10 @@ int domain_soft_reset(struct domain *d) for_each_vcpu ( d, v ) unmap_vcpu_info(v); + rc = arch_domain_soft_reset(d); + if (rc) + return rc; + domain_resume(d); return 0; diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h index 82f1b32..450ca0c 100644 --- a/xen/include/asm-x86/hvm/hvm.h +++ b/xen/include/asm-x86/hvm/hvm.h @@ -229,6 +229,8 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d, int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *p, bool_t buffered); unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool_t buffered); +void hvm_destroy_all_ioreq_servers(struct domain *d); + void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat); int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat); diff --git a/xen/include/xen/domain.h b/xen/include/xen/domain.h index 848db8a..a469fe0 100644 --- a/xen/include/xen/domain.h +++ b/xen/include/xen/domain.h @@ -65,6 +65,8 @@ void arch_domain_shutdown(struct domain *d); void arch_domain_pause(struct domain *d); void arch_domain_unpause(struct domain *d); +int arch_domain_soft_reset(struct domain *d); + int arch_set_info_guest(struct vcpu *, vcpu_guest_context_u); void arch_get_info_guest(struct vcpu *, vcpu_guest_context_u); -- 2.4.3 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |