[Xen-devel] [PATCH v2 6/9] xen: move VCPUOP_register_vcpu_info to common code
Move the implementation of VCPUOP_register_vcpu_info from x86-specific to
common code. Move vcpu_info_mfn from an arch-specific vcpu sub-field to the
common vcpu struct. Move the initialization of vcpu_info_mfn to common code.
Move unmap_vcpu_info, and the call to it at domain destruction time, to
common code.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
---
 xen/arch/x86/domain.c        |  113 ------------------------------------------
 xen/common/domain.c          |  110 ++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/domain.h |    3 -
 xen/include/xen/domain.h     |    3 +
 xen/include/xen/sched.h      |    3 +
 5 files changed, 116 insertions(+), 116 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 8d30d08..ee22869 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -393,8 +393,6 @@ int vcpu_initialise(struct vcpu *v)
             goto done;
     }
 
-    v->arch.pv_vcpu.vcpu_info_mfn = INVALID_MFN;
-
     spin_lock_init(&v->arch.pv_vcpu.shadow_ldt_lock);
 
     if ( !is_idle_domain(d) )
@@ -953,99 +951,6 @@ void arch_vcpu_reset(struct vcpu *v)
     }
 }
 
-/*
- * Unmap the vcpu info page if the guest decided to place it somewhere
- * else. This is only used from arch_domain_destroy, so there's no
- * need to do anything clever.
- */
-static void
-unmap_vcpu_info(struct vcpu *v)
-{
-    unsigned long mfn;
-
-    if ( v->arch.pv_vcpu.vcpu_info_mfn == INVALID_MFN )
-        return;
-
-    mfn = v->arch.pv_vcpu.vcpu_info_mfn;
-    unmap_domain_page_global(v->vcpu_info);
-
-    v->vcpu_info = &dummy_vcpu_info;
-    v->arch.pv_vcpu.vcpu_info_mfn = INVALID_MFN;
-
-    put_page_and_type(mfn_to_page(mfn));
-}
-
-/*
- * Map a guest page in and point the vcpu_info pointer at it.  This
- * makes sure that the vcpu_info is always pointing at a valid piece
- * of memory, and it sets a pending event to make sure that a pending
- * event doesn't get missed.
- */
-static int
-map_vcpu_info(struct vcpu *v, unsigned long gfn, unsigned offset)
-{
-    struct domain *d = v->domain;
-    void *mapping;
-    vcpu_info_t *new_info;
-    struct page_info *page;
-    int i;
-
-    if ( offset > (PAGE_SIZE - sizeof(vcpu_info_t)) )
-        return -EINVAL;
-
-    if ( v->arch.pv_vcpu.vcpu_info_mfn != INVALID_MFN )
-        return -EINVAL;
-
-    /* Run this command on yourself or on other offline VCPUS. */
-    if ( (v != current) && !test_bit(_VPF_down, &v->pause_flags) )
-        return -EINVAL;
-
-    page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
-    if ( !page )
-        return -EINVAL;
-
-    if ( !get_page_type(page, PGT_writable_page) )
-    {
-        put_page(page);
-        return -EINVAL;
-    }
-
-    mapping = __map_domain_page_global(page);
-    if ( mapping == NULL )
-    {
-        put_page_and_type(page);
-        return -ENOMEM;
-    }
-
-    new_info = (vcpu_info_t *)(mapping + offset);
-
-    if ( v->vcpu_info == &dummy_vcpu_info )
-    {
-        memset(new_info, 0, sizeof(*new_info));
-        __vcpu_info(v, new_info, evtchn_upcall_mask) = 1;
-    }
-    else
-    {
-        memcpy(new_info, v->vcpu_info, sizeof(*new_info));
-    }
-
-    v->vcpu_info = new_info;
-    v->arch.pv_vcpu.vcpu_info_mfn = page_to_mfn(page);
-
-    /* Set new vcpu_info pointer /before/ setting pending flags. */
-    wmb();
-
-    /*
-     * Mark everything as being pending just to make sure nothing gets
-     * lost.  The domain will get a spurious event, but it can cope.
-     */
-    vcpu_info(v, evtchn_upcall_pending) = 1;
-    for ( i = 0; i < BITS_PER_EVTCHN_WORD(d); i++ )
-        set_bit(i, &vcpu_info(v, evtchn_pending_sel));
-
-    return 0;
-}
-
 long
 arch_do_vcpu_op(
     int cmd, struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg)
@@ -1082,22 +987,6 @@ arch_do_vcpu_op(
         break;
     }
 
-    case VCPUOP_register_vcpu_info:
-    {
-        struct domain *d = v->domain;
-        struct vcpu_register_vcpu_info info;
-
-        rc = -EFAULT;
-        if ( copy_from_guest(&info, arg, 1) )
-            break;
-
-        domain_lock(d);
-        rc = map_vcpu_info(v, info.mfn, info.offset);
-        domain_unlock(d);
-
-        break;
-    }
-
     /*
      * XXX Disable for 4.0.0: __update_vcpu_system_time() writes to the given
      * virtual address even when running in another domain's address space.
@@ -2014,8 +1903,6 @@ int domain_relinquish_resources(struct domain *d)
              * mappings. */
             destroy_gdt(v);
-
-            unmap_vcpu_info(v);
         }
 
         if ( d->arch.pv_domain.pirq_eoi_map != NULL )
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 64ee29d..7aad36a 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -141,6 +141,7 @@ struct vcpu *alloc_vcpu(
         v->vcpu_info = ((vcpu_id < XEN_LEGACY_MAX_VCPUS)
                         ? (vcpu_info_t *)&shared_info(d, vcpu_info[vcpu_id])
                         : &dummy_vcpu_info);
+        v->vcpu_info_mfn = INVALID_MFN;
         init_waitqueue_vcpu(v);
     }
 
@@ -493,6 +494,7 @@ int rcu_lock_live_remote_domain_by_id(domid_t dom, struct domain **d)
 int domain_kill(struct domain *d)
 {
     int rc = 0;
+    struct vcpu *v;
 
     if ( d == current->domain )
         return -EINVAL;
@@ -517,6 +519,8 @@ int domain_kill(struct domain *d)
             BUG_ON(rc != -EAGAIN);
             break;
         }
+        for_each_vcpu ( d, v )
+            unmap_vcpu_info(v);
         d->is_dying = DOMDYING_dead;
         /* Mem event cleanup has to go here because the rings
          * have to be put before we call put_domain. */
@@ -842,6 +846,96 @@ void vcpu_reset(struct vcpu *v)
     vcpu_unpause(v);
 }
 
+/*
+ * Map a guest page in and point the vcpu_info pointer at it.  This
+ * makes sure that the vcpu_info is always pointing at a valid piece
+ * of memory, and it sets a pending event to make sure that a pending
+ * event doesn't get missed.
+ */
+int map_vcpu_info(struct vcpu *v, unsigned long gfn, unsigned offset)
+{
+    struct domain *d = v->domain;
+    void *mapping;
+    vcpu_info_t *new_info;
+    struct page_info *page;
+    int i;
+
+    if ( offset > (PAGE_SIZE - sizeof(vcpu_info_t)) )
+        return -EINVAL;
+
+    if ( v->vcpu_info_mfn != INVALID_MFN )
+        return -EINVAL;
+
+    /* Run this command on yourself or on other offline VCPUS. */
+    if ( (v != current) && !test_bit(_VPF_down, &v->pause_flags) )
+        return -EINVAL;
+
+    page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
+    if ( !page )
+        return -EINVAL;
+
+    if ( !get_page_type(page, PGT_writable_page) )
+    {
+        put_page(page);
+        return -EINVAL;
+    }
+
+    mapping = __map_domain_page_global(page);
+    if ( mapping == NULL )
+    {
+        put_page_and_type(page);
+        return -ENOMEM;
+    }
+
+    new_info = (vcpu_info_t *)(mapping + offset);
+
+    if ( v->vcpu_info == &dummy_vcpu_info )
+    {
+        memset(new_info, 0, sizeof(*new_info));
+        __vcpu_info(v, new_info, evtchn_upcall_mask) = 1;
+    }
+    else
+    {
+        memcpy(new_info, v->vcpu_info, sizeof(*new_info));
+    }
+
+    v->vcpu_info = new_info;
+    v->vcpu_info_mfn = page_to_mfn(page);
+
+    /* Set new vcpu_info pointer /before/ setting pending flags. */
+    wmb();
+
+    /*
+     * Mark everything as being pending just to make sure nothing gets
+     * lost.  The domain will get a spurious event, but it can cope.
+     */
+    vcpu_info(v, evtchn_upcall_pending) = 1;
+    for ( i = 0; i < BITS_PER_EVTCHN_WORD(d); i++ )
+        set_bit(i, &vcpu_info(v, evtchn_pending_sel));
+
+    return 0;
+}
+
+/*
+ * Unmap the vcpu info page if the guest decided to place it somewhere
+ * else. This is only used from arch_domain_destroy, so there's no
+ * need to do anything clever.
+ */
+void unmap_vcpu_info(struct vcpu *v)
+{
+    unsigned long mfn;
+
+    if ( v->vcpu_info_mfn == INVALID_MFN )
+        return;
+
+    mfn = v->vcpu_info_mfn;
+    unmap_domain_page_global(v->vcpu_info);
+
+    v->vcpu_info = &dummy_vcpu_info;
+    v->vcpu_info_mfn = INVALID_MFN;
+
+    put_page_and_type(mfn_to_page(mfn));
+}
 
 long do_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
@@ -961,6 +1055,22 @@ long do_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
 
         break;
 
+    case VCPUOP_register_vcpu_info:
+    {
+        struct domain *d = v->domain;
+        struct vcpu_register_vcpu_info info;
+
+        rc = -EFAULT;
+        if ( copy_from_guest(&info, arg, 1) )
+            break;
+
+        domain_lock(d);
+        rc = map_vcpu_info(v, info.mfn, info.offset);
+        domain_unlock(d);
+
+        break;
+    }
+
 #ifdef VCPU_TRAP_NMI
     case VCPUOP_send_nmi:
         if ( !guest_handle_is_null(arg) )
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 6f9744a..e7ed7c3 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -374,9 +374,6 @@ struct pv_vcpu
     /* Current LDT details. */
     unsigned long shadow_ldt_mapcnt;
     spinlock_t shadow_ldt_lock;
-
-    /* Guest-specified relocation of vcpu_info. */
-    unsigned long vcpu_info_mfn;
 };
 
 struct arch_vcpu
diff --git a/xen/include/xen/domain.h b/xen/include/xen/domain.h
index d4ac50f..d0263a9 100644
--- a/xen/include/xen/domain.h
+++ b/xen/include/xen/domain.h
@@ -52,6 +52,9 @@ void free_pirq_struct(void *);
 int vcpu_initialise(struct vcpu *v);
 void vcpu_destroy(struct vcpu *v);
 
+int map_vcpu_info(struct vcpu *v, unsigned long gfn, unsigned offset);
+void unmap_vcpu_info(struct vcpu *v);
+
 int arch_domain_create(struct domain *d, unsigned int domcr_flags);
 
 void arch_domain_destroy(struct domain *d);
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index cabaf27..504c3d6 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -187,6 +187,9 @@ struct vcpu
 
     struct waitqueue_vcpu *waitqueue_vcpu;
 
+    /* Guest-specified relocation of vcpu_info. */
+    unsigned long vcpu_info_mfn;
+
     struct arch_vcpu arch;
 };
 
-- 
1.7.2.5
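For context, VCPUOP_register_vcpu_info is the hypercall a guest uses to relocate
a vCPU's vcpu_info out of the shared info page into a page of its own choosing,
which is what makes vCPUs beyond XEN_LEGACY_MAX_VCPUS usable (alloc_vcpu above
gives those only dummy_vcpu_info by default). The sketch below shows roughly how
a guest would invoke the interface this patch moves into common code. It is not
part of the patch: it assumes a Linux-style PV guest where HYPERVISOR_vcpu_op(),
arbitrary_virt_to_mfn() and offset_in_page() are available, and the header paths
and the register_vcpu_info() helper name are illustrative, not taken from any
particular tree.

    #include <linux/mm.h>               /* offset_in_page() */
    #include <xen/interface/vcpu.h>     /* VCPUOP_register_vcpu_info, struct vcpu_register_vcpu_info */
    #include <asm/xen/hypercall.h>      /* HYPERVISOR_vcpu_op() */
    #include <asm/xen/page.h>           /* arbitrary_virt_to_mfn() (assumed location) */

    /* Illustrative only: register this CPU's vcpu_info area with the hypervisor. */
    static int register_vcpu_info(int cpu, struct vcpu_info *vcpup)
    {
        struct vcpu_register_vcpu_info info;

        /* The hypercall takes a guest frame number plus a byte offset into that
         * frame; map_vcpu_info() rejects offsets that would let the structure
         * straddle a page boundary. */
        info.mfn    = arbitrary_virt_to_mfn(vcpup);
        info.offset = offset_in_page(vcpup);

        /* Only the calling vCPU, or an offline one, may be targeted. */
        return HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
    }

Once registered, the relocation cannot be undone from the guest side
(map_vcpu_info() returns -EINVAL if vcpu_info_mfn is already set); with this
patch the mapping is only torn down by unmap_vcpu_info() when the domain is
killed.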