[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] Move construct_vmcs to arch_vmx_do_launch.
# HG changeset patch # User kaf24@xxxxxxxxxxxxxxxxxxxx # Node ID ddb803b6308baa434b5eb25bb72d3bfc30c82581 # Parent 37bea65ed6ca2faff4bf5f1b281b6cdc9df3522d Move construct_vmcs to arch_vmx_do_launch. Currently, since VMX guest is brought up by xen0, construct_vmcs is executed in xen0 hypervisor context, this is not reasonable. This patch moves construct_vmcs to arch_vmx_do_launch. Signed-off-by: Xin Li <xin.b.li@xxxxxxxxx> Signed-off-by: Yunhong Jiang <yunhong.jiang@xxxxxxxxx> Signed-off-by: Nakajima Jun <nakajima.jun@xxxxxxxxx> diff -r 37bea65ed6ca -r ddb803b6308b xen/arch/x86/domain.c --- a/xen/arch/x86/domain.c Fri Oct 7 09:57:13 2005 +++ b/xen/arch/x86/domain.c Fri Oct 7 10:00:56 2005 @@ -323,47 +323,16 @@ #ifdef CONFIG_VMX static int vmx_switch_on; -static int vmx_final_setup_guest( - struct vcpu *v, struct vcpu_guest_context *ctxt) -{ - int error; - struct cpu_user_regs *regs; - struct vmcs_struct *vmcs; - - regs = &ctxt->user_regs; - - /* - * Create a new VMCS - */ - if (!(vmcs = alloc_vmcs())) { - printk("Failed to create a new VMCS\n"); - return -ENOMEM; - } - - memset(&v->arch.arch_vmx, 0, sizeof (struct arch_vmx_struct)); - - v->arch.arch_vmx.vmcs = vmcs; - error = construct_vmcs( - &v->arch.arch_vmx, regs, ctxt, VMCS_USE_HOST_ENV); - if ( error < 0 ) - { - printk("Failed to construct a new VMCS\n"); - goto out; - } - +static void vmx_final_setup_guest(struct vcpu *v) +{ v->arch.schedule_tail = arch_vmx_do_launch; -#if defined (__i386__) - v->domain->arch.vmx_platform.real_mode_data = - (unsigned long *) regs->esi; -#endif - if (v == v->domain->vcpu[0]) { - /* + /* * Required to do this once per domain * XXX todo: add a seperate function to do these. 
*/ - memset(&v->domain->shared_info->evtchn_mask[0], 0xff, + memset(&v->domain->shared_info->evtchn_mask[0], 0xff, sizeof(v->domain->shared_info->evtchn_mask)); /* Put the domain in shadow mode even though we're going to be using @@ -375,23 +344,6 @@ if (!vmx_switch_on) vmx_switch_on = 1; - - return 0; - -out: - free_vmcs(vmcs); - if(v->arch.arch_vmx.io_bitmap_a != 0) { - free_xenheap_pages( - v->arch.arch_vmx.io_bitmap_a, get_order_from_bytes(0x1000)); - v->arch.arch_vmx.io_bitmap_a = 0; - } - if(v->arch.arch_vmx.io_bitmap_b != 0) { - free_xenheap_pages( - v->arch.arch_vmx.io_bitmap_b, get_order_from_bytes(0x1000)); - v->arch.arch_vmx.io_bitmap_b = 0; - } - v->arch.arch_vmx.vmcs = 0; - return error; } #endif @@ -480,8 +432,7 @@ if ( !pagetable_get_paddr(d->arch.phys_table) ) d->arch.phys_table = v->arch.guest_table; - if ( (rc = vmx_final_setup_guest(v, c)) != 0 ) - return rc; + vmx_final_setup_guest(v); } update_pagetables(v); @@ -968,20 +919,7 @@ if ( !VMX_DOMAIN(v) ) return; - BUG_ON(v->arch.arch_vmx.vmcs == NULL); - free_vmcs(v->arch.arch_vmx.vmcs); - if(v->arch.arch_vmx.io_bitmap_a != 0) { - free_xenheap_pages( - v->arch.arch_vmx.io_bitmap_a, get_order_from_bytes(0x1000)); - v->arch.arch_vmx.io_bitmap_a = 0; - } - if(v->arch.arch_vmx.io_bitmap_b != 0) { - free_xenheap_pages( - v->arch.arch_vmx.io_bitmap_b, get_order_from_bytes(0x1000)); - v->arch.arch_vmx.io_bitmap_b = 0; - } - v->arch.arch_vmx.vmcs = 0; - + destroy_vmcs(&v->arch.arch_vmx); free_monitor_pagetable(v); rem_ac_timer(&v->domain->arch.vmx_platform.vmx_pit.pit_timer); } diff -r 37bea65ed6ca -r ddb803b6308b xen/arch/x86/vmx.c --- a/xen/arch/x86/vmx.c Fri Oct 7 09:57:13 2005 +++ b/xen/arch/x86/vmx.c Fri Oct 7 10:00:56 2005 @@ -47,8 +47,6 @@ int hvm_enabled; #ifdef CONFIG_VMX - -int vmcs_size; unsigned int opt_vmx_debug_level = 0; integer_param("vmx_debug", opt_vmx_debug_level); diff -r 37bea65ed6ca -r ddb803b6308b xen/arch/x86/vmx_vmcs.c --- a/xen/arch/x86/vmx_vmcs.c Fri Oct 7 09:57:13 2005 +++ 
b/xen/arch/x86/vmx_vmcs.c Fri Oct 7 10:00:56 2005 @@ -37,6 +37,8 @@ #endif #ifdef CONFIG_VMX +int vmcs_size; + struct vmcs_struct *alloc_vmcs(void) { struct vmcs_struct *vmcs; @@ -51,13 +53,35 @@ return vmcs; } -void free_vmcs(struct vmcs_struct *vmcs) +static void free_vmcs(struct vmcs_struct *vmcs) { int order; order = get_order_from_bytes(vmcs_size); free_xenheap_pages(vmcs, order); } + +static int load_vmcs(struct arch_vmx_struct *arch_vmx, u64 phys_ptr) +{ + int error; + + if ((error = __vmptrld(phys_ptr))) { + clear_bit(ARCH_VMX_VMCS_LOADED, &arch_vmx->flags); + return error; + } + set_bit(ARCH_VMX_VMCS_LOADED, &arch_vmx->flags); + return 0; +} + +#if 0 +static int store_vmcs(struct arch_vmx_struct *arch_vmx, u64 phys_ptr) +{ + /* take the current VMCS */ + __vmptrst(phys_ptr); + clear_bit(ARCH_VMX_VMCS_LOADED, &arch_vmx->flags); + return 0; +} +#endif static inline int construct_vmcs_controls(struct arch_vmx_struct *arch_vmx) { @@ -118,7 +142,7 @@ #endif }; -static void vmx_setup_platform(struct vcpu *v, struct cpu_user_regs *regs) +static void vmx_setup_platform(struct vcpu *v) { int i; unsigned char e820_map_nr; @@ -161,9 +185,6 @@ } unmap_domain_page(p); - if (v->vcpu_id) - return; - /* Initialise shared page */ mpfn = get_mfn_from_pfn(gpfn); if (mpfn == INVALID_MFN) { @@ -184,7 +205,7 @@ &v->domain->shared_info->evtchn_mask[0]); } -void vmx_set_host_env(struct vcpu *v) +static void vmx_set_host_env(struct vcpu *v) { unsigned int tr, cpu, error = 0; struct host_execution_env host_env; @@ -209,14 +230,13 @@ error |= __vmwrite(HOST_TR_BASE, host_env.tr_base); } -void vmx_do_launch(struct vcpu *v) +static void vmx_do_launch(struct vcpu *v) { /* Update CR3, GDT, LDT, TR */ unsigned int error = 0; unsigned long pfn = 0; unsigned long cr0, cr4; struct pfn_info *page; - struct cpu_user_regs *regs = guest_cpu_user_regs(); __asm__ __volatile__ ("mov %%cr0,%0" : "=r" (cr0) : ); @@ -246,7 +266,7 @@ page = (struct pfn_info *) alloc_domheap_page(NULL); pfn = (unsigned 
long) (page - frame_table); - vmx_setup_platform(v, regs); + vmx_setup_platform(v); vmx_set_host_env(v); @@ -267,8 +287,7 @@ /* * Initially set the same environement as host. */ -static inline int -construct_init_vmcs_guest(struct cpu_user_regs *regs) +static inline int construct_init_vmcs_guest(cpu_user_regs_t *regs) { int error = 0; union vmcs_arbytes arbytes; @@ -374,34 +393,33 @@ return error; } -static inline int construct_vmcs_host(struct host_execution_env *host_env) +static inline int construct_vmcs_host() { int error = 0; +#ifdef __x86_64__ + unsigned long fs_base; + unsigned long gs_base; +#endif unsigned long crn; /* Host Selectors */ - host_env->ds_selector = __HYPERVISOR_DS; - error |= __vmwrite(HOST_ES_SELECTOR, host_env->ds_selector); - error |= __vmwrite(HOST_SS_SELECTOR, host_env->ds_selector); - error |= __vmwrite(HOST_DS_SELECTOR, host_env->ds_selector); + error |= __vmwrite(HOST_ES_SELECTOR, __HYPERVISOR_DS); + error |= __vmwrite(HOST_SS_SELECTOR, __HYPERVISOR_DS); + error |= __vmwrite(HOST_DS_SELECTOR, __HYPERVISOR_DS); #if defined (__i386__) - error |= __vmwrite(HOST_FS_SELECTOR, host_env->ds_selector); - error |= __vmwrite(HOST_GS_SELECTOR, host_env->ds_selector); - error |= __vmwrite(HOST_FS_BASE, host_env->ds_base); - error |= __vmwrite(HOST_GS_BASE, host_env->ds_base); + error |= __vmwrite(HOST_FS_SELECTOR, __HYPERVISOR_DS); + error |= __vmwrite(HOST_GS_SELECTOR, __HYPERVISOR_DS); + error |= __vmwrite(HOST_FS_BASE, 0); + error |= __vmwrite(HOST_GS_BASE, 0); #else - rdmsrl(MSR_FS_BASE, host_env->fs_base); - rdmsrl(MSR_GS_BASE, host_env->gs_base); - error |= __vmwrite(HOST_FS_BASE, host_env->fs_base); - error |= __vmwrite(HOST_GS_BASE, host_env->gs_base); - -#endif - host_env->cs_selector = __HYPERVISOR_CS; - error |= __vmwrite(HOST_CS_SELECTOR, host_env->cs_selector); - - host_env->ds_base = 0; - host_env->cs_base = 0; + rdmsrl(MSR_FS_BASE, fs_base); + rdmsrl(MSR_GS_BASE, gs_base); + error |= __vmwrite(HOST_FS_BASE, fs_base); + error |= 
__vmwrite(HOST_GS_BASE, gs_base); + +#endif + error |= __vmwrite(HOST_CS_SELECTOR, __HYPERVISOR_CS); __asm__ __volatile__ ("mov %%cr0,%0" : "=r" (crn) : ); error |= __vmwrite(HOST_CR0, crn); /* same CR0 */ @@ -423,55 +441,58 @@ /* * Need to extend to support full virtualization. - * The variable use_host_env indicates if the new VMCS needs to use - * the same setups as the host has (xenolinux). */ - -int construct_vmcs(struct arch_vmx_struct *arch_vmx, - struct cpu_user_regs *regs, - struct vcpu_guest_context *ctxt, - int use_host_env) +static int construct_vmcs(struct arch_vmx_struct *arch_vmx, + cpu_user_regs_t *regs) { int error; + long rc; u64 vmcs_phys_ptr; - struct host_execution_env host_env; - - if (use_host_env != VMCS_USE_HOST_ENV) - return -EINVAL; - - memset(&host_env, 0, sizeof(struct host_execution_env)); - + memset(arch_vmx, 0, sizeof(struct arch_vmx_struct)); + /* + * Create a new VMCS + */ + if (!(arch_vmx->vmcs = alloc_vmcs())) { + printk("Failed to create a new VMCS\n"); + rc = -ENOMEM; + goto err_out; + } vmcs_phys_ptr = (u64) virt_to_phys(arch_vmx->vmcs); - if ((error = __vmpclear (vmcs_phys_ptr))) { + if ((error = __vmpclear(vmcs_phys_ptr))) { printk("construct_vmcs: VMCLEAR failed\n"); - return -EINVAL; + rc = -EINVAL; + goto err_out; } if ((error = load_vmcs(arch_vmx, vmcs_phys_ptr))) { printk("construct_vmcs: load_vmcs failed: VMCS = %lx\n", (unsigned long) vmcs_phys_ptr); - return -EINVAL; + rc = -EINVAL; + goto err_out; } if ((error = construct_vmcs_controls(arch_vmx))) { printk("construct_vmcs: construct_vmcs_controls failed\n"); - return -EINVAL; + rc = -EINVAL; + goto err_out; } /* host selectors */ - if ((error = construct_vmcs_host(&host_env))) { + if ((error = construct_vmcs_host())) { printk("construct_vmcs: construct_vmcs_host failed\n"); - return -EINVAL; + rc = -EINVAL; + goto err_out; } /* guest selectors */ if ((error = construct_init_vmcs_guest(regs))) { printk("construct_vmcs: construct_vmcs_guest failed\n"); - return 
-EINVAL; - } - + rc = -EINVAL; + goto err_out; + } if ((error |= __vmwrite(EXCEPTION_BITMAP, MONITOR_DEFAULT_EXCEPTION_BITMAP))) { printk("construct_vmcs: setting Exception bitmap failed\n"); - return -EINVAL; + rc = -EINVAL; + goto err_out; } if (regs->eflags & EF_TF) @@ -480,6 +501,27 @@ __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB); return 0; + +err_out: + destroy_vmcs(arch_vmx); + return rc; +} + +void destroy_vmcs(struct arch_vmx_struct *arch_vmx) +{ + if(arch_vmx->vmcs != NULL) + free_vmcs(arch_vmx->vmcs); + if(arch_vmx->io_bitmap_a != 0) { + free_xenheap_pages( + arch_vmx->io_bitmap_a, get_order_from_bytes(0x1000)); + arch_vmx->io_bitmap_a = 0; + } + if(arch_vmx->io_bitmap_b != 0) { + free_xenheap_pages( + arch_vmx->io_bitmap_b, get_order_from_bytes(0x1000)); + arch_vmx->io_bitmap_b = 0; + } + arch_vmx->vmcs = 0; } /* @@ -506,26 +548,6 @@ return 0; } -int load_vmcs(struct arch_vmx_struct *arch_vmx, u64 phys_ptr) -{ - int error; - - if ((error = __vmptrld(phys_ptr))) { - clear_bit(ARCH_VMX_VMCS_LOADED, &arch_vmx->flags); - return error; - } - set_bit(ARCH_VMX_VMCS_LOADED, &arch_vmx->flags); - return 0; -} - -int store_vmcs(struct arch_vmx_struct *arch_vmx, u64 phys_ptr) -{ - /* take the current VMCS */ - __vmptrst(phys_ptr); - clear_bit(ARCH_VMX_VMCS_LOADED, &arch_vmx->flags); - return 0; -} - void vm_launch_fail(unsigned long eflags) { unsigned long error; @@ -553,9 +575,19 @@ void arch_vmx_do_launch(struct vcpu *v) { - u64 vmcs_phys_ptr = (u64) virt_to_phys(v->arch.arch_vmx.vmcs); - - load_vmcs(&v->arch.arch_vmx, vmcs_phys_ptr); + int error; + cpu_user_regs_t *regs = &current->arch.guest_context.user_regs; + + error = construct_vmcs(&v->arch.arch_vmx, regs); + if ( error < 0 ) + { + if (v->vcpu_id == 0) { + printk("Failed to construct a new VMCS for BSP.\n"); + } else { + printk("Failed to construct a new VMCS for AP %d\n", v->vcpu_id); + } + domain_crash_synchronous(); + } vmx_do_launch(v); reset_stack_and_jump(vmx_asm_do_launch); } diff -r 37bea65ed6ca 
-r ddb803b6308b xen/include/asm-x86/vmx.h --- a/xen/include/asm-x86/vmx.h Fri Oct 7 09:57:13 2005 +++ b/xen/include/asm-x86/vmx.h Fri Oct 7 10:00:56 2005 @@ -39,7 +39,6 @@ extern void arch_vmx_do_resume(struct vcpu *); extern void arch_vmx_do_relaunch(struct vcpu *); -extern int vmcs_size; extern unsigned int cpu_rev; /* @@ -449,7 +448,7 @@ __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM); } -/* Works only for ed == current */ +/* Works only for vcpu == current */ static inline int vmx_paging_enabled(struct vcpu *v) { unsigned long cr0; diff -r 37bea65ed6ca -r ddb803b6308b xen/include/asm-x86/vmx_vmcs.h --- a/xen/include/asm-x86/vmx_vmcs.h Fri Oct 7 09:57:13 2005 +++ b/xen/include/asm-x86/vmx_vmcs.h Fri Oct 7 10:00:56 2005 @@ -52,6 +52,8 @@ u32 vmcs_revision_id; unsigned char data [0]; /* vmcs size is read from MSR */ }; + +extern int vmcs_size; enum { VMX_INDEX_MSR_LSTAR = 0, @@ -94,18 +96,11 @@ #define ARCH_VMX_VMCS_RESUME 2 /* Needs VMCS resume */ #define ARCH_VMX_IO_WAIT 3 /* Waiting for I/O completion */ -void vmx_do_launch(struct vcpu *); -void vmx_do_resume(struct vcpu *); -void vmx_set_host_env(struct vcpu *); - +void vmx_do_resume(struct vcpu *); struct vmcs_struct *alloc_vmcs(void); -void free_vmcs(struct vmcs_struct *); -int load_vmcs(struct arch_vmx_struct *, u64); -int store_vmcs(struct arch_vmx_struct *, u64); -int construct_vmcs(struct arch_vmx_struct *, struct cpu_user_regs *, - struct vcpu_guest_context *, int); int modify_vmcs(struct arch_vmx_struct *arch_vmx, struct cpu_user_regs *regs); +void destroy_vmcs(struct arch_vmx_struct *arch_vmx); #define VMCS_USE_HOST_ENV 1 #define VMCS_USE_SEPARATE_ENV 0 _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-changelog
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |