[Xen-devel] [PATCH v3 1/4] x86/pvh: Set 32b PVH guest mode in XEN_DOMCTL_set_address_size
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
Changes in v3:
 * Updated switch_native() not to call release_compat_l4() on PVH guests
 * hvm_set_mode() handles both 8 and 4 modes and returns -EOPNOTSUPP
   otherwise. Similar changes to vmx_set_mode()

 xen/arch/x86/domain.c         | 27 ++++++++++++++++-----------
 xen/arch/x86/hvm/hvm.c        | 24 +++++++++++++++++++++++-
 xen/arch/x86/hvm/vmx/vmcs.c   |  2 +-
 xen/arch/x86/hvm/vmx/vmx.c    | 19 +++++++++++++++++++
 xen/include/asm-x86/hvm/hvm.h |  2 ++
 5 files changed, 61 insertions(+), 13 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 956ac70..9c29ef2 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -366,7 +366,11 @@ int switch_native(struct domain *d)
     for_each_vcpu( d, v )
     {
         free_compat_arg_xlat(v);
-        release_compat_l4(v);
+
+        if ( !is_pvh_domain(d) )
+            release_compat_l4(v);
+        else
+            hvm_set_mode(v, 8);
     }

     return 0;
@@ -377,25 +381,26 @@ int switch_compat(struct domain *d)
     struct vcpu *v;
     int rc;

-    if ( is_pvh_domain(d) )
-    {
-        printk(XENLOG_G_INFO
-               "Xen currently does not support 32bit PVH guests\n");
-        return -EINVAL;
-    }
-
     if ( !may_switch_mode(d) )
         return -EACCES;

     if ( is_pv_32bit_domain(d) )
         return 0;

-    d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 1;
+    d->arch.has_32bit_shinfo = 1;
+    if ( is_pv_domain(d) )
+        d->arch.is_32bit_pv = 1;

     for_each_vcpu( d, v )
     {
         rc = setup_compat_arg_xlat(v);
         if ( !rc )
-            rc = setup_compat_l4(v);
+        {
+            if ( !is_pvh_domain(d) )
+                rc = setup_compat_l4(v);
+            else
+                rc = hvm_set_mode(v, 4);
+        }
+
         if ( rc )
             goto undo_and_fail;
     }
@@ -410,7 +415,7 @@ int switch_compat(struct domain *d)
     {
         free_compat_arg_xlat(v);

-        if ( !pagetable_is_null(v->arch.guest_table) )
+        if ( !is_pvh_domain(d) && !pagetable_is_null(v->arch.guest_table) )
             release_compat_l4(v);
     }

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index ebcf7a9..6f247a0 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2373,7 +2373,6 @@ int hvm_vcpu_initialise(struct vcpu *v)

     if ( is_pvh_domain(d) )
     {
-        v->arch.hvm_vcpu.hcall_64bit = 1;    /* PVH 32bitfixme. */
         /* This is for hvm_long_mode_enabled(v). */
         v->arch.hvm_vcpu.guest_efer = EFER_LMA | EFER_LME;
         return 0;
@@ -6463,6 +6462,29 @@ void hvm_toggle_singlestep(struct vcpu *v)
     v->arch.hvm_vcpu.single_step = !v->arch.hvm_vcpu.single_step;
 }

+int hvm_set_mode(struct vcpu *v, int mode)
+{
+
+    switch ( mode )
+    {
+    case 4:
+        v->arch.hvm_vcpu.guest_efer &= ~(EFER_LMA | EFER_LME);
+        break;
+    case 8:
+        v->arch.hvm_vcpu.guest_efer |= (EFER_LMA | EFER_LME);
+        break;
+    default:
+        return -EOPNOTSUPP;
+    }
+
+    hvm_update_guest_efer(v);
+
+    if ( hvm_funcs.set_mode )
+        return hvm_funcs.set_mode(v, mode);
+
+    return 0;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 4c5ceb5..55ab7ad 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1125,7 +1125,7 @@ static int construct_vmcs(struct vcpu *v)
     __vmwrite(GUEST_FS_AR_BYTES, 0xc093);
     __vmwrite(GUEST_GS_AR_BYTES, 0xc093);
     if ( is_pvh_domain(d) )
-        /* CS.L == 1, exec, read/write, accessed. PVH 32bitfixme. */
+        /* CS.L == 1, exec, read/write, accessed. */
         __vmwrite(GUEST_CS_AR_BYTES, 0xa09b);
     else
         __vmwrite(GUEST_CS_AR_BYTES, 0xc09b); /* exec/read, accessed */
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index bc3212f..888dea2 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1770,6 +1770,24 @@ static bool_t vmx_is_singlestep_supported(void)
     return cpu_has_monitor_trap_flag;
 }

+static int vmx_set_mode(struct vcpu *v, int mode)
+{
+    unsigned long attr;
+
+    if ( !is_pvh_vcpu(v) )
+        return 0;
+
+    ASSERT((mode == 4) || (mode == 8));
+
+    attr = (mode == 4) ? 0xc09b : 0xa09b;
+
+    vmx_vmcs_enter(v);
+    __vmwrite(GUEST_CS_AR_BYTES, attr);
+    vmx_vmcs_exit(v);
+
+    return 0;
+}
+
 static struct hvm_function_table __initdata vmx_function_table = {
     .name                 = "VMX",
     .cpu_up_prepare       = vmx_cpu_up_prepare,
@@ -1828,6 +1846,7 @@ static struct hvm_function_table __initdata vmx_function_table = {
     .hypervisor_cpuid_leaf = vmx_hypervisor_cpuid_leaf,
     .enable_msr_exit_interception = vmx_enable_msr_exit_interception,
     .is_singlestep_supported = vmx_is_singlestep_supported,
+    .set_mode = vmx_set_mode,
 };

 const struct hvm_function_table * __init start_vmx(void)
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 35f1300..c35dd19 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -203,6 +203,7 @@ struct hvm_function_table {
     void (*enable_msr_exit_interception)(struct domain *d);
     bool_t (*is_singlestep_supported)(void);
+    int (*set_mode)(struct vcpu *v, int mode);
 };

 extern struct hvm_function_table hvm_funcs;
@@ -237,6 +238,7 @@ void hvm_set_guest_tsc_fixed(struct vcpu *v, u64 guest_tsc, u64 at_tsc);
 u64 hvm_get_guest_tsc_fixed(struct vcpu *v, u64 at_tsc);
 #define hvm_get_guest_tsc(v) hvm_get_guest_tsc_fixed(v, 0)

+int hvm_set_mode(struct vcpu *v, int mode);
 void hvm_init_guest_time(struct domain *d);
 void hvm_set_guest_time(struct vcpu *v, u64 guest_time);
 u64 hvm_get_guest_time_fixed(struct vcpu *v, u64 at_tsc);
-- 
1.8.1.4
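Context, not part of the patch: the mode switch above is reached from the toolstack through XEN_DOMCTL_set_address_size, for which libxc provides the xc_domain_set_address_size() wrapper. A minimal caller sketch follows; the domain id and error handling are made up for illustration, and whether a 32-bit request succeeds for a PVH guest depends on this series being applied.

/* Illustrative toolstack-side sketch -- not part of this patch.
 * Requesting a 32-bit address size lands in switch_compat(), which with
 * this series calls hvm_set_mode(v, 4) for PVH vCPUs; requesting 64
 * goes through switch_native() and hvm_set_mode(v, 8). */
#include <stdio.h>
#include <xenctrl.h>

int main(void)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    uint32_t domid = 1;                 /* hypothetical domain id */
    int rc;

    if ( !xch )
        return 1;

    rc = xc_domain_set_address_size(xch, domid, 32);
    if ( rc )
        fprintf(stderr, "XEN_DOMCTL_set_address_size failed: %d\n", rc);

    xc_interface_close(xch);
    return rc ? 1 : 0;
}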