[Xen-changelog] [xen-unstable] x86: fix EFER handling
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1179222198 -3600
# Node ID 9e9c09c75110a0a75dbf6ac8a23be4ddc87f54ed
# Parent  f4390e34ad120afd4f7d65789d2394b7c6dfd7a5
x86: fix EFER handling

Introduce a per-CPU shadow of what is currently in EFER, as context
switch code must re-write this MSR so that all guests run with
appropriate EFER.SCE and EFER.NX settings.

Remove EFER from the set of MSRs that VMX deals with in a generic
fashion.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
 xen/arch/x86/domain.c              |   20 ++--
 xen/arch/x86/hvm/svm/svm.c         |   26 +++---
 xen/arch/x86/hvm/vmx/vmcs.c        |    5 -
 xen/arch/x86/hvm/vmx/vmx.c         |  154 ++++++++++++++++++++++++++++---------
 xen/arch/x86/mm/shadow/multi.c     |    8 -
 xen/include/asm-x86/cpufeature.h   |    2
 xen/include/asm-x86/hvm/hvm.h      |   17 +++-
 xen/include/asm-x86/hvm/svm/svm.h  |    5 +
 xen/include/asm-x86/hvm/vmx/vmcs.h |    6 -
 xen/include/asm-x86/hvm/vmx/vmx.h  |    6 -
 xen/include/asm-x86/msr.h          |   22 +++++
 11 files changed, 192 insertions(+), 79 deletions(-)

diff -r f4390e34ad12 -r 9e9c09c75110 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c Tue May 15 10:28:28 2007 +0100
+++ b/xen/arch/x86/domain.c Tue May 15 10:43:18 2007 +0100
@@ -47,6 +47,7 @@
 #endif
 
 DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
+DEFINE_PER_CPU(__u64, efer);
 
 static void paravirt_ctxt_switch_from(struct vcpu *v);
 static void paravirt_ctxt_switch_to(struct vcpu *v);
@@ -1135,21 +1136,18 @@ void context_switch(struct vcpu *prev, s
         __context_switch();
 
 #ifdef CONFIG_COMPAT
-        if ( is_idle_vcpu(prev) ||
-             (is_pv_32on64_domain(prev->domain) !=
-              is_pv_32on64_domain(next->domain)) )
-        {
-            uint32_t efer_lo, efer_hi;
+        if ( !is_hvm_vcpu(next) &&
+             (is_idle_vcpu(prev) ||
+              is_hvm_vcpu(prev) ||
+              is_pv_32on64_vcpu(prev) != is_pv_32on64_vcpu(next)) )
+        {
+            uint64_t efer = read_efer();
 
             local_flush_tlb_one(GDT_VIRT_START(next) +
                                 FIRST_RESERVED_GDT_BYTE);
 
-            rdmsr(MSR_EFER, efer_lo, efer_hi);
-            if ( !is_pv_32on64_domain(next->domain) == !(efer_lo & EFER_SCE) )
-            {
-                efer_lo ^= EFER_SCE;
-                wrmsr(MSR_EFER, efer_lo, efer_hi);
-            }
+            if ( !is_pv_32on64_vcpu(next) == !(efer & EFER_SCE) )
+                write_efer(efer ^ EFER_SCE);
         }
 #endif
 
diff -r f4390e34ad12 -r 9e9c09c75110 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Tue May 15 10:28:28 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c Tue May 15 10:43:18 2007 +0100
@@ -93,11 +93,8 @@ static inline void svm_inject_exception(
 
 static void stop_svm(void)
 {
-    u32 eax, edx;
     /* We turn off the EFER_SVME bit. */
-    rdmsr(MSR_EFER, eax, edx);
-    eax &= ~EFER_SVME;
-    wrmsr(MSR_EFER, eax, edx);
+    write_efer(read_efer() & ~EFER_SVME);
 }
 
 static void svm_store_cpu_guest_regs(
@@ -138,7 +135,13 @@ static inline int long_mode_do_msr_write
     {
     case MSR_EFER:
         /* Offending reserved bit will cause #GP. */
-        if ( msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE) )
+#ifdef __x86_64__
+        if ( (msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE)) ||
+#else
+        if ( (msr_content & ~(EFER_NX | EFER_SCE)) ||
+#endif
+             (!cpu_has_nx && (msr_content & EFER_NX)) ||
+             (!cpu_has_syscall && (msr_content & EFER_SCE)) )
         {
             gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
                      "EFER: %"PRIx64"\n", msr_content);
@@ -495,7 +498,7 @@ int svm_vmcb_restore(struct vcpu *v, str
 }
 
 
-void svm_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
+static void svm_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
@@ -511,7 +514,7 @@ void svm_save_cpu_state(struct vcpu *v,
 }
 
 
-void svm_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
+static void svm_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
@@ -530,13 +533,13 @@ void svm_load_cpu_state(struct vcpu *v,
     hvm_set_guest_time(v, data->tsc);
 }
 
-void svm_save_vmcb_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
+static void svm_save_vmcb_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
 {
     svm_save_cpu_state(v, ctxt);
     svm_vmcb_save(v, ctxt);
 }
 
-int svm_load_vmcb_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
+static int svm_load_vmcb_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
 {
     svm_load_cpu_state(v, ctxt);
     if (svm_vmcb_restore(v, ctxt)) {
@@ -911,6 +914,7 @@ static struct hvm_function_table svm_fun
     .paging_enabled       = svm_paging_enabled,
     .long_mode_enabled    = svm_long_mode_enabled,
     .pae_enabled          = svm_pae_enabled,
+    .nx_enabled           = svm_nx_enabled,
     .interrupts_enabled   = svm_interrupts_enabled,
     .guest_x86_mode       = svm_guest_x86_mode,
     .get_guest_ctrl_reg   = svm_get_ctrl_reg,
@@ -967,9 +971,7 @@ int start_svm(void)
          ((root_vmcb[cpu] = alloc_vmcb()) == NULL) )
         return 0;
 
-    rdmsr(MSR_EFER, eax, edx);
-    eax |= EFER_SVME;
-    wrmsr(MSR_EFER, eax, edx);
+    write_efer(read_efer() | EFER_SVME);
 
     svm_npt_detect();
 
diff -r f4390e34ad12 -r 9e9c09c75110 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c Tue May 15 10:28:28 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c Tue May 15 10:43:18 2007 +0100
@@ -285,11 +285,6 @@ static void construct_vmcs(struct vcpu *
 
     vmx_vmcs_enter(v);
 
-    v->arch.hvm_vmx.cpu_cr2 = 0;
-    v->arch.hvm_vmx.cpu_cr3 = 0;
-    memset(&v->arch.hvm_vmx.msr_state, 0, sizeof(v->arch.hvm_vmx.msr_state));
-    v->arch.hvm_vmx.vmxassist_enabled = 0;
-
     /* VMCS controls. */
     __vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_control);
     __vmwrite(VM_EXIT_CONTROLS, vmx_vmexit_control);
 
diff -r f4390e34ad12 -r 9e9c09c75110 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Tue May 15 10:28:28 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c Tue May 15 10:43:18 2007 +0100
@@ -89,7 +89,7 @@ static u32 msr_index[VMX_MSR_COUNT] =
 static u32 msr_index[VMX_MSR_COUNT] =
 {
     MSR_LSTAR, MSR_STAR, MSR_CSTAR,
-    MSR_SYSCALL_MASK, MSR_EFER,
+    MSR_SYSCALL_MASK
 };
 
 static void vmx_save_host_msrs(void)
@@ -116,8 +116,7 @@ static inline int long_mode_do_msr_read(
     switch ( (u32)regs->ecx )
     {
     case MSR_EFER:
-        HVM_DBG_LOG(DBG_LEVEL_2, "EFER msr_content 0x%"PRIx64, msr_content);
-        msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_EFER];
+        msr_content = v->arch.hvm_vmx.efer;
         break;
 
     case MSR_FS_BASE:
@@ -129,7 +128,7 @@ static inline int long_mode_do_msr_read(
         goto check_long_mode;
 
     case MSR_SHADOW_GS_BASE:
-        msr_content = guest_msr_state->shadow_gs;
+        msr_content = v->arch.hvm_vmx.shadow_gs;
     check_long_mode:
         if ( !(vmx_long_mode_enabled(v)) )
         {
@@ -181,7 +180,9 @@ static inline int long_mode_do_msr_write
     {
     case MSR_EFER:
         /* offending reserved bit will cause #GP */
-        if ( msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE) )
+        if ( (msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE)) ||
+             (!cpu_has_nx && (msr_content & EFER_NX)) ||
+             (!cpu_has_syscall && (msr_content & EFER_SCE)) )
         {
             gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
                      "EFER: %"PRIx64"\n", msr_content);
@@ -189,7 +190,7 @@ static inline int long_mode_do_msr_write
         }
 
         if ( (msr_content & EFER_LME)
-             && !(guest_msr_state->msrs[VMX_INDEX_MSR_EFER] & EFER_LME) )
+             && !(v->arch.hvm_vmx.efer & EFER_LME) )
         {
             if ( unlikely(vmx_paging_enabled(v)) )
             {
@@ -199,7 +200,7 @@ static inline int long_mode_do_msr_write
             }
         }
         else if ( !(msr_content & EFER_LME)
-                  && (guest_msr_state->msrs[VMX_INDEX_MSR_EFER] & EFER_LME) )
+                  && (v->arch.hvm_vmx.efer & EFER_LME) )
         {
             if ( unlikely(vmx_paging_enabled(v)) )
             {
@@ -209,7 +210,11 @@ static inline int long_mode_do_msr_write
             }
         }
 
-        guest_msr_state->msrs[VMX_INDEX_MSR_EFER] = msr_content;
+        if ( (msr_content ^ v->arch.hvm_vmx.efer) & (EFER_NX|EFER_SCE) )
+            write_efer((read_efer() & ~(EFER_NX|EFER_SCE)) |
+                       (msr_content & (EFER_NX|EFER_SCE)));
+
+        v->arch.hvm_vmx.efer = msr_content;
         break;
 
     case MSR_FS_BASE:
@@ -227,7 +232,7 @@ static inline int long_mode_do_msr_write
             __vmwrite(GUEST_GS_BASE, msr_content);
         else
         {
-            v->arch.hvm_vmx.msr_state.shadow_gs = msr_content;
+            v->arch.hvm_vmx.shadow_gs = msr_content;
             wrmsrl(MSR_SHADOW_GS_BASE, msr_content);
         }
 
@@ -279,12 +284,14 @@ static void vmx_restore_host_msrs(void)
         wrmsrl(msr_index[i], host_msr_state->msrs[i]);
         clear_bit(i, &host_msr_state->flags);
     }
+    if ( cpu_has_nx && !(read_efer() & EFER_NX) )
+        write_efer(read_efer() | EFER_NX);
 }
 
 static void vmx_save_guest_msrs(struct vcpu *v)
 {
     /* MSR_SHADOW_GS_BASE may have been changed by swapgs instruction. */
-    rdmsrl(MSR_SHADOW_GS_BASE, v->arch.hvm_vmx.msr_state.shadow_gs);
+    rdmsrl(MSR_SHADOW_GS_BASE, v->arch.hvm_vmx.shadow_gs);
 }
 
 static void vmx_restore_guest_msrs(struct vcpu *v)
@@ -296,11 +303,9 @@ static void vmx_restore_guest_msrs(struc
     guest_msr_state = &v->arch.hvm_vmx.msr_state;
     host_msr_state = &this_cpu(host_msr_state);
 
-    wrmsrl(MSR_SHADOW_GS_BASE, guest_msr_state->shadow_gs);
+    wrmsrl(MSR_SHADOW_GS_BASE, v->arch.hvm_vmx.shadow_gs);
 
     guest_flags = guest_msr_state->flags;
-    if ( !guest_flags )
-        return;
 
     while ( guest_flags )
     {
         i = find_first_set_bit(guest_flags);
@@ -312,23 +317,90 @@ static void vmx_restore_guest_msrs(struc
         wrmsrl(msr_index[i], guest_msr_state->msrs[i]);
         clear_bit(i, &guest_flags);
     }
+
+    if ( (v->arch.hvm_vmx.efer ^ read_efer()) & (EFER_NX|EFER_SCE) )
+    {
+        HVM_DBG_LOG(DBG_LEVEL_2,
+                    "restore guest's EFER with value %lx",
+                    v->arch.hvm_vmx.efer);
+        write_efer((read_efer() & ~(EFER_NX|EFER_SCE)) |
+                   (v->arch.hvm_vmx.efer & (EFER_NX|EFER_SCE)));
+    }
 }
 
 #else  /* __i386__ */
 
 #define vmx_save_host_msrs()        ((void)0)
-#define vmx_restore_host_msrs()     ((void)0)
+
+static void vmx_restore_host_msrs(void)
+{
+    if ( cpu_has_nx && !(read_efer() & EFER_NX) )
+        write_efer(read_efer() | EFER_NX);
+}
+
 #define vmx_save_guest_msrs(v)      ((void)0)
-#define vmx_restore_guest_msrs(v)   ((void)0)
+
+static void vmx_restore_guest_msrs(struct vcpu *v)
+{
+    if ( (v->arch.hvm_vmx.efer ^ read_efer()) & EFER_NX )
+    {
+        HVM_DBG_LOG(DBG_LEVEL_2,
+                    "restore guest's EFER with value %lx",
+                    v->arch.hvm_vmx.efer);
+        write_efer((read_efer() & ~EFER_NX) |
+                   (v->arch.hvm_vmx.efer & EFER_NX));
+    }
+}
 
 static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
 {
-    return 0;
+    u64 msr_content = 0;
+    struct vcpu *v = current;
+
+    switch ( regs->ecx ) {
+    case MSR_EFER:
+        msr_content = v->arch.hvm_vmx.efer;
+        break;
+
+    default:
+        return 0;
+    }
+
+    regs->eax = msr_content >>  0;
+    regs->edx = msr_content >> 32;
+
+    return 1;
 }
 
 static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
 {
-    return 0;
+    u64 msr_content = regs->eax | ((u64)regs->edx << 32);
+    struct vcpu *v = current;
+
+    switch ( regs->ecx )
+    {
+    case MSR_EFER:
+        /* offending reserved bit will cause #GP */
+        if ( (msr_content & ~EFER_NX) ||
+             (!cpu_has_nx && (msr_content & EFER_NX)) )
+        {
+            gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
+                     "EFER: %"PRIx64"\n", msr_content);
+            vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
+            return 0;
+        }
+
+        if ( (msr_content ^ v->arch.hvm_vmx.efer) & EFER_NX )
+            write_efer((read_efer() & ~EFER_NX) | (msr_content & EFER_NX));
+
+        v->arch.hvm_vmx.efer = msr_content;
+        break;
+
+    default:
+        return 0;
+    }
+
+    return 1;
 }
 
 #endif /* __i386__ */
 
@@ -636,7 +708,7 @@ int vmx_vmcs_restore(struct vcpu *v, str
         return -EINVAL;
     }
 
-#ifdef HVM_DEBUG_SUSPEND
+#if defined(__x86_64__) && defined(HVM_DEBUG_SUSPEND)
 static void dump_msr_state(struct vmx_msr_state *m)
 {
     int i = 0;
@@ -647,17 +719,16 @@ static void dump_msr_state(struct vmx_ms
     printk("\n");
 }
 #else
-static void dump_msr_state(struct vmx_msr_state *m)
-{
-}
+#define dump_msr_state(m) ((void)0)
 #endif
 
-void vmx_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
-{
+static void vmx_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
+{
+#ifdef __x86_64__
     struct vmx_msr_state *guest_state = &v->arch.hvm_vmx.msr_state;
     unsigned long guest_flags = guest_state->flags;
 
-    data->shadow_gs = guest_state->shadow_gs;
+    data->shadow_gs = v->arch.hvm_vmx.shadow_gs;
 
     /* save msrs */
     data->msr_flags        = guest_flags;
@@ -665,15 +736,18 @@ void vmx_save_cpu_state(struct vcpu *v,
     data->msr_star         = guest_state->msrs[VMX_INDEX_MSR_STAR];
     data->msr_cstar        = guest_state->msrs[VMX_INDEX_MSR_CSTAR];
     data->msr_syscall_mask = guest_state->msrs[VMX_INDEX_MSR_SYSCALL_MASK];
-    data->msr_efer         = guest_state->msrs[VMX_INDEX_MSR_EFER];
+#endif
+
+    data->msr_efer = v->arch.hvm_vmx.efer;
 
     data->tsc = hvm_get_guest_time(v);
 
     dump_msr_state(guest_state);
 }
 
-void vmx_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
-{
+static void vmx_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
+{
+#ifdef __x86_64__
     struct vmx_msr_state *guest_state = &v->arch.hvm_vmx.msr_state;
 
     /* restore msrs */
@@ -682,9 +756,11 @@ void vmx_load_cpu_state(struct vcpu *v,
     guest_state->msrs[VMX_INDEX_MSR_STAR]         = data->msr_star;
     guest_state->msrs[VMX_INDEX_MSR_CSTAR]        = data->msr_cstar;
     guest_state->msrs[VMX_INDEX_MSR_SYSCALL_MASK] = data->msr_syscall_mask;
-    guest_state->msrs[VMX_INDEX_MSR_EFER] = data->msr_efer;
-
-    guest_state->shadow_gs = data->shadow_gs;
+
+    v->arch.hvm_vmx.shadow_gs = data->shadow_gs;
+#endif
+
+    v->arch.hvm_vmx.efer = data->msr_efer;
 
     v->arch.hvm_vmx.vmxassist_enabled = !(data->cr0 & X86_CR0_PE);
 
@@ -694,7 +770,7 @@ void vmx_load_cpu_state(struct vcpu *v,
 }
 
-void vmx_save_vmcs_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
+static void vmx_save_vmcs_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
 {
     vmx_save_cpu_state(v, ctxt);
     vmx_vmcs_enter(v);
@@ -702,7 +778,7 @@ void vmx_save_vmcs_ctxt(struct vcpu *v,
     vmx_vmcs_exit(v);
 }
 
-int vmx_load_vmcs_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
+static int vmx_load_vmcs_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
 {
     vmx_load_cpu_state(v, ctxt);
     if (vmx_vmcs_restore(v, ctxt)) {
@@ -1016,6 +1092,11 @@ static int vmx_pae_enabled(struct vcpu *
     return (vmx_paging_enabled(v) && (cr4 & X86_CR4_PAE));
 }
 
+static int vmx_nx_enabled(struct vcpu *v)
+{
+    return v->arch.hvm_vmx.efer & EFER_NX;
+}
+
 static int vmx_interrupts_enabled(struct vcpu *v)
 {
     unsigned long eflags = __vmread(GUEST_RFLAGS);
@@ -1096,6 +1177,7 @@ static struct hvm_function_table vmx_fun
     .paging_enabled       = vmx_paging_enabled,
    .long_mode_enabled    = vmx_long_mode_enabled,
     .pae_enabled          = vmx_pae_enabled,
+    .nx_enabled           = vmx_nx_enabled,
     .interrupts_enabled   = vmx_interrupts_enabled,
     .guest_x86_mode       = vmx_guest_x86_mode,
     .get_guest_ctrl_reg   = vmx_get_ctrl_reg,
@@ -1996,8 +2078,7 @@ static int vmx_set_cr0(unsigned long val
         else
         {
             HVM_DBG_LOG(DBG_LEVEL_1, "Enabling long mode\n");
-            v->arch.hvm_vmx.msr_state.msrs[VMX_INDEX_MSR_EFER]
-                |= EFER_LMA;
+            v->arch.hvm_vmx.efer |= EFER_LMA;
             vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
             vm_entry_value |= VM_ENTRY_IA32E_MODE;
             __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
@@ -2046,8 +2127,7 @@ static int vmx_set_cr0(unsigned long val
          */
         if ( vmx_long_mode_enabled(v) )
         {
-            v->arch.hvm_vmx.msr_state.msrs[VMX_INDEX_MSR_EFER]
-                &= ~EFER_LMA;
+            v->arch.hvm_vmx.efer &= ~EFER_LMA;
             vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
             vm_entry_value &= ~VM_ENTRY_IA32E_MODE;
             __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
@@ -2079,7 +2159,7 @@ static int vmx_set_cr0(unsigned long val
     {
         if ( vmx_long_mode_enabled(v) )
         {
-            v->arch.hvm_vmx.msr_state.msrs[VMX_INDEX_MSR_EFER] &= ~EFER_LMA;
+            v->arch.hvm_vmx.efer &= ~EFER_LMA;
             vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
             vm_entry_value &= ~VM_ENTRY_IA32E_MODE;
             __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
diff -r f4390e34ad12 -r 9e9c09c75110 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c Tue May 15 10:28:28 2007 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c Tue May 15 10:43:18 2007 +0100
@@ -181,11 +181,11 @@ static inline int
 static inline int
 guest_supports_nx(struct vcpu *v)
 {
+    if ( GUEST_PAGING_LEVELS == 2 || !cpu_has_nx )
+        return 0;
     if ( !is_hvm_vcpu(v) )
-        return cpu_has_nx;
-
-    // XXX - fix this!
-    return 1;
+        return 1;
+    return hvm_nx_enabled(v);
 }
 
 
diff -r f4390e34ad12 -r 9e9c09c75110 xen/include/asm-x86/cpufeature.h
--- a/xen/include/asm-x86/cpufeature.h Tue May 15 10:28:28 2007 +0100
+++ b/xen/include/asm-x86/cpufeature.h Tue May 15 10:43:18 2007 +0100
@@ -114,6 +114,7 @@
 #define cpu_has_xmm2        boot_cpu_has(X86_FEATURE_XMM2)
 #define cpu_has_xmm3        boot_cpu_has(X86_FEATURE_XMM3)
 #define cpu_has_ht          boot_cpu_has(X86_FEATURE_HT)
+#define cpu_has_syscall     boot_cpu_has(X86_FEATURE_SYSCALL)
 #define cpu_has_mp          boot_cpu_has(X86_FEATURE_MP)
 #define cpu_has_nx          boot_cpu_has(X86_FEATURE_NX)
 #define cpu_has_k6_mtrr     boot_cpu_has(X86_FEATURE_K6_MTRR)
@@ -136,6 +137,7 @@
 #define cpu_has_xmm2        1
 #define cpu_has_xmm3        boot_cpu_has(X86_FEATURE_XMM3)
 #define cpu_has_ht          boot_cpu_has(X86_FEATURE_HT)
+#define cpu_has_syscall     1
 #define cpu_has_mp          1
 #define cpu_has_nx          boot_cpu_has(X86_FEATURE_NX)
 #define cpu_has_k6_mtrr     0
diff -r f4390e34ad12 -r 9e9c09c75110 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h Tue May 15 10:28:28 2007 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h Tue May 15 10:43:18 2007 +0100
@@ -93,14 +93,17 @@ struct hvm_function_table {
      * 1) determine whether paging is enabled,
      * 2) determine whether long mode is enabled,
      * 3) determine whether PAE paging is enabled,
-     * 4) determine whether interrupts are enabled or not,
-     * 5) determine the mode the guest is running in,
-     * 6) return the current guest control-register value
-     * 7) return the current guest segment descriptor base
+     * 4) determine whether NX is enabled,
+     * 5) determine whether interrupts are enabled or not,
+     * 6) determine the mode the guest is running in,
+     * 7) return the current guest control-register value
+     * 8) return the current guest segment descriptor base
+     * 9) return the current guest segment descriptor
      */
     int (*paging_enabled)(struct vcpu *v);
     int (*long_mode_enabled)(struct vcpu *v);
     int (*pae_enabled)(struct vcpu *v);
+    int (*nx_enabled)(struct vcpu *v);
     int (*interrupts_enabled)(struct vcpu *v);
     int (*guest_x86_mode)(struct vcpu *v);
     unsigned long (*get_guest_ctrl_reg)(struct vcpu *v, unsigned int num);
@@ -199,6 +202,12 @@ hvm_interrupts_enabled(struct vcpu *v)
 }
 
 static inline int
+hvm_nx_enabled(struct vcpu *v)
+{
+    return hvm_funcs.nx_enabled(v);
+}
+
+static inline int
 hvm_guest_x86_mode(struct vcpu *v)
 {
     return hvm_funcs.guest_x86_mode(v);
diff -r f4390e34ad12 -r 9e9c09c75110 xen/include/asm-x86/hvm/svm/svm.h
--- a/xen/include/asm-x86/hvm/svm/svm.h Tue May 15 10:28:28 2007 +0100
+++ b/xen/include/asm-x86/hvm/svm/svm.h Tue May 15 10:43:18 2007 +0100
@@ -60,6 +60,11 @@ static inline int svm_pae_enabled(struct
     return svm_paging_enabled(v) && (guest_cr4 & X86_CR4_PAE);
 }
 
+static inline int svm_nx_enabled(struct vcpu *v)
+{
+    return v->arch.hvm_svm.cpu_shadow_efer & EFER_NX;
+}
+
 static inline int svm_pgbit_test(struct vcpu *v)
 {
     return v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_PG;
diff -r f4390e34ad12 -r 9e9c09c75110 xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h Tue May 15 10:28:28 2007 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h Tue May 15 10:43:18 2007 +0100
@@ -39,7 +39,6 @@ enum {
     VMX_INDEX_MSR_STAR,
     VMX_INDEX_MSR_CSTAR,
     VMX_INDEX_MSR_SYSCALL_MASK,
-    VMX_INDEX_MSR_EFER,
 
     VMX_MSR_COUNT
 };
@@ -47,7 +46,6 @@ struct vmx_msr_state {
 struct vmx_msr_state {
     unsigned long flags;
     unsigned long msrs[VMX_MSR_COUNT];
-    unsigned long shadow_gs;
 };
 
 struct arch_vmx_struct {
@@ -76,7 +74,11 @@ struct arch_vmx_struct {
     unsigned long        cpu_shadow_cr4; /* copy of guest read shadow CR4 */
     unsigned long        cpu_cr2;        /* save CR2 */
     unsigned long        cpu_cr3;
+#ifdef __x86_64__
     struct vmx_msr_state msr_state;
+    unsigned long        shadow_gs;
+#endif
+    unsigned long        efer;
     unsigned long        vmxassist_enabled:1;
 };
 
diff -r f4390e34ad12 -r 9e9c09c75110 xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h Tue May 15 10:28:28 2007 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h Tue May 15 10:43:18 2007 +0100
@@ -261,14 +261,12 @@ static inline int vmx_paging_enabled(str
 
 static inline int vmx_long_mode_enabled(struct vcpu *v)
 {
-    u64 efer = v->arch.hvm_vmx.msr_state.msrs[VMX_INDEX_MSR_EFER];
-    return efer & EFER_LMA;
+    return v->arch.hvm_vmx.efer & EFER_LMA;
 }
 
 static inline int vmx_lme_is_set(struct vcpu *v)
 {
-    u64 efer = v->arch.hvm_vmx.msr_state.msrs[VMX_INDEX_MSR_EFER];
-    return efer & EFER_LME;
+    return v->arch.hvm_vmx.efer & EFER_LME;
 }
 
 static inline int vmx_pgbit_test(struct vcpu *v)
diff -r f4390e34ad12 -r 9e9c09c75110 xen/include/asm-x86/msr.h
--- a/xen/include/asm-x86/msr.h Tue May 15 10:28:28 2007 +0100
+++ b/xen/include/asm-x86/msr.h Tue May 15 10:43:18 2007 +0100
@@ -2,6 +2,9 @@
 #define __ASM_MSR_H
 
 #ifndef __ASSEMBLY__
+
+#include <xen/smp.h>
+#include <xen/percpu.h>
 
 #define rdmsr(msr,val1,val2) \
      __asm__ __volatile__("rdmsr" \
@@ -141,6 +144,25 @@ static inline void wrmsrl(unsigned int m
 #define EFER_LMA   (1<<_EFER_LMA)
 #define EFER_NX    (1<<_EFER_NX)
 #define EFER_SVME  (1<<_EFER_SVME)
+
+#ifndef __ASSEMBLY__
+
+DECLARE_PER_CPU(__u64, efer);
+
+static inline __u64 read_efer(void)
+{
+    if (!this_cpu(efer))
+        rdmsrl(MSR_EFER, this_cpu(efer));
+    return this_cpu(efer);
+}
+
+static inline void write_efer(__u64 val)
+{
+    this_cpu(efer) = val;
+    wrmsrl(MSR_EFER, val);
+}
+
+#endif
 
 /* Intel MSRs. Some also available on other CPUs */
 #define MSR_IA32_PLATFORM_ID    0x17
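For readers skimming the archive: the heart of this change is the lazily filled per-CPU EFER shadow added to asm-x86/msr.h (read_efer()/write_efer()), which the context-switch, SVM and VMX paths above then use instead of raw rdmsr/wrmsr. The stand-alone sketch below is a minimal user-space model of that caching pattern, not part of the patch; the globals, the stub rdmsrl_efer()/wrmsrl_efer() helpers and main() are illustrative stand-ins for Xen's DEFINE_PER_CPU(__u64, efer) slot and the real MSR accessors.

    /* Minimal, self-contained model of the per-CPU EFER shadow pattern.
     * cached_efer stands in for Xen's per-CPU "efer" variable;
     * hw_efer and the rdmsrl_efer()/wrmsrl_efer() stubs stand in for the
     * real MSR and its accessors.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define EFER_SCE (1ULL << 0)    /* bit positions as in asm-x86/msr.h */
    #define EFER_NX  (1ULL << 11)

    static uint64_t hw_efer = EFER_NX;  /* pretend hardware MSR */
    static uint64_t cached_efer;        /* shadow; 0 means "not read yet" */

    static void rdmsrl_efer(uint64_t *val) { *val = hw_efer; }  /* stub */
    static void wrmsrl_efer(uint64_t val)  { hw_efer = val; }   /* stub */

    static uint64_t read_efer(void)
    {
        if (!cached_efer)               /* lazily populate the shadow */
            rdmsrl_efer(&cached_efer);
        return cached_efer;
    }

    static void write_efer(uint64_t val)
    {
        cached_efer = val;              /* keep shadow and MSR in sync */
        wrmsrl_efer(val);
    }

    int main(void)
    {
        /* Context-switch style toggle: flip SCE only when it differs. */
        uint64_t efer = read_efer();
        if (!(efer & EFER_SCE))
            write_efer(efer ^ EFER_SCE);
        printf("EFER is now %#llx\n", (unsigned long long)read_efer());
        return 0;
    }

As in the patch, the shadow counts as valid only once it is non-zero, so the first read_efer() on a CPU pulls the real MSR value into the cache; every write_efer() keeps cache and register in sync, which is what lets context_switch() and the HVM restore paths toggle EFER.SCE/EFER.NX without an extra rdmsr.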