Re: [Xen-devel] [PATCH for 4.9 2/6] x86/hvm: Correct long mode predicate
> -----Original Message-----
> From: Andrew Cooper [mailto:andrew.cooper3@xxxxxxxxxx]
> Sent: 31 March 2017 20:51
> To: Xen-devel <xen-devel@xxxxxxxxxxxxx>
> Cc: Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>; Jan Beulich
> <JBeulich@xxxxxxxx>; Paul Durrant <Paul.Durrant@xxxxxxxxxx>; Tim
> (Xen.org) <tim@xxxxxxx>; George Dunlap <George.Dunlap@xxxxxxxxxx>; Jun
> Nakajima <jun.nakajima@xxxxxxxxx>; Kevin Tian <kevin.tian@xxxxxxxxx>;
> Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>; Suravee Suthikulpanit
> <suravee.suthikulpanit@xxxxxxx>; Julien Grall <julien.grall@xxxxxxx>
> Subject: [PATCH for 4.9 2/6] x86/hvm: Correct long mode predicate
>
> hvm_long_mode_enabled() tests for EFER.LMA, which is specifically
> different to EFER.LME.
>
> Rename it to match its behaviour, and have it strictly return a boolean
> value (although all its callers already use it in implicitly-boolean
> contexts, so no functional change).
>
> Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>

Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>

> ---
> CC: Jan Beulich <JBeulich@xxxxxxxx>
> CC: Paul Durrant <paul.durrant@xxxxxxxxxx>
> CC: Tim Deegan <tim@xxxxxxx>
> CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
> CC: Jun Nakajima <jun.nakajima@xxxxxxxxx>
> CC: Kevin Tian <kevin.tian@xxxxxxxxx>
> CC: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
> CC: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
> CC: Julien Grall <julien.grall@xxxxxxx>
> ---
>  xen/arch/x86/cpuid.c              |  2 +-
>  xen/arch/x86/hvm/emulate.c        |  2 +-
>  xen/arch/x86/hvm/hvm.c            | 10 +++++-----
>  xen/arch/x86/hvm/svm/svm.c        |  6 +++---
>  xen/arch/x86/hvm/vmx/vmx.c        |  6 +++---
>  xen/arch/x86/hvm/vmx/vvmx.c       |  8 ++++----
>  xen/arch/x86/mm/hap/hap.c         |  8 ++++----
>  xen/arch/x86/mm/shadow/common.c   |  4 ++--
>  xen/arch/x86/oprofile/backtrace.c |  2 +-
>  xen/include/asm-x86/hvm/hvm.h     |  3 +--
>  10 files changed, 25 insertions(+), 26 deletions(-)
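For anyone following along, the architectural background to the rename:
EFER.LME is the enable bit the guest writes, whereas EFER.LMA is the activity
bit that hardware manages, and LMA only becomes set once paging is enabled on
top of LME. A minimal sketch of that rule, assuming the EFER_LME / EFER_LMA /
X86_CR0_PG constants from the Xen headers (the helper itself is hypothetical,
not part of this patch):

    /* Illustrative only: when does EFER.LMA become set? */
    static bool efer_lma_becomes_set(uint64_t efer, unsigned long cr0)
    {
        /* LMA = LME && CR0.PG: long mode is only *active* under paging. */
        return (efer & EFER_LME) && (cr0 & X86_CR0_PG);
    }

A guest can therefore sit with LME set but LMA clear while paging is still
disabled, which is exactly why a predicate named "enabled" but testing LMA
was misleading.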
> diff --git a/xen/arch/x86/cpuid.c b/xen/arch/x86/cpuid.c
> index 1c6a6c6..d359e09 100644
> --- a/xen/arch/x86/cpuid.c
> +++ b/xen/arch/x86/cpuid.c
> @@ -911,7 +911,7 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
>      case 0x80000001:
>          /* SYSCALL is hidden outside of long mode on Intel. */
>          if ( p->x86_vendor == X86_VENDOR_INTEL &&
> -             is_hvm_domain(d) && !hvm_long_mode_enabled(v) )
> +             is_hvm_domain(d) && !hvm_long_mode_active(v) )
>              res->d &= ~cpufeat_mask(X86_FEATURE_SYSCALL);
>
>  common_leaf1_adjustments:
> diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
> index 4073715..3d084ca 100644
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -2051,7 +2051,7 @@ void hvm_emulate_init_per_insn(
>      unsigned int pfec = PFEC_page_present;
>      unsigned long addr;
>
> -    if ( hvm_long_mode_enabled(curr) &&
> +    if ( hvm_long_mode_active(curr) &&
>           hvmemul_ctxt->seg_reg[x86_seg_cs].attr.fields.l )
>          hvmemul_ctxt->ctxt.addr_size = hvmemul_ctxt->ctxt.sp_size = 64;
>      else
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 0282986..9f83cd8 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -2227,7 +2227,7 @@ int hvm_set_cr0(unsigned long value, bool_t may_defer)
>          }
>
>          /* When CR0.PG is cleared, LMA is cleared immediately. */
> -        if ( hvm_long_mode_enabled(v) )
> +        if ( hvm_long_mode_active(v) )
>          {
>              v->arch.hvm_vcpu.guest_efer &= ~EFER_LMA;
>              hvm_update_guest_efer(v);
> @@ -2321,7 +2321,7 @@ int hvm_set_cr4(unsigned long value, bool_t may_defer)
>
>      if ( !(value & X86_CR4_PAE) )
>      {
> -        if ( hvm_long_mode_enabled(v) )
> +        if ( hvm_long_mode_active(v) )
>          {
>              HVM_DBG_LOG(DBG_LEVEL_1, "Guest cleared CR4.PAE while "
>                          "EFER.LMA is set");
> @@ -2332,7 +2332,7 @@ int hvm_set_cr4(unsigned long value, bool_t may_defer)
>      old_cr = v->arch.hvm_vcpu.guest_cr[4];
>
>      if ( (value & X86_CR4_PCIDE) && !(old_cr & X86_CR4_PCIDE) &&
> -         (!hvm_long_mode_enabled(v) ||
> +         (!hvm_long_mode_active(v) ||
>           (v->arch.hvm_vcpu.guest_cr[3] & 0xfff)) )
>      {
>          HVM_DBG_LOG(DBG_LEVEL_1, "Guest attempts to change CR4.PCIDE from "
> @@ -3605,7 +3605,7 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
>
>      if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->rip,
>                                      sizeof(sig), hvm_access_insn_fetch,
> -                                    (hvm_long_mode_enabled(cur) &&
> +                                    (hvm_long_mode_active(cur) &&
>                                       cs->attr.fields.l) ? 64 :
>                                      cs->attr.fields.db ? 32 : 16, &addr) &&
>          (hvm_fetch_from_guest_linear(sig, addr, sizeof(sig),
> @@ -3616,7 +3616,7 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
>          regs->eflags &= ~X86_EFLAGS_RF;
>
>          /* Zero the upper 32 bits of %rip if not in 64bit mode. */
> -        if ( !(hvm_long_mode_enabled(cur) && cs->attr.fields.l) )
> +        if ( !(hvm_long_mode_active(cur) && cs->attr.fields.l) )
>              regs->rip = regs->eip;
>
>          add_taint(TAINT_HVM_FEP);
> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
> index b69789b..4d7e49f 100644
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -516,7 +516,7 @@ static int svm_guest_x86_mode(struct vcpu *v)
>          return 0;
>      if ( unlikely(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
>          return 1;
> -    if ( hvm_long_mode_enabled(v) && likely(vmcb->cs.attr.fields.l) )
> +    if ( hvm_long_mode_active(v) && likely(vmcb->cs.attr.fields.l) )
>          return 8;
>      return (likely(vmcb->cs.attr.fields.db) ? 4 : 2);
>  }
> @@ -2279,7 +2279,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
>
>      exit_reason = vmcb->exitcode;
>
> -    if ( hvm_long_mode_enabled(v) )
> +    if ( hvm_long_mode_active(v) )
>          HVMTRACE_ND(VMEXIT64, vcpu_guestmode ? TRC_HVM_NESTEDFLAG : 0,
>                      1/*cycles*/, 3, exit_reason,
>                      regs->eip, regs->rip >> 32, 0, 0, 0);
> @@ -2429,7 +2429,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
>      {
>          if ( trace_will_trace_event(TRC_SHADOW) )
>              break;
> -        if ( hvm_long_mode_enabled(v) )
> +        if ( hvm_long_mode_active(v) )
>              HVMTRACE_LONG_2D(PF_XEN, regs->error_code, TRC_PAR_LONG(va));
>          else
>              HVMTRACE_2D(PF_XEN, regs->error_code, va);
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index d201956..b6526c9 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -611,7 +611,7 @@ int vmx_guest_x86_mode(struct vcpu *v)
>      if ( unlikely(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
>          return 1;
>      __vmread(GUEST_CS_AR_BYTES, &cs_ar_bytes);
> -    if ( hvm_long_mode_enabled(v) &&
> +    if ( hvm_long_mode_active(v) &&
>           likely(cs_ar_bytes & X86_SEG_AR_CS_LM_ACTIVE) )
>          return 8;
>      return (likely(cs_ar_bytes & X86_SEG_AR_DEF_OP_SIZE) ? 4 : 2);
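As a side note, svm_guest_x86_mode() and vmx_guest_x86_mode() above share the
same decision ladder; distilled into a vendor-neutral sketch (the function and
parameter names are hypothetical):

    /* Sketch: effective instruction size class for the current guest state.
     * 0 = real mode, 1 = virtual 8086, 2/4 = 16/32-bit protected, 8 = 64-bit. */
    static int guest_x86_mode_sketch(bool pe, bool vm86, bool lma,
                                     bool cs_l, bool cs_db)
    {
        if ( !pe )
            return 0;             /* CR0.PE clear: real mode. */
        if ( vm86 )
            return 1;             /* EFLAGS.VM set: virtual 8086 mode. */
        if ( lma && cs_l )
            return 8;             /* Long mode active and CS.L set: 64-bit. */
        return cs_db ? 4 : 2;     /* CS.D/B selects 32-bit vs 16-bit. */
    }

hvm_long_mode_active(v) supplies the lma input here, so making it strictly
boolean keeps these predicates uniform across both vendors.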
> @@ -3392,7 +3392,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
>
>      __vmread(VM_EXIT_REASON, &exit_reason);
>
> -    if ( hvm_long_mode_enabled(v) )
> +    if ( hvm_long_mode_active(v) )
>          HVMTRACE_ND(VMEXIT64, 0, 1/*cycles*/, 3, exit_reason,
>                      regs->eip, regs->rip >> 32, 0, 0, 0);
>      else
> @@ -3632,7 +3632,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
>      {
>          if ( trace_will_trace_event(TRC_SHADOW) )
>              break;
> -        if ( hvm_long_mode_enabled(v) )
> +        if ( hvm_long_mode_active(v) )
>              HVMTRACE_LONG_2D(PF_XEN, regs->error_code,
>                               TRC_PAR_LONG(exit_qualification) );
>          else
> diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
> index 09e4250..e9860f7 100644
> --- a/xen/arch/x86/hvm/vmx/vvmx.c
> +++ b/xen/arch/x86/hvm/vmx/vvmx.c
> @@ -392,7 +392,7 @@ static int vmx_inst_check_privilege(struct cpu_user_regs *regs, int vmxop_check)
>      else if ( !nvmx_vcpu_in_vmx(v) )
>          goto invalid_op;
>
> -    if ( vmx_guest_x86_mode(v) < (hvm_long_mode_enabled(v) ? 8 : 2) )
> +    if ( vmx_guest_x86_mode(v) < (hvm_long_mode_active(v) ? 8 : 2) )
>          goto invalid_op;
>      else if ( nestedhvm_vcpu_in_guestmode(v) )
>          goto vmexit;
> @@ -1154,13 +1154,13 @@ static void virtual_vmentry(struct cpu_user_regs *regs)
>      /*
>       * EFER handling:
>       * hvm_set_efer won't work if CR0.PG = 1, so we change the value
> -     * directly to make hvm_long_mode_enabled(v) work in L2.
> +     * directly to make hvm_long_mode_active(v) work in L2.
>       * An additional update_paging_modes is also needed if
>       * there is 32/64 switch. v->arch.hvm_vcpu.guest_efer doesn't
>       * need to be saved, since its value on vmexit is determined by
>       * L1 exit_controls
>       */
> -    lm_l1 = !!hvm_long_mode_enabled(v);
> +    lm_l1 = !!hvm_long_mode_active(v);
>      lm_l2 = !!(get_vvmcs(v, VM_ENTRY_CONTROLS) & VM_ENTRY_IA32E_MODE);
>
>      if ( lm_l2 )
> @@ -1359,7 +1359,7 @@ static void virtual_vmexit(struct cpu_user_regs *regs)
>      nvcpu->nv_vmexit_pending = 0;
>      nvcpu->nv_vmswitch_in_progress = 1;
>
> -    lm_l2 = !!hvm_long_mode_enabled(v);
> +    lm_l2 = !!hvm_long_mode_active(v);
>      lm_l1 = !!(get_vvmcs(v, VM_EXIT_CONTROLS) & VM_EXIT_IA32E_MODE);
>
>      if ( lm_l1 )
> diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
> index a57b385..283d4b7 100644
> --- a/xen/arch/x86/mm/hap/hap.c
> +++ b/xen/arch/x86/mm/hap/hap.c
> @@ -690,10 +690,10 @@ static void hap_update_cr3(struct vcpu *v, int do_locking)
>  const struct paging_mode *
>  hap_paging_get_mode(struct vcpu *v)
>  {
> -    return !hvm_paging_enabled(v)   ? &hap_paging_real_mode :
> -        hvm_long_mode_enabled(v) ? &hap_paging_long_mode :
> -        hvm_pae_enabled(v)       ? &hap_paging_pae_mode  :
> -                                   &hap_paging_protected_mode;
> +    return !hvm_paging_enabled(v) ? &hap_paging_real_mode :
> +        hvm_long_mode_active(v)   ? &hap_paging_long_mode :
> +        hvm_pae_enabled(v)        ? &hap_paging_pae_mode  :
> +                                    &hap_paging_protected_mode;
>  }
>
>  static void hap_update_paging_modes(struct vcpu *v)
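One observation on the vvmx hunks: with the predicate now strictly boolean,
the remaining double negation at its call sites is redundant, e.g.:

    lm_l1 = !!hvm_long_mode_active(v);   /* the !! is now a no-op...    */
    lm_l1 = hvm_long_mode_active(v);     /* ...so these are equivalent. */

Harmless as it stands, but it could be dropped in a follow-up cleanup.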
> diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
> index 03cb24d..14a07dd 100644
> --- a/xen/arch/x86/mm/shadow/common.c
> +++ b/xen/arch/x86/mm/shadow/common.c
> @@ -331,7 +331,7 @@ const struct x86_emulate_ops *shadow_init_emulation(
>      creg = hvm_get_seg_reg(x86_seg_cs, sh_ctxt);
>
>      /* Work out the emulation mode. */
> -    if ( hvm_long_mode_enabled(v) && creg->attr.fields.l )
> +    if ( hvm_long_mode_active(v) && creg->attr.fields.l )
>      {
>          sh_ctxt->ctxt.addr_size = sh_ctxt->ctxt.sp_size = 64;
>      }
> @@ -2921,7 +2921,7 @@ static void sh_update_paging_modes(struct vcpu *v)
>          v->arch.guest_table = d->arch.paging.shadow.unpaged_pagetable;
>          v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 2);
>      }
> -    else if ( hvm_long_mode_enabled(v) )
> +    else if ( hvm_long_mode_active(v) )
>      {
>          // long mode guest...
>          v->arch.paging.mode =
> diff --git a/xen/arch/x86/oprofile/backtrace.c b/xen/arch/x86/oprofile/backtrace.c
> index f0fbb42..316821f 100644
> --- a/xen/arch/x86/oprofile/backtrace.c
> +++ b/xen/arch/x86/oprofile/backtrace.c
> @@ -47,7 +47,7 @@ dump_hypervisor_backtrace(struct vcpu *vcpu, const struct frame_head *head,
>  static inline int is_32bit_vcpu(struct vcpu *vcpu)
>  {
>      if (is_hvm_vcpu(vcpu))
> -        return !hvm_long_mode_enabled(vcpu);
> +        return !hvm_long_mode_active(vcpu);
>      else
>          return is_pv_32bit_vcpu(vcpu);
>  }
> diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
> index c854183..49c8001 100644
> --- a/xen/include/asm-x86/hvm/hvm.h
> +++ b/xen/include/asm-x86/hvm/hvm.h
> @@ -302,8 +302,7 @@ int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode);
>  #define hap_has_1gb (!!(hvm_funcs.hap_capabilities & HVM_HAP_SUPERPAGE_1GB))
>  #define hap_has_2mb (!!(hvm_funcs.hap_capabilities & HVM_HAP_SUPERPAGE_2MB))
>
> -#define hvm_long_mode_enabled(v) \
> -    ((v)->arch.hvm_vcpu.guest_efer & EFER_LMA)
> +#define hvm_long_mode_active(v) (!!((v)->arch.hvm_vcpu.guest_efer & EFER_LMA))
>
>  enum hvm_intblk
>  hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack);
> --
> 2.1.4
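The !! in the new definition is what provides the strict-boolean guarantee:
EFER_LMA is bit 10, so the raw masked value is 0x400, not 1. A contrived
example (variable names hypothetical) of the latent bug class this rules out:

    uint64_t efer = EFER_LME | EFER_LMA;
    uint8_t lma_raw  = efer & EFER_LMA;     /* 0x400 truncates to 0: wrong! */
    uint8_t lma_bool = !!(efer & EFER_LMA); /* always 0 or 1: correct.      */

As the commit message says, no current caller does this, but the boolean form
guarantees none ever can.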
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel