Re: [Xen-devel] [PATCH 07/17] vmx: nest: handling VMX instruction exits
At 10:41 +0100 on 22 Apr (1271932879), Qing He wrote:
> add a VMX instruction decoder and handle simple VMX instructions
> except vmlaunch/vmresume and invept
>
> Signed-off-by: Qing He <qing.he@xxxxxxxxx>

> +static void decode_vmx_inst(struct cpu_user_regs *regs,
> +                            struct vmx_inst_decoded *decode)
> +{
> +    struct vcpu *v = current;
> +    union vmx_inst_info info;
> +    struct segment_register seg;
> +    unsigned long base, index, seg_base, disp;
> +    int scale;
> +
> +    info.word = __vmread(VMX_INSTRUCTION_INFO);
> +
> +    if ( info.fields.memreg ) {
> +        decode->type = VMX_INST_MEMREG_TYPE_REG;
> +        decode->reg1 = info.fields.reg1;
> +    }
> +    else
> +    {
> +        decode->type = VMX_INST_MEMREG_TYPE_MEMORY;
> +        hvm_get_segment_register(v, sreg_to_index[info.fields.segment], &seg);
> +        seg_base = seg.base;
> +
> +        base = info.fields.base_reg_invalid ? 0 :
> +            reg_read(regs, info.fields.base_reg);
> +
> +        index = info.fields.index_reg_invalid ? 0 :
> +            reg_read(regs, info.fields.index_reg);
> +
> +        scale = 1 << info.fields.scaling;
> +
> +        disp = __vmread(EXIT_QUALIFICATION);
> +
> +        decode->mem = seg_base + base + index * scale + disp;
> +        decode->len = 1 << (info.fields.addr_size + 1);

Don't we need to check the segment limit, type &c here?

> +    }
> +
> +    decode->reg2 = info.fields.reg2;
> +}
> +
> +static void vmreturn(struct cpu_user_regs *regs, enum vmx_ops_result res)
> +{
> +    unsigned long eflags = regs->eflags;
> +    unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
> +                         X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF;
> +
> +    eflags &= ~mask;
> +
> +    switch ( res ) {
> +    case VMSUCCEED:
> +        break;
> +    case VMFAIL_VALID:
> +        /* TODO: error number of VMFailValid */

? :)

> +        eflags |= X86_EFLAGS_ZF;
> +        break;
> +    case VMFAIL_INVALID:
> +    default:
> +        eflags |= X86_EFLAGS_CF;
> +        break;
> +    }
> +
> +    regs->eflags = eflags;
> +}
> +
> +static void __clear_current_vvmcs(struct vmx_nest_struct *nest)
> +{
> +    if ( nest->svmcs )
> +        __vmpclear(virt_to_maddr(nest->svmcs));
> +
> +    hvm_copy_to_guest_phys(nest->gvmcs_pa, nest->vvmcs, PAGE_SIZE);

Do we care about failure here?

> +    nest->vmcs_invalid = 1;
> +}
> +
> +/*
> + * VMX instructions handling
> + */
> +
> +int vmx_nest_handle_vmxon(struct cpu_user_regs *regs)
> +{
> +    struct vcpu *v = current;
> +    struct vmx_nest_struct *nest = &v->arch.hvm_vmx.nest;
> +    struct vmx_inst_decoded decode;
> +    unsigned long gpa = 0;
> +
> +    if ( !v->domain->arch.hvm_domain.nesting_avail )
> +        goto invalid_op;
> +
> +    decode_vmx_inst(regs, &decode);
> +
> +    ASSERT(decode.type == VMX_INST_MEMREG_TYPE_MEMORY);
> +    hvm_copy_from_guest_virt(&gpa, decode.mem, decode.len, 0);

We _definitely_ care about failure here!  We need to inject #PF rather
than just using zero (and #GP/#SS based on the segment limit check I
mentioned above).

Also somewhere we should be checking CR0.PE, CR4.VMXE and RFLAGS.VM and
returning #UD if they're not correct.  And checking that CPL == 0, too.
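Something along these lines would do for the mode/CPL part, called at the
top of each of the vmx_nest_handle_*() handlers.  This is only a rough,
untested sketch (the function name is just for illustration, and the
helper/field names are from memory so may need adjusting for this tree):

    static int vmx_nest_permission_check(struct cpu_user_regs *regs)
    {
        struct vcpu *v = current;
        struct segment_register ss;

        /* VMX instructions #UD outside VMX-capable protected mode. */
        if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) ||
             !(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_VMXE) ||
             (regs->eflags & X86_EFLAGS_VM) )
        {
            hvm_inject_exception(TRAP_invalid_op, 0, 0);
            return X86EMUL_EXCEPTION;
        }

        /* #GP(0) if CPL != 0; SS.DPL is the usual way to read the CPL. */
        hvm_get_segment_register(v, x86_seg_ss, &ss);
        if ( ss.attr.fields.dpl != 0 )
        {
            hvm_inject_exception(TRAP_gp_fault, 0, 0);
            return X86EMUL_EXCEPTION;
        }

        return X86EMUL_OKAY;
    }

The #PF/#GP/#SS cases for the memory operand would then be handled in the
decode and copy paths themselves.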
> +    nest->guest_vmxon_pa = gpa;
> +    nest->gvmcs_pa = 0;
> +    nest->vmcs_invalid = 1;
> +    nest->vvmcs = alloc_xenheap_page();
> +    if ( !nest->vvmcs )
> +    {
> +        gdprintk(XENLOG_ERR, "nest: allocation for virtual vmcs failed\n");
> +        vmreturn(regs, VMFAIL_INVALID);
> +        goto out;
> +    }

Could we just take a writeable refcount of the guest memory rather than
allocating our own copy?  ISTR the guest's not allowed to write directly
to the VMCS memory anyway.  It would be expensive on 32-bit Xen (because
of having to map/unmap all the time) but cheaper on 64-bit Xen (by
skipping various 4k memcpy()s).

> +    nest->svmcs = alloc_xenheap_page();
> +    if ( !nest->svmcs )
> +    {
> +        gdprintk(XENLOG_ERR, "nest: allocation for shadow vmcs failed\n");
> +        free_xenheap_page(nest->vvmcs);
> +        vmreturn(regs, VMFAIL_INVALID);
> +        goto out;
> +    }
> +
> +    /*
> +     * `fork' the host vmcs to shadow_vmcs
> +     * vmcs_lock is not needed since we are on current
> +     */
> +    nest->hvmcs = v->arch.hvm_vmx.vmcs;
> +    __vmpclear(virt_to_maddr(nest->hvmcs));
> +    memcpy(nest->svmcs, nest->hvmcs, PAGE_SIZE);
> +    __vmptrld(virt_to_maddr(nest->hvmcs));
> +    v->arch.hvm_vmx.launched = 0;
> +
> +    vmreturn(regs, VMSUCCEED);
> +
> +out:
> +    return X86EMUL_OKAY;
> +
> +invalid_op:
> +    hvm_inject_exception(TRAP_invalid_op, 0, 0);
> +    return X86EMUL_EXCEPTION;
> +}
> +
> +int vmx_nest_handle_vmxoff(struct cpu_user_regs *regs)
> +{

Needs error handling...

> +    struct vcpu *v = current;
> +    struct vmx_nest_struct *nest = &v->arch.hvm_vmx.nest;
> +
> +    if ( unlikely(!nest->guest_vmxon_pa) )
> +        goto invalid_op;
> +
> +    nest->guest_vmxon_pa = 0;
> +    __vmpclear(virt_to_maddr(nest->svmcs));
> +
> +    free_xenheap_page(nest->vvmcs);
> +    free_xenheap_page(nest->svmcs);
> +
> +    vmreturn(regs, VMSUCCEED);
> +    return X86EMUL_OKAY;
> +
> +invalid_op:
> +    hvm_inject_exception(TRAP_invalid_op, 0, 0);
> +    return X86EMUL_EXCEPTION;
> +}
> +
> +int vmx_nest_handle_vmptrld(struct cpu_user_regs *regs)
> +{
> +    struct vcpu *v = current;
> +    struct vmx_inst_decoded decode;
> +    struct vmx_nest_struct *nest = &v->arch.hvm_vmx.nest;
> +    unsigned long gpa = 0;
> +
> +    if ( unlikely(!nest->guest_vmxon_pa) )
> +        goto invalid_op;
> +
> +    decode_vmx_inst(regs, &decode);
> +
> +    ASSERT(decode.type == VMX_INST_MEMREG_TYPE_MEMORY);
> +    hvm_copy_from_guest_virt(&gpa, decode.mem, decode.len, 0);

Error handling...  #PF, segments, CPL != 0

> +    if ( gpa == nest->guest_vmxon_pa || gpa & 0xfff )
> +    {
> +        vmreturn(regs, VMFAIL_INVALID);
> +        goto out;
> +    }
> +
> +    if ( nest->gvmcs_pa != gpa )
> +    {
> +        if ( !nest->vmcs_invalid )
> +            __clear_current_vvmcs(nest);
> +        nest->gvmcs_pa = gpa;
> +        ASSERT(nest->vmcs_invalid == 1);
> +    }
> +
> +    if ( nest->vmcs_invalid )
> +    {
> +        hvm_copy_from_guest_phys(nest->vvmcs, nest->gvmcs_pa, PAGE_SIZE);

I think you know what I'm going to say here. :)
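For all of these hvm_copy_*() calls, a pattern like the following ought to
cover it; IIRC hvm_copy_from_guest_virt() already injects the #PF itself
when the translation fails, so the caller mostly just has to stop emulating.
Again, this is an untested sketch and the enum names are from memory:

    switch ( hvm_copy_from_guest_virt(&gpa, decode.mem, decode.len, 0) )
    {
    case HVMCOPY_okay:
        break;
    case HVMCOPY_bad_gva_to_gfn:
        /* #PF should already have been injected by hvm_copy_*(). */
        return X86EMUL_EXCEPTION;
    default:
        /* e.g. a bad gfn->mfn translation: give up on the emulation. */
        return X86EMUL_UNHANDLEABLE;
    }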
Apart from the error paths the rest of this patch looks OK to me.

Tim.

-- 
Tim Deegan <Tim.Deegan@xxxxxxxxxx>
Principal Software Engineer, XenServer Engineering
Citrix Systems UK Ltd.  (Company #02937203, SL9 0BG)