Re: [Xen-devel] [PATCH v2] nested VMX: don't ignore mapping errors
On 11/11/13 11:06, Jan Beulich wrote:
> Rather than ignoring failures to map the virtual VMCS as well as MSR or
> I/O port bitmaps, convert those into failures of the respective
> instructions (avoiding NULL pointer dereferences). Ultimately such
> failures should be handled transparently (by using transient mappings
> when they actually need to be accessed, just like nested SVM does).
>
> While at it, also eliminate a pointless (and incomplete) result check:
> non-global (i.e. non-permanent) map operations can't fail.
>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Looks better. It might be worth leaving a comment around
_map_{io,msr}_bitmap() indicating that support for shared/paged pages is
currently TODO.
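Something along these lines, say (the wording is purely illustrative; the
function name and signature are the ones introduced by this patch):

    /*
     * TODO: shared and paged-out guest frames are not handled here;
     * hvm_map_guest_frame_ro() fails for them, so the mapping attempt
     * is simply reported as unsuccessful.  Eventually these should be
     * mapped transiently at the point of access, as nested SVM does.
     */
    static bool_t __must_check _map_msr_bitmap(struct vcpu *v)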
I also think the commit message should indicate that shared/paged pages
won't work. This fundamentally means that nested virt is currently
mutually exclusive with sharing/paging.
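For reference, the transient alternative the commit message alludes to
would look roughly like this. This is only a sketch, not code from the
patch: nvmx_msr_bit_set() is a made-up accessor, and the point is simply
mapping with permanent=0 around each access instead of holding a global
mapping that can fail at VMPTRLD/VMWRITE time:

    /* Hypothetical accessor: map the MSR bitmap page only for this access. */
    static bool_t nvmx_msr_bit_set(struct vcpu *v, unsigned int bit)
    {
        unsigned long gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, MSR_BITMAP);
        unsigned long *map = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 0);
        bool_t set = 0;

        if ( map )
        {
            set = !!test_bit(bit, map);
            hvm_unmap_guest_frame(map, 0);
        }

        return set;
    }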
Either way, Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> ---
> v2: Remove the change to nvmx_handle_vmclear() -
> hvm_map_guest_frame_*() can return NULL for reasons other than
> map_domain_page_global() doing so (pointed out by Andrew Cooper).
> That leaves the function broken though (it ought to handle the
> shared/paged-out cases in an appropriate way).
>
> --- a/xen/arch/x86/hvm/vmx/vvmx.c
> +++ b/xen/arch/x86/hvm/vmx/vvmx.c
> @@ -747,7 +747,7 @@ static void __clear_current_vvmcs(struct
> __vmpclear(virt_to_maddr(nvcpu->nv_n2vmcx));
> }
>
> -static void __map_msr_bitmap(struct vcpu *v)
> +static bool_t __must_check _map_msr_bitmap(struct vcpu *v)
> {
> struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
> unsigned long gpa;
> @@ -756,9 +756,11 @@ static void __map_msr_bitmap(struct vcpu
> hvm_unmap_guest_frame(nvmx->msrbitmap, 1);
> gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, MSR_BITMAP);
> nvmx->msrbitmap = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
> +
> + return nvmx->msrbitmap != NULL;
> }
>
> -static void __map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
> +static bool_t __must_check _map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
> {
> struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
> unsigned long gpa;
> @@ -769,12 +771,14 @@ static void __map_io_bitmap(struct vcpu
> hvm_unmap_guest_frame(nvmx->iobitmap[index], 1);
> gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, vmcs_reg);
> nvmx->iobitmap[index] = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
> +
> + return nvmx->iobitmap[index] != NULL;
> }
>
> -static inline void map_io_bitmap_all(struct vcpu *v)
> +static inline bool_t __must_check map_io_bitmap_all(struct vcpu *v)
> {
> - __map_io_bitmap (v, IO_BITMAP_A);
> - __map_io_bitmap (v, IO_BITMAP_B);
> + return _map_io_bitmap(v, IO_BITMAP_A) &&
> + _map_io_bitmap(v, IO_BITMAP_B);
> }
>
> static void nvmx_purge_vvmcs(struct vcpu *v)
> @@ -1610,9 +1614,15 @@ int nvmx_handle_vmptrld(struct cpu_user_
> if ( nvcpu->nv_vvmcxaddr == VMCX_EADDR )
> {
> nvcpu->nv_vvmcx = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT, 1);
> - nvcpu->nv_vvmcxaddr = gpa;
> - map_io_bitmap_all (v);
> - __map_msr_bitmap(v);
> + if ( nvcpu->nv_vvmcx )
> + nvcpu->nv_vvmcxaddr = gpa;
> + if ( !nvcpu->nv_vvmcx ||
> + !map_io_bitmap_all(v) ||
> + !_map_msr_bitmap(v) )
> + {
> + vmreturn(regs, VMFAIL_VALID);
> + goto out;
> + }
> }
>
> if ( cpu_has_vmx_vmcs_shadowing )
> @@ -1724,6 +1734,7 @@ int nvmx_handle_vmwrite(struct cpu_user_
> struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
> unsigned long operand;
> u64 vmcs_encoding;
> + bool_t okay = 1;
>
> if ( decode_vmx_inst(regs, &decode, &operand, 0)
> != X86EMUL_OKAY )
> @@ -1732,16 +1743,21 @@ int nvmx_handle_vmwrite(struct cpu_user_
> vmcs_encoding = reg_read(regs, decode.reg2);
> __set_vvmcs(nvcpu->nv_vvmcx, vmcs_encoding, operand);
>
> - if ( vmcs_encoding == IO_BITMAP_A || vmcs_encoding == IO_BITMAP_A_HIGH )
> - __map_io_bitmap (v, IO_BITMAP_A);
> - else if ( vmcs_encoding == IO_BITMAP_B ||
> - vmcs_encoding == IO_BITMAP_B_HIGH )
> - __map_io_bitmap (v, IO_BITMAP_B);
> + switch ( vmcs_encoding )
> + {
> + case IO_BITMAP_A: case IO_BITMAP_A_HIGH:
> + okay = _map_io_bitmap(v, IO_BITMAP_A);
> + break;
> + case IO_BITMAP_B: case IO_BITMAP_B_HIGH:
> + okay = _map_io_bitmap(v, IO_BITMAP_B);
> + break;
> + case MSR_BITMAP: case MSR_BITMAP_HIGH:
> + okay = _map_msr_bitmap(v);
> + break;
> + }
>
> - if ( vmcs_encoding == MSR_BITMAP || vmcs_encoding == MSR_BITMAP_HIGH )
> - __map_msr_bitmap(v);
> + vmreturn(regs, okay ? VMSUCCEED : VMFAIL_VALID);
>
> - vmreturn(regs, VMSUCCEED);
> return X86EMUL_OKAY;
> }
>
>
>