Re: [Xen-devel] [PATCH v6 05/19] vmx: Merge MSR management routines
On Tuesday 13 May 2014, 11:53:19, Boris Ostrovsky wrote:
> vmx_add_host_load_msr()/vmx_rm_host_load_msr() and
> vmx_add_guest_msr()/vmx_rm_guest_msr()
> share a fair amount of code. Merge them to simplify code maintenance.
Another hypervisor crash.
>
> Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
> ---
> xen/arch/x86/hvm/vmx/vmcs.c        | 154 +++++++++++++++++--------------------
> xen/arch/x86/hvm/vmx/vmx.c         |   4 +-
> xen/arch/x86/hvm/vmx/vpmu_core2.c  |   8 +-
> xen/include/asm-x86/hvm/vmx/vmcs.h |  10 ++-
> 4 files changed, 83 insertions(+), 93 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
> index 0f43a1b..aaa3691 100644
> --- a/xen/arch/x86/hvm/vmx/vmcs.c
> +++ b/xen/arch/x86/hvm/vmx/vmcs.c
> @@ -1172,121 +1172,109 @@ int vmx_write_guest_msr(u32 msr, u64 val)
> return -ESRCH;
> }
>
> -int vmx_add_guest_msr(u32 msr)
> +int vmx_add_msr(u32 msr, u8 type)
> {
> struct vcpu *curr = current;
> - unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
> - struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
> + unsigned int idx, *msr_count;
> + struct vmx_msr_entry **msr_area;
>
> - if ( msr_area == NULL )
> + ASSERT( (type == VMX_GUEST_MSR) || (type == VMX_HOST_MSR) );
> +
> + if ( type == VMX_GUEST_MSR )
> {
> - if ( (msr_area = alloc_xenheap_page()) == NULL )
> + msr_count = &curr->arch.hvm_vmx.msr_count;
> + msr_area = &curr->arch.hvm_vmx.msr_area;
> + }
> + else
> + {
> + msr_count = &curr->arch.hvm_vmx.host_msr_count;
> + msr_area = &curr->arch.hvm_vmx.host_msr_area;
> + }
> +
> + if ( *msr_area == NULL )
> + {
> + if ( (*msr_area = alloc_xenheap_page()) == NULL )
> return -ENOMEM;
> - curr->arch.hvm_vmx.msr_area = msr_area;
> - __vmwrite(VM_EXIT_MSR_STORE_ADDR, virt_to_maddr(msr_area));
> - __vmwrite(VM_ENTRY_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
> +
> + if ( type == VMX_GUEST_MSR )
> + {
> + __vmwrite(VM_EXIT_MSR_STORE_ADDR, virt_to_maddr(*msr_area));
> + __vmwrite(VM_ENTRY_MSR_LOAD_ADDR, virt_to_maddr(*msr_area));
> + }
> + else
> + __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(*msr_area));
> }
>
> - for ( i = 0; i < msr_count; i++ )
> - if ( msr_area[i].index == msr )
> + for ( idx = 0; idx < *msr_count; idx++ )
> + if ( msr_area[idx]->index == msr )
> return 0;
>
> - if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
> + if ( *msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
> return -ENOSPC;
>
> - msr_area[msr_count].index = msr;
> - msr_area[msr_count].mbz = 0;
> - msr_area[msr_count].data = 0;
> - curr->arch.hvm_vmx.msr_count = ++msr_count;
> - __vmwrite(VM_EXIT_MSR_STORE_COUNT, msr_count);
> - __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, msr_count);
> + msr_area[*msr_count]->index = msr;
The addressing of the msr_area[] array is wrong: msr_area is now a pointer to
the array pointer, so msr_area[*msr_count] steps past the single stored pointer
and dereferences whatever happens to sit there. You need something like
    (*msr_area)[*msr_count].index = msr;
or similar, and the same applies to every other msr_area[...]-> access this
patch introduces.
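To make the failure mode concrete, here is a minimal standalone sketch (not
Xen code: struct vmx_msr_entry is simplified, and calloc() stands in for
alloc_xenheap_page()):

    #include <stdio.h>
    #include <stdlib.h>

    struct vmx_msr_entry { unsigned int index, mbz; unsigned long long data; };

    int main(void)
    {
        struct vmx_msr_entry *area = calloc(4, sizeof(*area));
        struct vmx_msr_entry **msr_area = &area; /* shape used in the patch */
        unsigned int count = 1;

        if ( area == NULL )
            return 1;

        (*msr_area)[count].index = 0x186;  /* correct: index into the array */

        /* What the patch does instead:
         *     msr_area[count]->index = 0x186;
         * msr_area[1] reads past the single pointer stored at msr_area[0],
         * and dereferencing that garbage value is what crashes the
         * hypervisor. */

        printf("entry %u: index = %#x\n", count, area[count].index);
        free(area);
        return 0;
    }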
Dietmar.
> + msr_area[*msr_count]->mbz = 0;
> + (*msr_count)++;
> + if ( type == VMX_GUEST_MSR )
> + {
> + msr_area[*msr_count - 1]->data = 0;
> + __vmwrite(VM_EXIT_MSR_STORE_COUNT, *msr_count);
> + __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, *msr_count);
> + }
> + else
> + {
> + rdmsrl(msr, msr_area[*msr_count - 1]->data);
> + __vmwrite(VM_EXIT_MSR_LOAD_COUNT, *msr_count);
> + }
>
> return 0;
> }
>
> -void vmx_rm_guest_msr(u32 msr)
> +void vmx_rm_msr(u32 msr, u8 type)
> {
> struct vcpu *curr = current;
> - unsigned int idx, msr_count = curr->arch.hvm_vmx.msr_count;
> - struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
> + unsigned int idx, *msr_count;
> + struct vmx_msr_entry **msr_area;
>
> - if ( msr_area == NULL )
> - return;
> -
> - for ( idx = 0; idx < msr_count; idx++ )
> - if ( msr_area[idx].index == msr )
> - break;
> + ASSERT( (type == VMX_GUEST_MSR) || (type == VMX_HOST_MSR) );
>
> - if ( idx == msr_count )
> - return;
> -
> - for ( ; idx < msr_count - 1; idx++ )
> + if ( type == VMX_GUEST_MSR )
> {
> - msr_area[idx].index = msr_area[idx + 1].index;
> - msr_area[idx].data = msr_area[idx + 1].data;
> + msr_count = &curr->arch.hvm_vmx.msr_count;
> + msr_area = &curr->arch.hvm_vmx.msr_area;
> }
> - msr_area[msr_count - 1].index = 0;
> -
> - curr->arch.hvm_vmx.msr_count = --msr_count;
> - __vmwrite(VM_EXIT_MSR_STORE_COUNT, msr_count);
> - __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, msr_count);
> -}
> -
> -int vmx_add_host_load_msr(u32 msr)
> -{
> - struct vcpu *curr = current;
> - unsigned int i, msr_count = curr->arch.hvm_vmx.host_msr_count;
> - struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.host_msr_area;
> -
> - if ( msr_area == NULL )
> + else
> {
> - if ( (msr_area = alloc_xenheap_page()) == NULL )
> - return -ENOMEM;
> - curr->arch.hvm_vmx.host_msr_area = msr_area;
> - __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
> + msr_count = &curr->arch.hvm_vmx.host_msr_count;
> + msr_area = &curr->arch.hvm_vmx.host_msr_area;
> }
>
> - for ( i = 0; i < msr_count; i++ )
> - if ( msr_area[i].index == msr )
> - return 0;
> -
> - if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
> - return -ENOSPC;
> -
> - msr_area[msr_count].index = msr;
> - msr_area[msr_count].mbz = 0;
> - rdmsrl(msr, msr_area[msr_count].data);
> - curr->arch.hvm_vmx.host_msr_count = ++msr_count;
> - __vmwrite(VM_EXIT_MSR_LOAD_COUNT, msr_count);
> -
> - return 0;
> -}
> -
> -void vmx_rm_host_load_msr(u32 msr)
> -{
> - struct vcpu *curr = current;
> - unsigned int idx, msr_count = curr->arch.hvm_vmx.host_msr_count;
> - struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.host_msr_area;
> -
> - if ( msr_area == NULL )
> + if ( *msr_area == NULL )
> return;
>
> - for ( idx = 0; idx < msr_count; idx++ )
> - if ( msr_area[idx].index == msr )
> + for ( idx = 0; idx < *msr_count; idx++ )
> + if ( msr_area[idx]->index == msr )
> break;
>
> - if ( idx == msr_count )
> + if ( idx == *msr_count )
> return;
>
> - for ( ; idx < msr_count - 1; idx++ )
> + for ( ; idx < *msr_count - 1; idx++ )
> {
> - msr_area[idx].index = msr_area[idx + 1].index;
> - msr_area[idx].data = msr_area[idx + 1].data;
> + msr_area[idx]->index = msr_area[idx + 1]->index;
> + msr_area[idx]->data = msr_area[idx + 1]->data;
> + }
> + msr_area[*msr_count - 1]->index = 0;
> + (*msr_count)--;
> + if ( type == VMX_GUEST_MSR )
> + {
> + __vmwrite(VM_EXIT_MSR_STORE_COUNT, *msr_count);
> + __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, *msr_count);
> + }
> + else
> + {
> + __vmwrite(VM_EXIT_MSR_LOAD_COUNT, *msr_count);
> }
> - msr_area[msr_count - 1].index = 0;
> -
> - curr->arch.hvm_vmx.host_msr_count = --msr_count;
> - __vmwrite(VM_EXIT_MSR_LOAD_COUNT, msr_count);
> }
>
> void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector)
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index ecdbc17..23d58d9 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -2234,12 +2234,12 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
>
> for ( ; (rc == 0) && lbr->count; lbr++ )
> for ( i = 0; (rc == 0) && (i < lbr->count); i++ )
> - if ( (rc = vmx_add_guest_msr(lbr->base + i)) == 0 )
> + if ( (rc = vmx_add_msr(lbr->base + i, VMX_GUEST_MSR)) == 0 )
> vmx_disable_intercept_for_msr(v, lbr->base + i,
> MSR_TYPE_R | MSR_TYPE_W);
> }
>
> if ( (rc < 0) ||
> - (vmx_add_host_load_msr(msr) < 0) )
> + (vmx_add_msr(msr, VMX_HOST_MSR) < 0) )
> hvm_inject_hw_exception(TRAP_machine_check, 0);
> else
> {
> diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c
> index 0a9c643..5e980fa 100644
> --- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
> +++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
> @@ -370,10 +370,10 @@ static int core2_vpmu_alloc_resource(struct vcpu *v)
> return 0;
>
> wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
> - if ( vmx_add_host_load_msr(MSR_CORE_PERF_GLOBAL_CTRL) )
> + if ( vmx_add_msr(MSR_CORE_PERF_GLOBAL_CTRL, VMX_HOST_MSR) )
> goto out_err;
>
> - if ( vmx_add_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL) )
> + if ( vmx_add_msr(MSR_CORE_PERF_GLOBAL_CTRL, VMX_GUEST_MSR) )
> goto out_err;
> vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL,
> core2_calc_intial_glb_ctrl_msr());
> @@ -390,8 +390,8 @@ static int core2_vpmu_alloc_resource(struct vcpu *v)
> return 1;
>
> out_err:
> - vmx_rm_host_load_msr(MSR_CORE_PERF_GLOBAL_CTRL);
> - vmx_rm_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL);
> + vmx_rm_msr(MSR_CORE_PERF_GLOBAL_CTRL, VMX_HOST_MSR);
> + vmx_rm_msr(MSR_CORE_PERF_GLOBAL_CTRL, VMX_GUEST_MSR);
> release_pmu_ownship(PMU_OWNER_HVM);
>
> printk("Failed to allocate VPMU resources for domain %u vcpu %u\n",
> diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
> index 50befe1..dd34b2c 100644
> --- a/xen/include/asm-x86/hvm/vmx/vmcs.h
> +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
> @@ -475,14 +475,16 @@ enum vmcs_field {
>
> #define MSR_TYPE_R 1
> #define MSR_TYPE_W 2
> +
> +#define VMX_GUEST_MSR 0
> +#define VMX_HOST_MSR 1
> +
> void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr, int type);
> void vmx_enable_intercept_for_msr(struct vcpu *v, u32 msr, int type);
> int vmx_read_guest_msr(u32 msr, u64 *val);
> int vmx_write_guest_msr(u32 msr, u64 val);
> -int vmx_add_guest_msr(u32 msr);
> -void vmx_rm_guest_msr(u32 msr);
> -int vmx_add_host_load_msr(u32 msr);
> -void vmx_rm_host_load_msr(u32 msr);
> +int vmx_add_msr(u32 msr, u8 type);
> +void vmx_rm_msr(u32 msr, u8 type);
> void vmx_vmcs_switch(struct vmcs_struct *from, struct vmcs_struct *to);
> void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector);
> void vmx_clear_eoi_exit_bitmap(struct vcpu *v, u8 vector);
>
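For what it's worth, one way to keep this class of bug out of the merged
helpers is to dereference the pointer-to-pointer exactly once, right after the
allocation check, and index through a local from then on. A sketch of the
pattern only, not a proposed patch (toy_add_entry and the calloc()-backed
array are stand-ins for the real allocation and VMCS plumbing):

    #include <stdlib.h>

    struct vmx_msr_entry { unsigned int index, mbz; unsigned long long data; };

    static int toy_add_entry(struct vmx_msr_entry **msr_area,
                             unsigned int *msr_count, unsigned int msr)
    {
        struct vmx_msr_entry *entries;
        unsigned int idx;

        if ( *msr_area == NULL &&
             (*msr_area = calloc(64, sizeof(**msr_area))) == NULL )
            return -1;

        entries = *msr_area;                 /* dereference exactly once */

        for ( idx = 0; idx < *msr_count; idx++ )
            if ( entries[idx].index == msr ) /* natural indexing from here on */
                return 0;

        entries[*msr_count].index = msr;
        entries[*msr_count].mbz = 0;
        entries[*msr_count].data = 0;
        (*msr_count)++;
        return 0;
    }

    int main(void)
    {
        struct vmx_msr_entry *area = NULL;
        unsigned int count = 0;
        return toy_add_entry(&area, &count, 0x186);
    }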
--
Company details: http://ts.fujitsu.com/imprint.html