[Xen-devel] [PATCH v8 05/19] vmx: Merge MSR management routines
vmx_add_host_load_msr()/vmx_rm_host_load_msr() and
vmx_add_guest_msr()/vmx_rm_guest_msr()
share a fair amount of code. Merge them to simplify code maintenance.
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
---
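For reviewers: a minimal sketch of how a caller uses the merged interface
(the MSR and ordering here are purely illustrative; compare the
vpmu_core2.c hunk below):

    /* Add the MSR to both the host exit-load area and the guest
     * exit-store/entry-load area; drop both again on the error path. */
    if ( vmx_add_msr(MSR_CORE_PERF_GLOBAL_CTRL, VMX_HOST_MSR) ||
         vmx_add_msr(MSR_CORE_PERF_GLOBAL_CTRL, VMX_GUEST_MSR) )
        goto out_err;
    ...
 out_err:
    vmx_rm_msr(MSR_CORE_PERF_GLOBAL_CTRL, VMX_HOST_MSR);
    vmx_rm_msr(MSR_CORE_PERF_GLOBAL_CTRL, VMX_GUEST_MSR);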
xen/arch/x86/hvm/vmx/vmcs.c | 154 +++++++++++++++++--------------------
xen/arch/x86/hvm/vmx/vmx.c | 4 +-
xen/arch/x86/hvm/vmx/vpmu_core2.c | 8 +-
xen/include/asm-x86/hvm/vmx/vmcs.h | 10 ++-
4 files changed, 84 insertions(+), 92 deletions(-)
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 1f9ac39..4a59ffa 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1176,117 +1176,107 @@ int vmx_write_guest_msr(u32 msr, u64 val)
return -ESRCH;
}
-int vmx_add_guest_msr(u32 msr)
+int vmx_add_msr(u32 msr, u8 type)
{
struct vcpu *curr = current;
- unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
- struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
+ unsigned int idx, *msr_count;
+ struct vmx_msr_entry **msr_area;
+
+ ASSERT((type == VMX_GUEST_MSR) || (type == VMX_HOST_MSR));
+
+ if ( type == VMX_GUEST_MSR )
+ {
+ msr_count = &curr->arch.hvm_vmx.msr_count;
+ msr_area = &curr->arch.hvm_vmx.msr_area;
+ }
+ else
+ {
+ msr_count = &curr->arch.hvm_vmx.host_msr_count;
+ msr_area = &curr->arch.hvm_vmx.host_msr_area;
+ }
- if ( msr_area == NULL )
+ if ( *msr_area == NULL )
{
- if ( (msr_area = alloc_xenheap_page()) == NULL )
+ if ( (*msr_area = alloc_xenheap_page()) == NULL )
return -ENOMEM;
- curr->arch.hvm_vmx.msr_area = msr_area;
- __vmwrite(VM_EXIT_MSR_STORE_ADDR, virt_to_maddr(msr_area));
- __vmwrite(VM_ENTRY_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
+
+ if ( type == VMX_GUEST_MSR )
+ {
+ __vmwrite(VM_EXIT_MSR_STORE_ADDR, virt_to_maddr(*msr_area));
+ __vmwrite(VM_ENTRY_MSR_LOAD_ADDR, virt_to_maddr(*msr_area));
+ }
+ else
+ __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(*msr_area));
}
- for ( i = 0; i < msr_count; i++ )
- if ( msr_area[i].index == msr )
+ for ( idx = 0; idx < *msr_count; idx++ )
+ if ( (*msr_area)[idx].index == msr )
return 0;
- if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
+ if ( *msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
return -ENOSPC;
- msr_area[msr_count].index = msr;
- msr_area[msr_count].mbz = 0;
- msr_area[msr_count].data = 0;
- curr->arch.hvm_vmx.msr_count = ++msr_count;
- __vmwrite(VM_EXIT_MSR_STORE_COUNT, msr_count);
- __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, msr_count);
+ (*msr_area)[*msr_count].index = msr;
+ (*msr_area)[*msr_count].mbz = 0;
+ (*msr_count)++;
+ if ( type == VMX_GUEST_MSR )
+ {
+ (*msr_area)[*msr_count - 1].data = 0;
+ __vmwrite(VM_EXIT_MSR_STORE_COUNT, *msr_count);
+ __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, *msr_count);
+ }
+ else
+ {
+ rdmsrl(msr, (*msr_area)[*msr_count - 1].data);
+ __vmwrite(VM_EXIT_MSR_LOAD_COUNT, *msr_count);
+ }
return 0;
}
-void vmx_rm_guest_msr(u32 msr)
+void vmx_rm_msr(u32 msr, u8 type)
{
struct vcpu *curr = current;
- unsigned int idx, msr_count = curr->arch.hvm_vmx.msr_count;
- struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
+ unsigned int idx, *msr_count;
+ struct vmx_msr_entry **msr_area;
- if ( msr_area == NULL )
- return;
+ ASSERT((type == VMX_GUEST_MSR) || (type == VMX_HOST_MSR));
- for ( idx = 0; idx < msr_count; idx++ )
- if ( msr_area[idx].index == msr )
- break;
-
- if ( idx == msr_count )
- return;
-
- --msr_count;
- memmove(&msr_area[idx], &msr_area[idx + 1],
- sizeof(struct vmx_msr_entry) * (msr_count - idx));
- msr_area[msr_count].index = 0;
-
- curr->arch.hvm_vmx.msr_count = msr_count;
- __vmwrite(VM_EXIT_MSR_STORE_COUNT, msr_count);
- __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, msr_count);
-}
-
-int vmx_add_host_load_msr(u32 msr)
-{
- struct vcpu *curr = current;
- unsigned int i, msr_count = curr->arch.hvm_vmx.host_msr_count;
- struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.host_msr_area;
-
- if ( msr_area == NULL )
+ if ( type == VMX_GUEST_MSR )
{
- if ( (msr_area = alloc_xenheap_page()) == NULL )
- return -ENOMEM;
- curr->arch.hvm_vmx.host_msr_area = msr_area;
- __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
+ msr_count = &curr->arch.hvm_vmx.msr_count;
+ msr_area = &curr->arch.hvm_vmx.msr_area;
+ }
+ else
+ {
+ msr_count = &curr->arch.hvm_vmx.host_msr_count;
+ msr_area = &curr->arch.hvm_vmx.host_msr_area;
}
- for ( i = 0; i < msr_count; i++ )
- if ( msr_area[i].index == msr )
- return 0;
-
- if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
- return -ENOSPC;
-
- msr_area[msr_count].index = msr;
- msr_area[msr_count].mbz = 0;
- rdmsrl(msr, msr_area[msr_count].data);
- curr->arch.hvm_vmx.host_msr_count = ++msr_count;
- __vmwrite(VM_EXIT_MSR_LOAD_COUNT, msr_count);
-
- return 0;
-}
-
-void vmx_rm_host_load_msr(u32 msr)
-{
- struct vcpu *curr = current;
- unsigned int idx, msr_count = curr->arch.hvm_vmx.host_msr_count;
- struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.host_msr_area;
-
- if ( msr_area == NULL )
+ if ( *msr_area == NULL )
return;
- for ( idx = 0; idx < msr_count; idx++ )
- if ( msr_area[idx].index == msr )
+ for ( idx = 0; idx < *msr_count; idx++ )
+ if ( (*msr_area)[idx].index == msr )
break;
- if ( idx == msr_count )
+ if ( idx == *msr_count )
return;
- --msr_count;
- memmove(&msr_area[idx], &msr_area[idx + 1],
- sizeof(struct vmx_msr_entry) * (msr_count - idx));
- msr_area[msr_count].index = 0;
+ --(*msr_count);
+ memmove(&(*msr_area)[idx], &(*msr_area)[idx + 1],
+ sizeof(struct vmx_msr_entry) * (*msr_count - idx));
+ (*msr_area)[*msr_count].index = 0;
- curr->arch.hvm_vmx.host_msr_count = msr_count;
- __vmwrite(VM_EXIT_MSR_LOAD_COUNT, msr_count);
+ if ( type == VMX_GUEST_MSR )
+ {
+ __vmwrite(VM_EXIT_MSR_STORE_COUNT, *msr_count);
+ __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, *msr_count);
+ }
+ else
+ {
+ __vmwrite(VM_EXIT_MSR_LOAD_COUNT, *msr_count);
+ }
}
void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector)
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 2caa04a..22b3325 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2262,12 +2262,12 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
for ( ; (rc == 0) && lbr->count; lbr++ )
for ( i = 0; (rc == 0) && (i < lbr->count); i++ )
- if ( (rc = vmx_add_guest_msr(lbr->base + i)) == 0 )
+ if ( (rc = vmx_add_msr(lbr->base + i, VMX_GUEST_MSR)) == 0 )
vmx_disable_intercept_for_msr(v, lbr->base + i,
MSR_TYPE_R | MSR_TYPE_W);
}
if ( (rc < 0) ||
- (vmx_add_host_load_msr(msr) < 0) )
+ (vmx_add_msr(msr, VMX_HOST_MSR) < 0) )
hvm_inject_hw_exception(TRAP_machine_check, 0);
else
{
diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c
index dd7107d..fe78135 100644
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
@@ -371,10 +371,10 @@ static int core2_vpmu_alloc_resource(struct vcpu *v)
return 0;
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
- if ( vmx_add_host_load_msr(MSR_CORE_PERF_GLOBAL_CTRL) )
+ if ( vmx_add_msr(MSR_CORE_PERF_GLOBAL_CTRL, VMX_HOST_MSR) )
goto out_err;
- if ( vmx_add_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL) )
+ if ( vmx_add_msr(MSR_CORE_PERF_GLOBAL_CTRL, VMX_GUEST_MSR) )
goto out_err;
vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL,
core2_calc_intial_glb_ctrl_msr());
@@ -391,8 +391,8 @@ static int core2_vpmu_alloc_resource(struct vcpu *v)
return 1;
out_err:
- vmx_rm_host_load_msr(MSR_CORE_PERF_GLOBAL_CTRL);
- vmx_rm_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL);
+ vmx_rm_msr(MSR_CORE_PERF_GLOBAL_CTRL, VMX_HOST_MSR);
+ vmx_rm_msr(MSR_CORE_PERF_GLOBAL_CTRL, VMX_GUEST_MSR);
release_pmu_ownship(PMU_OWNER_HVM);
printk("Failed to allocate VPMU resources for domain %u vcpu %u\n",
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 4cfcd47..80bc998 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -475,14 +475,16 @@ enum vmcs_field {
#define MSR_TYPE_R 1
#define MSR_TYPE_W 2
+
+#define VMX_GUEST_MSR 0
+#define VMX_HOST_MSR 1
+
void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr, int type);
void vmx_enable_intercept_for_msr(struct vcpu *v, u32 msr, int type);
int vmx_read_guest_msr(u32 msr, u64 *val);
int vmx_write_guest_msr(u32 msr, u64 val);
-int vmx_add_guest_msr(u32 msr);
-void vmx_rm_guest_msr(u32 msr);
-int vmx_add_host_load_msr(u32 msr);
-void vmx_rm_host_load_msr(u32 msr);
+int vmx_add_msr(u32 msr, u8 type);
+void vmx_rm_msr(u32 msr, u8 type);
void vmx_vmcs_switch(struct vmcs_struct *from, struct vmcs_struct *to);
void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector);
void vmx_clear_eoi_exit_bitmap(struct vcpu *v, u8 vector);
--
1.8.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel