[Xen-devel] [PATCH 15/27] x86/hvm: Improve CPUID and MSR handling using named features
This avoids hvm_cpuid() recursing into itself, and the MSR paths using
hvm_cpuid() to obtain information which is directly available.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
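
For review convenience, a minimal before/after sketch of the
transformation this patch applies throughout (illustrative only, not
part of the patch; it assumes the struct cpuid_policy named-feature
layout introduced earlier in this series):

    /* Old: re-enter hvm_cpuid() just to test a single feature bit. */
    unsigned int _ecx;
    hvm_cpuid(1, NULL, NULL, &_ecx, NULL);
    if ( _ecx & cpufeat_mask(X86_FEATURE_XSAVE) )
        /* ... */;

    /* New: read the named feature straight out of the domain's policy. */
    const struct cpuid_policy *p = d->arch.cpuid;
    if ( p->basic.xsave )
        /* ... */;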
---
xen/arch/x86/hvm/hvm.c | 95 +++++++++++++++-----------------------------------
1 file changed, 29 insertions(+), 66 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index e2060d2..6a3fdaa 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3292,6 +3292,7 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
{
struct vcpu *v = current;
struct domain *d = v->domain;
+ const struct cpuid_policy *p = d->arch.cpuid;
unsigned int count, dummy = 0;
if ( !eax )
@@ -3329,8 +3330,6 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
switch ( input )
{
- unsigned int _ebx, _ecx, _edx;
-
case 0x1:
/* Fix up VLAPIC details. */
*ebx &= 0x00FFFFFFu;
@@ -3413,8 +3412,7 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
break;
case XSTATE_CPUID:
- hvm_cpuid(1, NULL, NULL, &_ecx, NULL);
- if ( !(_ecx & cpufeat_mask(X86_FEATURE_XSAVE)) || count >= 63 )
+ if ( !p->basic.xsave || count >= 63 )
{
*eax = *ebx = *ecx = *edx = 0;
break;
@@ -3426,7 +3424,7 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
uint64_t xfeature_mask = XSTATE_FP_SSE;
uint32_t xstate_size = XSTATE_AREA_MIN_SIZE;
- if ( _ecx & cpufeat_mask(X86_FEATURE_AVX) )
+ if ( p->basic.avx )
{
xfeature_mask |= XSTATE_YMM;
xstate_size = max(xstate_size,
@@ -3434,10 +3432,7 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
xstate_sizes[_XSTATE_YMM]);
}
- _ecx = 0;
- hvm_cpuid(7, NULL, &_ebx, &_ecx, NULL);
-
- if ( _ebx & cpufeat_mask(X86_FEATURE_MPX) )
+ if ( p->feat.mpx )
{
xfeature_mask |= XSTATE_BNDREGS | XSTATE_BNDCSR;
xstate_size = max(xstate_size,
@@ -3445,7 +3440,7 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
xstate_sizes[_XSTATE_BNDCSR]);
}
- if ( _ebx & cpufeat_mask(X86_FEATURE_AVX512F) )
+ if ( p->feat.avx512f )
{
xfeature_mask |= XSTATE_OPMASK | XSTATE_ZMM | XSTATE_HI_ZMM;
xstate_size = max(xstate_size,
@@ -3459,7 +3454,7 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
xstate_sizes[_XSTATE_HI_ZMM]);
}
- if ( _ecx & cpufeat_mask(X86_FEATURE_PKU) )
+ if ( p->feat.pku )
{
xfeature_mask |= XSTATE_PKRU;
xstate_size = max(xstate_size,
@@ -3467,9 +3462,7 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
xstate_sizes[_XSTATE_PKRU]);
}
- hvm_cpuid(0x80000001, NULL, NULL, &_ecx, NULL);
-
- if ( _ecx & cpufeat_mask(X86_FEATURE_LWP) )
+ if ( p->extd.lwp )
{
xfeature_mask |= XSTATE_LWP;
xstate_size = max(xstate_size,
@@ -3493,7 +3486,7 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
case 1:
*eax &= hvm_featureset[FEATURESET_Da1];
- if ( *eax & cpufeat_mask(X86_FEATURE_XSAVES) )
+ if ( p->xstate.xsaves )
{
/*
* Always read CPUID[0xD,1].EBX from hardware, rather than
@@ -3574,14 +3567,11 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
if ( *eax > count )
*eax = count;
- hvm_cpuid(1, NULL, NULL, NULL, &_edx);
- count = _edx & (cpufeat_mask(X86_FEATURE_PAE) |
- cpufeat_mask(X86_FEATURE_PSE36)) ? 36 : 32;
+ count = (p->basic.pae || p->basic.pse36) ? 36 : 32;
if ( *eax < count )
*eax = count;
- hvm_cpuid(0x80000001, NULL, NULL, NULL, &_edx);
- *eax |= (_edx & cpufeat_mask(X86_FEATURE_LM) ? vaddr_bits : 32) << 8;
+ *eax |= (p->extd.lm ? vaddr_bits : 32) << 8;
*ebx &= hvm_featureset[FEATURESET_e8b];
break;
@@ -3648,26 +3638,16 @@ void hvm_rdtsc_intercept(struct cpu_user_regs *regs)
int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
{
struct vcpu *v = current;
+ struct domain *d = v->domain;
uint64_t *var_range_base, *fixed_range_base;
- bool mtrr = false;
int ret = X86EMUL_OKAY;
var_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.var_ranges;
fixed_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.fixed_ranges;
- if ( msr == MSR_MTRRcap ||
- (msr >= MSR_IA32_MTRR_PHYSBASE(0) && msr <= MSR_MTRRdefType) )
- {
- unsigned int edx;
-
- hvm_cpuid(1, NULL, NULL, NULL, &edx);
- if ( edx & cpufeat_mask(X86_FEATURE_MTRR) )
- mtrr = true;
- }
-
switch ( msr )
{
- unsigned int eax, ebx, ecx, index;
+ unsigned int index;
case MSR_EFER:
*msr_content = v->arch.hvm_vcpu.guest_efer;
@@ -3703,53 +3683,49 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
break;
case MSR_MTRRcap:
- if ( !mtrr )
+ if ( !d->arch.cpuid->basic.mtrr )
goto gp_fault;
*msr_content = v->arch.hvm_vcpu.mtrr.mtrr_cap;
break;
case MSR_MTRRdefType:
- if ( !mtrr )
+ if ( !d->arch.cpuid->basic.mtrr )
goto gp_fault;
*msr_content = v->arch.hvm_vcpu.mtrr.def_type
| (v->arch.hvm_vcpu.mtrr.enabled << 10);
break;
case MSR_MTRRfix64K_00000:
- if ( !mtrr )
+ if ( !d->arch.cpuid->basic.mtrr )
goto gp_fault;
*msr_content = fixed_range_base[0];
break;
case MSR_MTRRfix16K_80000:
case MSR_MTRRfix16K_A0000:
- if ( !mtrr )
+ if ( !d->arch.cpuid->basic.mtrr )
goto gp_fault;
index = msr - MSR_MTRRfix16K_80000;
*msr_content = fixed_range_base[index + 1];
break;
case MSR_MTRRfix4K_C0000...MSR_MTRRfix4K_F8000:
- if ( !mtrr )
+ if ( !d->arch.cpuid->basic.mtrr )
goto gp_fault;
index = msr - MSR_MTRRfix4K_C0000;
*msr_content = fixed_range_base[index + 3];
break;
case MSR_IA32_MTRR_PHYSBASE(0)...MSR_IA32_MTRR_PHYSMASK(MTRR_VCNT-1):
- if ( !mtrr )
+ if ( !d->arch.cpuid->basic.mtrr )
goto gp_fault;
index = msr - MSR_IA32_MTRR_PHYSBASE(0);
*msr_content = var_range_base[index];
break;
case MSR_IA32_XSS:
- ecx = 1;
- hvm_cpuid(XSTATE_CPUID, &eax, NULL, &ecx, NULL);
- if ( !(eax & cpufeat_mask(X86_FEATURE_XSAVES)) )
+ if ( !d->arch.cpuid->xstate.xsaves )
goto gp_fault;
*msr_content = v->arch.hvm_vcpu.msr_xss;
break;
case MSR_IA32_BNDCFGS:
- ecx = 0;
- hvm_cpuid(7, NULL, &ebx, &ecx, NULL);
- if ( !(ebx & cpufeat_mask(X86_FEATURE_MPX)) ||
+ if ( !d->arch.cpuid->feat.mpx ||
!hvm_get_guest_bndcfgs(v, msr_content) )
goto gp_fault;
break;
@@ -3790,21 +3766,12 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
bool_t may_defer)
{
struct vcpu *v = current;
- bool mtrr = false;
+ struct domain *d = v->domain;
int ret = X86EMUL_OKAY;
HVMTRACE_3D(MSR_WRITE, msr,
(uint32_t)msr_content, (uint32_t)(msr_content >> 32));
- if ( msr >= MSR_IA32_MTRR_PHYSBASE(0) && msr <= MSR_MTRRdefType )
- {
- unsigned int edx;
-
- hvm_cpuid(1, NULL, NULL, NULL, &edx);
- if ( edx & cpufeat_mask(X86_FEATURE_MTRR) )
- mtrr = true;
- }
-
if ( may_defer && unlikely(monitored_msr(v->domain, msr)) )
{
ASSERT(v->arch.vm_event);
@@ -3820,7 +3787,7 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
switch ( msr )
{
- unsigned int eax, ebx, ecx, index;
+ unsigned int index;
case MSR_EFER:
if ( hvm_set_efer(msr_content) )
@@ -3866,14 +3833,14 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
goto gp_fault;
case MSR_MTRRdefType:
- if ( !mtrr )
+ if ( !d->arch.cpuid->basic.mtrr )
goto gp_fault;
if ( !mtrr_def_type_msr_set(v->domain, &v->arch.hvm_vcpu.mtrr,
msr_content) )
goto gp_fault;
break;
case MSR_MTRRfix64K_00000:
- if ( !mtrr )
+ if ( !d->arch.cpuid->basic.mtrr )
goto gp_fault;
if ( !mtrr_fix_range_msr_set(v->domain, &v->arch.hvm_vcpu.mtrr, 0,
msr_content) )
@@ -3881,7 +3848,7 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
break;
case MSR_MTRRfix16K_80000:
case MSR_MTRRfix16K_A0000:
- if ( !mtrr )
+ if ( !d->arch.cpuid->basic.mtrr )
goto gp_fault;
index = msr - MSR_MTRRfix16K_80000 + 1;
if ( !mtrr_fix_range_msr_set(v->domain, &v->arch.hvm_vcpu.mtrr,
@@ -3889,7 +3856,7 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
goto gp_fault;
break;
case MSR_MTRRfix4K_C0000...MSR_MTRRfix4K_F8000:
- if ( !mtrr )
+ if ( !d->arch.cpuid->basic.mtrr )
goto gp_fault;
index = msr - MSR_MTRRfix4K_C0000 + 3;
if ( !mtrr_fix_range_msr_set(v->domain, &v->arch.hvm_vcpu.mtrr,
@@ -3897,7 +3864,7 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
goto gp_fault;
break;
case MSR_IA32_MTRR_PHYSBASE(0)...MSR_IA32_MTRR_PHYSMASK(MTRR_VCNT-1):
- if ( !mtrr )
+ if ( !d->arch.cpuid->basic.mtrr )
goto gp_fault;
if ( !mtrr_var_range_msr_set(v->domain, &v->arch.hvm_vcpu.mtrr,
msr, msr_content) )
@@ -3905,18 +3872,14 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
break;
case MSR_IA32_XSS:
- ecx = 1;
- hvm_cpuid(XSTATE_CPUID, &eax, NULL, &ecx, NULL);
/* No XSS features currently supported for guests. */
- if ( !(eax & cpufeat_mask(X86_FEATURE_XSAVES)) || msr_content != 0 )
+ if ( !d->arch.cpuid->xstate.xsaves || msr_content != 0 )
goto gp_fault;
v->arch.hvm_vcpu.msr_xss = msr_content;
break;
case MSR_IA32_BNDCFGS:
- ecx = 0;
- hvm_cpuid(7, NULL, &ebx, &ecx, NULL);
- if ( !(ebx & cpufeat_mask(X86_FEATURE_MPX)) ||
+ if ( !d->arch.cpuid->feat.mpx ||
!hvm_set_guest_bndcfgs(v, msr_content) )
goto gp_fault;
break;
--
2.1.4