[Xen-devel] [PATCH 5/8] x86/vMTRR: pass domain to mtrr_*_msr_set()
This is in preparation for the next patch, and brings mtrr_def_type_msr_set()
and mtrr_fix_range_msr_set() in sync with mtrr_var_range_msr_set() in
this regard.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3297,13 +3297,15 @@ int hvm_msr_write_intercept(unsigned int
case MSR_MTRRdefType:
if ( !mtrr )
goto gp_fault;
- if ( !mtrr_def_type_msr_set(&v->arch.hvm_vcpu.mtrr, msr_content) )
+ if ( !mtrr_def_type_msr_set(v->domain, &v->arch.hvm_vcpu.mtrr,
+ msr_content) )
goto gp_fault;
break;
case MSR_MTRRfix64K_00000:
if ( !mtrr )
goto gp_fault;
- if ( !mtrr_fix_range_msr_set(&v->arch.hvm_vcpu.mtrr, 0, msr_content) )
+ if ( !mtrr_fix_range_msr_set(v->domain, &v->arch.hvm_vcpu.mtrr, 0,
+ msr_content) )
goto gp_fault;
break;
case MSR_MTRRfix16K_80000:
@@ -3311,7 +3313,7 @@ int hvm_msr_write_intercept(unsigned int
if ( !mtrr )
goto gp_fault;
index = msr - MSR_MTRRfix16K_80000 + 1;
- if ( !mtrr_fix_range_msr_set(&v->arch.hvm_vcpu.mtrr,
+ if ( !mtrr_fix_range_msr_set(v->domain, &v->arch.hvm_vcpu.mtrr,
index, msr_content) )
goto gp_fault;
break;
@@ -3319,7 +3321,7 @@ int hvm_msr_write_intercept(unsigned int
if ( !mtrr )
goto gp_fault;
index = msr - MSR_MTRRfix4K_C0000 + 3;
- if ( !mtrr_fix_range_msr_set(&v->arch.hvm_vcpu.mtrr,
+ if ( !mtrr_fix_range_msr_set(v->domain, &v->arch.hvm_vcpu.mtrr,
index, msr_content) )
goto gp_fault;
break;
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -412,7 +412,8 @@ static inline bool_t valid_mtrr_type(uin
return 0;
}
-bool_t mtrr_def_type_msr_set(struct mtrr_state *m, uint64_t msr_content)
+bool_t mtrr_def_type_msr_set(struct domain *d, struct mtrr_state *m,
+ uint64_t msr_content)
{
uint8_t def_type = msr_content & 0xff;
uint8_t enabled = (msr_content >> 10) & 0x3;
@@ -436,8 +437,8 @@ bool_t mtrr_def_type_msr_set(struct mtrr
return 1;
}
-bool_t mtrr_fix_range_msr_set(struct mtrr_state *m, uint32_t row,
- uint64_t msr_content)
+bool_t mtrr_fix_range_msr_set(struct domain *d, struct mtrr_state *m,
+ uint32_t row, uint64_t msr_content)
{
uint64_t *fixed_range_base = (uint64_t *)m->fixed_ranges;
@@ -669,7 +670,7 @@ static int hvm_load_mtrr_msr(struct doma
mtrr_state->mtrr_cap = hw_mtrr.msr_mtrr_cap;
for ( i = 0; i < NUM_FIXED_MSR; i++ )
- mtrr_fix_range_msr_set(mtrr_state, i, hw_mtrr.msr_mtrr_fixed[i]);
+ mtrr_fix_range_msr_set(d, mtrr_state, i, hw_mtrr.msr_mtrr_fixed[i]);
for ( i = 0; i < MTRR_VCNT; i++ )
{
@@ -681,7 +682,7 @@ static int hvm_load_mtrr_msr(struct doma
hw_mtrr.msr_mtrr_var[i * 2 + 1]);
}
- mtrr_def_type_msr_set(mtrr_state, hw_mtrr.msr_mtrr_def_type);
+ mtrr_def_type_msr_set(d, mtrr_state, hw_mtrr.msr_mtrr_def_type);
return 0;
}
--- a/xen/include/asm-x86/mtrr.h
+++ b/xen/include/asm-x86/mtrr.h
@@ -82,12 +82,12 @@ extern void mtrr_aps_sync_begin(void);
extern void mtrr_aps_sync_end(void);
extern void mtrr_bp_restore(void);
-extern bool_t mtrr_var_range_msr_set(
- struct domain *d, struct mtrr_state *m,
- uint32_t msr, uint64_t msr_content);
-extern bool_t mtrr_fix_range_msr_set(struct mtrr_state *v,
- uint32_t row, uint64_t msr_content);
-extern bool_t mtrr_def_type_msr_set(struct mtrr_state *v, uint64_t msr_content);
+extern bool_t mtrr_var_range_msr_set(struct domain *, struct mtrr_state *,
+ uint32_t msr, uint64_t msr_content);
+extern bool_t mtrr_fix_range_msr_set(struct domain *, struct mtrr_state *,
+ uint32_t row, uint64_t msr_content);
+extern bool_t mtrr_def_type_msr_set(struct domain *, struct mtrr_state *,
+ uint64_t msr_content);
extern bool_t pat_msr_set(uint64_t *pat, uint64_t msr);
bool_t is_var_mtrr_overlapped(struct mtrr_state *m);
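For illustration only (not part of the patch), a minimal sketch of how a caller
would invoke the setters with their updated signatures, which now all take the
owning struct domain first. The helper name example_restore_mtrr_defaults and
the chosen MSR values are hypothetical:

/* Illustrative sketch only -- not part of the patch. */
static void example_restore_mtrr_defaults(struct domain *d, struct mtrr_state *m)
{
    /* Fixed-range row 0 (MSR_MTRRfix64K_00000): write-back (type 6) in every byte. */
    if ( !mtrr_fix_range_msr_set(d, m, 0, 0x0606060606060606ULL) )
        return;

    /* MSR_MTRRdefType: enable MTRRs (bit 11), default memory type write-back (6). */
    mtrr_def_type_msr_set(d, m, (1u << 11) | 6);
}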
Attachment:
x86-HVM-MTRR-set-domain.patch