[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH v10 06/11] x86/hvm: Introduce hvm_save_mtrr_msr_one func
> -----Original Message----- > From: Alexandru Isaila [mailto:aisaila@xxxxxxxxxxxxxxx] > Sent: 04 July 2018 14:32 > To: xen-devel@xxxxxxxxxxxxx > Cc: Ian Jackson <Ian.Jackson@xxxxxxxxxx>; Wei Liu <wei.liu2@xxxxxxxxxx>; > jbeulich@xxxxxxxx; Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>; Paul > Durrant <Paul.Durrant@xxxxxxxxxx>; Alexandru Isaila > <aisaila@xxxxxxxxxxxxxxx> > Subject: [PATCH v10 06/11] x86/hvm: Introduce hvm_save_mtrr_msr_one > func > > This is used to save data from a single instance. > > Signed-off-by: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx> > > --- > Changes since V9: > - Change return of the save_one func to return hvm_save_entry. > > Note: This patch is based on Roger Pau Monne's series[1] > --- > xen/arch/x86/hvm/mtrr.c | 75 +++++++++++++++++++++++++--------------- > --------- > 1 file changed, 39 insertions(+), 36 deletions(-) > > diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c > index 48facbb..9bbff59 100644 > --- a/xen/arch/x86/hvm/mtrr.c > +++ b/xen/arch/x86/hvm/mtrr.c > @@ -718,6 +718,44 @@ int hvm_set_mem_pinned_cacheattr(struct domain > *d, uint64_t gfn_start, > return 0; > } > > +static int hvm_save_mtrr_msr_one(struct vcpu *v, hvm_domain_context_t > *h) > +{ > + const struct mtrr_state *mtrr_state = &v->arch.hvm_vcpu.mtrr; > + struct hvm_hw_mtrr hw_mtrr; > + unsigned int i; I believe coding style says there should be a blank line here. 
Paul > + memset(&hw_mtrr, 0, sizeof(hw_mtrr)); > + hw_mtrr.msr_mtrr_def_type = mtrr_state->def_type | > + MASK_INSR(mtrr_state->fixed_enabled, > + MTRRdefType_FE) | > + MASK_INSR(mtrr_state->enabled, > MTRRdefType_E); > + hw_mtrr.msr_mtrr_cap = mtrr_state->mtrr_cap; > + > + if ( MASK_EXTR(hw_mtrr.msr_mtrr_cap, MTRRcap_VCNT) > > + (ARRAY_SIZE(hw_mtrr.msr_mtrr_var) / 2) ) > + { > + dprintk(XENLOG_G_ERR, > + "HVM save: %pv: too many (%lu) variable range MTRRs\n", > + v, MASK_EXTR(hw_mtrr.msr_mtrr_cap, MTRRcap_VCNT)); > + return -EINVAL; > + } > + hvm_get_guest_pat(v, &hw_mtrr.msr_pat_cr); > + for ( i = 0; i < MASK_EXTR(hw_mtrr.msr_mtrr_cap, MTRRcap_VCNT); i++ ) > + { > + /* save physbase */ > + hw_mtrr.msr_mtrr_var[i*2] = > + ((uint64_t*)mtrr_state->var_ranges)[i*2]; > + /* save physmask */ > + hw_mtrr.msr_mtrr_var[i*2+1] = > + ((uint64_t*)mtrr_state->var_ranges)[i*2+1]; > + } > + > + for ( i = 0; i < NUM_FIXED_MSR; i++ ) > + hw_mtrr.msr_mtrr_fixed[i] = > + ((uint64_t*)mtrr_state->fixed_ranges)[i]; > + > + return hvm_save_entry(MTRR, v->vcpu_id, h, &hw_mtrr); > + } > + > static int hvm_save_mtrr_msr(struct domain *d, hvm_domain_context_t > *h) > { > struct vcpu *v; > @@ -725,42 +763,7 @@ static int hvm_save_mtrr_msr(struct domain *d, > hvm_domain_context_t *h) > /* save mtrr&pat */ > for_each_vcpu(d, v) > { > - const struct mtrr_state *mtrr_state = &v->arch.hvm_vcpu.mtrr; > - struct hvm_hw_mtrr hw_mtrr = { > - .msr_mtrr_def_type = mtrr_state->def_type | > - MASK_INSR(mtrr_state->fixed_enabled, > - MTRRdefType_FE) | > - MASK_INSR(mtrr_state->enabled, > MTRRdefType_E), > - .msr_mtrr_cap = mtrr_state->mtrr_cap, > - }; > - unsigned int i; > - > - if ( MASK_EXTR(hw_mtrr.msr_mtrr_cap, MTRRcap_VCNT) > > - (ARRAY_SIZE(hw_mtrr.msr_mtrr_var) / 2) ) > - { > - dprintk(XENLOG_G_ERR, > - "HVM save: %pv: too many (%lu) variable range MTRRs\n", > - v, MASK_EXTR(hw_mtrr.msr_mtrr_cap, MTRRcap_VCNT)); > - return -EINVAL; > - } > - > - hvm_get_guest_pat(v, &hw_mtrr.msr_pat_cr); > - > - 
for ( i = 0; i < MASK_EXTR(hw_mtrr.msr_mtrr_cap, MTRRcap_VCNT); i++ > ) > - { > - /* save physbase */ > - hw_mtrr.msr_mtrr_var[i*2] = > - ((uint64_t*)mtrr_state->var_ranges)[i*2]; > - /* save physmask */ > - hw_mtrr.msr_mtrr_var[i*2+1] = > - ((uint64_t*)mtrr_state->var_ranges)[i*2+1]; > - } > - > - for ( i = 0; i < NUM_FIXED_MSR; i++ ) > - hw_mtrr.msr_mtrr_fixed[i] = > - ((uint64_t*)mtrr_state->fixed_ranges)[i]; > - > - if ( hvm_save_entry(MTRR, v->vcpu_id, h, &hw_mtrr) != 0 ) > + if ( hvm_save_mtrr_msr_one(v, h) != 0 ) > return 1; > } > return 0; > -- > 2.7.4 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxxxxxxxxx https://lists.xenproject.org/mailman/listinfo/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.