Re: [Xen-devel] [PATCH 01/15] x86/hvm: Rename hvm_emulate_init() and hvm_emulate_prepare() for clarity
> -----Original Message-----
> From: Andrew Cooper [mailto:andrew.cooper3@xxxxxxxxxx]
> Sent: 23 November 2016 15:39
> To: Xen-devel <xen-devel@xxxxxxxxxxxxx>
> Cc: Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>; Jan Beulich <JBeulich@xxxxxxxx>;
> Paul Durrant <Paul.Durrant@xxxxxxxxxx>; Jun Nakajima <jun.nakajima@xxxxxxxxx>;
> Kevin Tian <kevin.tian@xxxxxxxxx>; Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>;
> Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>; Wei Liu <wei.liu2@xxxxxxxxxx>
> Subject: [PATCH 01/15] x86/hvm: Rename hvm_emulate_init() and hvm_emulate_prepare() for clarity
>
> * Move hvm_emulate_init() to immediately hvm_emulate_prepare(), as they are
>   very closely related.
> * Rename hvm_emulate_prepare() to hvm_emulate_init_once() and
>   hvm_emulate_init() to hvm_emulate_init_per_insn() to make it clearer how to
>   and when to use them.
>
> No functional change.
>
> Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>

Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>

> ---
> CC: Jan Beulich <JBeulich@xxxxxxxx>
> CC: Paul Durrant <paul.durrant@xxxxxxxxxx>
> CC: Jun Nakajima <jun.nakajima@xxxxxxxxx>
> CC: Kevin Tian <kevin.tian@xxxxxxxxx>
> CC: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
> CC: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
> CC: Wei Liu <wei.liu2@xxxxxxxxxx>
>
> As hvm_emulate_prepare() was new in 4.8, it would be a good idea to take this
> patch to avoid future confusion on the stable-4.8 branch
> ---
>  xen/arch/x86/hvm/emulate.c        | 111 +++++++++++++++++++-------------------
>  xen/arch/x86/hvm/hvm.c            |   2 +-
>  xen/arch/x86/hvm/io.c             |   2 +-
>  xen/arch/x86/hvm/ioreq.c          |   2 +-
>  xen/arch/x86/hvm/svm/emulate.c    |   4 +-
>  xen/arch/x86/hvm/vmx/realmode.c   |   2 +-
>  xen/include/asm-x86/hvm/emulate.h |   6 ++-
>  7 files changed, 66 insertions(+), 63 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
> index e9b8f8c..3ab0e8e 100644
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -1755,57 +1755,6 @@ static const struct x86_emulate_ops hvm_emulate_ops_no_write = {
>      .vmfunc = hvmemul_vmfunc,
>  };
>
> -void hvm_emulate_init(
> -    struct hvm_emulate_ctxt *hvmemul_ctxt,
> -    const unsigned char *insn_buf,
> -    unsigned int insn_bytes)
> -{
> -    struct vcpu *curr = current;
> -    unsigned int pfec = PFEC_page_present;
> -    unsigned long addr;
> -
> -    if ( hvm_long_mode_enabled(curr) &&
> -         hvmemul_ctxt->seg_reg[x86_seg_cs].attr.fields.l )
> -    {
> -        hvmemul_ctxt->ctxt.addr_size = hvmemul_ctxt->ctxt.sp_size = 64;
> -    }
> -    else
> -    {
> -        hvmemul_ctxt->ctxt.addr_size =
> -            hvmemul_ctxt->seg_reg[x86_seg_cs].attr.fields.db ? 32 : 16;
> -        hvmemul_ctxt->ctxt.sp_size =
> -            hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.db ? 32 : 16;
> -    }
> -
> -    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
> -        pfec |= PFEC_user_mode;
> -
> -    hvmemul_ctxt->insn_buf_eip = hvmemul_ctxt->ctxt.regs->eip;
> -    if ( !insn_bytes )
> -    {
> -        hvmemul_ctxt->insn_buf_bytes =
> -            hvm_get_insn_bytes(curr, hvmemul_ctxt->insn_buf) ?:
> -            (hvm_virtual_to_linear_addr(x86_seg_cs,
> -                                        &hvmemul_ctxt->seg_reg[x86_seg_cs],
> -                                        hvmemul_ctxt->insn_buf_eip,
> -                                        sizeof(hvmemul_ctxt->insn_buf),
> -                                        hvm_access_insn_fetch,
> -                                        hvmemul_ctxt->ctxt.addr_size,
> -                                        &addr) &&
> -             hvm_fetch_from_guest_virt_nofault(hvmemul_ctxt->insn_buf, addr,
> -                                               sizeof(hvmemul_ctxt->insn_buf),
> -                                               pfec) == HVMCOPY_okay) ?
> -            sizeof(hvmemul_ctxt->insn_buf) : 0;
> -    }
> -    else
> -    {
> -        hvmemul_ctxt->insn_buf_bytes = insn_bytes;
> -        memcpy(hvmemul_ctxt->insn_buf, insn_buf, insn_bytes);
> -    }
> -
> -    hvmemul_ctxt->exn_pending = 0;
> -}
> -
>  static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
>      const struct x86_emulate_ops *ops)
>  {
> @@ -1815,7 +1764,8 @@ static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
>      struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
>      int rc;
>
> -    hvm_emulate_init(hvmemul_ctxt, vio->mmio_insn, vio->mmio_insn_bytes);
> +    hvm_emulate_init_per_insn(hvmemul_ctxt, vio->mmio_insn,
> +                              vio->mmio_insn_bytes);
>
>      vio->mmio_retry = 0;
>
> @@ -1915,7 +1865,7 @@ int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla)
>      else
>          ops = &hvm_ro_emulate_ops_mmio;
>
> -    hvm_emulate_prepare(&ctxt, guest_cpu_user_regs());
> +    hvm_emulate_init_once(&ctxt, guest_cpu_user_regs());
>      ctxt.ctxt.data = &mmio_ro_ctxt;
>      rc = _hvm_emulate_one(&ctxt, ops);
>      switch ( rc )
> @@ -1940,7 +1890,7 @@ void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr,
>      struct hvm_emulate_ctxt ctx = {{ 0 }};
>      int rc;
>
> -    hvm_emulate_prepare(&ctx, guest_cpu_user_regs());
> +    hvm_emulate_init_once(&ctx, guest_cpu_user_regs());
>
>      switch ( kind )
>      {
> @@ -1992,7 +1942,7 @@ void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr,
>      hvm_emulate_writeback(&ctx);
>  }
>
> -void hvm_emulate_prepare(
> +void hvm_emulate_init_once(
>      struct hvm_emulate_ctxt *hvmemul_ctxt,
>      struct cpu_user_regs *regs)
>  {
> @@ -2006,6 +1956,57 @@ void hvm_emulate_prepare(
>      hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt);
>  }
>
> +void hvm_emulate_init_per_insn(
> +    struct hvm_emulate_ctxt *hvmemul_ctxt,
> +    const unsigned char *insn_buf,
> +    unsigned int insn_bytes)
> +{
> +    struct vcpu *curr = current;
> +    unsigned int pfec = PFEC_page_present;
> +    unsigned long addr;
> +
> +    if ( hvm_long_mode_enabled(curr) &&
> +         hvmemul_ctxt->seg_reg[x86_seg_cs].attr.fields.l )
> +    {
> +        hvmemul_ctxt->ctxt.addr_size = hvmemul_ctxt->ctxt.sp_size = 64;
> +    }
> +    else
> +    {
> +        hvmemul_ctxt->ctxt.addr_size =
> +            hvmemul_ctxt->seg_reg[x86_seg_cs].attr.fields.db ? 32 : 16;
> +        hvmemul_ctxt->ctxt.sp_size =
> +            hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.db ? 32 : 16;
> +    }
> +
> +    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
> +        pfec |= PFEC_user_mode;
> +
> +    hvmemul_ctxt->insn_buf_eip = hvmemul_ctxt->ctxt.regs->eip;
> +    if ( !insn_bytes )
> +    {
> +        hvmemul_ctxt->insn_buf_bytes =
> +            hvm_get_insn_bytes(curr, hvmemul_ctxt->insn_buf) ?:
> +            (hvm_virtual_to_linear_addr(x86_seg_cs,
> +                                        &hvmemul_ctxt->seg_reg[x86_seg_cs],
> +                                        hvmemul_ctxt->insn_buf_eip,
> +                                        sizeof(hvmemul_ctxt->insn_buf),
> +                                        hvm_access_insn_fetch,
> +                                        hvmemul_ctxt->ctxt.addr_size,
> +                                        &addr) &&
> +             hvm_fetch_from_guest_virt_nofault(hvmemul_ctxt->insn_buf, addr,
> +                                               sizeof(hvmemul_ctxt->insn_buf),
> +                                               pfec) == HVMCOPY_okay) ?
> +            sizeof(hvmemul_ctxt->insn_buf) : 0;
> +    }
> +    else
> +    {
> +        hvmemul_ctxt->insn_buf_bytes = insn_bytes;
> +        memcpy(hvmemul_ctxt->insn_buf, insn_buf, insn_bytes);
> +    }
> +
> +    hvmemul_ctxt->exn_pending = 0;
> +}
> +
>  void hvm_emulate_writeback(
>      struct hvm_emulate_ctxt *hvmemul_ctxt)
>  {
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index f76dd90..25dc759 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -4058,7 +4058,7 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
>  {
>      struct hvm_emulate_ctxt ctxt;
>
> -    hvm_emulate_prepare(&ctxt, regs);
> +    hvm_emulate_init_once(&ctxt, regs);
>
>      if ( opt_hvm_fep )
>      {
> diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
> index 1e7a5f9..7305801 100644
> --- a/xen/arch/x86/hvm/io.c
> +++ b/xen/arch/x86/hvm/io.c
> @@ -87,7 +87,7 @@ int handle_mmio(void)
>
>      ASSERT(!is_pvh_vcpu(curr));
>
> -    hvm_emulate_prepare(&ctxt, guest_cpu_user_regs());
> +    hvm_emulate_init_once(&ctxt, guest_cpu_user_regs());
>
>      rc = hvm_emulate_one(&ctxt);
>
> diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
> index d2245e2..88071ab 100644
> --- a/xen/arch/x86/hvm/ioreq.c
> +++ b/xen/arch/x86/hvm/ioreq.c
> @@ -167,7 +167,7 @@ bool_t handle_hvm_io_completion(struct vcpu *v)
>      {
>          struct hvm_emulate_ctxt ctxt;
>
> -        hvm_emulate_prepare(&ctxt, guest_cpu_user_regs());
> +        hvm_emulate_init_once(&ctxt, guest_cpu_user_regs());
>          vmx_realmode_emulate_one(&ctxt);
>          hvm_emulate_writeback(&ctxt);
>
> diff --git a/xen/arch/x86/hvm/svm/emulate.c b/xen/arch/x86/hvm/svm/emulate.c
> index a5545ea..9cdbe9e 100644
> --- a/xen/arch/x86/hvm/svm/emulate.c
> +++ b/xen/arch/x86/hvm/svm/emulate.c
> @@ -107,8 +107,8 @@ int __get_instruction_length_from_list(struct vcpu *v,
>  #endif
>
>      ASSERT(v == current);
> -    hvm_emulate_prepare(&ctxt, guest_cpu_user_regs());
> -    hvm_emulate_init(&ctxt, NULL, 0);
> +    hvm_emulate_init_once(&ctxt, guest_cpu_user_regs());
> +    hvm_emulate_init_per_insn(&ctxt, NULL, 0);
>      state = x86_decode_insn(&ctxt.ctxt, hvmemul_insn_fetch);
>      if ( IS_ERR_OR_NULL(state) )
>          return 0;
> diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c
> index e83a61f..9002638 100644
> --- a/xen/arch/x86/hvm/vmx/realmode.c
> +++ b/xen/arch/x86/hvm/vmx/realmode.c
> @@ -179,7 +179,7 @@ void vmx_realmode(struct cpu_user_regs *regs)
>      if ( intr_info & INTR_INFO_VALID_MASK )
>          __vmwrite(VM_ENTRY_INTR_INFO, 0);
>
> -    hvm_emulate_prepare(&hvmemul_ctxt, regs);
> +    hvm_emulate_init_once(&hvmemul_ctxt, regs);
>
>      /* Only deliver interrupts into emulated real mode. */
>      if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) &&
> diff --git a/xen/include/asm-x86/hvm/emulate.h b/xen/include/asm-x86/hvm/emulate.h
> index f610673..d4186a2 100644
> --- a/xen/include/asm-x86/hvm/emulate.h
> +++ b/xen/include/asm-x86/hvm/emulate.h
> @@ -51,10 +51,12 @@ int hvm_emulate_one_no_write(
>  void hvm_emulate_one_vm_event(enum emul_kind kind,
>      unsigned int trapnr,
>      unsigned int errcode);
> -void hvm_emulate_prepare(
> +/* Must be called once to set up hvmemul state. */
> +void hvm_emulate_init_once(
>      struct hvm_emulate_ctxt *hvmemul_ctxt,
>      struct cpu_user_regs *regs);
> -void hvm_emulate_init(
> +/* Must be called once before each instruction emulated. */
> +void hvm_emulate_init_per_insn(
>      struct hvm_emulate_ctxt *hvmemul_ctxt,
>      const unsigned char *insn_buf,
>      unsigned int insn_bytes);
> --
> 2.1.4

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel
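
[Editorial sketch, not part of the patch: the calling pattern the new names are meant to convey, assembled from the hunks above. The NULL/0 arguments and the surrounding comments follow the svm/emulate.c hunk and are illustrative only.]

    struct hvm_emulate_ctxt ctxt;

    /* Once per emulation context. */
    hvm_emulate_init_once(&ctxt, guest_cpu_user_regs());

    /* Before each instruction; NULL/0 asks the emulator to fetch the insn itself. */
    hvm_emulate_init_per_insn(&ctxt, NULL, 0);

    /* ... emulate the instruction ... */

    /* Commit register/segment state back to the vcpu. */
    hvm_emulate_writeback(&ctxt);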