Re: [Xen-devel] [PATCH v3 2/3] x86/hvm: Break out __hvm_copy()'s translation logic
> -----Original Message-----
> From: Alexandru Isaila [mailto:aisaila@xxxxxxxxxxxxxxx]
> Sent: 19 September 2017 15:14
> To: xen-devel@xxxxxxxxxxxxx
> Cc: Tim (Xen.org) <tim@xxxxxxx>; George Dunlap <George.Dunlap@xxxxxxxxxx>;
> jbeulich@xxxxxxxx; Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>;
> Ian Jackson <Ian.Jackson@xxxxxxxxxx>; konrad.wilk@xxxxxxxxxx;
> sstabellini@xxxxxxxxxx; Wei Liu <wei.liu2@xxxxxxxxxx>;
> Paul Durrant <Paul.Durrant@xxxxxxxxxx>; boris.ostrovsky@xxxxxxxxxx;
> suravee.suthikulpanit@xxxxxxx; jun.nakajima@xxxxxxxxx;
> Kevin Tian <kevin.tian@xxxxxxxxx>; Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>
> Subject: [PATCH v3 2/3] x86/hvm: Break out __hvm_copy()'s translation logic
>
> From: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
>
> It will be reused by later changes.
>
> Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Signed-off-by: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>

Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>

>
> ---
> Changes since V2:
>         - Changed _gfn() to gaddr_to_gfn
>         - Changed gfn_x to gfn_to_gaddr
> ---
>  xen/arch/x86/hvm/hvm.c            | 144 +++++++++++++++++++++++---------------
>  xen/include/asm-x86/hvm/support.h |  12 ++++
>  2 files changed, 98 insertions(+), 58 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 488acbf..93394c1 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -3069,6 +3069,83 @@ void hvm_task_switch(
>      hvm_unmap_entry(nptss_desc);
>  }
>
> +enum hvm_translation_result hvm_translate_get_page(
> +    struct vcpu *v, unsigned long addr, bool linear, uint32_t pfec,
> +    pagefault_info_t *pfinfo, struct page_info **page_p,
> +    gfn_t *gfn_p, p2m_type_t *p2mt_p)
> +{
> +    struct page_info *page;
> +    p2m_type_t p2mt;
> +    gfn_t gfn;
> +
> +    if ( linear )
> +    {
> +        gfn = _gfn(paging_gva_to_gfn(v, addr, &pfec));
> +
> +        if ( gfn_eq(gfn, INVALID_GFN) )
> +        {
> +            if ( pfec & PFEC_page_paged )
> +                return HVMTRANS_gfn_paged_out;
> +
> +            if ( pfec & PFEC_page_shared )
> +                return HVMTRANS_gfn_shared;
> +
> +            if ( pfinfo )
> +            {
> +                pfinfo->linear = addr;
> +                pfinfo->ec = pfec & ~PFEC_implicit;
> +            }
> +
> +            return HVMTRANS_bad_linear_to_gfn;
> +        }
> +    }
> +    else
> +    {
> +        gfn = gaddr_to_gfn(addr);
> +        ASSERT(!pfinfo);
> +    }
> +
> +    /*
> +     * No need to do the P2M lookup for internally handled MMIO, benefiting
> +     * - 32-bit WinXP (& older Windows) on AMD CPUs for LAPIC accesses,
> +     * - newer Windows (like Server 2012) for HPET accesses.
> +     */
> +    if ( v == current
> +         && !nestedhvm_vcpu_in_guestmode(v)
> +         && hvm_mmio_internal(gfn_to_gaddr(gfn)) )
> +        return HVMTRANS_bad_gfn_to_mfn;
> +
> +    page = get_page_from_gfn(v->domain, gfn_x(gfn), &p2mt, P2M_UNSHARE);
> +
> +    if ( !page )
> +        return HVMTRANS_bad_gfn_to_mfn;
> +
> +    if ( p2m_is_paging(p2mt) )
> +    {
> +        put_page(page);
> +        p2m_mem_paging_populate(v->domain, gfn_x(gfn));
> +        return HVMTRANS_gfn_paged_out;
> +    }
> +    if ( p2m_is_shared(p2mt) )
> +    {
> +        put_page(page);
> +        return HVMTRANS_gfn_shared;
> +    }
> +    if ( p2m_is_grant(p2mt) )
> +    {
> +        put_page(page);
> +        return HVMTRANS_unhandleable;
> +    }
> +
> +    *page_p = page;
> +    if ( gfn_p )
> +        *gfn_p = gfn;
> +    if ( p2mt_p )
> +        *p2mt_p = p2mt;
> +
> +    return HVMTRANS_okay;
> +}
> +
>  #define HVMCOPY_from_guest (0u<<0)
>  #define HVMCOPY_to_guest   (1u<<0)
>  #define HVMCOPY_phys       (0u<<2)
> @@ -3077,7 +3154,7 @@ static enum hvm_translation_result __hvm_copy(
>      void *buf, paddr_t addr, int size, struct vcpu *v, unsigned int flags,
>      uint32_t pfec, pagefault_info_t *pfinfo)
>  {
> -    unsigned long gfn;
> +    gfn_t gfn;
>      struct page_info *page;
>      p2m_type_t p2mt;
>      char *p;
> @@ -3103,65 +3180,15 @@ static enum hvm_translation_result __hvm_copy(
>
>      while ( todo > 0 )
>      {
> +        enum hvm_translation_result res;
>          paddr_t gpa = addr & ~PAGE_MASK;
>
>          count = min_t(int, PAGE_SIZE - gpa, todo);
>
> -        if ( flags & HVMCOPY_linear )
> -        {
> -            gfn = paging_gva_to_gfn(v, addr, &pfec);
> -            if ( gfn == gfn_x(INVALID_GFN) )
> -            {
> -                if ( pfec & PFEC_page_paged )
> -                    return HVMTRANS_gfn_paged_out;
> -                if ( pfec & PFEC_page_shared )
> -                    return HVMTRANS_gfn_shared;
> -                if ( pfinfo )
> -                {
> -                    pfinfo->linear = addr;
> -                    pfinfo->ec = pfec & ~PFEC_implicit;
> -                }
> -                return HVMTRANS_bad_linear_to_gfn;
> -            }
> -            gpa |= (paddr_t)gfn << PAGE_SHIFT;
> -        }
> -        else
> -        {
> -            gfn = addr >> PAGE_SHIFT;
> -            gpa = addr;
> -        }
> -
> -        /*
> -         * No need to do the P2M lookup for internally handled MMIO, benefiting
> -         * - 32-bit WinXP (& older Windows) on AMD CPUs for LAPIC accesses,
> -         * - newer Windows (like Server 2012) for HPET accesses.
> -         */
> -        if ( v == current
> -             && !nestedhvm_vcpu_in_guestmode(v)
> -             && hvm_mmio_internal(gpa) )
> -            return HVMTRANS_bad_gfn_to_mfn;
> -
> -        page = get_page_from_gfn(v->domain, gfn, &p2mt, P2M_UNSHARE);
> -
> -        if ( !page )
> -            return HVMTRANS_bad_gfn_to_mfn;
> -
> -        if ( p2m_is_paging(p2mt) )
> -        {
> -            put_page(page);
> -            p2m_mem_paging_populate(v->domain, gfn);
> -            return HVMTRANS_gfn_paged_out;
> -        }
> -        if ( p2m_is_shared(p2mt) )
> -        {
> -            put_page(page);
> -            return HVMTRANS_gfn_shared;
> -        }
> -        if ( p2m_is_grant(p2mt) )
> -        {
> -            put_page(page);
> -            return HVMTRANS_unhandleable;
> -        }
> +        res = hvm_translate_get_page(v, addr, flags & HVMCOPY_linear,
> +                                     pfec, pfinfo, &page, &gfn, &p2mt);
> +        if ( res != HVMTRANS_okay )
> +            return res;
>
>          p = (char *)__map_domain_page(page) + (addr & ~PAGE_MASK);
>
> @@ -3170,10 +3197,11 @@ static enum hvm_translation_result __hvm_copy(
>              if ( p2m_is_discard_write(p2mt) )
>              {
>                  static unsigned long lastpage;
> -                if ( xchg(&lastpage, gfn) != gfn )
> +
> +                if ( xchg(&lastpage, gfn_x(gfn)) != gfn_x(gfn) )
>                      dprintk(XENLOG_G_DEBUG,
>                              "%pv attempted write to read-only gfn %#lx (mfn=%#lx)\n",
> -                            v, gfn, page_to_mfn(page));
> +                            v, gfn_x(gfn), page_to_mfn(page));
>              }
>              else
>              {
> diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
> index e3b035d..d784fc1 100644
> --- a/xen/include/asm-x86/hvm/support.h
> +++ b/xen/include/asm-x86/hvm/support.h
> @@ -24,6 +24,7 @@
>  #include <xen/sched.h>
>  #include <asm/hvm/save.h>
>  #include <asm/processor.h>
> +#include <asm/p2m.h>
>
>  #ifndef NDEBUG
>  #define DBG_LEVEL_0 (1 << 0)
> @@ -103,6 +104,17 @@ enum hvm_translation_result hvm_fetch_from_guest_linear(
>      void *buf, unsigned long addr, int size, uint32_t pfec,
>      pagefault_info_t *pfinfo);
>
> +/*
> + * Get a reference on the page under an HVM physical or linear address.  If
> + * linear, a pagewalk is performed using pfec (fault details optionally in
> + * pfinfo).
> + * On success, returns HVMTRANS_okay with a reference taken on **_page.
> + */
> +enum hvm_translation_result hvm_translate_get_page(
> +    struct vcpu *v, unsigned long addr, bool linear, uint32_t pfec,
> +    pagefault_info_t *pfinfo, struct page_info **page_p,
> +    gfn_t *gfn_p, p2m_type_t *p2mt_p);
> +
>  #define HVM_HCALL_completed  0 /* hypercall completed - no further action */
>  #define HVM_HCALL_preempted  1 /* hypercall preempted - re-execute VMCALL */
>  int hvm_hypercall(struct cpu_user_regs *regs);
> --
> 2.7.4
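For readers following the series: the point of breaking out hvm_translate_get_page() is that a later caller can take its own page reference and hold a mapping, instead of round-tripping through __hvm_copy() for every access. The sketch below is illustrative only and is not part of this patch; write_to_guest_linear_page() is a hypothetical name, and it assumes the access stays within one page, mirroring the relevant pieces of __hvm_copy().

/*
 * Illustrative sketch only -- not part of this patch.  Copies a
 * page-contained buffer into guest memory through a linear address,
 * using the new hvm_translate_get_page() helper.
 */
static enum hvm_translation_result write_to_guest_linear_page(
    struct vcpu *v, unsigned long linear, const void *buf, unsigned int len,
    uint32_t pfec, pagefault_info_t *pfinfo)
{
    struct page_info *page;
    p2m_type_t p2mt;
    enum hvm_translation_result res;
    char *p;

    /* Assumption: the access does not cross a page boundary. */
    ASSERT(((linear & ~PAGE_MASK) + len) <= PAGE_SIZE);

    /* Translate, check the p2m type, and take a reference on the page. */
    res = hvm_translate_get_page(v, linear, true, pfec | PFEC_write_access,
                                 pfinfo, &page, NULL, &p2mt);
    if ( res != HVMTRANS_okay )
        return res;

    if ( p2m_is_discard_write(p2mt) )
    {
        /* Writes to read-only memory are silently dropped, as in __hvm_copy(). */
        put_page(page);
        return HVMTRANS_okay;
    }

    p = (char *)__map_domain_page(page) + (linear & ~PAGE_MASK);
    memcpy(p, buf, len);
    unmap_domain_page(p);

    /* A full implementation would also mark the page dirty for log-dirty mode. */
    put_page(page);

    return HVMTRANS_okay;
}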