
Re: [Xen-devel] [PATCH RFC v2 1/2] x86/emulate: Move hvmemul_linear_to_phys



> -----Original Message-----
> From: Alexandru Stefan ISAILA [mailto:aisaila@xxxxxxxxxxxxxxx]
> Sent: 04 February 2019 11:37
> To: xen-devel@xxxxxxxxxxxxxxxxxxxx
> Cc: Paul Durrant <Paul.Durrant@xxxxxxxxxx>; jbeulich@xxxxxxxx; Andrew
> Cooper <Andrew.Cooper3@xxxxxxxxxx>; Wei Liu <wei.liu2@xxxxxxxxxx>; Roger
> Pau Monne <roger.pau@xxxxxxxxxx>; rcojocaru@xxxxxxxxxxxxxxx;
> tamas@xxxxxxxxxxxxx; George Dunlap <George.Dunlap@xxxxxxxxxx>
> Subject: Re: [PATCH RFC v2 1/2] x86/emulate: Move hvmemul_linear_to_phys
> 
> Ping. Any thoughts on this would be appreciated.
> 
> Regards,
> Alex
> 
> On 11.01.2019 17:37, Alexandru Stefan ISAILA wrote:
> > This is done so that hvmemul_linear_to_phys() can be called from
> > hvmemul_map_linear_addr().
> >
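
For reference, a minimal sketch of the kind of call site this move enables
inside hvmemul_map_linear_addr() (variable names and error handling here are
illustrative assumptions, not code from the patch):

    /* Sketch: translate one rep of the access before mapping it. */
    unsigned long one_rep = 1;
    paddr_t gpa;
    int rc = hvmemul_linear_to_phys(linear, &gpa, bytes, &one_rep,
                                    pfec, hvmemul_ctxt);

    if ( rc != X86EMUL_OKAY )
        return ERR_PTR(~rc);  /* simplified; the real handling differs */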

I have no objection to pure code movement for this purpose. I certainly prefer 
it to forward declarations of statics.

  Paul
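
For completeness, the forward-declaration alternative being avoided would be
something like the following near the top of emulate.c (a sketch of the
rejected approach, not code from the patch):

    /* Forward declaration that pure code movement makes unnecessary. */
    static int hvmemul_linear_to_phys(
        unsigned long addr, paddr_t *paddr, unsigned int bytes_per_rep,
        unsigned long *reps, uint32_t pfec,
        struct hvm_emulate_ctxt *hvmemul_ctxt);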

> > Signed-off-by: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>
> > ---
> >   xen/arch/x86/hvm/emulate.c | 181 ++++++++++++++++++-------------------
> >   1 file changed, 90 insertions(+), 91 deletions(-)
> >
> > diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
> > index 2d02ef1521..a766eecc8e 100644
> > --- a/xen/arch/x86/hvm/emulate.c
> > +++ b/xen/arch/x86/hvm/emulate.c
> > @@ -530,6 +530,95 @@ static int hvmemul_do_mmio_addr(paddr_t mmio_gpa,
> >       return hvmemul_do_io_addr(1, mmio_gpa, reps, size, dir, df, ram_gpa);
> >   }
> >
> > +/*
> > + * Convert addr from linear to physical form, valid over the range
> > + * [addr, addr + *reps * bytes_per_rep]. *reps is adjusted according to
> > + * the valid computed range. It is always >0 when X86EMUL_OKAY is returned.
> > + * @pfec indicates the access checks to be performed during page-table walks.
> > + */
> > +static int hvmemul_linear_to_phys(
> > +    unsigned long addr,
> > +    paddr_t *paddr,
> > +    unsigned int bytes_per_rep,
> > +    unsigned long *reps,
> > +    uint32_t pfec,
> > +    struct hvm_emulate_ctxt *hvmemul_ctxt)
> > +{
> > +    struct vcpu *curr = current;
> > +    unsigned long pfn, npfn, done, todo, i, offset = addr & ~PAGE_MASK;
> > +    int reverse;
> > +
> > +    /*
> > +     * Clip repetitions to a sensible maximum. This avoids extensive looping in
> > +     * this function while still amortising the cost of I/O trap-and-emulate.
> > +     */
> > +    *reps = min_t(unsigned long, *reps, 4096);
> > +
> > +    /* With no paging it's easy: linear == physical. */
> > +    if ( !(curr->arch.hvm.guest_cr[0] & X86_CR0_PG) )
> > +    {
> > +        *paddr = addr;
> > +        return X86EMUL_OKAY;
> > +    }
> > +
> > +    /* Reverse mode if this is a backwards multi-iteration string operation. */
> > +    reverse = (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1);
> > +
> > +    if ( reverse && ((PAGE_SIZE - offset) < bytes_per_rep) )
> > +    {
> > +        /* Do page-straddling first iteration forwards via recursion. */
> > +        paddr_t _paddr;
> > +        unsigned long one_rep = 1;
> > +        int rc = hvmemul_linear_to_phys(
> > +            addr, &_paddr, bytes_per_rep, &one_rep, pfec, hvmemul_ctxt);
> > +        if ( rc != X86EMUL_OKAY )
> > +            return rc;
> > +        pfn = _paddr >> PAGE_SHIFT;
> > +    }
> > +    else if ( (pfn = paging_gva_to_gfn(curr, addr, &pfec)) == gfn_x(INVALID_GFN) )
> > +    {
> > +        if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
> > +            return X86EMUL_RETRY;
> > +        *reps = 0;
> > +        x86_emul_pagefault(pfec, addr, &hvmemul_ctxt->ctxt);
> > +        return X86EMUL_EXCEPTION;
> > +    }
> > +
> > +    done = reverse ? bytes_per_rep + offset : PAGE_SIZE - offset;
> > +    todo = *reps * bytes_per_rep;
> > +    for ( i = 1; done < todo; i++ )
> > +    {
> > +        /* Get the next PFN in the range. */
> > +        addr += reverse ? -PAGE_SIZE : PAGE_SIZE;
> > +        npfn = paging_gva_to_gfn(curr, addr, &pfec);
> > +
> > +        /* Is it contiguous with the preceding PFNs? If not then we're done. */
> > +        if ( (npfn == gfn_x(INVALID_GFN)) ||
> > +             (npfn != (pfn + (reverse ? -i : i))) )
> > +        {
> > +            if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
> > +                return X86EMUL_RETRY;
> > +            done /= bytes_per_rep;
> > +            if ( done == 0 )
> > +            {
> > +                ASSERT(!reverse);
> > +                if ( npfn != gfn_x(INVALID_GFN) )
> > +                    return X86EMUL_UNHANDLEABLE;
> > +                *reps = 0;
> > +                x86_emul_pagefault(pfec, addr & PAGE_MASK, &hvmemul_ctxt->ctxt);
> > +                return X86EMUL_EXCEPTION;
> > +            }
> > +            *reps = done;
> > +            break;
> > +        }
> > +
> > +        done += PAGE_SIZE;
> > +    }
> > +
> > +    *paddr = ((paddr_t)pfn << PAGE_SHIFT) | offset;
> > +    return X86EMUL_OKAY;
> > +}
> > +
> >   /*
> >    * Map the frame(s) covering an individual linear access, for writeable
> >    * access.  May return NULL for MMIO, or ERR_PTR(~X86EMUL_*) for other errors
> > @@ -692,97 +781,7 @@ static void hvmemul_unmap_linear_addr(
> >           *mfn++ = _mfn(0);
> >       }
> >   #endif
> > -}
> > -
> > -/*
> > - * Convert addr from linear to physical form, valid over the range
> > - * [addr, addr + *reps * bytes_per_rep]. *reps is adjusted according to
> > - * the valid computed range. It is always >0 when X86EMUL_OKAY is returned.
> > - * @pfec indicates the access checks to be performed during page-table walks.
> > - */
> > -static int hvmemul_linear_to_phys(
> > -    unsigned long addr,
> > -    paddr_t *paddr,
> > -    unsigned int bytes_per_rep,
> > -    unsigned long *reps,
> > -    uint32_t pfec,
> > -    struct hvm_emulate_ctxt *hvmemul_ctxt)
> > -{
> > -    struct vcpu *curr = current;
> > -    unsigned long pfn, npfn, done, todo, i, offset = addr & ~PAGE_MASK;
> > -    int reverse;
> > -
> > -    /*
> > -     * Clip repetitions to a sensible maximum. This avoids extensive looping in
> > -     * this function while still amortising the cost of I/O trap-and-emulate.
> > -     */
> > -    *reps = min_t(unsigned long, *reps, 4096);
> > -
> > -    /* With no paging it's easy: linear == physical. */
> > -    if ( !(curr->arch.hvm.guest_cr[0] & X86_CR0_PG) )
> > -    {
> > -        *paddr = addr;
> > -        return X86EMUL_OKAY;
> > -    }
> > -
> > -    /* Reverse mode if this is a backwards multi-iteration string operation. */
> > -    reverse = (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1);
> > -
> > -    if ( reverse && ((PAGE_SIZE - offset) < bytes_per_rep) )
> > -    {
> > -        /* Do page-straddling first iteration forwards via recursion. */
> > -        paddr_t _paddr;
> > -        unsigned long one_rep = 1;
> > -        int rc = hvmemul_linear_to_phys(
> > -            addr, &_paddr, bytes_per_rep, &one_rep, pfec, hvmemul_ctxt);
> > -        if ( rc != X86EMUL_OKAY )
> > -            return rc;
> > -        pfn = _paddr >> PAGE_SHIFT;
> > -    }
> > -    else if ( (pfn = paging_gva_to_gfn(curr, addr, &pfec)) == gfn_x(INVALID_GFN) )
> > -    {
> > -        if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
> > -            return X86EMUL_RETRY;
> > -        *reps = 0;
> > -        x86_emul_pagefault(pfec, addr, &hvmemul_ctxt->ctxt);
> > -        return X86EMUL_EXCEPTION;
> > -    }
> > -
> > -    done = reverse ? bytes_per_rep + offset : PAGE_SIZE - offset;
> > -    todo = *reps * bytes_per_rep;
> > -    for ( i = 1; done < todo; i++ )
> > -    {
> > -        /* Get the next PFN in the range. */
> > -        addr += reverse ? -PAGE_SIZE : PAGE_SIZE;
> > -        npfn = paging_gva_to_gfn(curr, addr, &pfec);
> > -
> > -        /* Is it contiguous with the preceding PFNs? If not then we're done. */
> > -        if ( (npfn == gfn_x(INVALID_GFN)) ||
> > -             (npfn != (pfn + (reverse ? -i : i))) )
> > -        {
> > -            if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
> > -                return X86EMUL_RETRY;
> > -            done /= bytes_per_rep;
> > -            if ( done == 0 )
> > -            {
> > -                ASSERT(!reverse);
> > -                if ( npfn != gfn_x(INVALID_GFN) )
> > -                    return X86EMUL_UNHANDLEABLE;
> > -                *reps = 0;
> > -                x86_emul_pagefault(pfec, addr & PAGE_MASK, &hvmemul_ctxt->ctxt);
> > -                return X86EMUL_EXCEPTION;
> > -            }
> > -            *reps = done;
> > -            break;
> > -        }
> > -
> > -        done += PAGE_SIZE;
> > -    }
> > -
> > -    *paddr = ((paddr_t)pfn << PAGE_SHIFT) | offset;
> > -    return X86EMUL_OKAY;
> > -}
> > -
> > +}
> >
> >   static int hvmemul_virtual_to_linear(
> >       enum x86_segment seg,
> >
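
As an aside for reviewers less familiar with the moved function: the
done/todo accounting of its main loop can be modelled in isolation. A toy,
standalone illustration (arbitrary values, contiguous forward mapping
assumed; this is deliberately not Xen code):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
        /* Illustrative only: 100 reps of 4 bytes each, starting 8 bytes
         * before a page boundary, walked forwards. */
        unsigned long addr = 0x10ff8, bytes_per_rep = 4, reps = 100;
        unsigned long offset = addr & (PAGE_SIZE - 1);
        unsigned long done = PAGE_SIZE - offset;   /* covered by 1st walk */
        unsigned long todo = reps * bytes_per_rep; /* total bytes wanted */
        unsigned long walks = 1;

        while ( done < todo )
        {
            walks++;            /* each iteration translates one more page */
            done += PAGE_SIZE;
        }

        printf("%lu reps x %lu bytes => %lu page-table walks\n",
               reps, bytes_per_rep, walks);

        return 0;
    }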