
Re: [Xen-devel] [PATCH v5 10/15] x86/altp2m: add remaining support routines.



>>> On 14.07.15 at 02:14, <edmund.h.white@xxxxxxxxx> wrote:
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -2802,10 +2802,11 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
>      mfn_t mfn;
>      struct vcpu *curr = current;
>      struct domain *currd = curr->domain;
> -    struct p2m_domain *p2m;
> +    struct p2m_domain *p2m, *hostp2m;
>      int rc, fall_through = 0, paged = 0;
>      int sharing_enomem = 0;
>      vm_event_request_t *req_ptr = NULL;
> +    bool_t ap2m_active = 0;

Pointless initializer afaict.
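I.e., as a sketch of the suggested change (the flag gets assigned
unconditionally from altp2m_active() further down, before any use):

    -    bool_t ap2m_active = 0;
    +    bool_t ap2m_active;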

> @@ -2865,11 +2866,31 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
>          goto out;
>      }
>  
> -    p2m = p2m_get_hostp2m(currd);
> -    mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, 
> +    ap2m_active = altp2m_active(currd);
> +
> +    /* Take a lock on the host p2m speculatively, to avoid potential
> +     * locking order problems later and to handle unshare etc.
> +     */

Comment style.
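For reference, the shape CODING_STYLE asks for (same text, just
re-wrapped):

    /*
     * Take a lock on the host p2m speculatively, to avoid potential
     * locking order problems later and to handle unshare etc.
     */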

> @@ -2965,9 +3003,15 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
>          if ( npfec.write_access )
>          {
>              paging_mark_dirty(currd, mfn_x(mfn));
> +            /* If p2m is really an altp2m, unlock here to avoid lock ordering
> +             * violation when the change below is propagated from host p2m */
> +            if ( ap2m_active )
> +                __put_gfn(p2m, gfn);
>              p2m_change_type_one(currd, gfn, p2m_ram_logdirty, p2m_ram_rw);

And this won't result in any races?

Also - comment style again (and more elsewhere).
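To spell out the window being asked about (sketch of the code as
quoted above):

    if ( ap2m_active )
        __put_gfn(p2m, gfn);
    /* <- another vCPU may modify or drop the gfn's mapping here,
     *    before the type change below takes effect */
    p2m_change_type_one(currd, gfn, p2m_ram_logdirty, p2m_ram_rw);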

> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -2037,6 +2037,391 @@ bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, uint16_t idx)
>      return rc;
>  }
>  
> +void p2m_flush_altp2m(struct domain *d)
> +{
> +    uint16_t i;
> +
> +    altp2m_list_lock(d);
> +
> +    for ( i = 0; i < MAX_ALTP2M; i++ )
> +    {
> +        p2m_flush_table(d->arch.altp2m_p2m[i]);
> +        /* Uninit and reinit ept to force TLB shootdown */
> +        ept_p2m_uninit(d->arch.altp2m_p2m[i]);
> +        ept_p2m_init(d->arch.altp2m_p2m[i]);

ept_... in non-EPT code again.
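One way to avoid that (purely a sketch - the hook name below is
hypothetical, not an existing interface) would be an indirection
installed by the vendor code at p2m-init time:

    /* hypothetical member, set by ept_p2m_init() and friends */
    struct p2m_domain {
        ...
        void (*reinit)(struct p2m_domain *p2m);
        ...
    };

    /* p2m.c then never names ept_... directly */
    p2m_flush_table(d->arch.altp2m_p2m[i]);
    d->arch.altp2m_p2m[i]->reinit(d->arch.altp2m_p2m[i]);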

> +        d->arch.altp2m_eptp[i] = INVALID_MFN;
> +    }
> +
> +    altp2m_list_unlock(d);
> +}
> +
> +static void p2m_init_altp2m_helper(struct domain *d, uint16_t i)
> +{
> +    struct p2m_domain *p2m = d->arch.altp2m_p2m[i];
> +    struct ept_data *ept;
> +
> +    p2m->min_remapped_gfn = INVALID_GFN;
> +    p2m->max_remapped_gfn = INVALID_GFN;
> +    ept = &p2m->ept;
> +    ept->asr = pagetable_get_pfn(p2m_get_pagetable(p2m));
> +    d->arch.altp2m_eptp[i] = ept_get_eptp(ept);

Same here.

> +long p2m_init_altp2m_by_id(struct domain *d, uint16_t idx)
> +{
> +    long rc = -EINVAL;

Why long (for both variable and function return type)? (More of
these in functions below.)
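I.e., as a sketch (assuming the callers and hypercall plumbing get
adjusted to match):

    int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx)
    {
        int rc = -EINVAL;
        ...
        return rc;
    }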

> +long p2m_init_next_altp2m(struct domain *d, uint16_t *idx)
> +{
> +    long rc = -EINVAL;
> +    uint16_t i;

As in the earlier patch(es) - unsigned int.
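I.e.:

    unsigned int i;    /* the narrower uint16_t buys nothing here */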

> +long p2m_change_altp2m_gfn(struct domain *d, uint16_t idx,
> +                             gfn_t old_gfn, gfn_t new_gfn)
> +{
> +    struct p2m_domain *hp2m, *ap2m;
> +    p2m_access_t a;
> +    p2m_type_t t;
> +    mfn_t mfn;
> +    unsigned int page_order;
> +    long rc = -EINVAL;
> +
> +    if ( idx > MAX_ALTP2M || d->arch.altp2m_eptp[idx] == INVALID_MFN )
> +        return rc;
> +
> +    hp2m = p2m_get_hostp2m(d);
> +    ap2m = d->arch.altp2m_p2m[idx];
> +
> +    p2m_lock(ap2m);
> +
> +    mfn = ap2m->get_entry(ap2m, gfn_x(old_gfn), &t, &a, 0, NULL, NULL);
> +
> +    if ( gfn_x(new_gfn) == INVALID_GFN )
> +    {
> +        if ( mfn_valid(mfn) )
> +            p2m_remove_page(ap2m, gfn_x(old_gfn), mfn_x(mfn), PAGE_ORDER_4K);
> +        rc = 0;
> +        goto out;
> +    }
> +
> +    /* Check host p2m if no valid entry in alternate */
> +    if ( !mfn_valid(mfn) )
> +    {
> +        mfn = hp2m->get_entry(hp2m, gfn_x(old_gfn), &t, &a,
> +                              P2M_ALLOC | P2M_UNSHARE, &page_order, NULL);
> +
> +        if ( !mfn_valid(mfn) || t != p2m_ram_rw )
> +            goto out;
> +
> +        /* If this is a superpage, copy that first */
> +        if ( page_order != PAGE_ORDER_4K )
> +        {
> +            gfn_t gfn;
> +            unsigned long mask;
> +
> +            mask = ~((1UL << page_order) - 1);
> +            gfn = _gfn(gfn_x(old_gfn) & mask);
> +            mfn = _mfn(mfn_x(mfn) & mask);
> +
> +            if ( ap2m->set_entry(ap2m, gfn_x(gfn), mfn, page_order, t, a, 1) )
> +                goto out;
> +        }
> +    }
> +
> +    mfn = ap2m->get_entry(ap2m, gfn_x(new_gfn), &t, &a, 0, NULL, NULL);
> +
> +    if ( !mfn_valid(mfn) )
> +        mfn = hp2m->get_entry(hp2m, gfn_x(new_gfn), &t, &a, 0, NULL, NULL);
> +
> +    if ( !mfn_valid(mfn) || (t != p2m_ram_rw) )
> +        goto out;
> +
> +    if ( !ap2m->set_entry(ap2m, gfn_x(old_gfn), mfn, PAGE_ORDER_4K, t, a,
> +                          (current->domain != d)) )
> +    {
> +        rc = 0;
> +
> +        if ( ap2m->min_remapped_gfn == INVALID_GFN ||
> +             gfn_x(new_gfn) < ap2m->min_remapped_gfn )
> +            ap2m->min_remapped_gfn = gfn_x(new_gfn);
> +        if ( ap2m->max_remapped_gfn == INVALID_GFN ||
> +             gfn_x(new_gfn) > ap2m->max_remapped_gfn )
> +            ap2m->max_remapped_gfn = gfn_x(new_gfn);

For the purpose here (and without conflict with the consumer side)
it would seem to be better to initialize max_remapped_gfn to zero,
as then both if()s can get away with just one comparison.
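I.e. (sketch, assuming min_remapped_gfn keeps its INVALID_GFN
initializer while max_remapped_gfn starts out as zero):

    if ( gfn_x(new_gfn) < ap2m->min_remapped_gfn )
        ap2m->min_remapped_gfn = gfn_x(new_gfn);
    if ( gfn_x(new_gfn) > ap2m->max_remapped_gfn )
        ap2m->max_remapped_gfn = gfn_x(new_gfn);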

> +void p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
> +                                 mfn_t mfn, unsigned int page_order,
> +                                 p2m_type_t p2mt, p2m_access_t p2ma)
> +{
> +    struct p2m_domain *p2m;
> +    p2m_access_t a;
> +    p2m_type_t t;
> +    mfn_t m;
> +    uint16_t i;
> +    bool_t reset_p2m;
> +    unsigned int reset_count = 0;
> +    uint16_t last_reset_idx = ~0;
> +
> +    if ( !altp2m_active(d) )
> +        return;
> +
> +    altp2m_list_lock(d);
> +
> +    for ( i = 0; i < MAX_ALTP2M; i++ )
> +    {
> +        if ( d->arch.altp2m_eptp[i] == INVALID_MFN )
> +            continue;
> +
> +        p2m = d->arch.altp2m_p2m[i];
> +        m = get_gfn_type_access(p2m, gfn_x(gfn), &t, &a, 0, NULL);
> +
> +        reset_p2m = 0;
> +
> +        /* Check for a dropped page that may impact this altp2m */
> +        if ( mfn_x(mfn) == INVALID_MFN &&
> +             gfn_x(gfn) >= p2m->min_remapped_gfn &&
> +             gfn_x(gfn) <= p2m->max_remapped_gfn )
> +            reset_p2m = 1;

Considering that this looks like an optimization, what's the downside
of possibly having min=0 and max=<end-of-address-space>? I.e. can this
result in a long-latency operation that a guest is able to effect?
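For illustration (values hypothetical): once a guest has remapped both
a very low and a very high gfn, the filter degenerates to always-true,
so every subsequent page drop resets every active altp2m:

    p2m->min_remapped_gfn = 0;
    p2m->max_remapped_gfn = (1UL << 40) - 1;  /* near top of space */

    /* the mfn == INVALID_MFN range check above now matches every
     * dropped gfn, forcing a flush of each active altp2m */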

Jan
