Re: [Xen-devel] [PATCH 3 of 9] x86/mm: Refactor possibly deadlocking get_gfn calls
At 14:51 -0500 on 01 Feb (1328107915), Andres Lagar-Cavilla wrote:
> xen/arch/x86/hvm/emulate.c | 35 +++++++--------
> xen/arch/x86/mm/mem_sharing.c | 28 +++++++------
> xen/include/asm-x86/p2m.h | 91 +++++++++++++++++++++++++++++++++++++++++++
> 3 files changed, 122 insertions(+), 32 deletions(-)
>
>
> When calling get_gfn multiple times on different gfns in the same function,
> we can easily deadlock if p2m lookups are locked. Thus, refactor these calls
> to enforce simple deadlock-avoidance rules:
> - Lowest-numbered domain first
> - Lowest-numbered gfn first
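
(The two rules boil down to a single ordering predicate; a minimal sketch,
with an illustrative helper name that is not part of the patch:

    /* Nonzero iff (d1, g1) must be taken before (d2, g2). */
    static int order_before(const struct domain *d1, unsigned long g1,
                            const struct domain *d2, unsigned long g2)
    {
        if ( d1 != d2 )
            return d1->domain_id < d2->domain_id; /* lowest domain first */
        return g1 <= g2;                          /* then lowest gfn first */
    }

Any two callers that both take their pair of gfns in this order cannot end
up each holding one p2m lock while waiting on the other.)
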
This is a good idea, and I like the get_two_gfns() abstraction, but:
 - I think the two_gfns struct should probably just live on the stack
   instead of malloc()ing it up every time.  It's not very big.
 - the implementation of get_two_gfns() seems to be very complex; I'm
   sure it could be simplified.  At the very least, you could avoid a
   bit of duplication by just deciding once which order to do the gets
   in and then running all the setup and get code once; see the sketch
   below.
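
For concreteness, one possible shape (untested, just a sketch; the
argument names follow the patch and everything else is illustrative):

    /* The caller provides the struct, so nothing is allocated. */
    static inline void get_two_gfns(struct domain *rd, unsigned long rgfn,
        p2m_type_t *rt, p2m_access_t *ra, mfn_t *rmfn, struct domain *ld,
        unsigned long lgfn, p2m_type_t *lt, p2m_access_t *la, mfn_t *lmfn,
        p2m_query_t q, struct two_gfns *rval)
    {
        /* Scratch slots so NULL out-parameters need no special-casing
         * at the get_gfn_type_access() calls below. */
        mfn_t scratch_mfn, *first_mfn, *second_mfn;
        p2m_access_t scratch_a, *first_a, *second_a;
        p2m_type_t scratch_t, *first_t, *second_t;
        int r_first;

        /* Decide the order exactly once: lowest-numbered domain first,
         * lowest-numbered gfn first within a single domain. */
        if ( rd == ld )
            r_first = (rgfn <= lgfn);
        else
            r_first = (rd->domain_id < ld->domain_id);

        rval->first_domain  = r_first ? rd   : ld;
        rval->first_gfn     = r_first ? rgfn : lgfn;
        rval->second_domain = r_first ? ld   : rd;
        rval->second_gfn    = r_first ? lgfn : rgfn;

        first_mfn  = r_first ? rmfn : lmfn;
        first_t    = r_first ? rt   : lt;
        first_a    = r_first ? ra   : la;
        second_mfn = r_first ? lmfn : rmfn;
        second_t   = r_first ? lt   : rt;
        second_a   = r_first ? la   : ra;

        if ( first_mfn == NULL )  first_mfn  = &scratch_mfn;
        if ( first_t == NULL )    first_t    = &scratch_t;
        if ( first_a == NULL )    first_a    = &scratch_a;
        if ( second_mfn == NULL ) second_mfn = &scratch_mfn;
        if ( second_t == NULL )   second_t   = &scratch_t;
        if ( second_a == NULL )   second_a   = &scratch_a;

        /* Single setup-and-get path, in the order decided above. */
        *first_mfn  = get_gfn_type_access(p2m_get_hostp2m(rval->first_domain),
                                          rval->first_gfn, first_t, first_a,
                                          q, NULL);
        *second_mfn = get_gfn_type_access(p2m_get_hostp2m(rval->second_domain),
                                          rval->second_gfn, second_t, second_a,
                                          q, NULL);
    }

Callers would declare "struct two_gfns tg;" and pass &tg, the
"if ( !tg ) return ..." error paths in the patch below would disappear,
and put_two_gfns() would keep the paired put_gfn()s but lose the xfree().
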
Cheers,
Tim.
> Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxx>
>
> diff -r 27031a8a4eff -r 3de7e43b130a xen/arch/x86/hvm/emulate.c
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -660,12 +660,13 @@ static int hvmemul_rep_movs(
> {
> struct hvm_emulate_ctxt *hvmemul_ctxt =
> container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
> - unsigned long saddr, daddr, bytes, sgfn, dgfn;
> + unsigned long saddr, daddr, bytes;
> paddr_t sgpa, dgpa;
> uint32_t pfec = PFEC_page_present;
> - p2m_type_t p2mt;
> + p2m_type_t sp2mt, dp2mt;
> int rc, df = !!(ctxt->regs->eflags & X86_EFLAGS_DF);
> char *buf;
> + struct two_gfns *tg;
>
> rc = hvmemul_virtual_to_linear(
> src_seg, src_offset, bytes_per_rep, reps, hvm_access_read,
> @@ -693,26 +694,25 @@ static int hvmemul_rep_movs(
> if ( rc != X86EMUL_OKAY )
> return rc;
>
> - /* XXX In a fine-grained p2m locking scenario, we need to sort this
> - * get_gfn's, or else we might deadlock */
> - sgfn = sgpa >> PAGE_SHIFT;
> - (void)get_gfn(current->domain, sgfn, &p2mt);
> - if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) )
> + tg = get_two_gfns(current->domain, sgpa >> PAGE_SHIFT, &sp2mt, NULL, NULL,
> + current->domain, dgpa >> PAGE_SHIFT, &dp2mt, NULL, NULL,
> + p2m_guest);
> + if ( !tg )
> + return X86EMUL_UNHANDLEABLE;
> +
> + if ( !p2m_is_ram(sp2mt) && !p2m_is_grant(sp2mt) )
> {
> rc = hvmemul_do_mmio(
> sgpa, reps, bytes_per_rep, dgpa, IOREQ_READ, df, NULL);
> - put_gfn(current->domain, sgfn);
> + put_two_gfns(&tg);
> return rc;
> }
>
> - dgfn = dgpa >> PAGE_SHIFT;
> - (void)get_gfn(current->domain, dgfn, &p2mt);
> - if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) )
> + if ( !p2m_is_ram(dp2mt) && !p2m_is_grant(dp2mt) )
> {
> rc = hvmemul_do_mmio(
> dgpa, reps, bytes_per_rep, sgpa, IOREQ_WRITE, df, NULL);
> - put_gfn(current->domain, sgfn);
> - put_gfn(current->domain, dgfn);
> + put_two_gfns(&tg);
> return rc;
> }
>
> @@ -730,8 +730,7 @@ static int hvmemul_rep_movs(
> */
> if ( ((dgpa + bytes_per_rep) > sgpa) && (dgpa < (sgpa + bytes)) )
> {
> - put_gfn(current->domain, sgfn);
> - put_gfn(current->domain, dgfn);
> + put_two_gfns(&tg);
> return X86EMUL_UNHANDLEABLE;
> }
>
> @@ -743,8 +742,7 @@ static int hvmemul_rep_movs(
> buf = xmalloc_bytes(bytes);
> if ( buf == NULL )
> {
> - put_gfn(current->domain, sgfn);
> - put_gfn(current->domain, dgfn);
> + put_two_gfns(&tg);
> return X86EMUL_UNHANDLEABLE;
> }
>
> @@ -757,8 +755,7 @@ static int hvmemul_rep_movs(
> rc = hvm_copy_to_guest_phys(dgpa, buf, bytes);
>
> xfree(buf);
> - put_gfn(current->domain, sgfn);
> - put_gfn(current->domain, dgfn);
> + put_two_gfns(&tg);
>
> if ( rc == HVMCOPY_gfn_paged_out )
> return X86EMUL_RETRY;
> diff -r 27031a8a4eff -r 3de7e43b130a xen/arch/x86/mm/mem_sharing.c
> --- a/xen/arch/x86/mm/mem_sharing.c
> +++ b/xen/arch/x86/mm/mem_sharing.c
> @@ -718,11 +718,13 @@ int mem_sharing_share_pages(struct domai
> int ret = -EINVAL;
> mfn_t smfn, cmfn;
> p2m_type_t smfn_type, cmfn_type;
> + struct two_gfns *tg;
>
> - /* XXX if sd == cd handle potential deadlock by ordering
> - * the get_ and put_gfn's */
> - smfn = get_gfn(sd, sgfn, &smfn_type);
> - cmfn = get_gfn(cd, cgfn, &cmfn_type);
> + tg = get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
> + cd, cgfn, &cmfn_type, NULL, &cmfn,
> + p2m_query);
> + if ( !tg )
> + return -ENOMEM;
>
> /* This tricky business is to avoid two callers deadlocking if
> * grabbing pages in opposite client/source order */
> @@ -819,8 +821,7 @@ int mem_sharing_share_pages(struct domai
> ret = 0;
>
> err_out:
> - put_gfn(cd, cgfn);
> - put_gfn(sd, sgfn);
> + put_two_gfns(&tg);
> return ret;
> }
>
> @@ -834,11 +835,13 @@ int mem_sharing_add_to_physmap(struct do
> struct gfn_info *gfn_info;
> struct p2m_domain *p2m = p2m_get_hostp2m(cd);
> p2m_access_t a;
> -
> - /* XXX if sd == cd handle potential deadlock by ordering
> - * the get_ and put_gfn's */
> - smfn = get_gfn_query(sd, sgfn, &smfn_type);
> - cmfn = get_gfn_type_access(p2m, cgfn, &cmfn_type, &a, p2m_query, NULL);
> + struct two_gfns *tg;
> +
> + tg = get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
> + cd, cgfn, &cmfn_type, &a, &cmfn,
> + p2m_query);
> + if ( !tg )
> + return -ENOMEM;
>
> /* Get the source shared page, check and lock */
> ret = XEN_DOMCTL_MEM_SHARING_S_HANDLE_INVALID;
> @@ -886,8 +889,7 @@ int mem_sharing_add_to_physmap(struct do
> err_unlock:
> mem_sharing_page_unlock(spage);
> err_out:
> - put_gfn(cd, cgfn);
> - put_gfn(sd, sgfn);
> + put_two_gfns(&tg);
> return ret;
> }
>
> diff -r 27031a8a4eff -r 3de7e43b130a xen/include/asm-x86/p2m.h
> --- a/xen/include/asm-x86/p2m.h
> +++ b/xen/include/asm-x86/p2m.h
> @@ -378,6 +378,97 @@ static inline unsigned long mfn_to_gfn(s
> return mfn_x(mfn);
> }
>
> +/* Deadlock-avoidance scheme when calling get_gfn on different gfn's */
> +struct two_gfns {
> + struct domain *first_domain;
> + unsigned long first_gfn;
> + struct domain *second_domain;
> + unsigned long second_gfn;
> +};
> +
> +#define assign_pointers(dest, source) \
> +do { \
> + dest ## _mfn = (source ## mfn) ? (source ## mfn) : &__ ## dest ## _mfn; \
> + dest ## _a = (source ## a) ? (source ## a) : &__ ## dest ## _a; \
> + dest ## _t = (source ## t) ? (source ## t) : &__ ## dest ## _t; \
> +} while(0)
> +
> +/* Returns mfn, type and access for potential caller consumption, but any
> + * of those can be NULL */
> +static inline struct two_gfns *get_two_gfns(struct domain *rd, unsigned long rgfn,
> + p2m_type_t *rt, p2m_access_t *ra, mfn_t *rmfn, struct domain *ld,
> + unsigned long lgfn, p2m_type_t *lt, p2m_access_t *la, mfn_t *lmfn,
> + p2m_query_t q)
> +{
> + mfn_t *first_mfn, *second_mfn, __first_mfn, __second_mfn;
> + p2m_access_t *first_a, *second_a, __first_a, __second_a;
> + p2m_type_t *first_t, *second_t, __first_t, __second_t;
> +
> + struct two_gfns *rval = xmalloc(struct two_gfns);
> + if ( !rval )
> + return NULL;
> +
> + if ( rd == ld )
> + {
> + rval->first_domain = rval->second_domain = rd;
> +
> + /* Sort by gfn */
> + if (rgfn <= lgfn)
> + {
> + rval->first_gfn = rgfn;
> + rval->second_gfn = lgfn;
> + assign_pointers(first, r);
> + assign_pointers(second, l);
> + } else {
> + rval->first_gfn = lgfn;
> + rval->second_gfn = rgfn;
> + assign_pointers(first, l);
> + assign_pointers(second, r);
> + }
> + } else {
> + /* Sort by domain */
> + if ( rd->domain_id <= ld->domain_id )
> + {
> + rval->first_domain = rd;
> + rval->first_gfn = rgfn;
> + rval->second_domain = ld;
> + rval->second_gfn = lgfn;
> + assign_pointers(first, r);
> + assign_pointers(second, l);
> + } else {
> + rval->first_domain = ld;
> + rval->first_gfn = lgfn;
> + rval->second_domain = rd;
> + rval->second_gfn = rgfn;
> + assign_pointers(first, l);
> + assign_pointers(second, r);
> + }
> + }
> +
> + /* Now do the gets */
> + *first_mfn = get_gfn_type_access(p2m_get_hostp2m(rval->first_domain),
> + rval->first_gfn, first_t, first_a, q, NULL);
> + *second_mfn = get_gfn_type_access(p2m_get_hostp2m(rval->second_domain),
> + rval->second_gfn, second_t, second_a, q, NULL);
> +
> + return rval;
> +}
> +
> +static inline void put_two_gfns(struct two_gfns **arg_ptr)
> +{
> + struct two_gfns *arg;
> +
> + if ( !arg_ptr || !(*arg_ptr) )
> + return;
> +
> + arg = *arg_ptr;
> + put_gfn(arg->second_domain, arg->second_gfn);
> + put_gfn(arg->first_domain, arg->first_gfn);
> +
> + xfree(arg);
> + *arg_ptr = NULL;
> +}
> +
> /* Init the datastructures for later use by the p2m code */
> int p2m_init(struct domain *d);
>
>
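
To summarize the API as posted, a call site pairs the two helpers like
this (condensed from the mem_sharing_share_pages() hunk above; error
handling between the calls elided):

    struct two_gfns *tg;
    p2m_type_t smfn_type, cmfn_type;
    mfn_t smfn, cmfn;

    /* Takes both p2m entries in deadlock-safe order; any of the
     * mfn/type/access out-parameters may be NULL. */
    tg = get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
                      cd, cgfn, &cmfn_type, NULL, &cmfn,
                      p2m_query);
    if ( !tg )
        return -ENOMEM;
    /* ... use smfn/cmfn with both p2m entries held ... */
    put_two_gfns(&tg);   /* releases both and frees/NULLs tg */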