[Xen-devel] [PATCH 4 of 9] Reorder locks used by shadow code in anticipation of synchronized p2m lookups
xen/arch/x86/mm/shadow/common.c | 3 +++
xen/arch/x86/mm/shadow/multi.c | 18 +++++++++---------
2 files changed, 12 insertions(+), 9 deletions(-)
Currently, mm-locks.h enforces a strict ordering between locks in the mm
layer lest there be an inversion in the order locks are taken and thus
the risk of deadlock.
Once p2m lookups become synchronized, get_gfn* calls take the p2m lock, and a
new set of inversions arises. Reorder some of the locks in the shadow code so
that even in this case no deadlocks happen.
After this, synchronized p2m lookups are in principle ready to be enabled in
shadow mode.
Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
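As a minimal sketch of the ordering discipline being established here (plain
pthreads mutexes stand in for Xen's p2m and paging locks; the names and body
are illustrative, not the real mm-locks.h machinery):

#include <pthread.h>

/* lock_p2m stands in for the p2m lock, lock_paging for the paging
 * lock; both names are illustrative. */
static pthread_mutex_t lock_p2m    = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_paging = PTHREAD_MUTEX_INITIALIZER;

void op_needing_both_locks(void)
{
    /* Every path takes the p2m lock strictly before the paging lock,
     * so two threads can never hold the pair in opposite orders and
     * deadlock on each other. */
    pthread_mutex_lock(&lock_p2m);
    pthread_mutex_lock(&lock_paging);

    /* ... perform p2m lookups and shadow/paging updates here ... */

    /* Release in reverse order. */
    pthread_mutex_unlock(&lock_paging);
    pthread_mutex_unlock(&lock_p2m);
}

This is the same rule the shadow_track_dirty_vram hunk below applies:
p2m_lock() before paging_lock(), and paging_unlock() before p2m_unlock().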
diff -r 3de7e43b130a -r 8a920bcddd0f xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -3609,6 +3609,8 @@ int shadow_track_dirty_vram(struct domai
|| end_pfn >= p2m->max_mapped_pfn)
return -EINVAL;
+ /* We perform p2m lookups, so lock the p2m upfront to avoid deadlock */
+ p2m_lock(p2m_get_hostp2m(d));
paging_lock(d);
if ( dirty_vram && (!nr ||
@@ -3782,6 +3784,7 @@ out_dirty_vram:
out:
paging_unlock(d);
+ p2m_unlock(p2m_get_hostp2m(d));
return rc;
}
diff -r 3de7e43b130a -r 8a920bcddd0f xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -2444,7 +2444,7 @@ static int validate_gl1e(struct vcpu *v,
perfc_incr(shadow_validate_gl1e_calls);
gfn = guest_l1e_get_gfn(new_gl1e);
- gmfn = get_gfn_query(v->domain, gfn, &p2mt);
+ gmfn = get_gfn_query_unlocked(v->domain, gfn_x(gfn), &p2mt);
l1e_propagate_from_guest(v, new_gl1e, gmfn, &new_sl1e, ft_prefetch, p2mt);
result |= shadow_set_l1e(v, sl1p, new_sl1e, p2mt, sl1mfn);
@@ -2466,7 +2466,6 @@ static int validate_gl1e(struct vcpu *v,
}
#endif /* OOS */
- put_gfn(v->domain, gfn_x(gfn));
return result;
}
@@ -4715,8 +4714,6 @@ static void sh_pagetable_dying(struct vc
unsigned long l3gfn;
mfn_t l3mfn;
- paging_lock(v->domain);
-
gcr3 = (v->arch.hvm_vcpu.guest_cr[3]);
/* fast path: the pagetable belongs to the current context */
if ( gcr3 == gpa )
@@ -4728,8 +4725,11 @@ static void sh_pagetable_dying(struct vc
{
printk(XENLOG_DEBUG "sh_pagetable_dying: gpa not valid %"PRIpaddr"\n",
gpa);
- goto out;
+ goto out_put_gfn;
}
+
+ paging_lock(v->domain);
+
if ( !fast_path )
{
gl3pa = sh_map_domain_page(l3mfn);
@@ -4770,11 +4770,11 @@ static void sh_pagetable_dying(struct vc
v->arch.paging.shadow.pagetable_dying = 1;
-out:
if ( !fast_path )
unmap_domain_page(gl3pa);
+ paging_unlock(v->domain);
+out_put_gfn:
put_gfn(v->domain, l3gfn);
- paging_unlock(v->domain);
}
#else
static void sh_pagetable_dying(struct vcpu *v, paddr_t gpa)
@@ -4782,15 +4782,14 @@ static void sh_pagetable_dying(struct vc
mfn_t smfn, gmfn;
p2m_type_t p2mt;
+ gmfn = get_gfn_query(v->domain, _gfn(gpa >> PAGE_SHIFT), &p2mt);
paging_lock(v->domain);
- gmfn = get_gfn_query(v->domain, _gfn(gpa >> PAGE_SHIFT), &p2mt);
#if GUEST_PAGING_LEVELS == 2
smfn = shadow_hash_lookup(v, mfn_x(gmfn), SH_type_l2_32_shadow);
#else
smfn = shadow_hash_lookup(v, mfn_x(gmfn), SH_type_l4_64_shadow);
#endif
- put_gfn(v->domain, gpa >> PAGE_SHIFT);
if ( mfn_valid(smfn) )
{
@@ -4808,6 +4807,7 @@ static void sh_pagetable_dying(struct vc
v->arch.paging.shadow.pagetable_dying = 1;
paging_unlock(v->domain);
+ put_gfn(v->domain, gpa >> PAGE_SHIFT);
}
#endif
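The multi.c hunks above follow two complementary disciplines: a locked lookup
(get_gfn_query) is taken before the paging lock and balanced with put_gfn
after it, while a lookup made while already under the paging lock uses the
unlocked variant (get_gfn_query_unlocked), which takes no p2m lock and
therefore owes no put_gfn. A compilable sketch of both patterns, with stub
types and functions standing in for the Xen API (the stub signatures are
simplified assumptions, not the real ones):

#include <pthread.h>

struct domain {
    pthread_mutex_t p2m_lock;    /* stand-in for the host p2m lock */
    pthread_mutex_t paging_lock; /* stand-in for the paging lock   */
};

/* Locked query: takes the p2m lock; the caller must balance it with
 * put_gfn(). */
static void get_gfn_query(struct domain *d)
{
    pthread_mutex_lock(&d->p2m_lock);
}

static void put_gfn(struct domain *d)
{
    pthread_mutex_unlock(&d->p2m_lock);
}

/* Unlocked query: touches no lock, so no put_gfn() is owed. */
static void get_gfn_query_unlocked(struct domain *d)
{
    (void)d;
}

/* The sh_pagetable_dying pattern after this patch: p2m lookup first,
 * paging lock inside it, releases in reverse order. */
void pagetable_dying_pattern(struct domain *d)
{
    get_gfn_query(d);
    pthread_mutex_lock(&d->paging_lock);
    /* ... look up and retire the dying shadow here ... */
    pthread_mutex_unlock(&d->paging_lock);
    put_gfn(d);
}

/* The validate_gl1e pattern: the caller already holds the paging
 * lock, so the unlocked query is used and the p2m lock is never
 * taken inside it. */
void validate_gl1e_pattern(struct domain *d)
{
    pthread_mutex_lock(&d->paging_lock);
    get_gfn_query_unlocked(d);
    /* ... propagate the guest entry into the shadow here ... */
    pthread_mutex_unlock(&d->paging_lock);
}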