[Xen-changelog] [xen-unstable] x86/mm: Clean up locking now that p2m lookups are fully synchronized
# HG changeset patch
# User Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
# Date 1328890027 0
# Node ID a55ea8dc9c2566eabd19211c7debe575b63be0e3
# Parent 7f79475d3de7fc1a10be590ad0c3e9f631b6ca88
x86/mm: Clean up locking now that p2m lookups are fully synchronized
With p2m lookups fully synchronized, many routines no longer need to
call p2m_lock. Also, many routines can now logically assert that they
hold the p2m lock for a specific gfn.
Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
Acked-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
Committed-by: Tim Deegan <tim@xxxxxxx>
---
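For illustration, a minimal sketch of the per-gfn update pattern this cleanup
converges on. gfn_lock()/gfn_unlock() currently alias the recursive p2m lock
(see the mm-locks.h hunk below), but they name the gfn the critical section
protects, so callers stay unchanged if the lock ever becomes fine-grained.
example_fixup_entry() is a hypothetical caller, not a function in the tree:

static int example_fixup_entry(struct p2m_domain *p2m, unsigned long gfn,
                               mfn_t mfn, p2m_type_t nt)
{
    int rc;

    /* Per-gfn lock instead of the old domain-wide p2m_lock(p2m). */
    gfn_lock(p2m, gfn, 0);

    /* Single-gfn update; set_p2m_entry() now asserts the gfn lock is held. */
    rc = set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, nt,
                       p2m->default_access);

    gfn_unlock(p2m, gfn, 0);

    return rc;
}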
diff -r 7f79475d3de7 -r a55ea8dc9c25 xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c Fri Feb 10 16:07:07 2012 +0000
+++ b/xen/arch/x86/mm/mem_sharing.c Fri Feb 10 16:07:07 2012 +0000
@@ -877,9 +877,7 @@
goto err_unlock;
}
- p2m_lock(p2m);
ret = set_p2m_entry(p2m, cgfn, smfn, PAGE_ORDER_4K, p2m_ram_shared, a);
- p2m_unlock(p2m);
/* Tempted to turn this into an assert */
if ( !ret )
diff -r 7f79475d3de7 -r a55ea8dc9c25 xen/arch/x86/mm/mm-locks.h
--- a/xen/arch/x86/mm/mm-locks.h Fri Feb 10 16:07:07 2012 +0000
+++ b/xen/arch/x86/mm/mm-locks.h Fri Feb 10 16:07:07 2012 +0000
@@ -165,9 +165,11 @@
declare_mm_lock(p2m)
#define p2m_lock(p) mm_lock_recursive(p2m, &(p)->lock)
-#define p2m_lock_recursive(p) mm_lock_recursive(p2m, &(p)->lock)
+#define gfn_lock(p,g,o) mm_lock_recursive(p2m, &(p)->lock)
#define p2m_unlock(p) mm_unlock(&(p)->lock)
+#define gfn_unlock(p,g,o) mm_unlock(&(p)->lock)
#define p2m_locked_by_me(p) mm_locked_by_me(&(p)->lock)
+#define gfn_locked_by_me(p,g) mm_locked_by_me(&(p)->lock)
/* Sharing per page lock
*
diff -r 7f79475d3de7 -r a55ea8dc9c25 xen/arch/x86/mm/p2m-ept.c
--- a/xen/arch/x86/mm/p2m-ept.c Fri Feb 10 16:07:07 2012 +0000
+++ b/xen/arch/x86/mm/p2m-ept.c Fri Feb 10 16:07:07 2012 +0000
@@ -46,31 +46,6 @@
return (e->epte != 0 && e->sa_p2mt != p2m_invalid);
}
-/* Non-ept "lock-and-check" wrapper */
-static int ept_pod_check_and_populate(struct p2m_domain *p2m, unsigned long gfn,
- ept_entry_t *entry, int order,
- p2m_query_t q)
-{
- int r;
-
- /* This is called from the p2m lookups, which can happen with or
- * without the lock hed. */
- p2m_lock_recursive(p2m);
-
- /* Check to make sure this is still PoD */
- if ( entry->sa_p2mt != p2m_populate_on_demand )
- {
- p2m_unlock(p2m);
- return 0;
- }
-
- r = p2m_pod_demand_populate(p2m, gfn, order, q);
-
- p2m_unlock(p2m);
-
- return r;
-}
-
static void ept_p2m_type_to_flags(ept_entry_t *entry, p2m_type_t type,
p2m_access_t access)
{
/* First apply type permissions */
@@ -551,8 +526,8 @@
index = gfn_remainder >> ( i * EPT_TABLE_ORDER);
ept_entry = table + index;
- if ( !ept_pod_check_and_populate(p2m, gfn,
- ept_entry, 9, q) )
+ if ( !p2m_pod_demand_populate(p2m, gfn,
+ PAGE_ORDER_2M, q) )
goto retry;
else
goto out;
@@ -574,8 +549,8 @@
ASSERT(i == 0);
- if ( ept_pod_check_and_populate(p2m, gfn,
- ept_entry, 0, q) )
+ if ( p2m_pod_demand_populate(p2m, gfn,
+ PAGE_ORDER_4K, q) )
goto out;
}
diff -r 7f79475d3de7 -r a55ea8dc9c25 xen/arch/x86/mm/p2m-pod.c
--- a/xen/arch/x86/mm/p2m-pod.c Fri Feb 10 16:07:07 2012 +0000
+++ b/xen/arch/x86/mm/p2m-pod.c Fri Feb 10 16:07:07 2012 +0000
@@ -521,7 +521,7 @@
/* Figure out if we need to steal some freed memory for our cache */
steal_for_cache = ( p2m->pod.entry_count > p2m->pod.count );
- p2m_lock(p2m);
+ gfn_lock(p2m, gpfn, order);
if ( unlikely(d->is_dying) )
goto out_unlock;
@@ -615,7 +615,7 @@
}
out_unlock:
- p2m_unlock(p2m);
+ gfn_unlock(p2m, gpfn, order);
out:
return ret;
@@ -964,7 +964,7 @@
mfn_t mfn;
int i;
- ASSERT(p2m_locked_by_me(p2m));
+ ASSERT(gfn_locked_by_me(p2m, gfn));
/* This check is done with the p2m lock held. This will make sure that
* even if d->is_dying changes under our feet, p2m_pod_empty_cache()
@@ -972,6 +972,7 @@
if ( unlikely(d->is_dying) )
goto out_fail;
+
/* Because PoD does not have cache list for 1GB pages, it has to remap
* 1GB region to 2MB chunks for a retry. */
if ( order == PAGE_ORDER_1G )
@@ -981,6 +982,9 @@
* split 1GB into 512 2MB pages here. But We only do once here because
* set_p2m_entry() should automatically shatter the 1GB page into
* 512 2MB pages. The rest of 511 calls are unnecessary.
+ *
+ * NOTE: In a fine-grained p2m locking scenario this operation
+ * may need to promote its locking from gfn->1g superpage
*/
set_p2m_entry(p2m, gfn_aligned, _mfn(0), PAGE_ORDER_2M,
p2m_populate_on_demand, p2m->default_access);
@@ -1104,7 +1108,7 @@
if ( rc != 0 )
return rc;
- p2m_lock(p2m);
+ gfn_lock(p2m, gfn, order);
P2M_DEBUG("mark pod gfn=%#lx\n", gfn);
@@ -1138,7 +1142,7 @@
BUG_ON(p2m->pod.entry_count < 0);
}
- p2m_unlock(p2m);
+ gfn_unlock(p2m, gfn, order);
out:
return rc;
diff -r 7f79475d3de7 -r a55ea8dc9c25 xen/arch/x86/mm/p2m-pt.c
--- a/xen/arch/x86/mm/p2m-pt.c Fri Feb 10 16:07:07 2012 +0000
+++ b/xen/arch/x86/mm/p2m-pt.c Fri Feb 10 16:07:07 2012 +0000
@@ -473,31 +473,6 @@
}
-/* Non-ept "lock-and-check" wrapper */
-static int p2m_pod_check_and_populate(struct p2m_domain *p2m, unsigned long gfn,
- l1_pgentry_t *p2m_entry, int order,
- p2m_query_t q)
-{
- int r;
-
- /* This is called from the p2m lookups, which can happen with or
- * without the lock hed. */
- p2m_lock_recursive(p2m);
-
- /* Check to make sure this is still PoD */
- if ( p2m_flags_to_type(l1e_get_flags(*p2m_entry)) != p2m_populate_on_demand )
- {
- p2m_unlock(p2m);
- return 0;
- }
-
- r = p2m_pod_demand_populate(p2m, gfn, order, q);
-
- p2m_unlock(p2m);
-
- return r;
-}
-
/* Read the current domain's p2m table (through the linear mapping). */
static mfn_t p2m_gfn_to_mfn_current(struct p2m_domain *p2m,
unsigned long gfn, p2m_type_t *t,
@@ -540,8 +515,7 @@
/* The read has succeeded, so we know that mapping exists */
if ( q != p2m_query )
{
- if ( !p2m_pod_check_and_populate(p2m, gfn,
- (l1_pgentry_t *) &l3e, PAGE_ORDER_1G, q) )
+ if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_1G, q) )
goto pod_retry_l3;
p2mt = p2m_invalid;
gdprintk(XENLOG_ERR, "%s: Allocate 1GB failed!\n", __func__);
@@ -593,8 +567,8 @@
* exits at this point. */
if ( q != p2m_query )
{
- if ( !p2m_pod_check_and_populate(p2m, gfn,
- p2m_entry, 9, q) )
+ if ( !p2m_pod_demand_populate(p2m, gfn,
+ PAGE_ORDER_2M, q) )
goto pod_retry_l2;
/* Allocate failed. */
@@ -651,8 +625,8 @@
* exits at this point. */
if ( q != p2m_query )
{
- if ( !p2m_pod_check_and_populate(p2m, gfn,
- (l1_pgentry_t *)p2m_entry, 0, q) )
+ if ( !p2m_pod_demand_populate(p2m, gfn,
+ PAGE_ORDER_4K, q) )
goto pod_retry_l1;
/* Allocate failed. */
@@ -742,8 +716,7 @@
{
if ( q != p2m_query )
{
- if ( !p2m_pod_check_and_populate(p2m, gfn,
- (l1_pgentry_t *) l3e, PAGE_ORDER_1G, q) )
+ if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_1G, q) )
goto pod_retry_l3;
gdprintk(XENLOG_ERR, "%s: Allocate 1GB failed!\n",
__func__);
}
@@ -781,8 +754,7 @@
if ( p2m_flags_to_type(l2e_get_flags(*l2e)) == p2m_populate_on_demand )
{
if ( q != p2m_query ) {
- if ( !p2m_pod_check_and_populate(p2m, gfn,
- (l1_pgentry_t *)l2e, PAGE_ORDER_2M, q) )
+ if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_2M, q) )
goto pod_retry_l2;
} else
*t = p2m_populate_on_demand;
@@ -815,8 +787,7 @@
if ( p2m_flags_to_type(l1e_get_flags(*l1e)) == p2m_populate_on_demand )
{
if ( q != p2m_query ) {
- if ( !p2m_pod_check_and_populate(p2m, gfn,
- (l1_pgentry_t *)l1e, PAGE_ORDER_4K, q) )
+ if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_4K, q) )
goto pod_retry_l1;
} else
*t = p2m_populate_on_demand;
diff -r 7f79475d3de7 -r a55ea8dc9c25 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c Fri Feb 10 16:07:07 2012 +0000
+++ b/xen/arch/x86/mm/p2m.c Fri Feb 10 16:07:07 2012 +0000
@@ -161,7 +161,7 @@
/* For now only perform locking on hap domains */
if ( locked && (hap_enabled(p2m->domain)) )
/* Grab the lock here, don't release until put_gfn */
- p2m_lock(p2m);
+ gfn_lock(p2m, gfn, 0);
mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order);
@@ -194,9 +194,9 @@
/* Nothing to do in this case */
return;
- ASSERT(p2m_locked_by_me(p2m));
+ ASSERT(gfn_locked_by_me(p2m, gfn));
- p2m_unlock(p2m);
+ gfn_unlock(p2m, gfn, 0);
}
int set_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
@@ -207,7 +207,7 @@
unsigned int order;
int rc = 1;
- ASSERT(p2m_locked_by_me(p2m));
+ ASSERT(gfn_locked_by_me(p2m, gfn));
while ( todo )
{
@@ -429,6 +429,7 @@
return;
}
+ ASSERT(gfn_locked_by_me(p2m, gfn));
P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn, mfn);
if ( mfn_valid(_mfn(mfn)) )
@@ -449,9 +450,9 @@
unsigned long mfn, unsigned int page_order)
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
- p2m_lock(p2m);
+ gfn_lock(p2m, gfn, page_order);
p2m_remove_page(p2m, gfn, mfn, page_order);
- p2m_unlock(p2m);
+ gfn_unlock(p2m, gfn, page_order);
}
int
@@ -590,13 +591,13 @@
BUG_ON(p2m_is_grant(ot) || p2m_is_grant(nt));
- p2m_lock(p2m);
+ gfn_lock(p2m, gfn, 0);
mfn = p2m->get_entry(p2m, gfn, &pt, &a, p2m_query, NULL);
if ( pt == ot )
set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, nt, p2m->default_access);
- p2m_unlock(p2m);
+ gfn_unlock(p2m, gfn, 0);
return pt;
}
@@ -645,7 +646,7 @@
if ( !paging_mode_translate(d) )
return 0;
- p2m_lock(p2m);
+ gfn_lock(p2m, gfn, 0);
omfn = p2m->get_entry(p2m, gfn, &ot, &a, p2m_query, NULL);
if ( p2m_is_grant(ot) )
{
@@ -661,7 +662,7 @@
P2M_DEBUG("set mmio %lx %lx\n", gfn, mfn_x(mfn));
rc = set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_mmio_direct,
p2m->default_access);
- p2m_unlock(p2m);
+ gfn_unlock(p2m, gfn, 0);
if ( 0 == rc )
gdprintk(XENLOG_ERR,
"set_mmio_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
@@ -681,7 +682,7 @@
if ( !paging_mode_translate(d) )
return 0;
- p2m_lock(p2m);
+ gfn_lock(p2m, gfn, 0);
mfn = p2m->get_entry(p2m, gfn, &t, &a, p2m_query, NULL);
/* Do not use mfn_valid() here as it will usually fail for MMIO pages. */
@@ -694,7 +695,7 @@
rc = set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_4K,
p2m_invalid, p2m->default_access);
out:
- p2m_unlock(p2m);
+ gfn_unlock(p2m, gfn, 0);
return rc;
}
@@ -711,7 +712,7 @@
if ( !paging_mode_translate(p2m->domain) )
return 0;
- p2m_lock(p2m);
+ gfn_lock(p2m, gfn, 0);
omfn = p2m->get_entry(p2m, gfn, &ot, &a, p2m_query, NULL);
/* At the moment we only allow p2m change if gfn has already been made
* sharable first */
@@ -723,7 +724,7 @@
P2M_DEBUG("set shared %lx %lx\n", gfn, mfn_x(mfn));
rc = set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_shared,
p2m->default_access);
- p2m_unlock(p2m);
+ gfn_unlock(p2m, gfn, 0);
if ( 0 == rc )
gdprintk(XENLOG_ERR,
"set_shared_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
@@ -759,7 +760,7 @@
mfn_t mfn;
int ret = -EBUSY;
- p2m_lock(p2m);
+ gfn_lock(p2m, gfn, 0);
mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
@@ -789,7 +790,7 @@
ret = 0;
out:
- p2m_unlock(p2m);
+ gfn_unlock(p2m, gfn, 0);
return ret;
}
@@ -821,7 +822,7 @@
struct p2m_domain *p2m = p2m_get_hostp2m(d);
int ret = -EBUSY;
- p2m_lock(p2m);
+ gfn_lock(p2m, gfn, 0);
/* Get mfn */
mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
@@ -865,7 +866,7 @@
put_page(page);
out:
- p2m_unlock(p2m);
+ gfn_unlock(p2m, gfn, 0);
return ret;
}
@@ -950,7 +951,7 @@
req.type = MEM_EVENT_TYPE_PAGING;
/* Fix p2m mapping */
- p2m_lock(p2m);
+ gfn_lock(p2m, gfn, 0);
mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
/* Allow only nominated or evicted pages to enter page-in path */
if ( p2mt == p2m_ram_paging_out || p2mt == p2m_ram_paged )
@@ -961,7 +962,7 @@
set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_in, a);
}
- p2m_unlock(p2m);
+ gfn_unlock(p2m, gfn, 0);
/* Pause domain if request came from guest and gfn has paging type */
if ( p2m_is_paging(p2mt) && v->domain == d )
@@ -1012,7 +1013,7 @@
(!access_ok(user_ptr, PAGE_SIZE)) )
return -EINVAL;
- p2m_lock(p2m);
+ gfn_lock(p2m, gfn, 0);
mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
@@ -1071,7 +1072,7 @@
ret = 0;
out:
- p2m_unlock(p2m);
+ gfn_unlock(p2m, gfn, 0);
return ret;
}
@@ -1106,7 +1107,7 @@
/* Fix p2m entry if the page was not dropped */
if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) )
{
- p2m_lock(p2m);
+ gfn_lock(p2m, rsp.gfn, 0);
mfn = p2m->get_entry(p2m, rsp.gfn, &p2mt, &a, p2m_query, NULL);
/* Allow only pages which were prepared properly, or pages which
* were nominated but not evicted */
@@ -1117,7 +1118,7 @@
p2m_ram_rw, a);
set_gpfn_from_mfn(mfn_x(mfn), rsp.gfn);
}
- p2m_unlock(p2m);
+ gfn_unlock(p2m, rsp.gfn, 0);
}
/* Unpause domain */
if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
@@ -1138,13 +1139,13 @@
p2m_access_t p2ma;
/* First, handle rx2rw conversion automatically */
- p2m_lock(p2m);
+ gfn_lock(p2m, gfn, 0);
mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, p2m_query, NULL);
if ( access_w && p2ma == p2m_access_rx2rw )
{
p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rw);
- p2m_unlock(p2m);
+ gfn_unlock(p2m, gfn, 0);
return 1;
}
else if ( p2ma == p2m_access_n2rwx )
@@ -1152,7 +1153,7 @@
ASSERT(access_w || access_r || access_x);
p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rwx);
}
- p2m_unlock(p2m);
+ gfn_unlock(p2m, gfn, 0);
/* Otherwise, check if there is a memory event listener, and send the
message along */
if ( mem_event_claim_slot(d, &d->mem_event->access) == -ENOSYS )
@@ -1171,9 +1172,9 @@
if ( p2ma != p2m_access_n2rwx )
{
/* A listener is not required, so clear the access
restrictions */
- p2m_lock(p2m);
+ gfn_lock(p2m, gfn, 0);
p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt,
p2m_access_rwx);
- p2m_unlock(p2m);
+ gfn_unlock(p2m, gfn, 0);
}
return 1;
}
@@ -1304,7 +1305,10 @@
return 0;
}
+ gfn_lock(p2m, gfn, 0);
mfn = p2m->get_entry(p2m, pfn, &t, &a, p2m_query, NULL);
+ gfn_unlock(p2m, gfn, 0);
+
if ( mfn_x(mfn) == INVALID_MFN )
return -ESRCH;
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog