[Xen-devel] [PATCH 02 of 11] x86/mm: Introduce get_page_from_gfn()
# HG changeset patch
# User Tim Deegan <tim@xxxxxxx>
# Date 1336661656 -3600
# Node ID e03806b10f0026590e7775008f24d2a96051552e
# Parent 4a99c5456e9d8aa707bbd57bb4f4af88e1d456ca
x86/mm: Introduce get_page_from_gfn().
This new function does a p2m lookup under the read lock, falling back
to the write lock only if it needs to make a change. If the GFN is
backed by RAM, it takes a refcount on the underlying page.
The following patches will convert many paths that currently use
get_gfn/put_gfn to use the new interface. That will avoid serializing
p2m accesses in the common case where no updates are needed (i.e. no
page-sharing, VM paging or other p2m trickery).
Signed-off-by: Tim Deegan <tim@xxxxxxx>
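
To illustrate the intended conversion, a caller that currently brackets its
work with get_gfn/put_gfn could change as in the sketch below. This is
illustration only, not part of the patch; demo_write_gfn_old/new and their
error handling are hypothetical, but get_gfn, put_gfn, get_page_from_gfn
and put_page are the real interfaces.

/* Before: the gfn lock is held across the whole critical section. */
static int demo_write_gfn_old(struct domain *d, unsigned long gfn)
{
    p2m_type_t t;
    mfn_t mfn = get_gfn(d, gfn, &t);   /* takes the gfn lock */
    int rc = -EINVAL;

    if ( p2m_is_ram(t) && mfn_valid(mfn) )
    {
        /* ... map and write to the page ... */
        rc = 0;
    }
    put_gfn(d, gfn);                   /* drops the gfn lock */
    return rc;
}

/* After: only a page reference is held while the page is in use,
 * so concurrent p2m lookups are no longer serialized. */
static int demo_write_gfn_new(struct domain *d, unsigned long gfn)
{
    p2m_type_t t;
    struct page_info *page = get_page_from_gfn(d, gfn, &t, P2M_ALLOC);

    if ( !page )
        return -EINVAL;                /* GFN not backed by RAM */
    /* ... map and write to the page; it can't be freed underfoot ... */
    put_page(page);
    return 0;
}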
diff -r 4a99c5456e9d -r e03806b10f00 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c Thu May 10 15:54:16 2012 +0100
+++ b/xen/arch/x86/mm/p2m.c Thu May 10 15:54:16 2012 +0100
@@ -207,6 +207,59 @@ void __put_gfn(struct p2m_domain *p2m, u
     gfn_unlock(p2m, gfn, 0);
 }
 
+/* Atomically look up a GFN and take a reference count on the backing page. */
+struct page_info *get_page_from_gfn_p2m(
+    struct domain *d, struct p2m_domain *p2m, unsigned long gfn,
+    p2m_type_t *t, p2m_access_t *a, p2m_query_t q)
+{
+    struct page_info *page = NULL;
+    p2m_access_t _a;
+    p2m_type_t _t;
+    mfn_t mfn;
+
+    /* Allow t or a to be NULL */
+    t = t ?: &_t;
+    a = a ?: &_a;
+
+    if ( likely(!p2m_locked_by_me(p2m)) )
+    {
+        /* Fast path: look up and get out */
+        p2m_read_lock(p2m);
+        mfn = __get_gfn_type_access(p2m, gfn, t, a, 0, NULL, 0);
+        if ( (p2m_is_ram(*t) || p2m_is_grant(*t))
+             && mfn_valid(mfn)
+             && !((q & P2M_UNSHARE) && p2m_is_shared(*t)) )
+        {
+            page = mfn_to_page(mfn);
+            if ( !get_page(page, d)
+                 /* Page could be shared */
+                 && !get_page(page, dom_cow) )
+                page = NULL;
+        }
+        p2m_read_unlock(p2m);
+
+        if ( page )
+            return page;
+
+        /* Error path: not a suitable GFN at all */
+        if ( !p2m_is_ram(*t) && !p2m_is_paging(*t) && !p2m_is_magic(*t) )
+            return NULL;
+    }
+
+    /* Slow path: take the write lock and do fixups */
+    mfn = get_gfn_type_access(p2m, gfn, t, a, q, NULL);
+    if ( p2m_is_ram(*t) && mfn_valid(mfn) )
+    {
+        page = mfn_to_page(mfn);
+        if ( !get_page(page, d) )
+            page = NULL;
+    }
+    put_gfn(d, gfn);
+
+    return page;
+}
+
+
 int set_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
                   unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
 {
diff -r 4a99c5456e9d -r e03806b10f00 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Thu May 10 15:54:16 2012 +0100
+++ b/xen/include/asm-x86/p2m.h Thu May 10 15:54:16 2012 +0100
@@ -377,6 +377,33 @@ static inline mfn_t get_gfn_query_unlock
     return __get_gfn_type_access(p2m_get_hostp2m(d), gfn, t, &a, 0, NULL, 0);
 }
 
+/* Atomically look up a GFN and take a reference count on the backing page.
+ * This makes sure the page doesn't get freed (or shared) underfoot,
+ * and should be used by any path that intends to write to the backing page.
+ * Returns NULL if the page is not backed by RAM.
+ * The caller is responsible for calling put_page() afterwards. */
+struct page_info *get_page_from_gfn_p2m(struct domain *d,
+                                        struct p2m_domain *p2m,
+                                        unsigned long gfn,
+                                        p2m_type_t *t, p2m_access_t *a,
+                                        p2m_query_t q);
+
+static inline struct page_info *get_page_from_gfn(
+    struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q)
+{
+    struct page_info *page;
+
+    if ( paging_mode_translate(d) )
+        return get_page_from_gfn_p2m(d, p2m_get_hostp2m(d), gfn, t, NULL, q);
+
+    /* Non-translated guests see 1-1 RAM mappings everywhere */
+    if ( t )
+        *t = p2m_ram_rw;
+    page = __mfn_to_page(gfn);
+    return get_page(page, d) ? page : NULL;
+}
+
+
 /* General conversion function from mfn to gfn */
 static inline unsigned long mfn_to_gfn(struct domain *d, mfn_t mfn)
 {
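
As the header comment says, callers own the page reference and must drop it
with put_page() when done. A hypothetical helper that zeroes a guest frame
might use the new wrapper like this (a sketch only; demo_zero_gfn is not
part of this series, and the P2M_UNSHARE flag relies on the slow path
breaking CoW sharing as described above):

/* Hypothetical: zero a guest frame through the new interface. */
static int demo_zero_gfn(struct domain *d, unsigned long gfn)
{
    p2m_type_t t;
    void *va;
    struct page_info *page = get_page_from_gfn(d, gfn, &t, P2M_UNSHARE);

    if ( !page )
        return -EINVAL;          /* MMIO, paged-out or empty GFN */

    va = __map_domain_page(page);
    memset(va, 0, PAGE_SIZE);
    unmap_domain_page(va);

    put_page(page);              /* release the reference taken above */
    return 0;
}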