[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v3 05/11] arm: shared_info page allocation and mapping



Allocate the shared_info page at domain creation.

Implement arch_memory_op, only for XENMEM_add_to_physmap with space ==
XENMAPSPACE_shared_info, so that the guest can map the shared_info page.

Changes in v3:

- replaced MEMF_bits(32) with MEMF_bits(64) in the shared_info allocation;

- do not alloc the shared_info page for the idle domain;

- define CONFIG_PAGING_ASSISTANCE;

- adjust the API to match what the common code expects;

- implement a dummy guest_physmap_remove_page.


Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
 xen/arch/arm/domain.c        |   11 +++++
 xen/arch/arm/mm.c            |   98 ++++++++++++++++++++++++++++++++++++++++--
 xen/arch/arm/p2m.c           |   24 ++++++++++-
 xen/include/asm-arm/config.h |    2 +
 xen/include/asm-arm/mm.h     |    4 ++
 xen/include/asm-arm/p2m.h    |    9 ++++
 xen/include/asm-arm/paging.h |    3 +
 7 files changed, 146 insertions(+), 5 deletions(-)

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 0b55934..fca3d60 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -235,6 +235,17 @@ int arch_domain_create(struct domain *d, unsigned int 
domcr_flags)
     if ( (rc = p2m_init(d)) != 0 )
         goto fail;
 
+    if ( !is_idle_domain(d) )
+    {
+        rc = -ENOMEM;
+        if ( (d->shared_info = alloc_xenheap_pages(0, MEMF_bits(64))) == NULL )
+            goto fail;
+
+        clear_page(d->shared_info);
+        share_xen_page_with_guest(
+                virt_to_page(d->shared_info), d, XENSHARE_writable);
+    }
+
     d->max_vcpus = 8;
 
     if ( (rc = domain_vgic_init(d)) != 0 )
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index a0f39eb..cf3de8a 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -25,8 +25,11 @@
 #include <xen/mm.h>
 #include <xen/preempt.h>
 #include <xen/errno.h>
+#include <xen/guest_access.h>
 #include <asm/page.h>
 #include <asm/current.h>
+#include <public/memory.h>
+#include <xen/sched.h>
 
 struct domain *dom_xen, *dom_io;
 
@@ -323,17 +326,104 @@ void arch_dump_shared_mem_info(void)
 {
 }
 
-long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
+/*
+ * Stub: page donation is not implemented on ARM; the ASSERT(0) makes
+ * any accidental call fail loudly in debug builds.
+ */
+int donate_page(struct domain *d, struct page_info *page, unsigned int 
memflags)
 {
+    ASSERT(0);
     return -ENOSYS;
 }
 
-int donate_page(struct domain *d, struct page_info *page, unsigned int 
memflags)
+/*
+ * Give domain d a reference to a Xen-heap page so the guest can map it:
+ * pin the page's type (writable unless 'readonly'), transfer ownership
+ * to d, and account it on d's xenpage_list while d is alive.
+ */
+void share_xen_page_with_guest(struct page_info *page,
+                          struct domain *d, int readonly)
+{
+    /* Already shared with this domain: nothing to do. */
+    if ( page_get_owner(page) == d )
+        return;
+
+    spin_lock(&d->page_alloc_lock);
+
+    /* The incremented type count pins as writable or read-only. */
+    page->u.inuse.type_info  = (readonly ? PGT_none : PGT_writable_page);
+    page->u.inuse.type_info |= PGT_validated | 1;
+
+    page_set_owner(page, d);
+    wmb(); /* install valid domain ptr before updating refcnt. */
+    ASSERT((page->count_info & ~PGC_xen_heap) == 0);
+
+    /* Only add to the allocation list if the domain isn't dying. */
+    if ( !d->is_dying )
+    {
+        page->count_info |= PGC_allocated | 1;
+        /* First xenheap page for d: take a liveness reference on d. */
+        if ( unlikely(d->xenheap_pages++ == 0) )
+            get_knownalive_domain(d);
+        page_list_add_tail(page, &d->xenpage_list);
+    }
+
+    spin_unlock(&d->page_alloc_lock);
+}
+
+/*
+ * Handle one XENMEM_add_to_physmap request for domain d.  Only
+ * XENMAPSPACE_shared_info with idx 0 is recognised: the domain's
+ * shared_info frame is mapped at guest frame xatp->gpfn.
+ *
+ * NOTE(review): for space == XENMAPSPACE_shared_info with idx != 0,
+ * mfn stays 0 and the code falls through to map MFN 0 rather than
+ * returning an error -- confirm this is intended.
+ */
+static int xenmem_add_to_physmap_once(
+    struct domain *d,
+    const struct xen_add_to_physmap *xatp)
+{
+    unsigned long mfn = 0;
+    int rc;
+
+    switch ( xatp->space )
+    {
+        case XENMAPSPACE_shared_info:
+            if ( xatp->idx == 0 )
+                mfn = virt_to_mfn(d->shared_info);
+            break;
+        default:
+            return -ENOSYS;
+    }
+
+    domain_lock(d);
+
+    /* Map at new location. */
+    rc = guest_physmap_add_page(d, xatp->gpfn, mfn, 0);
+
+    domain_unlock(d);
+
+    return rc;
+}
+
+/* Thin wrapper: currently forwards a single request unchanged. */
+static int xenmem_add_to_physmap(struct domain *d,
+                                 struct xen_add_to_physmap *xatp)
+{
+    return xenmem_add_to_physmap_once(d, xatp);
 }
 
+/*
+ * ARM arch-specific memory hypercall dispatcher.  Only
+ * XENMEM_add_to_physmap is implemented; all other ops get -ENOSYS.
+ */
+long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
+{
+    int rc;
+
+    switch ( op )
+    {
+    case XENMEM_add_to_physmap:
+    {
+        struct xen_add_to_physmap xatp;
+        struct domain *d;
+
+        /* Copy the request from guest memory. */
+        if ( copy_from_guest(&xatp, arg, 1) )
+            return -EFAULT;
+
+        /* Take an RCU reference on the target domain (self or privileged). */
+        rc = rcu_lock_target_domain_by_id(xatp.domid, &d);
+        if ( rc != 0 )
+            return rc;
+
+        rc = xenmem_add_to_physmap(d, &xatp);
+
+        rcu_unlock_domain(d);
+
+        return rc;
+    }
+
+    default:
+        return -ENOSYS;
+    }
+
+    return 0; /* not reached: both switch arms return above */
+}
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 14614fd..4c94ef0 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -118,7 +118,12 @@ static int create_p2m_entries(struct domain *d,
         }
         /* else: third already valid */
 
-        BUG_ON(third[third_table_offset(addr)].p2m.valid);
+        if ( third[third_table_offset(addr)].p2m.valid )
+        {
+            /* p2m entry already present */
+            free_domheap_page(
+                    mfn_to_page(third[third_table_offset(addr)].p2m.base));
+        }
 
         /* Allocate a new RAM page and attach */
         if (alloc)
@@ -172,6 +177,23 @@ int map_mmio_regions(struct domain *d,
     return create_p2m_entries(d, 0, start_gaddr, end_gaddr, maddr);
 }
 
+/*
+ * Map machine frame mfn at guest frame gpfn by creating p2m entries
+ * for the address range [gpfn, gpfn + 2^page_order) << PAGE_SHIFT.
+ * The second argument (0) selects "attach existing maddr" rather than
+ * allocating fresh RAM in create_p2m_entries().
+ *
+ * NOTE(review): gpfn << PAGE_SHIFT is an unsigned long shift before
+ * conversion to the paddr_t parameters -- on a 32-bit build this would
+ * truncate addresses at or above 4GB; confirm the intended widths.
+ */
+int guest_physmap_add_page(struct domain *d,
+                           unsigned long gpfn,
+                           unsigned long mfn,
+                           unsigned int page_order)
+{
+    return create_p2m_entries(d, 0, gpfn << PAGE_SHIFT,
+                              (gpfn + (1<<page_order)) << PAGE_SHIFT,
+                              mfn << PAGE_SHIFT);
+}
+
+/*
+ * Dummy implementation: p2m removal is not supported yet, so any call
+ * trips the ASSERT in debug builds.
+ */
+void guest_physmap_remove_page(struct domain *d,
+                               unsigned long gpfn,
+                               unsigned long mfn, unsigned int page_order)
+{
+    ASSERT(0);
+}
+
 int p2m_alloc_table(struct domain *d)
 {
     struct p2m_domain *p2m = &d->arch.p2m;
diff --git a/xen/include/asm-arm/config.h b/xen/include/asm-arm/config.h
index c2ab0a2..63fb48e 100644
--- a/xen/include/asm-arm/config.h
+++ b/xen/include/asm-arm/config.h
@@ -7,6 +7,8 @@
 #ifndef __ARM_CONFIG_H__
 #define __ARM_CONFIG_H__
 
+#define CONFIG_PAGING_ASSISTANCE 1
+
 #define CONFIG_PAGING_LEVELS 3
 
 #define CONFIG_ARM 1
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index bfc0f76..56ab9415 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -78,6 +78,10 @@ struct page_info
 #define _PGT_pinned       PG_shift(5)
 #define PGT_pinned        PG_mask(1, 5)
 
+ /* Has this page been validated for use as its current type? */
+#define _PGT_validated    PG_shift(6)
+#define PGT_validated     PG_mask(1, 6)
+
  /* Count of uses of this frame as its current type. */
 #define PGT_count_width   PG_shift(9)
 #define PGT_count_mask    ((1UL<<PGT_count_width)-1)
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index aec52f7..d8e8dc8 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -40,6 +40,15 @@ int p2m_populate_ram(struct domain *d, paddr_t start, 
paddr_t end);
 int map_mmio_regions(struct domain *d, paddr_t start_gaddr,
                      paddr_t end_gaddr, paddr_t maddr);
 
+/* Untyped version for RAM only, for compatibility */
+int guest_physmap_add_page(struct domain *d,
+                           unsigned long gfn,
+                           unsigned long mfn,
+                           unsigned int page_order);
+void guest_physmap_remove_page(struct domain *d,
+                               unsigned long gpfn,
+                               unsigned long mfn, unsigned int page_order);
+
 unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn);
 
 /*
diff --git a/xen/include/asm-arm/paging.h b/xen/include/asm-arm/paging.h
index 4dc340f..3d7dd95 100644
--- a/xen/include/asm-arm/paging.h
+++ b/xen/include/asm-arm/paging.h
@@ -1,6 +1,9 @@
 #ifndef _XEN_PAGING_H
 #define _XEN_PAGING_H
 
+#define paging_mode_translate(d)              (0)
+#define paging_mode_external(d)               (0)
+
 #endif /* XEN_PAGING_H */
 
 /*
-- 
1.7.2.5


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.