
[Xen-devel] [PATCH v4 02/16] xen/arm: make mmio handlers domain specific



From: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>

Register MMIO handlers at runtime and make them domain specific.
Each handler is now registered with an address range, so the
per-handler check_handler callbacks become redundant and are removed.
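
For illustration only (not part of this patch), a minimal sketch of how
an emulated device would register a handler with the new interface; the
"vfoo" device, its base address/size and domain_vfoo_init() below are
hypothetical:

    #include <xen/sched.h>
    #include <asm/mmio.h>

    /* Example guest-physical range; values assumed for this sketch. */
    #define VFOO_BASE 0x2c002000UL
    #define VFOO_SIZE 0x1000UL

    /* Handlers no longer supply a check_handler callback; the common
     * io.c code matches the faulting address against the
     * [addr, addr + size) range given at registration time.
     */
    static int vfoo_mmio_read(struct vcpu *v, mmio_info_t *info)
    {
        /* Decode info->gpa / info->dabt and emulate the register read. */
        return 1; /* handled */
    }

    static int vfoo_mmio_write(struct vcpu *v, mmio_info_t *info)
    {
        /* Decode info->gpa / info->dabt and emulate the register write. */
        return 1; /* handled */
    }

    static const struct mmio_handler_ops vfoo_mmio_handler = {
        .read_handler  = vfoo_mmio_read,
        .write_handler = vfoo_mmio_write,
    };

    int domain_vfoo_init(struct domain *d)
    {
        /* Runs after domain_io_init(d), e.g. from arch_domain_create(). */
        register_mmio_handler(d, &vfoo_mmio_handler, VFOO_BASE, VFOO_SIZE);
        return 0;
    }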

Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>
---
 xen/arch/arm/domain.c        |    3 ++
 xen/arch/arm/io.c            |   56 +++++++++++++++++++++++-------
 xen/arch/arm/vgic.c          |   79 ++++++++++++++++++------------------------
 xen/arch/arm/vuart.c         |   51 ++++++++++++---------------
 xen/include/asm-arm/domain.h |    2 ++
 xen/include/asm-arm/mmio.h   |   22 +++++++++---
 6 files changed, 123 insertions(+), 90 deletions(-)

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 33141e3..40acfb3 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -508,6 +508,9 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags)
     share_xen_page_with_guest(
         virt_to_page(d->shared_info), d, XENSHARE_writable);
 
+    if ( (rc = domain_io_init(d)) != 0 )
+        goto fail;
+
     if ( (rc = p2m_alloc_table(d)) != 0 )
         goto fail;
 
diff --git a/xen/arch/arm/io.c b/xen/arch/arm/io.c
index ada1918..220488a 100644
--- a/xen/arch/arm/io.c
+++ b/xen/arch/arm/io.c
@@ -1,5 +1,5 @@
 /*
- * xen/arch/arm/io.h
+ * xen/arch/arm/io.c
  *
  * ARM I/O handlers
  *
@@ -18,29 +18,61 @@
 
 #include <xen/config.h>
 #include <xen/lib.h>
+#include <xen/spinlock.h>
+#include <xen/sched.h>
 #include <asm/current.h>
 #include <asm/mmio.h>
 
-static const struct mmio_handler *const mmio_handlers[] =
-{
-    &vgic_distr_mmio_handler,
-    &vuart_mmio_handler,
-};
-#define MMIO_HANDLER_NR ARRAY_SIZE(mmio_handlers)
-
 int handle_mmio(mmio_info_t *info)
 {
     struct vcpu *v = current;
     int i;
+    struct mmio_handler *mmio_handler;
+    struct io_handler *io_handlers = &v->domain->arch.io_handlers;
 
-    for ( i = 0; i < MMIO_HANDLER_NR; i++ )
-        if ( mmio_handlers[i]->check_handler(v, info->gpa) )
+    for ( i = 0; i < io_handlers->num_entries; i++ )
+    {
+        mmio_handler = &io_handlers->mmio_handlers[i];
+
+        if ( (info->gpa >= mmio_handler->addr) &&
+             (info->gpa < (mmio_handler->addr + mmio_handler->size)) )
+        {
             return info->dabt.write ?
-                mmio_handlers[i]->write_handler(v, info) :
-                mmio_handlers[i]->read_handler(v, info);
+                mmio_handler->mmio_handler_ops->write_handler(v, info) :
+                mmio_handler->mmio_handler_ops->read_handler(v, info);
+        }
+    }
 
     return 0;
 }
+
+void register_mmio_handler(struct domain *d,
+                           const struct mmio_handler_ops *handle,
+                           paddr_t addr, paddr_t size)
+{
+    struct io_handler *handler = &d->arch.io_handlers;
+
+    spin_lock(&handler->lock);
+
+    BUG_ON(handler->num_entries >= MAX_IO_HANDLER);
+
+    handler->mmio_handlers[handler->num_entries].mmio_handler_ops = handle;
+    handler->mmio_handlers[handler->num_entries].addr = addr;
+    handler->mmio_handlers[handler->num_entries].size = size;
+    dsb(sy); /* make the new entry visible before advertising it */
+    handler->num_entries++;
+
+    spin_unlock(&handler->lock);
+}
+
+int domain_io_init(struct domain *d)
+{
+    spin_lock_init(&d->arch.io_handlers.lock);
+    d->arch.io_handlers.num_entries = 0;
+
+    return 0;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 4962e70..151ec3e 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -73,43 +73,6 @@ static struct vgic_irq_rank *vgic_irq_rank(struct vcpu *v, int b, int n)
         return NULL;
 }
 
-int domain_vgic_init(struct domain *d)
-{
-    int i;
-
-    d->arch.vgic.ctlr = 0;
-
-    /* Currently nr_lines in vgic and gic doesn't have the same meanings
-     * Here nr_lines = number of SPIs
-     */
-    if ( is_hardware_domain(d) )
-        d->arch.vgic.nr_lines = gic_number_lines() - 32;
-    else
-        d->arch.vgic.nr_lines = 0; /* We don't need SPIs for the guest */
-
-    d->arch.vgic.shared_irqs =
-        xzalloc_array(struct vgic_irq_rank, DOMAIN_NR_RANKS(d));
-    if ( d->arch.vgic.shared_irqs == NULL )
-        return -ENOMEM;
-
-    d->arch.vgic.pending_irqs =
-        xzalloc_array(struct pending_irq, d->arch.vgic.nr_lines);
-    if ( d->arch.vgic.pending_irqs == NULL )
-    {
-        xfree(d->arch.vgic.shared_irqs);
-        return -ENOMEM;
-    }
-
-    for (i=0; i<d->arch.vgic.nr_lines; i++)
-    {
-        INIT_LIST_HEAD(&d->arch.vgic.pending_irqs[i].inflight);
-        INIT_LIST_HEAD(&d->arch.vgic.pending_irqs[i].lr_queue);
-    }
-    for (i=0; i<DOMAIN_NR_RANKS(d); i++)
-        spin_lock_init(&d->arch.vgic.shared_irqs[i].lock);
-    return 0;
-}
-
 void domain_vgic_free(struct domain *d)
 {
     xfree(d->arch.vgic.shared_irqs);
@@ -676,15 +639,7 @@ write_ignore:
     return 1;
 }
 
-static int vgic_distr_mmio_check(struct vcpu *v, paddr_t addr)
-{
-    struct domain *d = v->domain;
-
-    return (addr >= (d->arch.vgic.dbase)) && (addr < (d->arch.vgic.dbase + PAGE_SIZE));
-}
-
-const struct mmio_handler vgic_distr_mmio_handler = {
-    .check_handler = vgic_distr_mmio_check,
+const struct mmio_handler_ops vgic_distr_mmio_handler = {
     .read_handler  = vgic_distr_mmio_read,
     .write_handler = vgic_distr_mmio_write,
 };
@@ -766,6 +721,47 @@ out:
         smp_send_event_check_mask(cpumask_of(v->processor));
 }
 
+int domain_vgic_init(struct domain *d)
+{
+    int i;
+
+    d->arch.vgic.ctlr = 0;
+
+    /* Currently nr_lines in vgic and gic doesn't have the same meanings
+     * Here nr_lines = number of SPIs
+     */
+    if ( is_hardware_domain(d) )
+        d->arch.vgic.nr_lines = gic_number_lines() - 32;
+    else
+        d->arch.vgic.nr_lines = 0; /* We don't need SPIs for the guest */
+
+    d->arch.vgic.shared_irqs =
+        xzalloc_array(struct vgic_irq_rank, DOMAIN_NR_RANKS(d));
+    if ( d->arch.vgic.shared_irqs == NULL )
+        return -ENOMEM;
+
+    d->arch.vgic.pending_irqs =
+        xzalloc_array(struct pending_irq, d->arch.vgic.nr_lines);
+    if ( d->arch.vgic.pending_irqs == NULL )
+    {
+        xfree(d->arch.vgic.shared_irqs);
+        return -ENOMEM;
+    }
+
+    for (i=0; i<d->arch.vgic.nr_lines; i++)
+    {
+        INIT_LIST_HEAD(&d->arch.vgic.pending_irqs[i].inflight);
+        INIT_LIST_HEAD(&d->arch.vgic.pending_irqs[i].lr_queue);
+    }
+    for (i=0; i<DOMAIN_NR_RANKS(d); i++)
+        spin_lock_init(&d->arch.vgic.shared_irqs[i].lock);
+
+    register_mmio_handler(d, &vgic_distr_mmio_handler,
+                          d->arch.vgic.dbase, PAGE_SIZE);
+
+    return 0;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/arm/vuart.c b/xen/arch/arm/vuart.c
index 953cd46..52f3259 100644
--- a/xen/arch/arm/vuart.c
+++ b/xen/arch/arm/vuart.c
@@ -44,24 +44,6 @@
 
 #define domain_has_vuart(d) ((d)->arch.vuart.info != NULL)
 
-int domain_vuart_init(struct domain *d)
-{
-    ASSERT( is_hardware_domain(d) );
-
-    d->arch.vuart.info = serial_vuart_info(SERHND_DTUART);
-    if ( !d->arch.vuart.info )
-        return 0;
-
-    spin_lock_init(&d->arch.vuart.lock);
-    d->arch.vuart.idx = 0;
-
-    d->arch.vuart.buf = xzalloc_array(char, VUART_BUF_SIZE);
-    if ( !d->arch.vuart.buf )
-        return -ENOMEM;
-
-    return 0;
-}
-
 void domain_vuart_free(struct domain *d)
 {
     if ( !domain_has_vuart(d) )
@@ -92,14 +74,6 @@ static void vuart_print_char(struct vcpu *v, char c)
     spin_unlock(&uart->lock);
 }
 
-static int vuart_mmio_check(struct vcpu *v, paddr_t addr)
-{
-    const struct vuart_info *info = v->domain->arch.vuart.info;
-
-    return (domain_has_vuart(v->domain) && addr >= info->base_addr &&
-            addr <= (info->base_addr + info->size));
-}
-
 static int vuart_mmio_read(struct vcpu *v, mmio_info_t *info)
 {
     struct domain *d = v->domain;
@@ -133,12 +107,33 @@ static int vuart_mmio_write(struct vcpu *v, mmio_info_t *info)
     return 1;
 }
 
-const struct mmio_handler vuart_mmio_handler = {
-    .check_handler = vuart_mmio_check,
+const struct mmio_handler_ops vuart_mmio_handler = {
     .read_handler  = vuart_mmio_read,
     .write_handler = vuart_mmio_write,
 };
 
+int domain_vuart_init(struct domain *d)
+{
+    ASSERT( is_hardware_domain(d) );
+
+    d->arch.vuart.info = serial_vuart_info(SERHND_DTUART);
+    if ( !d->arch.vuart.info )
+        return 0;
+
+    spin_lock_init(&d->arch.vuart.lock);
+    d->arch.vuart.idx = 0;
+
+    d->arch.vuart.buf = xzalloc_array(char, VUART_BUF_SIZE);
+    if ( !d->arch.vuart.buf )
+        return -ENOMEM;
+
+    register_mmio_handler(d, &vuart_mmio_handler,
+                          d->arch.vuart.info->base_addr,
+                          d->arch.vuart.info->size);
+
+    return 0;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index f47f928..61a498f 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -7,6 +7,7 @@
 #include <asm/page.h>
 #include <asm/p2m.h>
 #include <asm/vfp.h>
+#include <asm/mmio.h>
 #include <public/hvm/params.h>
 #include <xen/serial.h>
 
@@ -117,6 +118,7 @@ struct arch_domain
     struct hvm_domain hvm_domain;
     xen_pfn_t *grant_table_gpfn;
 
+    struct io_handler io_handlers;
     /* Continuable domain_relinquish_resources(). */
     enum {
         RELMEM_not_started,
diff --git a/xen/include/asm-arm/mmio.h b/xen/include/asm-arm/mmio.h
index 5870985..0160f09 100644
--- a/xen/include/asm-arm/mmio.h
+++ b/xen/include/asm-arm/mmio.h
@@ -23,6 +23,8 @@
 #include <asm/processor.h>
 #include <asm/regs.h>
 
+#define MAX_IO_HANDLER  16
+
 typedef struct
 {
     struct hsr_dabt dabt;
@@ -34,16 +36,28 @@ typedef int (*mmio_read_t)(struct vcpu *v, mmio_info_t *info);
 typedef int (*mmio_write_t)(struct vcpu *v, mmio_info_t *info);
 typedef int (*mmio_check_t)(struct vcpu *v, paddr_t addr);
 
-struct mmio_handler {
-    mmio_check_t check_handler;
+struct mmio_handler_ops {
     mmio_read_t read_handler;
     mmio_write_t write_handler;
 };
 
-extern const struct mmio_handler vgic_distr_mmio_handler;
-extern const struct mmio_handler vuart_mmio_handler;
+struct mmio_handler {
+    paddr_t addr;
+    paddr_t size;
+    const struct mmio_handler_ops *mmio_handler_ops;
+};
+
+struct io_handler {
+    int num_entries;
+    spinlock_t lock;
+    struct mmio_handler mmio_handlers[MAX_IO_HANDLER];
+};
 
 extern int handle_mmio(mmio_info_t *info);
+void register_mmio_handler(struct domain *d,
+                           const struct mmio_handler_ops *handle,
+                           paddr_t addr, paddr_t size);
+int domain_io_init(struct domain *d);
 
 #endif  /* __ASM_ARM_MMIO_H__ */
 
-- 
1.7.9.5




 

