[Xen-devel] [RFC PATCH v1 02/10] xen/arm: register mmio handler at runtime
From: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>
MMIO handlers are currently registered at compile time
for drivers such as vuart and vgic.
Register MMIO handlers at runtime instead,
by keeping them on a linked list.
Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>
---
xen/arch/arm/io.c | 32 +++++++++++++++++---------
xen/arch/arm/io.h | 16 +++++--------
xen/arch/arm/vgic.c | 61 ++++++++++++++++++++++++++------------------------
xen/arch/arm/vuart.c | 39 ++++++++++++++++----------------
4 files changed, 79 insertions(+), 69 deletions(-)
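
For context, a driver adopting the new interface ends up looking roughly
like the sketch below, mirroring the vgic/vuart changes in this patch.
The "vfoo" device, its base address, and the check logic are illustrative
only, not part of the patch:

    /* Hypothetical driver using the runtime-registration API. */
    #include <xen/lib.h>
    #include "io.h"

    #define VFOO_BASE 0x1c090000UL    /* hypothetical MMIO window */

    static int vfoo_mmio_check(struct vcpu *v, paddr_t addr)
    {
        return ( addr >= VFOO_BASE && addr < VFOO_BASE + PAGE_SIZE );
    }

    static int vfoo_mmio_read(struct vcpu *v, mmio_info_t *info)
    {
        /* Decode info->gpa and info->dabt, emulate the read here. */
        return 1;  /* handled */
    }

    static int vfoo_mmio_write(struct vcpu *v, mmio_info_t *info)
    {
        return 1;  /* handled */
    }

    /* No longer const: registration writes the embedded list node. */
    static struct mmio_handler vfoo_mmio_handler = {
        .check_handler = vfoo_mmio_check,
        .read_handler  = vfoo_mmio_read,
        .write_handler = vfoo_mmio_write,
    };

    static int vfoo_init(struct domain *d)
    {
        register_mmio_handler(&vfoo_mmio_handler);
        return 0;
    }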
diff --git a/xen/arch/arm/io.c b/xen/arch/arm/io.c
index a6db00b..d140b29 100644
--- a/xen/arch/arm/io.c
+++ b/xen/arch/arm/io.c
@@ -17,31 +17,41 @@
*/
#include <xen/config.h>
+#include <xen/init.h>
+#include <xen/kernel.h>
#include <xen/lib.h>
+#include <xen/spinlock.h>
#include <asm/current.h>
#include "io.h"
-static const struct mmio_handler *const mmio_handlers[] =
-{
- &vgic_distr_mmio_handler,
- &vuart_mmio_handler,
-};
-#define MMIO_HANDLER_NR ARRAY_SIZE(mmio_handlers)
+LIST_HEAD(handle_head);
+static DEFINE_SPINLOCK(handler_lock);
int handle_mmio(mmio_info_t *info)
{
struct vcpu *v = current;
- int i;
+ struct list_head *pos;
+ struct mmio_handler *mmio_handle;
- for ( i = 0; i < MMIO_HANDLER_NR; i++ )
- if ( mmio_handlers[i]->check_handler(v, info->gpa) )
+ list_for_each(pos, &handle_head) {
+ mmio_handle = list_entry(pos, struct mmio_handler, handle_list);
+ if ( mmio_handle->check_handler(v, info->gpa) )
return info->dabt.write ?
- mmio_handlers[i]->write_handler(v, info) :
- mmio_handlers[i]->read_handler(v, info);
+ mmio_handle->write_handler(v, info) :
+ mmio_handle->read_handler(v, info);
+ }
return 0;
}
+
+void register_mmio_handler(struct mmio_handler *handle)
+{
+ spin_lock(&handler_lock);
+ list_add(&handle->handle_list, &handle_head);
+ spin_unlock(&handler_lock);
+}
+
/*
* Local variables:
* mode: C
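
Two remarks on the new io.c code above. First, handle_mmio() walks the
list without taking handler_lock; that is only safe because handlers are
registered during domain construction, before any vcpu can fault. Second,
the list_for_each()/list_entry() pair can be collapsed with
list_for_each_entry() from <xen/list.h>; an equivalent traversal, assuming
no other changes:

    struct mmio_handler *mmio_handle;

    list_for_each_entry( mmio_handle, &handle_head, handle_list )
    {
        if ( mmio_handle->check_handler(v, info->gpa) )
            return info->dabt.write ? mmio_handle->write_handler(v, info)
                                    : mmio_handle->read_handler(v, info);
    }
    return 0;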
diff --git a/xen/arch/arm/io.h b/xen/arch/arm/io.h
index 8d252c0..99cd7c3 100644
--- a/xen/arch/arm/io.h
+++ b/xen/arch/arm/io.h
@@ -22,6 +22,7 @@
#include <xen/lib.h>
#include <asm/processor.h>
#include <asm/regs.h>
+#include <xen/list.h>
typedef struct
{
@@ -30,20 +31,15 @@ typedef struct
paddr_t gpa;
} mmio_info_t;
-typedef int (*mmio_read_t)(struct vcpu *v, mmio_info_t *info);
-typedef int (*mmio_write_t)(struct vcpu *v, mmio_info_t *info);
-typedef int (*mmio_check_t)(struct vcpu *v, paddr_t addr);
-
struct mmio_handler {
- mmio_check_t check_handler;
- mmio_read_t read_handler;
- mmio_write_t write_handler;
+ int (*read_handler)(struct vcpu *v, mmio_info_t *info);
+ int (*write_handler)(struct vcpu *v, mmio_info_t *info);
+ int (*check_handler)(struct vcpu *v, paddr_t addr);
+ struct list_head handle_list;
};
-extern const struct mmio_handler vgic_distr_mmio_handler;
-extern const struct mmio_handler vuart_mmio_handler;
-
extern int handle_mmio(mmio_info_t *info);
+void register_mmio_handler(struct mmio_handler *handle);
#endif
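
The handle_list member makes this the usual intrusive-list pattern: the
node lives inside the handler, and list_entry() (a container_of() wrapper)
recovers the enclosing structure from a node pointer. A standalone
userspace illustration of the same offsetof arithmetic, independent of
Xen:

    #include <stddef.h>
    #include <stdio.h>

    struct node { struct node *next; };

    struct handler {
        const char *name;
        struct node link;          /* embedded node, like handle_list */
    };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    int main(void)
    {
        struct handler h = { .name = "vgic-distr", .link = { NULL } };
        struct node *n = &h.link;
        /* Recover the handler from its embedded node. */
        printf("%s\n", container_of(n, struct handler, link)->name);
        return 0;
    }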
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 553411d..d2a13fb 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -73,34 +73,6 @@ static struct vgic_irq_rank *vgic_irq_rank(struct vcpu *v, int b, int n)
return NULL;
}
-int domain_vgic_init(struct domain *d)
-{
- int i;
-
- d->arch.vgic.ctlr = 0;
-
- /* Currently nr_lines in vgic and gic doesn't have the same meanings
- * Here nr_lines = number of SPIs
- */
- if ( d->domain_id == 0 )
- d->arch.vgic.nr_lines = gic_number_lines() - 32;
- else
- d->arch.vgic.nr_lines = 0; /* We don't need SPIs for the guest */
-
- d->arch.vgic.shared_irqs =
- xzalloc_array(struct vgic_irq_rank, DOMAIN_NR_RANKS(d));
- d->arch.vgic.pending_irqs =
- xzalloc_array(struct pending_irq, d->arch.vgic.nr_lines);
- for (i=0; i<d->arch.vgic.nr_lines; i++)
- {
- INIT_LIST_HEAD(&d->arch.vgic.pending_irqs[i].inflight);
- INIT_LIST_HEAD(&d->arch.vgic.pending_irqs[i].lr_queue);
- }
- for (i=0; i<DOMAIN_NR_RANKS(d); i++)
- spin_lock_init(&d->arch.vgic.shared_irqs[i].lock);
- return 0;
-}
-
void domain_vgic_free(struct domain *d)
{
xfree(d->arch.vgic.shared_irqs);
@@ -655,12 +627,43 @@ static int vgic_distr_mmio_check(struct vcpu *v, paddr_t addr)
return (addr >= (d->arch.vgic.dbase)) && (addr < (d->arch.vgic.dbase + PAGE_SIZE));
}
-const struct mmio_handler vgic_distr_mmio_handler = {
+static struct mmio_handler vgic_distr_mmio_handler = {
.check_handler = vgic_distr_mmio_check,
.read_handler = vgic_distr_mmio_read,
.write_handler = vgic_distr_mmio_write,
};
+int domain_vgic_init(struct domain *d)
+{
+ int i;
+
+ d->arch.vgic.ctlr = 0;
+
+ /* Currently nr_lines in vgic and gic doesn't have the same meanings
+ * Here nr_lines = number of SPIs
+ */
+ if ( d->domain_id == 0 )
+ d->arch.vgic.nr_lines = gic_number_lines() - 32;
+ else
+ d->arch.vgic.nr_lines = 0; /* We don't need SPIs for the guest */
+
+ d->arch.vgic.shared_irqs =
+ xzalloc_array(struct vgic_irq_rank, DOMAIN_NR_RANKS(d));
+ d->arch.vgic.pending_irqs =
+ xzalloc_array(struct pending_irq, d->arch.vgic.nr_lines);
+ for (i=0; i<d->arch.vgic.nr_lines; i++)
+ {
+ INIT_LIST_HEAD(&d->arch.vgic.pending_irqs[i].inflight);
+ INIT_LIST_HEAD(&d->arch.vgic.pending_irqs[i].lr_queue);
+ }
+ for (i=0; i<DOMAIN_NR_RANKS(d); i++)
+ spin_lock_init(&d->arch.vgic.shared_irqs[i].lock);
+
+ register_mmio_handler(&vgic_distr_mmio_handler);
+ return 0;
+}
+
struct pending_irq *irq_to_pending(struct vcpu *v, unsigned int irq)
{
struct pending_irq *n;
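
Note that domain_vgic_init() runs once per domain while handle_head is
global, so creating a second domain would list_add() the already-linked
node of vgic_distr_mmio_handler a second time and corrupt the list. One
possible guard, assuming the static handlers initialise handle_list with
LIST_HEAD_INIT so an unregistered node reads as empty (hypothetical, not
in this patch):

    void register_mmio_handler(struct mmio_handler *handle)
    {
        spin_lock(&handler_lock);
        /* An initialised but unlinked node points to itself. */
        if ( list_empty(&handle->handle_list) )
            list_add(&handle->handle_list, &handle_head);
        spin_unlock(&handler_lock);
    }

Making the handler list per-domain would be the more thorough fix.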
diff --git a/xen/arch/arm/vuart.c b/xen/arch/arm/vuart.c
index b9d3ced..c237d71 100644
--- a/xen/arch/arm/vuart.c
+++ b/xen/arch/arm/vuart.c
@@ -44,24 +44,6 @@
#define domain_has_vuart(d) ((d)->arch.vuart.info != NULL)
-int domain_vuart_init(struct domain *d)
-{
- ASSERT( !d->domain_id );
-
- d->arch.vuart.info = serial_vuart_info(SERHND_DTUART);
- if ( !d->arch.vuart.info )
- return 0;
-
- spin_lock_init(&d->arch.vuart.lock);
- d->arch.vuart.idx = 0;
-
- d->arch.vuart.buf = xzalloc_array(char, VUART_BUF_SIZE);
- if ( !d->arch.vuart.buf )
- return -ENOMEM;
-
- return 0;
-}
-
void domain_vuart_free(struct domain *d)
{
if ( !domain_has_vuart(d) )
@@ -133,12 +115,31 @@ static int vuart_mmio_write(struct vcpu *v, mmio_info_t *info)
return 1;
}
-const struct mmio_handler vuart_mmio_handler = {
+static struct mmio_handler vuart_mmio_handler = {
.check_handler = vuart_mmio_check,
.read_handler = vuart_mmio_read,
.write_handler = vuart_mmio_write,
};
+int domain_vuart_init(struct domain *d)
+{
+ ASSERT( !d->domain_id );
+
+ d->arch.vuart.info = serial_vuart_info(SERHND_DTUART);
+ if ( !d->arch.vuart.info )
+ return 0;
+
+ spin_lock_init(&d->arch.vuart.lock);
+ d->arch.vuart.idx = 0;
+
+ d->arch.vuart.buf = xzalloc_array(char, VUART_BUF_SIZE);
+ if ( !d->arch.vuart.buf )
+ return -ENOMEM;
+
+ register_mmio_handler(&vuart_mmio_handler);
+ return 0;
+}
+
/*
* Local variables:
* mode: C
--
1.7.9.5