[Xen-devel] [RFC v01 3/3] arm: omap: cleanup iopte allocations
Each iopte allocation requires 4 KB of memory.
All allocations made during the previous MMU reconfiguration
must be freed before a new reconfiguration cycle starts.
Change-Id: I6db69a400cdba1170b43d9dc68d0817db77cbf9c
Signed-off-by: Andrii Tseglytskyi <andrii.tseglytskyi@xxxxxxxxxxxxxxx>
---
xen/arch/arm/omap_iommu.c | 35 +++++++++++++++++++++++++++++++++++
1 file changed, 35 insertions(+)
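Note: the patch keeps one tracking node per second-level table on a per-MMU list
(mmu->alloc_list) and drains that list at the start of every pagetable translation.
As a minimal sketch of the same track-and-free pattern, the standalone C program
below uses malloc/free and a hand-rolled singly linked list in place of
xzalloc_bytes/xfree and Xen's <xen/list.h>; the pt_alloc_node, pt_track_alloc and
pt_cleanup names and the PT_PAGE_SIZE constant are illustrative only and do not
appear in the patch.

/*
 * Illustrative userspace sketch (not part of the patch): track every
 * page-sized allocation on a list so that all of them can be freed
 * before the next reconfiguration cycle.
 */
#include <stdio.h>
#include <stdlib.h>

#define PT_PAGE_SIZE 4096               /* each second-level table costs 4 KB */

struct pt_alloc_node {
    void *vptr;                         /* the tracked allocation */
    struct pt_alloc_node *next;         /* next tracked allocation */
};

static struct pt_alloc_node *alloc_list;    /* head of tracked allocations */

/* Allocate one page-table page and remember it for later cleanup. */
static void *pt_track_alloc(void)
{
    struct pt_alloc_node *node = calloc(1, sizeof(*node));

    if (!node)
        return NULL;

    node->vptr = calloc(1, PT_PAGE_SIZE);
    if (!node->vptr) {
        free(node);
        return NULL;
    }

    node->next = alloc_list;
    alloc_list = node;
    return node->vptr;
}

/* Free everything recorded since the last cleanup, emptying the list. */
static void pt_cleanup(void)
{
    while (alloc_list) {
        struct pt_alloc_node *node = alloc_list;

        alloc_list = node->next;
        free(node->vptr);
        free(node);
    }
}

int main(void)
{
    int i;

    /* first "reconfiguration": allocate a few second-level tables */
    for (i = 0; i < 3; i++)
        pt_track_alloc();

    /* before the next cycle, drop everything from the previous one */
    pt_cleanup();
    printf("list empty after cleanup: %s\n", alloc_list ? "no" : "yes");
    return 0;
}

In the patch itself the same idea is expressed with Xen's doubly linked list
helpers, and list_for_each_entry_safe() is used in the cleanup loop because
entries are deleted while iterating.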
diff --git a/xen/arch/arm/omap_iommu.c b/xen/arch/arm/omap_iommu.c
index 7ec03a2..a5ad3ac 100644
--- a/xen/arch/arm/omap_iommu.c
+++ b/xen/arch/arm/omap_iommu.c
@@ -93,12 +93,18 @@
#define iopte_is_large(x) (((x) & 3) == IOPTE_LARGE)
#define iopte_offset(x) ((x) & IOPTE_SMALL_MASK)
+struct mmu_alloc_node {
+ u32 *vptr;
+ struct list_head node;
+};
+
struct mmu_info {
const char *name;
paddr_t mem_start;
u32 mem_size;
u32 *pagetable;
void __iomem *mem_map;
+ struct list_head alloc_list;
};
static struct mmu_info omap_ipu_mmu = {
@@ -222,8 +228,15 @@ static u32 mmu_translate_pgentry(struct domain *dom, u32 iopgd, u32 da, u32 mask
static u32 mmu_iopte_alloc(struct mmu_info *mmu, struct domain *dom, u32 iopgd, u32 sect_num)
{
u32 *iopte = NULL;
+ struct mmu_alloc_node *alloc_node;
u32 i;
+ alloc_node = xzalloc_bytes(sizeof(struct mmu_alloc_node));
+ if (!alloc_node) {
+ printk("%s Fail to alloc vptr node\n", mmu->name);
+ return 0;
+ }
+
iopte = xzalloc_bytes(PAGE_SIZE);
if (!iopte) {
printk("%s Fail to alloc 2nd level table\n", mmu->name);
@@ -238,10 +251,27 @@ static u32 mmu_iopte_alloc(struct mmu_info *mmu, struct domain *dom, u32 iopgd,
iopte[i] = vaddr | IOPTE_SMALL;
}
+ /* store pointer for following cleanup */
+ alloc_node->vptr = iopte;
+ list_add(&alloc_node->node, &mmu->alloc_list);
+
flush_xen_dcache_va_range(iopte, PAGE_SIZE);
return __pa(iopte) | IOPGD_TABLE;
}
+static void mmu_cleanup_pagetable(struct mmu_info *mmu)
+{
+ struct mmu_alloc_node *mmu_alloc, *tmp;
+
+ ASSERT(mmu);
+
+ list_for_each_entry_safe(mmu_alloc, tmp, &mmu->alloc_list, node) {
+ xfree(mmu_alloc->vptr);
+ list_del(&mmu_alloc->node);
+ xfree(mmu_alloc);
+ }
+}
+
/*
* on boot table is empty
*/
@@ -254,6 +284,9 @@ static int mmu_translate_pagetable(struct domain *dom, struct mmu_info *mmu)
ASSERT(dom);
ASSERT(mmu);
+ /* free all previous allocations */
+ mmu_cleanup_pagetable(mmu);
+
/* copy pagetable from domain to xen */
res = mmu_copy_pagetable(mmu);
if (res) {
@@ -432,6 +465,8 @@ static int mmu_init(struct mmu_info *mmu, u32 data)
printk("%s: %s private pagetable %lu bytes\n",
__func__, mmu->name, IOPGD_TABLE_SIZE);
+ INIT_LIST_HEAD(&mmu->alloc_list);
+
return 0;
}
--
1.7.9.5