[Xen-devel] [PATCH 1/2] IOMMU/MMU: Adjust top level functions for VT-d Device-TLB flush error.
Current code would panic() when a VT-d Device-TLB flush timed out. The
panic() is going to be eliminated, so we must check for all kinds of
errors and propagate them all the way up the call trees.
Signed-off-by: Quan Xu <quan.xu@xxxxxxxxx>
CC: Jan Beulich <jbeulich@xxxxxxxx>
CC: Liu Jinsong <jinsong.liu@xxxxxxxxxxxxxxx>
CC: Keir Fraser <keir@xxxxxxx>
CC: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CC: Jun Nakajima <jun.nakajima@xxxxxxxxx>
CC: Kevin Tian <kevin.tian@xxxxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
CC: Feng Wu <feng.wu@xxxxxxxxx>
CC: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
---
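Note (illustration only, not part of the commit): all of the hunks below follow the
same error-propagation pattern. The standalone C sketch here shows that pattern in
miniature; map_one(), unmap_one() and map_range() are hypothetical stand-ins for
iommu_map_page()/iommu_unmap_page() and their callers, not actual Xen functions.

    /*
     * Minimal sketch of the error-propagation pattern this series introduces:
     * instead of panicking on a flush/map failure, each layer returns an error
     * code, rolls back whatever partial work it has done, and lets the caller
     * decide what to do next.
     */
    #include <errno.h>
    #include <stdio.h>

    static int map_one(unsigned long gfn)      /* stand-in for iommu_map_page() */
    {
        return (gfn == 3) ? -ETIMEDOUT : 0;    /* pretend gfn 3 times out */
    }

    static void unmap_one(unsigned long gfn)   /* stand-in for iommu_unmap_page() */
    {
        printf("unmapped gfn %lu\n", gfn);
    }

    static int map_range(unsigned long gfn, unsigned int count)
    {
        unsigned int i;
        int rc = 0;

        for ( i = 0; i < count; i++ )
        {
            rc = map_one(gfn + i);
            if ( rc )
            {
                /* Roll back the pages already mapped, then report the error. */
                while ( i-- > 0 )
                    unmap_one(gfn + i);
                break;
            }
        }

        return rc;                             /* propagated up the call tree */
    }

    int main(void)
    {
        int rc = map_range(0, 8);

        if ( rc )
            fprintf(stderr, "map_range failed: %d\n", rc);

        return rc ? 1 : 0;
    }

This mirrors the p2m-ept.c and p2m-pt.c hunks, where a failed iommu_map_page() in a
loop unwinds the mappings made so far before the error is returned to the caller.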
xen/arch/x86/acpi/power.c | 14 +++++++++++++-
xen/arch/x86/mm.c | 13 ++++++++-----
xen/arch/x86/mm/p2m-ept.c | 10 +++++++++-
xen/arch/x86/mm/p2m-pt.c | 12 ++++++++++--
xen/common/grant_table.c | 5 +++--
xen/common/memory.c | 5 +++--
xen/drivers/passthrough/iommu.c | 16 +++++++++++-----
xen/drivers/passthrough/vtd/x86/vtd.c | 7 +++++--
xen/drivers/passthrough/x86/iommu.c | 6 +++++-
xen/include/xen/iommu.h | 6 +++---
10 files changed, 70 insertions(+), 24 deletions(-)
diff --git a/xen/arch/x86/acpi/power.c b/xen/arch/x86/acpi/power.c
index 2885e31..50edf3f 100644
--- a/xen/arch/x86/acpi/power.c
+++ b/xen/arch/x86/acpi/power.c
@@ -45,6 +45,8 @@ void do_suspend_lowlevel(void);
static int device_power_down(void)
{
+ int err;
+
console_suspend();
time_suspend();
@@ -53,11 +55,21 @@ static int device_power_down(void)
ioapic_suspend();
- iommu_suspend();
+ err = iommu_suspend();
+ if ( err )
+ goto iommu_suspend_error;
lapic_suspend();
return 0;
+
+iommu_suspend_error:
+ ioapic_resume();
+ i8259A_resume();
+ time_resume();
+ console_resume();
+
+ return err;
}
static void device_power_up(void)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index c997b53..526548e 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2467,7 +2467,7 @@ static int __get_page_type(struct page_info *page, unsigned long type,
int preemptible)
{
unsigned long nx, x, y = page->u.inuse.type_info;
- int rc = 0;
+ int rc = 0, ret = 0;
ASSERT(!(type & ~(PGT_type_mask | PGT_pae_xen_l2)));
@@ -2578,11 +2578,11 @@ static int __get_page_type(struct page_info *page, unsigned long type,
if ( d && is_pv_domain(d) && unlikely(need_iommu(d)) )
{
if ( (x & PGT_type_mask) == PGT_writable_page )
- iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page)));
+ ret = iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page)));
else if ( type == PGT_writable_page )
- iommu_map_page(d, mfn_to_gmfn(d, page_to_mfn(page)),
- page_to_mfn(page),
- IOMMUF_readable|IOMMUF_writable);
+ ret = iommu_map_page(d, mfn_to_gmfn(d, page_to_mfn(page)),
+ page_to_mfn(page),
+ IOMMUF_readable|IOMMUF_writable);
}
}
@@ -2599,6 +2599,9 @@ static int __get_page_type(struct page_info *page, unsigned long type,
if ( (x & PGT_partial) && !(nx & PGT_partial) )
put_page(page);
+ if ( !rc )
+ rc = ret;
+
return rc;
}
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 3cb6868..f9bcce7 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -830,7 +830,15 @@ out:
{
if ( iommu_flags )
for ( i = 0; i < (1 << order); i++ )
- iommu_map_page(d, gfn + i, mfn_x(mfn) + i, iommu_flags);
+ {
+ rc = iommu_map_page(d, gfn + i, mfn_x(mfn) + i, iommu_flags);
+ if ( rc )
+ {
+ while ( i-- > 0 )
+ iommu_unmap_page(d, gfn + i);
+ break;
+ }
+ }
else
for ( i = 0; i < (1 << order); i++ )
iommu_unmap_page(d, gfn + i);
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 3d80612..c33b753 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -680,8 +680,16 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
}
else if ( iommu_pte_flags )
for ( i = 0; i < (1UL << page_order); i++ )
- iommu_map_page(p2m->domain, gfn + i, mfn_x(mfn) + i,
- iommu_pte_flags);
+ {
+ rc = iommu_map_page(p2m->domain, gfn + i, mfn_x(mfn) + i,
+ iommu_pte_flags);
+ if ( rc )
+ {
+ while ( i-- > 0 )
+ iommu_unmap_page(p2m->domain, gfn + i);
+ break;
+ }
+ }
else
for ( i = 0; i < (1UL << page_order); i++ )
iommu_unmap_page(p2m->domain, gfn + i);
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index 8b22299..b410ffc 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -932,8 +932,9 @@ __gnttab_map_grant_ref(
{
nr_gets++;
(void)get_page(pg, rd);
- if ( !(op->flags & GNTMAP_readonly) )
- get_page_type(pg, PGT_writable_page);
+ if ( !(op->flags & GNTMAP_readonly) &&
+ !get_page_type(pg, PGT_writable_page) )
+ goto could_not_pin;
}
}
}
diff --git a/xen/common/memory.c b/xen/common/memory.c
index ef57219..543647d 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -678,8 +678,9 @@ static int xenmem_add_to_physmap(struct domain *d,
if ( need_iommu(d) )
{
this_cpu(iommu_dont_flush_iotlb) = 0;
- iommu_iotlb_flush(d, xatp->idx - done, done);
- iommu_iotlb_flush(d, xatp->gpfn - done, done);
+ rc = iommu_iotlb_flush(d, xatp->idx - done, done);
+ if ( !rc )
+ rc = iommu_iotlb_flush(d, xatp->gpfn - done, done);
}
#endif
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index b64676f..29efbfe 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -277,24 +277,28 @@ static void iommu_free_pagetables(unsigned long unused)
cpumask_cycle(smp_processor_id(),
&cpu_online_map));
}
-void iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count)
+int iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush )
- return;
+ return 0;
hd->platform_ops->iotlb_flush(d, gfn, page_count);
+
+ return 0;
}
-void iommu_iotlb_flush_all(struct domain *d)
+int iommu_iotlb_flush_all(struct domain *d)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
if ( !iommu_enabled || !hd->platform_ops ||
!hd->platform_ops->iotlb_flush_all )
- return;
+ return 0;
hd->platform_ops->iotlb_flush_all(d);
+
+ return 0;
}
int __init iommu_setup(void)
@@ -368,11 +372,13 @@ int iommu_do_domctl(
return ret;
}
-void iommu_suspend()
+int iommu_suspend()
{
const struct iommu_ops *ops = iommu_get_ops();
if ( iommu_enabled )
ops->suspend();
+
+ return 0;
}
void iommu_share_p2m_table(struct domain* d)
diff --git a/xen/drivers/passthrough/vtd/x86/vtd.c b/xen/drivers/passthrough/vtd/x86/vtd.c
index c0d6aab..e5ab10a 100644
--- a/xen/drivers/passthrough/vtd/x86/vtd.c
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c
@@ -140,8 +140,11 @@ void __hwdom_init vtd_set_hwdom_mapping(struct domain *d)
tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
for ( j = 0; j < tmp; j++ )
- iommu_map_page(d, pfn * tmp + j, pfn * tmp + j,
- IOMMUF_readable|IOMMUF_writable);
+ if ( iommu_map_page(d, pfn * tmp + j, pfn * tmp + j,
+ IOMMUF_readable|IOMMUF_writable) )
+ printk(XENLOG_G_ERR
+ "IOMMU: Map page gfn: 0x%lx(mfn: 0x%lx) failed.\n",
+ pfn * tmp + j, pfn * tmp + j);
if (!(i & (0xfffff >> (PAGE_SHIFT - PAGE_SHIFT_4K))))
process_pending_softirqs();
diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c
index 8cbb655..d8e3c8f 100644
--- a/xen/drivers/passthrough/x86/iommu.c
+++ b/xen/drivers/passthrough/x86/iommu.c
@@ -104,7 +104,11 @@ int arch_iommu_populate_page_table(struct domain *d)
this_cpu(iommu_dont_flush_iotlb) = 0;
if ( !rc )
- iommu_iotlb_flush_all(d);
+ {
+ rc = iommu_iotlb_flush_all(d);
+ if ( rc )
+ iommu_teardown(d);
+ }
else if ( rc != -ERESTART )
iommu_teardown(d);
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 8217cb7..d6d489a 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -167,7 +167,7 @@ struct iommu_ops {
void (*dump_p2m_table)(struct domain *d);
};
-void iommu_suspend(void);
+int iommu_suspend(void);
void iommu_resume(void);
void iommu_crash_shutdown(void);
int iommu_get_reserved_device_memory(iommu_grdm_t *, void *);
@@ -182,8 +182,8 @@ int iommu_do_pci_domctl(struct xen_domctl *, struct domain *d,
int iommu_do_domctl(struct xen_domctl *, struct domain *d,
XEN_GUEST_HANDLE_PARAM(xen_domctl_t));
-void iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count);
-void iommu_iotlb_flush_all(struct domain *d);
+int iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count);
+int iommu_iotlb_flush_all(struct domain *d);
/*
* The purpose of the iommu_dont_flush_iotlb optional cpu flag is to
--
1.9.1