|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH 1/3] x86/p2m: tighten conditions of IOMMU mapping updates
>>> On 28.09.15 at 11:06, <JBeulich@xxxxxxxx> wrote:
>>>> On 28.09.15 at 10:55, <kevin.tian@xxxxxxxxx> wrote:
>> Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
>
> Thanks, but quite a bit more important would have been a reply
> to this
>
>>>In addition to the fixes here it looks to me as if both EPT and
>>>NPT/shadow code lack invalidation of IOMMU side paging structure
>>>caches, i.e. further changes may be needed. Am I overlooking something?
>
> as it may require the patch to be extended (and hence the ack to
> be dropped again).
And btw here is a first (incomplete as you can see) take at
addressing the issue (Yang, I realize I forgot to Cc you on the
original submission):
TBD: iommu_pte_flush() includes a cache flush for the passed in PTE, which
would need to be carried out for all entries which got updated and
which the IOMMU may have used (i.e. ept_invalidate_emt_range() is okay,
but resolve_misconfig() and ept_set_entry() aren't).
--- unstable.orig/xen/arch/x86/mm/p2m-ept.c
+++ unstable/xen/arch/x86/mm/p2m-ept.c
@@ -452,6 +452,10 @@ static int ept_invalidate_emt_range(stru
wrc = atomic_write_ept_entry(&table[index], split_ept_entry, i);
ASSERT(wrc == 0);
+ if ( iommu_hap_pt_share && need_iommu(p2m->domain) )
+ iommu_pte_flush(p2m->domain, first_gfn, &table[index].epte,
+ i * EPT_TABLE_ORDER, 1);
+
for ( ; i > target; --i )
if ( !ept_next_level(p2m, 1, &table, &gfn_remainder, i) )
break;
@@ -496,9 +500,9 @@ static int ept_invalidate_emt_range(stru
static int resolve_misconfig(struct p2m_domain *p2m, unsigned long gfn)
{
struct ept_data *ept = &p2m->ept;
- unsigned int level = ept_get_wl(ept);
+ unsigned int level = ept_get_wl(ept), flush_order = 0;
unsigned long mfn = ept_get_asr(ept);
- ept_entry_t *epte;
+ ept_entry_t *epte, *flush_epte = NULL;
int wrc, rc = 0;
if ( !mfn )
@@ -575,6 +579,11 @@ static int resolve_misconfig(struct p2m_
{
if ( ept_split_super_page(p2m, &e, level, level - 1) )
{
+ if ( !flush_epte )
+ {
+ flush_epte = &epte[i];
+ flush_order = level * EPT_TABLE_ORDER;
+ }
wrc = atomic_write_ept_entry(&epte[i], e, level);
ASSERT(wrc == 0);
unmap_domain_page(epte);
@@ -619,6 +628,10 @@ static int resolve_misconfig(struct p2m_
}
unmap_domain_page(epte);
+
+ if ( iommu_hap_pt_share && flush_epte && need_iommu(p2m->domain) )
+ iommu_pte_flush(p2m->domain, gfn, &flush_epte->epte, flush_order, 1);
+
if ( rc )
{
struct vcpu *v;
@@ -668,7 +681,7 @@ ept_set_entry(struct p2m_domain *p2m, un
uint8_t ipat = 0;
bool_t need_modify_vtd_table = 1;
bool_t vtd_pte_present = 0;
- unsigned int iommu_flags = p2m_get_iommu_flags(p2mt);
+ unsigned int iommu_flags = p2m_get_iommu_flags(p2mt), flush_order = 0;
enum { sync_off, sync_on, sync_check } needs_sync = sync_check;
ept_entry_t old_entry = { .epte = 0 };
ept_entry_t new_entry = { .epte = 0 };
@@ -763,6 +776,8 @@ ept_set_entry(struct p2m_domain *p2m, un
goto out;
}
+ flush_order = i * EPT_TABLE_ORDER;
+
/* now install the newly split ept sub-tree */
/* NB: please make sure domian is paused and no in-fly VT-d DMA. */
rc = atomic_write_ept_entry(ept_entry, split_ept_entry, i);
@@ -800,7 +815,8 @@ ept_set_entry(struct p2m_domain *p2m, un
/* Safe to read-then-write because we hold the p2m lock */
if ( ept_entry->mfn == new_entry.mfn &&
- p2m_get_iommu_flags(ept_entry->sa_p2mt) == iommu_flags )
+ p2m_get_iommu_flags(ept_entry->sa_p2mt) == iommu_flags &&
+ (!iommu_hap_pt_share || flush_order <= order) )
need_modify_vtd_table = 0;
ept_p2m_type_to_flags(p2m, &new_entry, p2mt, p2ma);
@@ -829,7 +845,14 @@ out:
need_modify_vtd_table )
{
if ( iommu_hap_pt_share )
- iommu_pte_flush(d, gfn, &ept_entry->epte, order, vtd_pte_present);
+ {
+ if ( flush_order > order )
+ vtd_pte_present = 1;
+ else
+ flush_order = order;
+ iommu_pte_flush(d, gfn, &ept_entry->epte, flush_order,
+ vtd_pte_present);
+ }
else
{
if ( iommu_flags )
Jan
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.