|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH] x86: fix cache flushing condition in map_pages_to_xen()
This fixes yet another shortcoming of the function (exposed by 8bfaa2c2
["x86: add locking to map_pages_to_xen()"]'s adjustment to
msix_put_fixmap()): It must not flush caches when transitioning to a
non-present mapping. Doing so causes CLFLUSH, if used in favor of
WBINVD, to fault.
To help code readability, factor out the whole flush flags updating
in map_pages_to_xen() into a helper macro.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5430,6 +5430,15 @@ l1_pgentry_t *virt_to_xen_l1e(unsigned l
flush_area_local((const void *)v, f) : \
flush_area_all((const void *)v, f))
+#define flush_flags(oldf) do { \
+ unsigned int o_ = (oldf); \
+ if ( (o_) & _PAGE_GLOBAL ) \
+ flush_flags |= FLUSH_TLB_GLOBAL; \
+ if ( (flags & _PAGE_PRESENT) && \
+ (((o_) ^ flags) & PAGE_CACHE_ATTRS) ) \
+ flush_flags |= FLUSH_CACHE; \
+} while (0)
+
int map_pages_to_xen(
unsigned long virt,
unsigned long mfn,
@@ -5465,11 +5474,7 @@ int map_pages_to_xen(
if ( l3e_get_flags(ol3e) & _PAGE_PSE )
{
- if ( l3e_get_flags(ol3e) & _PAGE_GLOBAL )
- flush_flags |= FLUSH_TLB_GLOBAL;
- if ( (lNf_to_l1f(l3e_get_flags(ol3e)) ^ flags) &
- PAGE_CACHE_ATTRS )
- flush_flags |= FLUSH_CACHE;
+ flush_flags(lNf_to_l1f(l3e_get_flags(ol3e)));
flush_area(virt, flush_flags);
}
else
@@ -5481,27 +5486,14 @@ int map_pages_to_xen(
if ( !(l2e_get_flags(ol2e) & _PAGE_PRESENT) )
continue;
if ( l2e_get_flags(ol2e) & _PAGE_PSE )
- {
- if ( l2e_get_flags(ol2e) & _PAGE_GLOBAL )
- flush_flags |= FLUSH_TLB_GLOBAL;
- if ( (lNf_to_l1f(l2e_get_flags(ol2e)) ^ flags) &
- PAGE_CACHE_ATTRS )
- flush_flags |= FLUSH_CACHE;
- }
+ flush_flags(lNf_to_l1f(l2e_get_flags(ol2e)));
else
{
unsigned int j;
pl1e = l2e_to_l1e(ol2e);
for ( j = 0; j < L1_PAGETABLE_ENTRIES; j++ )
- {
- ol1e = pl1e[j];
- if ( l1e_get_flags(ol1e) & _PAGE_GLOBAL )
- flush_flags |= FLUSH_TLB_GLOBAL;
- if ( (l1e_get_flags(ol1e) ^ flags) &
- PAGE_CACHE_ATTRS )
- flush_flags |= FLUSH_CACHE;
- }
+ flush_flags(l1e_get_flags(pl1e[j]));
}
}
flush_area(virt, flush_flags);
@@ -5595,24 +5587,14 @@ int map_pages_to_xen(
if ( l2e_get_flags(ol2e) & _PAGE_PSE )
{
- if ( l2e_get_flags(ol2e) & _PAGE_GLOBAL )
- flush_flags |= FLUSH_TLB_GLOBAL;
- if ( (lNf_to_l1f(l2e_get_flags(ol2e)) ^ flags) &
- PAGE_CACHE_ATTRS )
- flush_flags |= FLUSH_CACHE;
+ flush_flags(lNf_to_l1f(l2e_get_flags(ol2e)));
flush_area(virt, flush_flags);
}
else
{
pl1e = l2e_to_l1e(ol2e);
for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
- {
- if ( l1e_get_flags(pl1e[i]) & _PAGE_GLOBAL )
- flush_flags |= FLUSH_TLB_GLOBAL;
- if ( (l1e_get_flags(pl1e[i]) ^ flags) &
- PAGE_CACHE_ATTRS )
- flush_flags |= FLUSH_CACHE;
- }
+ flush_flags(l1e_get_flags(pl1e[i]));
flush_area(virt, flush_flags);
free_xen_pagetable(pl1e);
}
@@ -5687,10 +5669,8 @@ int map_pages_to_xen(
if ( (l1e_get_flags(ol1e) & _PAGE_PRESENT) )
{
unsigned int flush_flags = FLUSH_TLB | FLUSH_ORDER(0);
- if ( l1e_get_flags(ol1e) & _PAGE_GLOBAL )
- flush_flags |= FLUSH_TLB_GLOBAL;
- if ( (l1e_get_flags(ol1e) ^ flags) & PAGE_CACHE_ATTRS )
- flush_flags |= FLUSH_CACHE;
+
+ flush_flags(l1e_get_flags(ol1e));
flush_area(virt, flush_flags);
}
@@ -5769,6 +5749,8 @@ int map_pages_to_xen(
return 0;
}
+#undef flush_flags
+
void destroy_xen_mappings(unsigned long s, unsigned long e)
{
bool_t locking = system_state > SYS_STATE_boot;
@@ -5908,6 +5890,8 @@ void destroy_xen_mappings(unsigned long
flush_area(NULL, FLUSH_TLB_GLOBAL);
}
+#undef flush_area
+
void __set_fixmap(
enum fixed_addresses idx, unsigned long mfn, unsigned long flags)
{
Attachment: x86-mp2x-less-cache-flushing.patch

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |