[Xen-devel] [PATCH 09/10] log-dirty: Refine common code to support PML
This patch adds several new callbacks at the paging/hap/p2m layers to support
PML.

At the paging layer, a new callback is added to log_dirty_domain to flush
hardware-cached dirty pages into the log-dirty radix tree. With PML, dirty
GPAs may still be sitting in the vcpus' PML buffers when userspace peeks at or
clears the dirty bitmap, so they must be flushed before dirty pages are
reported to userspace.

At the p2m layer, three new callbacks are added to p2m_domain to enable/disable
PML and to flush the PML buffers. The enable/disable callbacks are called when
switching to log-dirty mode and when switching back to normal mode,
respectively. The flush callback is called from the paging layer whenever the
PML buffers need to be flushed manually.
Signed-off-by: Kai Huang <kai.huang@xxxxxxxxxxxxxxx>
---
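(Not part of the patch, for reviewers' convenience: a minimal sketch of how a
PML-capable p2m implementation might install the new p2m_domain callbacks.
All ept_*/vmx_* names and cpu_has_vmx_pml below are placeholders for hooks
expected from the VMX/EPT patches elsewhere in this series, not definitions
made here.)

/* Hypothetical wiring in p2m-ept.c -- illustrative only. */
static void ept_enable_pml(struct p2m_domain *p2m)
{
    /* Have VMX allocate and enable PML buffers on all vcpus. */
    vmx_domain_enable_pml(p2m->domain);
}

static void ept_disable_pml(struct p2m_domain *p2m)
{
    vmx_domain_disable_pml(p2m->domain);
}

static void ept_flush_pml_buffers(struct p2m_domain *p2m)
{
    /*
     * Sync logged GPAs from every vcpu's PML buffer into the log-dirty
     * radix tree before the dirty bitmap is read, e.g.:
     *
     *     struct vcpu *v;
     *     for_each_vcpu ( p2m->domain, v )
     *         vmx_vcpu_flush_pml_buffer(v);
     */
    vmx_domain_flush_pml_buffers(p2m->domain);
}

void ept_p2m_init(struct p2m_domain *p2m)
{
    /* ... existing EPT setup ... */
    if ( cpu_has_vmx_pml )
    {
        p2m->enable_hardware_log_dirty = ept_enable_pml;
        p2m->disable_hardware_log_dirty = ept_disable_pml;
        p2m->flush_hardware_cached_dirty = ept_flush_pml_buffers;
    }
}

With such wiring in place, hap_enable_log_dirty() reaches the PML enable hook
via p2m_enable_hardware_log_dirty(), and paging_log_dirty_op() reaches the PML
flush hook via the new log_dirty.flush_cached_dirty callback before the dirty
bitmap is copied to userspace.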
xen/arch/x86/mm/hap/hap.c | 16 +++++++++++++++-
xen/arch/x86/mm/p2m.c | 36 ++++++++++++++++++++++++++++++++++++
xen/arch/x86/mm/paging.c | 15 ++++++++++++++-
xen/arch/x86/mm/shadow/common.c | 2 +-
xen/include/asm-x86/domain.h | 1 +
xen/include/asm-x86/p2m.h | 11 +++++++++++
xen/include/asm-x86/paging.h | 3 ++-
7 files changed, 80 insertions(+), 4 deletions(-)
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 4ecb2e2..25f2f58 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -135,6 +135,10 @@ int hap_track_dirty_vram(struct domain *d,
domain_pause(d);
+ /* Flush dirty GFNs potentially cached by hardware. */
+ if ( d->arch.paging.log_dirty.flush_cached_dirty )
+ d->arch.paging.log_dirty.flush_cached_dirty(d);
+
/* get the bitmap */
paging_log_dirty_range(d, begin_pfn, nr, dirty_bitmap);
@@ -190,6 +194,8 @@ static int hap_enable_log_dirty(struct domain *d, bool_t log_global)
d->arch.paging.mode |= PG_log_dirty;
paging_unlock(d);
+ p2m_enable_hardware_log_dirty(d);
+
if ( log_global )
{
/* set l1e entries of P2M table to be read-only. */
@@ -205,6 +211,8 @@ static int hap_disable_log_dirty(struct domain *d)
d->arch.paging.mode &= ~PG_log_dirty;
paging_unlock(d);
+ p2m_disable_hardware_log_dirty(d);
+
/* set l1e entries of P2M table with normal mode */
p2m_change_entry_type_global(d, p2m_ram_logdirty, p2m_ram_rw);
return 0;
@@ -217,6 +225,11 @@ static void hap_clean_dirty_bitmap(struct domain *d)
flush_tlb_mask(d->domain_dirty_cpumask);
}
+static void hap_flush_cached_dirty(struct domain *d)
+{
+ p2m_flush_hardware_cached_dirty(d);
+}
+
/************************************************/
/* HAP SUPPORT FUNCTIONS */
/************************************************/
@@ -431,7 +444,8 @@ void hap_domain_init(struct domain *d)
/* Use HAP logdirty mechanism. */
paging_log_dirty_init(d, hap_enable_log_dirty,
hap_disable_log_dirty,
- hap_clean_dirty_bitmap);
+ hap_clean_dirty_bitmap,
+ hap_flush_cached_dirty);
}
/* return 0 for success, -errno for failure */
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 6a06e9f..291a275 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -239,6 +239,42 @@ void p2m_memory_type_changed(struct domain *d)
}
}
+void p2m_enable_hardware_log_dirty(struct domain *d)
+{
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
+
+ if ( p2m->enable_hardware_log_dirty )
+ {
+ p2m_lock(p2m);
+ p2m->enable_hardware_log_dirty(p2m);
+ p2m_unlock(p2m);
+ }
+}
+
+void p2m_disable_hardware_log_dirty(struct domain *d)
+{
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
+
+ if ( p2m->disable_hardware_log_dirty )
+ {
+ p2m_lock(p2m);
+ p2m->disable_hardware_log_dirty(p2m);
+ p2m_unlock(p2m);
+ }
+}
+
+void p2m_flush_hardware_cached_dirty(struct domain *d)
+{
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
+
+ if ( p2m->flush_hardware_cached_dirty )
+ {
+ p2m_lock(p2m);
+ p2m->flush_hardware_cached_dirty(p2m);
+ p2m_unlock(p2m);
+ }
+}
+
mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn,
p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
unsigned int *page_order, bool_t locked)
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index b54d76a..c2d336a 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -411,7 +411,18 @@ static int paging_log_dirty_op(struct domain *d,
int i4, i3, i2;
if ( !resuming )
+ {
domain_pause(d);
+
+ /*
+ * Only need to flush when not resuming: in the resuming case the
+ * domain was already paused, so no new dirty page can have been
+ * generated since the previous flush.
+ */
+ if ( d->arch.paging.log_dirty.flush_cached_dirty )
+ d->arch.paging.log_dirty.flush_cached_dirty(d);
+ }
+
paging_lock(d);
if ( !d->arch.paging.preempt.dom )
@@ -610,11 +621,13 @@ void paging_log_dirty_init(struct domain *d,
int (*enable_log_dirty)(struct domain *d,
bool_t log_global),
int (*disable_log_dirty)(struct domain *d),
- void (*clean_dirty_bitmap)(struct domain *d))
+ void (*clean_dirty_bitmap)(struct domain *d),
+ void (*flush_cached_dirty)(struct domain *d))
{
d->arch.paging.log_dirty.enable_log_dirty = enable_log_dirty;
d->arch.paging.log_dirty.disable_log_dirty = disable_log_dirty;
d->arch.paging.log_dirty.clean_dirty_bitmap = clean_dirty_bitmap;
+ d->arch.paging.log_dirty.flush_cached_dirty = flush_cached_dirty;
}
/************************************************/
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 2e43d6d..f8451e8 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -54,7 +54,7 @@ int shadow_domain_init(struct domain *d, unsigned int domcr_flags)
/* Use shadow pagetables for log-dirty support */
paging_log_dirty_init(d, sh_enable_log_dirty,
- sh_disable_log_dirty, sh_clean_dirty_bitmap);
+ sh_disable_log_dirty, sh_clean_dirty_bitmap, NULL);
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
d->arch.paging.shadow.oos_active = 0;
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 9cdffa8..0dc90d2 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -178,6 +178,7 @@ struct log_dirty_domain {
int (*enable_log_dirty )(struct domain *d, bool_t log_global);
int (*disable_log_dirty )(struct domain *d);
void (*clean_dirty_bitmap )(struct domain *d);
+ void (*flush_cached_dirty )(struct domain *d);
};
struct paging_domain {
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index e93c551..91c17a5 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -233,6 +233,9 @@ struct p2m_domain {
p2m_access_t *p2ma,
p2m_query_t q,
unsigned int *page_order);
+ void (*enable_hardware_log_dirty)(struct p2m_domain *p2m);
+ void (*disable_hardware_log_dirty)(struct p2m_domain *p2m);
+ void (*flush_hardware_cached_dirty)(struct p2m_domain *p2m);
void (*change_entry_type_global)(struct p2m_domain *p2m,
p2m_type_t ot,
p2m_type_t nt);
@@ -507,6 +510,14 @@ void guest_physmap_remove_page(struct domain *d,
/* Set a p2m range as populate-on-demand */
int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
unsigned int order);
+/* Enable hardware-assisted log-dirty. */
+void p2m_enable_hardware_log_dirty(struct domain *d);
+
+/* Disable hardware-assisted log-dirty. */
+void p2m_disable_hardware_log_dirty(struct domain *d);
+
+/* Flush hardware cached dirty GFNs. */
+void p2m_flush_hardware_cached_dirty(struct domain *d);
/* Change types across all p2m entries in a domain */
void p2m_change_entry_type_global(struct domain *d,
diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
index 53de715..9fa8d9d 100644
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -152,7 +152,8 @@ void paging_log_dirty_init(struct domain *d,
int (*enable_log_dirty)(struct domain *d,
bool_t log_global),
int (*disable_log_dirty)(struct domain *d),
- void (*clean_dirty_bitmap)(struct domain *d));
+ void (*clean_dirty_bitmap)(struct domain *d),
+ void (*flush_cached_dirty)(struct domain *d));
/* mark a page as dirty */
void paging_mark_dirty(struct domain *d, unsigned long guest_mfn);
--
2.1.0