|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH v1 02/25] xen/x86: consolidate vram tracking support
The PG_log_dirty flag is for paging log-dirty support, not VRAM tracking support.
However, the data structure sh_dirty_vram{} and the function
paging_log_dirty_range(), which are designed for VRAM tracking support, are
guarded by PG_log_dirty.
Remove both from the PG_log_dirty guard, and also move paging_log_dirty_range()
into hap.c so that it can be made static.
Signed-off-by: Penny Zheng <Penny.Zheng@xxxxxxx>
---
xen/arch/x86/include/asm/paging.h | 32 +++++++++++++------------------
xen/arch/x86/mm/hap/hap.c | 32 +++++++++++++++++++++++++++++++
xen/arch/x86/mm/paging.c | 32 -------------------------------
3 files changed, 45 insertions(+), 51 deletions(-)
diff --git a/xen/arch/x86/include/asm/paging.h
b/xen/arch/x86/include/asm/paging.h
index 8a2a0af408..173a569610 100644
--- a/xen/arch/x86/include/asm/paging.h
+++ b/xen/arch/x86/include/asm/paging.h
@@ -133,13 +133,20 @@ struct paging_mode {
(DIV_ROUND_UP(PADDR_BITS - PAGE_SHIFT - (PAGE_SHIFT + 3), \
PAGE_SHIFT - ilog2(sizeof(mfn_t))) + 1)
-#if PG_log_dirty
+#ifdef CONFIG_HVM
+/* VRAM dirty tracking support */
+struct sh_dirty_vram {
+ unsigned long begin_pfn;
+ unsigned long end_pfn;
+#ifdef CONFIG_SHADOW_PAGING
+ paddr_t *sl1ma;
+ uint8_t *dirty_bitmap;
+ s_time_t last_dirty;
+#endif
+};
+#endif
-/* get the dirty bitmap for a specific range of pfns */
-void paging_log_dirty_range(struct domain *d,
- unsigned long begin_pfn,
- unsigned long nr,
- uint8_t *dirty_bitmap);
+#if PG_log_dirty
/* log dirty initialization */
void paging_log_dirty_init(struct domain *d, const struct log_dirty_ops *ops);
@@ -171,19 +178,6 @@ bool paging_mfn_is_dirty(const struct domain *d, mfn_t
gmfn);
#define L4_LOGDIRTY_IDX(pfn) ((pfn_x(pfn) >> (PAGE_SHIFT + 3 + PAGETABLE_ORDER
* 2)) & \
(LOGDIRTY_NODE_ENTRIES-1))
-#ifdef CONFIG_HVM
-/* VRAM dirty tracking support */
-struct sh_dirty_vram {
- unsigned long begin_pfn;
- unsigned long end_pfn;
-#ifdef CONFIG_SHADOW_PAGING
- paddr_t *sl1ma;
- uint8_t *dirty_bitmap;
- s_time_t last_dirty;
-#endif
-};
-#endif
-
#else /* !PG_log_dirty */
static inline void paging_log_dirty_init(struct domain *d,
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index ec5043a8aa..668a233923 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -36,6 +36,38 @@
/* HAP VRAM TRACKING SUPPORT */
/************************************************/
+#ifdef CONFIG_HVM
+static void paging_log_dirty_range(struct domain *d,
+ unsigned long begin_pfn,
+ unsigned long nr,
+ uint8_t *dirty_bitmap)
+{
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
+ int i;
+ unsigned long pfn;
+
+ /*
+ * Set l1e entries of P2M table to be read-only.
+ *
+ * On first write, it page faults, its entry is changed to read-write,
+ * and on retry the write succeeds.
+ *
+ * We populate dirty_bitmap by looking for entries that have been
+ * switched to read-write.
+ */
+
+ p2m_lock(p2m);
+
+ for ( i = 0, pfn = begin_pfn; pfn < begin_pfn + nr; i++, pfn++ )
+ if ( !p2m_change_type_one(d, pfn, p2m_ram_rw, p2m_ram_logdirty) )
+ dirty_bitmap[i >> 3] |= (1 << (i & 7));
+
+ p2m_unlock(p2m);
+
+ guest_flush_tlb_mask(d, d->dirty_cpumask);
+}
+#endif /* CONFIG_HVM */
+
/*
* hap_track_dirty_vram()
* Create the domain's dv_dirty_vram struct on demand.
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index c77f4c1dac..1216a181c3 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -583,38 +583,6 @@ static int paging_log_dirty_op(struct domain *d,
return rv;
}
-#ifdef CONFIG_HVM
-void paging_log_dirty_range(struct domain *d,
- unsigned long begin_pfn,
- unsigned long nr,
- uint8_t *dirty_bitmap)
-{
- struct p2m_domain *p2m = p2m_get_hostp2m(d);
- int i;
- unsigned long pfn;
-
- /*
- * Set l1e entries of P2M table to be read-only.
- *
- * On first write, it page faults, its entry is changed to read-write,
- * and on retry the write succeeds.
- *
- * We populate dirty_bitmap by looking for entries that have been
- * switched to read-write.
- */
-
- p2m_lock(p2m);
-
- for ( i = 0, pfn = begin_pfn; pfn < begin_pfn + nr; i++, pfn++ )
- if ( !p2m_change_type_one(d, pfn, p2m_ram_rw, p2m_ram_logdirty) )
- dirty_bitmap[i >> 3] |= (1 << (i & 7));
-
- p2m_unlock(p2m);
-
- guest_flush_tlb_mask(d, d->dirty_cpumask);
-}
-#endif
-
/*
* Callers must supply log_dirty_ops for the log dirty code to call. This
* function usually is invoked when paging is enabled. Check shadow_enable()
--
2.34.1
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |