[PATCH v2 03/26] xen/x86: consolidate vram tracking support
Flag PG_log_dirty is for paging log-dirty support, not vram tracking
support. However, the data structure sh_dirty_vram{} and the function
paging_log_dirty_range(), both designed for vram tracking support, are
guarded by PG_log_dirty. We release both from PG_log_dirty, and also
move paging_log_dirty_range(), renamed to p2m_log_dirty_range(), into
p2m.c, where it logically belongs.

Signed-off-by: Penny Zheng <Penny.Zheng@xxxxxxx>
---
v1 -> v2:
- rename paging_log_dirty_range() into p2m_log_dirty_range(), and move
  it to p2m.c, where it logically belongs
- remove #ifdef / #endif
- change int to unsigned int
---
 xen/arch/x86/include/asm/p2m.h    |  4 ++++
 xen/arch/x86/include/asm/paging.h | 32 +++++++++++++------------------
 xen/arch/x86/mm/hap/hap.c         |  4 ++--
 xen/arch/x86/mm/p2m.c             | 28 +++++++++++++++++++++++++++
 xen/arch/x86/mm/paging.c          | 32 -------------------------------
 5 files changed, 47 insertions(+), 53 deletions(-)

diff --git a/xen/arch/x86/include/asm/p2m.h b/xen/arch/x86/include/asm/p2m.h
index 3b860e30c3..1856cc396c 100644
--- a/xen/arch/x86/include/asm/p2m.h
+++ b/xen/arch/x86/include/asm/p2m.h
@@ -1110,6 +1110,10 @@ static inline int p2m_entry_modify(struct p2m_domain *p2m, p2m_type_t nt,
 
 #endif /* CONFIG_HVM */
 
+/* get the dirty bitmap for a specific range of pfns */
+void p2m_log_dirty_range(struct domain *d, unsigned long begin_pfn,
+                         unsigned long nr, uint8_t *dirty_bitmap);
+
 #endif /* _XEN_ASM_X86_P2M_H */
 
 /*
diff --git a/xen/arch/x86/include/asm/paging.h b/xen/arch/x86/include/asm/paging.h
index 768b077ebd..1b0694bb36 100644
--- a/xen/arch/x86/include/asm/paging.h
+++ b/xen/arch/x86/include/asm/paging.h
@@ -133,13 +133,20 @@ struct paging_mode {
     (DIV_ROUND_UP(PADDR_BITS - PAGE_SHIFT - (PAGE_SHIFT + 3), \
                   PAGE_SHIFT - ilog2(sizeof(mfn_t))) + 1)
 
-#if PG_log_dirty
+#ifdef CONFIG_HVM
+/* VRAM dirty tracking support */
+struct sh_dirty_vram {
+    unsigned long begin_pfn;
+    unsigned long end_pfn;
+#ifdef CONFIG_SHADOW_PAGING
+    paddr_t *sl1ma;
+    uint8_t *dirty_bitmap;
+    s_time_t last_dirty;
+#endif
+};
+#endif
 
-/* get the dirty bitmap for a specific range of pfns */
-void paging_log_dirty_range(struct domain *d,
-                            unsigned long begin_pfn,
-                            unsigned long nr,
-                            uint8_t *dirty_bitmap);
+#if PG_log_dirty
 
 /* log dirty initialization */
 void paging_log_dirty_init(struct domain *d, const struct log_dirty_ops *ops);
@@ -171,19 +178,6 @@ bool paging_mfn_is_dirty(const struct domain *d, mfn_t gmfn);
 #define L4_LOGDIRTY_IDX(pfn) ((pfn_x(pfn) >> (PAGE_SHIFT + 3 + PAGETABLE_ORDER * 2)) & \
                               (LOGDIRTY_NODE_ENTRIES-1))
 
-#ifdef CONFIG_HVM
-/* VRAM dirty tracking support */
-struct sh_dirty_vram {
-    unsigned long begin_pfn;
-    unsigned long end_pfn;
-#ifdef CONFIG_SHADOW_PAGING
-    paddr_t *sl1ma;
-    uint8_t *dirty_bitmap;
-    s_time_t last_dirty;
-#endif
-};
-#endif
-
 #else /* !PG_log_dirty */
 
 static inline void paging_log_dirty_init(struct domain *d,
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 4aec98109d..2f69ff9c7b 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -42,7 +42,7 @@
  * Create a dirty vram range on demand when some [begin_pfn:begin_pfn+nr] is
  * first encountered.
  * Collect the guest_dirty bitmask, a bit mask of the dirty vram pages, by
- * calling paging_log_dirty_range(), which interrogates each vram
+ * calling p2m_log_dirty_range(), which interrogates each vram
  * page's p2m type looking for pages that have been made writable.
  */
@@ -119,7 +119,7 @@ int hap_track_dirty_vram(struct domain *d,
         p2m_flush_hardware_cached_dirty(d);
 
         /* get the bitmap */
-        paging_log_dirty_range(d, begin_pfn, nr_frames, dirty_bitmap);
+        p2m_log_dirty_range(d, begin_pfn, nr_frames, dirty_bitmap);
 
         domain_unpause(d);
     }
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index e802f2e4e6..e2a00a0efd 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -2161,6 +2161,34 @@ int relinquish_p2m_mapping(struct domain *d)
     return rc;
 }
 
+void p2m_log_dirty_range(struct domain *d, unsigned long begin_pfn,
+                         unsigned long nr, uint8_t *dirty_bitmap)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    unsigned int i;
+    unsigned long pfn;
+
+    /*
+     * Set l1e entries of P2M table to be read-only.
+     *
+     * On first write, it page faults, its entry is changed to read-write,
+     * and on retry the write succeeds.
+     *
+     * We populate dirty_bitmap by looking for entries that have been
+     * switched to read-write.
+     */
+
+    p2m_lock(p2m);
+
+    for ( i = 0, pfn = begin_pfn; pfn < begin_pfn + nr; i++, pfn++ )
+        if ( !p2m_change_type_one(d, pfn, p2m_ram_rw, p2m_ram_logdirty) )
+            dirty_bitmap[i >> 3] |= (1 << (i & 7));
+
+    p2m_unlock(p2m);
+
+    guest_flush_tlb_mask(d, d->dirty_cpumask);
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index 3aafb0990b..65455a6867 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -583,38 +583,6 @@ static int paging_log_dirty_op(struct domain *d,
     return rv;
 }
 
-#ifdef CONFIG_HVM
-void paging_log_dirty_range(struct domain *d,
-                            unsigned long begin_pfn,
-                            unsigned long nr,
-                            uint8_t *dirty_bitmap)
-{
-    struct p2m_domain *p2m = p2m_get_hostp2m(d);
-    int i;
-    unsigned long pfn;
-
-    /*
-     * Set l1e entries of P2M table to be read-only.
-     *
-     * On first write, it page faults, its entry is changed to read-write,
-     * and on retry the write succeeds.
-     *
-     * We populate dirty_bitmap by looking for entries that have been
-     * switched to read-write.
-     */
-
-    p2m_lock(p2m);
-
-    for ( i = 0, pfn = begin_pfn; pfn < begin_pfn + nr; i++, pfn++ )
-        if ( !p2m_change_type_one(d, pfn, p2m_ram_rw, p2m_ram_logdirty) )
-            dirty_bitmap[i >> 3] |= (1 << (i & 7));
-
-    p2m_unlock(p2m);
-
-    guest_flush_tlb_mask(d, d->dirty_cpumask);
-}
-#endif
-
 /*
  * Callers must supply log_dirty_ops for the log dirty code to call. This
  * function usually is invoked when paging is enabled. Check shadow_enable()
-- 
2.34.1
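As a quick illustration of the bitmap layout that p2m_log_dirty_range()
produces (one bit per pfn starting at begin_pfn, with bit i kept in
dirty_bitmap[i >> 3] under mask 1 << (i & 7)), here is a minimal,
self-contained C sketch of a consumer. dump_dirty_pfns() and the stdio
output are illustrative assumptions, not part of Xen:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Hypothetical helper (not Xen code): report which pfns in
     * [begin_pfn, begin_pfn + nr) are set in a bitmap laid out the way
     * p2m_log_dirty_range() fills it: bit i of the range lives in byte
     * i >> 3, at bit position i & 7.
     */
    static void dump_dirty_pfns(const uint8_t *dirty_bitmap,
                                unsigned long begin_pfn, unsigned long nr)
    {
        unsigned long i;

        for ( i = 0; i < nr; i++ )
            if ( dirty_bitmap[i >> 3] & (1u << (i & 7)) )
                printf("pfn %#lx is dirty\n", begin_pfn + i);
    }

    int main(void)
    {
        /* One byte covers pfns 0x1000..0x1007; mark 0x1001 and 0x1007 dirty. */
        uint8_t bitmap[1] = { (1u << 1) | (1u << 7) };

        dump_dirty_pfns(bitmap, 0x1000, 8);
        return 0;
    }

This mirrors the indexing used both by the new p2m.c implementation and
by its hap_track_dirty_vram() caller, which is why moving the function
does not change the bitmap format callers see.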