[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH 12/13] Nested Virtualization: vram
I still nack this idea; logdirty (and almost everything else) should deal in N1 PFNs. And in particular there isn't a per-p2m framebuffer so there shouldn't be per-p2m vram tracking. See the thread from a previous revision, ending in message-id <20100913161756.GM3844@xxxxxxxxxxxxxxxxxxxxxxx> Cheers, Tim. At 18:45 +0000 on 12 Nov (1289587503), Christoph Egger wrote: > > -- > ---to satisfy European Law for business letters: > Advanced Micro Devices GmbH > Einsteinring 24, 85609 Dornach b. Muenchen > Geschaeftsfuehrer: Alberto Bozzo, Andrew Bowd > Sitz: Dornach, Gemeinde Aschheim, Landkreis Muenchen > Registergericht Muenchen, HRB Nr. 43632 > # HG changeset patch > # User cegger > # Date 1289584017 -3600 > Move dirty_vram from struct hvm_domain to struct p2m_domain > > Signed-off-by: Christoph Egger <Christoph.Egger@xxxxxxx> > > diff -r 8eab8e9ce98e -r 2aafa38f2390 xen/arch/x86/mm/hap/hap.c > --- a/xen/arch/x86/mm/hap/hap.c > +++ b/xen/arch/x86/mm/hap/hap.c > @@ -58,7 +58,8 @@ > static int hap_enable_vram_tracking(struct domain *d) > { > int i; > - struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram; > + struct p2m_domain *p2m = p2m_get_hostp2m(d); > + struct sh_dirty_vram *dirty_vram = p2m->dirty_vram; > > if ( !dirty_vram ) > return -EINVAL; > @@ -70,7 +71,7 @@ static int hap_enable_vram_tracking(stru > > /* set l1e entries of P2M table to be read-only. 
*/ > for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++) > - p2m_change_type(p2m_get_hostp2m(d), i, p2m_ram_rw, p2m_ram_logdirty); > + p2m_change_type(p2m, i, p2m_ram_rw, p2m_ram_logdirty); > > flush_tlb_mask(&d->domain_dirty_cpumask); > return 0; > @@ -79,7 +80,8 @@ static int hap_enable_vram_tracking(stru > static int hap_disable_vram_tracking(struct domain *d) > { > int i; > - struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram; > + struct p2m_domain *p2m = p2m_get_hostp2m(d); > + struct sh_dirty_vram *dirty_vram = p2m->dirty_vram; > > if ( !dirty_vram ) > return -EINVAL; > @@ -90,7 +92,7 @@ static int hap_disable_vram_tracking(str > > /* set l1e entries of P2M table with normal mode */ > for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++) > - p2m_change_type(p2m_get_hostp2m(d), i, p2m_ram_logdirty, p2m_ram_rw); > + p2m_change_type(p2m, i, p2m_ram_logdirty, p2m_ram_rw); > > flush_tlb_mask(&d->domain_dirty_cpumask); > return 0; > @@ -99,14 +101,15 @@ static int hap_disable_vram_tracking(str > static void hap_clean_vram_tracking(struct domain *d) > { > int i; > - struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram; > + struct p2m_domain *p2m = p2m_get_hostp2m(d); > + struct sh_dirty_vram *dirty_vram = p2m->dirty_vram; > > if ( !dirty_vram ) > return; > > /* set l1e entries of P2M table to be read-only. 
*/ > for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++) > - p2m_change_type(p2m_get_hostp2m(d), i, p2m_ram_rw, p2m_ram_logdirty); > + p2m_change_type(p2m, i, p2m_ram_rw, p2m_ram_logdirty); > > flush_tlb_mask(&d->domain_dirty_cpumask); > } > @@ -124,7 +127,8 @@ int hap_track_dirty_vram(struct domain * > XEN_GUEST_HANDLE_64(uint8) dirty_bitmap) > { > long rc = 0; > - struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram; > + struct p2m_domain *p2m = p2m_get_hostp2m(d); > + struct sh_dirty_vram *dirty_vram = p2m->dirty_vram; > > if ( nr ) > { > @@ -149,7 +153,7 @@ int hap_track_dirty_vram(struct domain * > > dirty_vram->begin_pfn = begin_pfn; > dirty_vram->end_pfn = begin_pfn + nr; > - d->arch.hvm_domain.dirty_vram = dirty_vram; > + p2m->dirty_vram = dirty_vram; > hap_vram_tracking_init(d); > rc = paging_log_dirty_enable(d); > if (rc != 0) > @@ -171,7 +175,7 @@ int hap_track_dirty_vram(struct domain * > if ( paging_mode_log_dirty(d) && dirty_vram ) { > rc = paging_log_dirty_disable(d); > xfree(dirty_vram); > - dirty_vram = d->arch.hvm_domain.dirty_vram = NULL; > + dirty_vram = p2m->dirty_vram = NULL; > } else > rc = 0; > } > @@ -182,7 +186,7 @@ param_fail: > if ( dirty_vram ) > { > xfree(dirty_vram); > - dirty_vram = d->arch.hvm_domain.dirty_vram = NULL; > + dirty_vram = p2m->dirty_vram = NULL; > } > return rc; > } > @@ -228,12 +232,13 @@ static void hap_clean_dirty_bitmap(struc > > void hap_logdirty_init(struct domain *d) > { > - struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram; > + struct p2m_domain *p2m = p2m_get_hostp2m(d); > + struct sh_dirty_vram *dirty_vram = p2m->dirty_vram; > if ( paging_mode_log_dirty(d) && dirty_vram ) > { > paging_log_dirty_disable(d); > xfree(dirty_vram); > - dirty_vram = d->arch.hvm_domain.dirty_vram = NULL; > + dirty_vram = p2m->dirty_vram = NULL; > } > > /* Reinitialize logdirty mechanism */ > diff -r 8eab8e9ce98e -r 2aafa38f2390 xen/arch/x86/mm/shadow/common.c > --- 
a/xen/arch/x86/mm/shadow/common.c > +++ b/xen/arch/x86/mm/shadow/common.c > @@ -3159,6 +3159,7 @@ void shadow_teardown(struct domain *d) > struct vcpu *v; > mfn_t mfn; > struct page_info *unpaged_pagetable = NULL; > + struct p2m_domain *p2m = p2m_get_hostp2m(d); > > ASSERT(d->is_dying); > ASSERT(d != current->domain); > @@ -3254,18 +3255,18 @@ void shadow_teardown(struct domain *d) > * calls now that we've torn down the bitmap */ > d->arch.paging.mode &= ~PG_log_dirty; > > - if (d->arch.hvm_domain.dirty_vram) { > - xfree(d->arch.hvm_domain.dirty_vram->sl1ma); > - xfree(d->arch.hvm_domain.dirty_vram->dirty_bitmap); > - xfree(d->arch.hvm_domain.dirty_vram); > - d->arch.hvm_domain.dirty_vram = NULL; > + if (p2m->dirty_vram) { > + xfree(p2m->dirty_vram->sl1ma); > + xfree(p2m->dirty_vram->dirty_bitmap); > + xfree(p2m->dirty_vram); > + p2m->dirty_vram = NULL; > } > > shadow_unlock(d); > > /* Must be called outside the lock */ > if ( unpaged_pagetable ) > - shadow_free_p2m_page(p2m_get_hostp2m(d), unpaged_pagetable); > + shadow_free_p2m_page(p2m, unpaged_pagetable); > } > > void shadow_final_teardown(struct domain *d) > @@ -3609,8 +3610,8 @@ int shadow_track_dirty_vram(struct domai > int flush_tlb = 0; > unsigned long i; > p2m_type_t t; > - struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram; > struct p2m_domain *p2m = p2m_get_hostp2m(d); > + struct sh_dirty_vram *dirty_vram = p2m->dirty_vram; > > if (end_pfn < begin_pfn > || begin_pfn > p2m->max_mapped_pfn > @@ -3624,11 +3625,12 @@ int shadow_track_dirty_vram(struct domai > || end_pfn != dirty_vram->end_pfn )) ) > { > /* Different tracking, tear the previous down. 
*/ > - gdprintk(XENLOG_INFO, "stopping tracking VRAM %lx - %lx\n", > dirty_vram->begin_pfn, dirty_vram->end_pfn); > + gdprintk(XENLOG_INFO, "stopping tracking VRAM %lx - %lx\n", > + dirty_vram->begin_pfn, dirty_vram->end_pfn); > xfree(dirty_vram->sl1ma); > xfree(dirty_vram->dirty_bitmap); > xfree(dirty_vram); > - dirty_vram = d->arch.hvm_domain.dirty_vram = NULL; > + dirty_vram = p2m->dirty_vram = NULL; > } > > if ( !nr ) > @@ -3652,7 +3654,7 @@ int shadow_track_dirty_vram(struct domai > goto out; > dirty_vram->begin_pfn = begin_pfn; > dirty_vram->end_pfn = end_pfn; > - d->arch.hvm_domain.dirty_vram = dirty_vram; > + p2m->dirty_vram = dirty_vram; > > if ( (dirty_vram->sl1ma = xmalloc_array(paddr_t, nr)) == NULL ) > goto out_dirty_vram; > @@ -3785,7 +3787,7 @@ out_sl1ma: > xfree(dirty_vram->sl1ma); > out_dirty_vram: > xfree(dirty_vram); > - dirty_vram = d->arch.hvm_domain.dirty_vram = NULL; > + dirty_vram = p2m->dirty_vram = NULL; > > out: > shadow_unlock(d); > diff -r 8eab8e9ce98e -r 2aafa38f2390 xen/arch/x86/mm/shadow/multi.c > --- a/xen/arch/x86/mm/shadow/multi.c > +++ b/xen/arch/x86/mm/shadow/multi.c > @@ -515,7 +515,7 @@ _sh_propagate(struct vcpu *v, > guest_l1e_t guest_entry = { guest_intpte }; > shadow_l1e_t *sp = shadow_entry_ptr; > struct domain *d = v->domain; > - struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram; > + struct sh_dirty_vram *dirty_vram = p2m_get_hostp2m(d)->dirty_vram; > gfn_t target_gfn = guest_l1e_get_gfn(guest_entry); > u32 pass_thru_flags; > u32 gflags, sflags; > @@ -1107,7 +1107,7 @@ static inline void shadow_vram_get_l1e(s > mfn_t mfn = shadow_l1e_get_mfn(new_sl1e); > int flags = shadow_l1e_get_flags(new_sl1e); > unsigned long gfn; > - struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram; > + struct sh_dirty_vram *dirty_vram = p2m_get_hostp2m(d)->dirty_vram; > > if ( !dirty_vram /* tracking disabled? */ > || !(flags & _PAGE_RW) /* read-only mapping? 
*/ > @@ -1138,7 +1138,7 @@ static inline void shadow_vram_put_l1e(s > mfn_t mfn = shadow_l1e_get_mfn(old_sl1e); > int flags = shadow_l1e_get_flags(old_sl1e); > unsigned long gfn; > - struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram; > + struct sh_dirty_vram *dirty_vram = p2m_get_hostp2m(d)->dirty_vram; > > if ( !dirty_vram /* tracking disabled? */ > || !(flags & _PAGE_RW) /* read-only mapping? */ > diff -r 8eab8e9ce98e -r 2aafa38f2390 xen/include/asm-x86/hvm/domain.h > --- a/xen/include/asm-x86/hvm/domain.h > +++ b/xen/include/asm-x86/hvm/domain.h > @@ -69,9 +69,6 @@ struct hvm_domain { > /* Memory ranges with pinned cache attributes. */ > struct list_head pinned_cacheattr_ranges; > > - /* VRAM dirty support. */ > - struct sh_dirty_vram *dirty_vram; > - > /* If one of vcpus of this domain is in no_fill_mode or > * mtrr/pat between vcpus is not the same, set is_in_uc_mode > */ > diff -r 8eab8e9ce98e -r 2aafa38f2390 xen/include/asm-x86/p2m.h > --- a/xen/include/asm-x86/p2m.h > +++ b/xen/include/asm-x86/p2m.h > @@ -174,6 +174,9 @@ struct p2m_domain { > /* Shadow translated domain: p2m mapping */ > pagetable_t phys_table; > > + /* VRAM dirty support. */ > + struct sh_dirty_vram *dirty_vram; > + > struct domain *domain; /* back pointer to domain */ > > /* Pages used to construct the p2m */ > _______________________________________________ > Xen-devel mailing list > Xen-devel@xxxxxxxxxxxxxxxxxxx > http://lists.xensource.com/xen-devel -- Tim Deegan <Tim.Deegan@xxxxxxxxxx> Principal Software Engineer, Xen Platform Team Citrix Systems UK Ltd. (Company #02937203, SL9 0BG) _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.