
Re: [Xen-devel] [PATCH v2 2/2] x86/HVM: correct page dirty marking in hvm_map_guest_frame_rw()



On 12/08/15 15:19, Jan Beulich wrote:
> Rather than dirtying a page when establishing a (permanent) mapping,
> dirty it when the page gets unmapped, or - if still mapped - on the
> final iteration of a save operation. (Transient mappings continue to
> get dirtied upon getting mapped, to avoid the overhead of tracking.)
>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
>
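
For readers skimming the diff below, the policy condenses to this
sketch; it reuses the patch's own identifiers and is only a summary,
not code from the patch:

    /* Transient mappings: dirty at map time, so no tracking is needed. */
    if ( writable && *writable && !permanent )
        paging_mark_dirty(d, page_to_mfn(page));

    /*
     * Permanent writable mappings: record the page on a per-domain list
     * at map time, and mark it dirty when it is unmapped, or, if still
     * mapped, when the final iteration of a save operation calls
     * hvm_mapped_guest_frames_mark_dirty().
     */
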
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -1556,6 +1556,8 @@ int hvm_domain_initialise(struct domain 
>      INIT_LIST_HEAD(&d->arch.hvm_domain.ioreq_server.list);
>      spin_lock_init(&d->arch.hvm_domain.irq_lock);
>      spin_lock_init(&d->arch.hvm_domain.uc_lock);
> +    spin_lock_init(&d->arch.hvm_domain.write_map.lock);
> +    INIT_LIST_HEAD(&d->arch.hvm_domain.write_map.list);
>  
>      hvm_init_cacheattr_region_list(d);
>  
> @@ -3667,6 +3669,11 @@ int hvm_virtual_to_linear_addr(
>      return 1;
>  }
>  
> +struct hvm_write_map {
> +    struct list_head list;
> +    struct page_info *page;
> +};
> +
>  /* On non-NULL return, we leave this function holding an additional 
>   * ref on the underlying mfn, if any */
>  static void *_hvm_map_guest_frame(unsigned long gfn, bool_t permanent,
> @@ -3694,15 +3701,30 @@ static void *_hvm_map_guest_frame(unsign
>  
>      if ( writable )
>      {
> -        if ( !p2m_is_discard_write(p2mt) )
> -            paging_mark_dirty(d, page_to_mfn(page));
> -        else
> +        if ( unlikely(p2m_is_discard_write(p2mt)) )
>              *writable = 0;
> +        else if ( !permanent )
> +            paging_mark_dirty(d, page_to_mfn(page));
>      }
>  
>      if ( !permanent )
>          return __map_domain_page(page);
>  
> +    if ( writable && *writable )
> +    {
> +        struct hvm_write_map *track = xmalloc(struct hvm_write_map);
> +
> +        if ( !track )
> +        {
> +            put_page(page);
> +            return NULL;
> +        }
> +        track->page = page;
> +        spin_lock(&d->arch.hvm_domain.write_map.lock);
> +        list_add_tail(&track->list, &d->arch.hvm_domain.write_map.list);

Map and unmap of these pages will happen in a stacked (LIFO) fashion,
so putting new entries at the head of the list will be more efficient
when walking the list for removal.
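
A minimal sketch of that suggestion, reusing the structures from the
hunk above; the only change is list_add() in place of list_add_tail():

    spin_lock(&d->arch.hvm_domain.write_map.lock);
    /* Head insertion: a LIFO unmap pattern finds its entry early. */
    list_add(&track->list, &d->arch.hvm_domain.write_map.list);
    spin_unlock(&d->arch.hvm_domain.write_map.lock);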

> +        spin_unlock(&d->arch.hvm_domain.write_map.lock);
> +    }
> +
>      map = __map_domain_page_global(page);
>      if ( !map )
>          put_page(page);
> @@ -3725,18 +3747,45 @@ void *hvm_map_guest_frame_ro(unsigned lo
>  void hvm_unmap_guest_frame(void *p, bool_t permanent)
>  {
>      unsigned long mfn;
> +    struct page_info *page;
>  
>      if ( !p )
>          return;
>  
>      mfn = domain_page_map_to_mfn(p);
> +    page = mfn_to_page(mfn);
>  
>      if ( !permanent )
>          unmap_domain_page(p);
>      else
> +    {
> +        struct domain *d = page_get_owner(page);
> +        struct hvm_write_map *track;
> +
>          unmap_domain_page_global(p);
> +        spin_lock(&d->arch.hvm_domain.write_map.lock);
> +        list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
> +            if ( track->page == page )
> +            {
> +                paging_mark_dirty(d, mfn);
> +                list_del(&track->list);
> +                xfree(track);
> +                break;
> +            }
> +        spin_unlock(&d->arch.hvm_domain.write_map.lock);
> +    }
> +
> +    put_page(page);
> +}
> +
> +void hvm_mapped_guest_frames_mark_dirty(struct domain *d)
> +{
> +    struct hvm_write_map *track;
>  
> -    put_page(mfn_to_page(mfn));
> +    spin_lock(&d->arch.hvm_domain.write_map.lock);
> +    list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
> +        paging_mark_dirty(d, page_to_mfn(track->page));

This is potentially a long-running operation.  It might be easier to
ASSERT() an upper limit on the number of mapped pages than to make this
operation restartable.
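
One hedged sketch of that approach, where MAX_WRITE_MAPPED_PAGES is a
made-up bound, not a constant defined anywhere in this patch:

    void hvm_mapped_guest_frames_mark_dirty(struct domain *d)
    {
        struct hvm_write_map *track;
        unsigned int count = 0;

        spin_lock(&d->arch.hvm_domain.write_map.lock);
        list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
        {
            /* Debug builds would catch an unexpectedly long list here. */
            ASSERT(++count <= MAX_WRITE_MAPPED_PAGES);
            paging_mark_dirty(d, page_to_mfn(track->page));
        }
        spin_unlock(&d->arch.hvm_domain.write_map.lock);
    }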

> +    spin_unlock(&d->arch.hvm_domain.write_map.lock);
>  }
>  
>  static void *hvm_map_entry(unsigned long va, bool_t *writable)
> --- a/xen/arch/x86/mm/paging.c
> +++ b/xen/arch/x86/mm/paging.c
> @@ -29,6 +29,7 @@
>  #include <asm/hvm/nestedhvm.h>
>  #include <xen/numa.h>
>  #include <xsm/xsm.h>
> +#include <public/sched.h> /* SHUTDOWN_suspend */
>  
>  #include "mm-locks.h"
>  
> @@ -422,6 +423,14 @@ static int paging_log_dirty_op(struct do
>  
>      if ( !resuming )
>      {
> +        /*
> +         * Mark dirty all currently write-mapped pages on the final iteration
> +         * of a HVM save operation.
> +         */
> +        if ( has_hvm_container_domain(d) && d->is_shut_down &&
> +             d->shutdown_code == SHUTDOWN_suspend )
> +            hvm_mapped_guest_frames_mark_dirty(d);

I am not sure whether this is useful.  There are situations, such as
memory snapshot, where it is insufficient, as the domain doesn't suspend.

It would probably be better to hook this off a specific request from the
toolstack, as the migration code is in a far better position to know
whether this information is needed or can be deferred to the next iteration.
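
Purely as illustration, such a hook might look like the sketch below;
XEN_DOMCTL_SHADOW_OP_FLUSH_MAPPED is an invented flag, not an existing
part of the domctl interface:

    /* In paging_log_dirty_op(), replacing the shutdown-state check: */
    if ( has_hvm_container_domain(d) &&
         (sc->mode & XEN_DOMCTL_SHADOW_OP_FLUSH_MAPPED) /* hypothetical */ )
        hvm_mapped_guest_frames_mark_dirty(d);

The toolstack would set the flag only on the pass where it consumes the
final dirty bitmap.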

~Andrew

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

