
Re: [Xen-devel] [RFC Patch v3 01/18] copy the correct page to memory



On Fri, 2014-09-05 at 17:10 +0800, Wen Congyang wrote:
> From: Hong Tao <bobby.hong@xxxxxxxxxx>
> 
> apply_batch() only handles MAX_BATCH_SIZE pages at a time. If there
> are any bogus/unmapped/allocate-only/broken pages, we skip them, so
> when we call apply_batch() again, the first page's index into
> pagebuf->pages is curbatch - invalid_pages, where invalid_pages is
> the number of bogus/unmapped/allocate-only/broken pages found so far.
> 
> In many cases invalid_pages is 0, which is why this error usually
> goes unnoticed.
> 
> Signed-off-by: Hong Tao <bobby.hong@xxxxxxxxxx>
> Signed-off-by: Wen Congyang <wency@xxxxxxxxxxxxxx>

Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>

Hopefully all of this stuff will soon be replaced by migration v2, but
we may as well apply this fix in the meantime. CCing Andy C on the
unlikely chance that he carried this misbehaviour over into v2.
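
For anyone puzzling over the index arithmetic, here is a minimal
standalone sketch of the bug (not libxc code: NR_PFNS, BATCH_SIZE and
the INVALID marker are made-up stand-ins for the real stream and for
the XTAB/XALLOC/BROKEN page types). pfn_types[] has one entry per pfn
sent, but pages[] only carries payloads for pages that actually have
data, so the data index has to lag curbatch by the number of pages
skipped in earlier batches:

#include <stdio.h>

#define NR_PFNS    6
#define BATCH_SIZE 2
#define INVALID    1  /* stands in for XTAB/XALLOC/BROKEN */
#define VALID      0

int main(void)
{
    /* Entries 1 and 4 have no payload in pages[]. */
    int pfn_types[NR_PFNS] = { VALID, INVALID, VALID, VALID, INVALID, VALID };
    int curbatch, invalid_pages = 0;

    for ( curbatch = 0; curbatch < NR_PFNS; curbatch += BATCH_SIZE )
    {
        int i, curpage, local_invalid_pages = 0;
        int first_page = curbatch - invalid_pages;  /* the fix */

        for ( i = 0, curpage = 0; i < BATCH_SIZE; i++ )
        {
            if ( pfn_types[curbatch + i] == INVALID )
            {
                local_invalid_pages++;
                continue;
            }
            /* The old code indexed pages[] with (curpage + curbatch),
             * which reads the wrong payload once any page has been
             * skipped in an earlier batch. */
            printf("pfn_types[%d] -> pages[%d]\n",
                   curbatch + i, first_page + curpage);
            curpage++;
        }
        invalid_pages += local_invalid_pages;
    }
    return 0;
}

With the old (curpage + curbatch) indexing, the second batch above
would copy from pages[2] and pages[3] instead of pages[1] and
pages[2]: everything after the first skipped page is shifted.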

> ---
>  tools/libxc/xc_domain_restore.c | 24 +++++++++++++++++++-----
>  1 file changed, 19 insertions(+), 5 deletions(-)
> 
> diff --git a/tools/libxc/xc_domain_restore.c b/tools/libxc/xc_domain_restore.c
> index b9a56d5..bec716c 100644
> --- a/tools/libxc/xc_domain_restore.c
> +++ b/tools/libxc/xc_domain_restore.c
> @@ -1106,7 +1106,7 @@ static int pagebuf_get(xc_interface *xch, struct restore_ctx *ctx,
>  static int apply_batch(xc_interface *xch, uint32_t dom, struct restore_ctx *ctx,
>                         xen_pfn_t* region_mfn, unsigned long* pfn_type, int pae_extended_cr3,
>                         struct xc_mmu* mmu,
> -                       pagebuf_t* pagebuf, int curbatch)
> +                       pagebuf_t* pagebuf, int curbatch, int *invalid_pages)
>  {
>      int i, j, curpage, nr_mfns;
>      int k, scount;
> @@ -1121,6 +1121,12 @@ static int apply_batch(xc_interface *xch, uint32_t dom, struct restore_ctx *ctx,
>      struct domain_info_context *dinfo = &ctx->dinfo;
>      int* pfn_err = NULL;
>      int rc = -1;
> +    int local_invalid_pages = 0;
> +    /* We have handled curbatch pages before this batch, and *invalid_pages
> +     * of them have no data in pagebuf->pages. So the data for the first
> +     * page of this batch is at index (curbatch - *invalid_pages).
> +     */
> +    int first_page = curbatch - *invalid_pages;
>  
>      unsigned long mfn, pfn, pagetype;
>  
> @@ -1293,10 +1299,13 @@ static int apply_batch(xc_interface *xch, uint32_t dom, struct restore_ctx *ctx,
>          pfn      = pagebuf->pfn_types[i + curbatch] & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
>          pagetype = pagebuf->pfn_types[i + curbatch] &  XEN_DOMCTL_PFINFO_LTAB_MASK;
>  
> -        if ( pagetype == XEN_DOMCTL_PFINFO_XTAB 
> +        if ( pagetype == XEN_DOMCTL_PFINFO_XTAB
>               || pagetype == XEN_DOMCTL_PFINFO_XALLOC)
> +        {
> +            local_invalid_pages++;
>              /* a bogus/unmapped/allocate-only page: skip it */
>              continue;
> +        }
>  
>          if ( pagetype == XEN_DOMCTL_PFINFO_BROKEN )
>          {
> @@ -1306,6 +1315,8 @@ static int apply_batch(xc_interface *xch, uint32_t dom, struct restore_ctx *ctx,
>                        "dom=%d, pfn=%lx\n", dom, pfn);
>                  goto err_mapped;
>              }
> +
> +            local_invalid_pages++;
>              continue;
>          }
>  
> @@ -1344,7 +1355,7 @@ static int apply_batch(xc_interface *xch, uint32_t dom, struct restore_ctx *ctx,
>              }
>          }
>          else
> -            memcpy(page, pagebuf->pages + (curpage + curbatch) * PAGE_SIZE,
> +            memcpy(page, pagebuf->pages + (first_page + curpage) * PAGE_SIZE,
>                     PAGE_SIZE);
>  
>          pagetype &= XEN_DOMCTL_PFINFO_LTABTYPE_MASK;
> @@ -1418,6 +1429,7 @@ static int apply_batch(xc_interface *xch, uint32_t dom, struct restore_ctx *ctx,
>      } /* end of 'batch' for loop */
>  
>      rc = nraces;
> +    *invalid_pages += local_invalid_pages;
>  
>    err_mapped:
>      munmap(region_base, j*PAGE_SIZE);
> @@ -1621,7 +1633,7 @@ int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
>   loadpages:
>      for ( ; ; )
>      {
> -        int j, curbatch;
> +        int j, curbatch, invalid_pages;
>  
>          xc_report_progress_step(xch, n, dinfo->p2m_size);
>  
> @@ -1665,11 +1677,13 @@ int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
>  
>          /* break pagebuf into batches */
>          curbatch = 0;
> +        invalid_pages = 0;
>          while ( curbatch < j ) {
>              int brc;
>  
>              brc = apply_batch(xch, dom, ctx, region_mfn, pfn_type,
> -                              pae_extended_cr3, mmu, &pagebuf, curbatch);
> +                              pae_extended_cr3, mmu, &pagebuf, curbatch,
> +                              &invalid_pages);
>              if ( brc < 0 )
>                  goto out;
>  


