
Re: [Xen-devel] [PATCH 10/14] privcmd: refer to autotranslate not PVH in arch interfaces / comments.



On Thu, 4 Oct 2012, Ian Campbell wrote:
> PVH is X86 specific while this functionality is also used on ARM.

I really think that this should be merged with the original PVH patch.


> Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
> ---
>  arch/x86/xen/mmu.c    |   10 +++++-----
>  drivers/xen/privcmd.c |   46 ++++++++++++++++++++++------------------------
>  include/xen/xen-ops.h |    8 ++++----
>  3 files changed, 31 insertions(+), 33 deletions(-)
> 
> diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
> index 26097cb..3e781f9 100644
> --- a/arch/x86/xen/mmu.c
> +++ b/arch/x86/xen/mmu.c
> @@ -2506,7 +2506,7 @@ struct pvh_remap_data {
>       unsigned long fgmfn;            /* foreign domain's gmfn */
>       pgprot_t prot;
>       domid_t  domid;
> -     struct xen_pvh_pfn_info *pvhinfop;
> +     struct xen_remap_mfn_info *pvhinfop;
>  };
>  
>  static int pvh_map_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr, 
> @@ -2514,7 +2514,7 @@ static int pvh_map_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
>  {
>       int rc;
>       struct pvh_remap_data *remapp = data;
> -     struct xen_pvh_pfn_info *pvhp = remapp->pvhinfop;
> +     struct xen_remap_mfn_info *pvhp = remapp->pvhinfop;
>       unsigned long pfn = page_to_pfn(pvhp->pi_paga[pvhp->pi_next_todo++]);
>       pte_t pteval = pte_mkspecial(pfn_pte(pfn, remapp->prot));
>  
> @@ -2531,7 +2531,7 @@ static int pvh_map_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
>  static int pvh_remap_gmfn_range(struct vm_area_struct *vma,
>                               unsigned long addr, unsigned long mfn, int nr,
>                               pgprot_t prot, unsigned domid,
> -                             struct xen_pvh_pfn_info *pvhp)
> +                             struct xen_remap_mfn_info *pvhp)
>  {
>       int err;
>       struct pvh_remap_data pvhdata;
> @@ -2574,7 +2574,7 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
>                              unsigned long addr,
>                              unsigned long mfn, int nr,
>                              pgprot_t prot, unsigned domid,
> -                            struct xen_pvh_pfn_info *pvhp)
> +                            struct xen_remap_mfn_info *pvhp)
>  
>  {
>       struct remap_data rmd;
> @@ -2629,7 +2629,7 @@ EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
>  
>  /* Returns: Number of pages unmapped */
>  int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
> -                            struct xen_pvh_pfn_info *pvhp)
> +                            struct xen_remap_mfn_info *pvhp)
>  {
>       int count = 0;
>  
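
(Aside for anyone following along: pvh_map_pte_fn() above is what
consumes the reserved balloon pages one at a time, via
pi_paga[pi_next_todo++].  The hunks here elide the body of
pvh_remap_gmfn_range(), but a callback with that signature is normally
driven by apply_to_page_range(); a minimal sketch, assuming that is
what the elided body does:

    #include <linux/mm.h>

    /* Sketch only -- not the patch body.  Walks the ptes covering
     * nr pages starting at addr and calls pvh_map_pte_fn() on each,
     * which installs the next reserved pfn. */
    static int pvh_remap_gmfn_range_sketch(struct vm_area_struct *vma,
                                           unsigned long addr, int nr,
                                           struct pvh_remap_data *data)
    {
            return apply_to_page_range(vma->vm_mm, addr,
                                       (unsigned long)nr << PAGE_SHIFT,
                                       pvh_map_pte_fn, data);
    }
)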
> diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
> index bf4d62a..ebf3c8d 100644
> --- a/drivers/xen/privcmd.c
> +++ b/drivers/xen/privcmd.c
> @@ -265,18 +265,16 @@ struct mmap_batch_state {
>       xen_pfn_t __user *user_mfn;
>  };
>  
> -/* PVH dom0 fyi: if domU being created is PV, then mfn is mfn(addr on bus). If
> - * it's PVH then mfn is pfn (input to HAP). */
>  static int mmap_batch_fn(void *data, void *state)
>  {
>       xen_pfn_t *mfnp = data;
>       struct mmap_batch_state *st = state;
>       struct vm_area_struct *vma = st->vma;
> -     struct xen_pvh_pfn_info *pvhp = vma ? vma->vm_private_data : NULL;
> +     struct xen_remap_mfn_info *info = vma ? vma->vm_private_data : NULL;
>       int ret;
>  
>       ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
> -                                      st->vma->vm_page_prot, st->domain, pvhp);
> +                                      st->vma->vm_page_prot, st->domain, info);
>  
>       /* Store error code for second pass. */
>       *(st->err++) = ret;
> @@ -315,33 +313,33 @@ static int mmap_return_errors_v1(void *data, void *state)
>  /* Allocate pfns that are then mapped with gmfns from foreign domid. Update
>   * the vma with the page info to use later.
>   * Returns: 0 if success, otherwise -errno
> - */ 
> -static int pvh_privcmd_resv_pfns(struct vm_area_struct *vma, int numpgs)
> + */
> +static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
>  {
>       int rc;
> -     struct xen_pvh_pfn_info *pvhp;
> +     struct xen_remap_mfn_info *info;
>  
> -     pvhp = kzalloc(sizeof(struct xen_pvh_pfn_info), GFP_KERNEL);
> -     if (pvhp == NULL)
> +     info = kzalloc(sizeof(struct xen_remap_mfn_info), GFP_KERNEL);
> +     if (info == NULL)
>               return -ENOMEM;
>  
> -     pvhp->pi_paga = kcalloc(numpgs, sizeof(pvhp->pi_paga[0]), GFP_KERNEL);
> -     if (pvhp->pi_paga == NULL) {
> -             kfree(pvhp);
> +     info->pi_paga = kcalloc(numpgs, sizeof(info->pi_paga[0]), GFP_KERNEL);
> +     if (info->pi_paga == NULL) {
> +             kfree(info);
>               return -ENOMEM;
>       }
>  
> -     rc = alloc_xenballooned_pages(numpgs, pvhp->pi_paga, 0);
> +     rc = alloc_xenballooned_pages(numpgs, info->pi_paga, 0);
>       if (rc != 0) {
>               pr_warn("%s Could not alloc %d pfns rc:%d\n", __FUNCTION__, 
>                       numpgs, rc);
> -             kfree(pvhp->pi_paga);
> -             kfree(pvhp);
> +             kfree(info->pi_paga);
> +             kfree(info);
>               return -ENOMEM;
>       }
> -     pvhp->pi_num_pgs = numpgs;
> +     info->pi_num_pgs = numpgs;
>       BUG_ON(vma->vm_private_data != (void *)1);
> -     vma->vm_private_data = pvhp;
> +     vma->vm_private_data = info;
>  
>       return 0;
>  }
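
(For readers new to this path: the pages reserved here are consumed
later, one per mapped frame, by the pte callback in mmu.c, and handed
back on unmap.  Roughly, the sequence on the autotranslated path is
the following; a sketch with error handling trimmed, using the calls
and names from this patch:

    /* 1. mmap/ioctl time: reserve one balloon page per frame */
    ret = alloc_empty_pages(vma, m.num);

    /* 2. per frame: remap over the next reserved page; on this path
     *    the "mfn" argument is really a pfn fed to HAP, and the
     *    backing page is info->pi_paga[info->pi_next_todo++] */
    ret = xen_remap_domain_mfn_range(vma, st->va & PAGE_MASK, *mfnp, 1,
                                     vma->vm_page_prot, st->domain, info);

    /* 3. munmap time: privcmd_close() below unmaps the range and
     *    returns the pages to the balloon */
)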
> @@ -414,7 +412,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
>               goto out;
>       }
>       if (xen_feature(XENFEAT_auto_translated_physmap)) {
> -             if ((ret = pvh_privcmd_resv_pfns(vma, m.num))) {
> +             if ((ret = alloc_empty_pages(vma, m.num))) {
>                       up_write(&mm->mmap_sem);
>                       goto out;
>               }
> @@ -490,16 +488,16 @@ static long privcmd_ioctl(struct file *file,
>  static void privcmd_close(struct vm_area_struct *vma)
>  {
>       int count;
> -     struct xen_pvh_pfn_info *pvhp = vma ? vma->vm_private_data : NULL;
> +     struct xen_remap_mfn_info *info = vma ? vma->vm_private_data : NULL;
>  
> -     if (!pvhp || !xen_feature(XENFEAT_auto_translated_physmap))
> +     if (!info || !xen_feature(XENFEAT_auto_translated_physmap))
>               return;
>  
> -     count = xen_unmap_domain_mfn_range(vma, pvhp);
> +     count = xen_unmap_domain_mfn_range(vma, info);
>       while (count--)
> -             free_xenballooned_pages(1, &pvhp->pi_paga[count]);
> -     kfree(pvhp->pi_paga);
> -     kfree(pvhp);
> +             free_xenballooned_pages(1, &info->pi_paga[count]);
> +     kfree(info->pi_paga);
> +     kfree(info);
>  }
>  
>  static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
> diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
> index 6c5ad83..2f3cb06 100644
> --- a/include/xen/xen-ops.h
> +++ b/include/xen/xen-ops.h
> @@ -24,16 +24,16 @@ int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
>  void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order);
>  
>  struct vm_area_struct;
> -struct xen_pvh_pfn_info;
> +struct xen_remap_mfn_info;
>  int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
>                              unsigned long addr,
>                              unsigned long mfn, int nr,
>                              pgprot_t prot, unsigned domid,
> -                            struct xen_pvh_pfn_info *pvhp);
> +                            struct xen_remap_mfn_info *pvhp);
>  int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
> -                            struct xen_pvh_pfn_info *pvhp);
> +                            struct xen_remap_mfn_info *pvhp);
>  
> -struct xen_pvh_pfn_info {
> +struct xen_remap_mfn_info {
>       struct page **pi_paga;          /* pfn info page array */
>       int           pi_num_pgs;
>       int           pi_next_todo;
> -- 
> 1.7.2.5
> 
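
For reference, this is roughly what a caller outside privcmd looks
like against the renamed declarations in xen-ops.h (a minimal sketch;
the helper name and the gmfn/domid values are placeholders, not part
of the patch):

    #include <linux/mm.h>
    #include <xen/features.h>
    #include <xen/xen-ops.h>

    /* Sketch: map one foreign gmfn at the start of a vma.  On
     * autotranslated guests the caller must have parked a struct
     * xen_remap_mfn_info in vm_private_data first, the way
     * alloc_empty_pages() does in privcmd.c. */
    static int map_one_foreign_frame(struct vm_area_struct *vma,
                                     unsigned long gmfn, unsigned domid)
    {
            struct xen_remap_mfn_info *info =
                    xen_feature(XENFEAT_auto_translated_physmap) ?
                    vma->vm_private_data : NULL;

            return xen_remap_domain_mfn_range(vma, vma->vm_start, gmfn, 1,
                                              vma->vm_page_prot, domid,
                                              info);
    }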

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
