
Re: [Xen-devel] [PATCH v2] xen: support priv-mapping in an HVM tools domain



On 01/11/17 12:31, Paul Durrant wrote:
> If the domain has XENFEAT_auto_translated_physmap then use of the PV-
> specific HYPERVISOR_mmu_update hypercall is clearly incorrect.
> 
> This patch adds checks in xen_remap_domain_gfn_array() and
> xen_unmap_domain_gfn_array() which call through to the appropriate
> xlate_mmu function if the feature is present.
> 
> This patch also moves xen_remap_domain_gfn_range() into the PV-only MMU
> code and #ifdefs the (only) calling code in privcmd accordingly.
> 
> Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
> ---
> Cc: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
> Cc: Juergen Gross <jgross@xxxxxxxx>
> Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
> Cc: Ingo Molnar <mingo@xxxxxxxxxx>
> Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
> ---
>  arch/x86/xen/mmu.c    | 36 +++++++++++++++++-------------------
>  arch/x86/xen/mmu_pv.c | 11 +++++++++++
>  drivers/xen/privcmd.c | 17 +++++++++++++----
>  include/xen/xen-ops.h |  7 +++++++
>  4 files changed, 48 insertions(+), 23 deletions(-)
> 
> diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
> index 3e15345abfe7..01837c36e293 100644
> --- a/arch/x86/xen/mmu.c
> +++ b/arch/x86/xen/mmu.c
> @@ -91,12 +91,12 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
>       return 0;
>  }
>  
> -static int do_remap_gfn(struct vm_area_struct *vma,
> -                     unsigned long addr,
> -                     xen_pfn_t *gfn, int nr,
> -                     int *err_ptr, pgprot_t prot,
> -                     unsigned domid,
> -                     struct page **pages)
> +int xen_remap_gfn(struct vm_area_struct *vma,
> +               unsigned long addr,
> +               xen_pfn_t *gfn, int nr,
> +               int *err_ptr, pgprot_t prot,
> +               unsigned int domid,
> +               struct page **pages)
>  {
>       int err = 0;
>       struct remap_data rmd;
> @@ -166,36 +166,34 @@ static int do_remap_gfn(struct vm_area_struct *vma,
>       return err < 0 ? err : mapped;
>  }
>  
> -int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
> -                            unsigned long addr,
> -                            xen_pfn_t gfn, int nr,
> -                            pgprot_t prot, unsigned domid,
> -                            struct page **pages)
> -{
> -     return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages);
> -}
> -EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
> -
>  int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
>                              unsigned long addr,
>                              xen_pfn_t *gfn, int nr,
>                              int *err_ptr, pgprot_t prot,
>                              unsigned domid, struct page **pages)
>  {
> +     if (xen_feature(XENFEAT_auto_translated_physmap))
> +             return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
> +                                              prot, domid, pages);
> +
>       /* We BUG_ON because it's a programmer error to pass a NULL err_ptr,
>        * and the consequences later is quite hard to detect what the actual
>        * cause of "wrong memory was mapped in".
>        */
>       BUG_ON(err_ptr == NULL);
> -     return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages);
> +     return xen_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid,
> +                          pages);
>  }
>  EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
>  
>  /* Returns: 0 success */
>  int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
> -                            int numpgs, struct page **pages)
> +                            int nr, struct page **pages)
>  {
> -     if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
> +     if (xen_feature(XENFEAT_auto_translated_physmap))
> +             return xen_xlate_unmap_gfn_range(vma, nr, pages);
> +
> +     if (!pages)
>               return 0;
>  
>       return -EINVAL;
> diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
> index 71495f1a86d7..4974d8a6c2b4 100644
> --- a/arch/x86/xen/mmu_pv.c
> +++ b/arch/x86/xen/mmu_pv.c
> @@ -2670,3 +2670,14 @@ phys_addr_t paddr_vmcoreinfo_note(void)
>               return __pa(vmcoreinfo_note);
>  }
>  #endif /* CONFIG_KEXEC_CORE */
> +
> +int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
> +                            unsigned long addr,
> +                            xen_pfn_t gfn, int nr,
> +                            pgprot_t prot, unsigned int domid,
> +                            struct page **pages)
> +{
> +     return xen_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid,
> +                          pages);
> +}
> +EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
> diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
> index feca75b07fdd..b58a1719b606 100644
> --- a/drivers/xen/privcmd.c
> +++ b/drivers/xen/privcmd.c
> @@ -215,6 +215,8 @@ static int traverse_pages_block(unsigned nelem, size_t size,
>       return ret;
>  }
>  
> +#ifdef CONFIG_XEN_PV
> +
>  struct mmap_gfn_state {
>       unsigned long va;
>       struct vm_area_struct *vma;
> @@ -261,10 +263,6 @@ static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
>       LIST_HEAD(pagelist);
>       struct mmap_gfn_state state;
>  
> -     /* We only support privcmd_ioctl_mmap_batch for auto translated. */
> -     if (xen_feature(XENFEAT_auto_translated_physmap))
> -             return -ENOSYS;
> -

CONFIG_XEN_PV doesn't mean we don't support running as an HVM guest.

I don't think you can remove this test, nor do I think it makes sense
to put privcmd_ioctl_mmap() inside an #ifdef.

You should rather add something like privcmd_ioctl_mmap_pv() containing
all the PV-specific functionality and call it here. For !CONFIG_XEN_PV
it can e.g. just BUG().
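
To make the suggested shape concrete, a minimal sketch (the split and the
stub behaviour are illustrative only, not taken from any posted patch; the
ioctl signature and the auto-translated check are as in the existing code):

#ifdef CONFIG_XEN_PV
static long privcmd_ioctl_mmap_pv(struct file *file, void __user *udata)
{
        /* ... existing PV-only body of privcmd_ioctl_mmap() ... */
}
#else
static long privcmd_ioctl_mmap_pv(struct file *file, void __user *udata)
{
        /* privcmd mmap is PV-only; reaching this would be a kernel bug */
        BUG();
}
#endif

static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
{
        /* We only support privcmd_ioctl_mmap_batch for auto translated. */
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return -ENOSYS;

        return privcmd_ioctl_mmap_pv(file, udata);
}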


Juergen

