
Re: [Xen-devel] [PATCH 01/18] xen: reinstate previously unused XENMEM_remove_from_physmap hypercall



On Thu, 2012-01-12 at 23:35 +0000, Daniel De Graaf wrote:
> From: Alex Zeffertt <alex.zeffertt@xxxxxxxxxxxxx>
> 
> This patch reinstates the XENMEM_remove_from_physmap hypercall
> which was removed in 19041:ee62aaafff46 because it was not used.
> 
> However, it is now needed in order to support xenstored stub domains.
> The xenstored stub domain is not privileged like dom0 and so cannot
> unilaterally map the xenbus page of other guests into its address
> space.  Therefore, before creating a domU the domain builder needs to
> seed its grant table with a grant ref allowing the xenstored stub
> domain to access the new domU's xenbus page.
> 
> At present domUs do not start with their grant table mapped.
> Instead, it is mapped when the guest requests its grant table from
> the hypervisor.
> 
> In order to seed the grant table, the domain builder first needs to
> map it into dom0's address space.  But the hypercall to do this
> requires a gpfn (guest pfn), which is an mfn for PV guests but a pfn
> for HVM guests.  Therefore, in order to seed the grant table of an
> HVM guest, dom0 needs to *temporarily* map it into the guest's
> "physical" address space.
> 
> Hence the need to reinstate the XENMEM_remove_from_physmap hypercall.
> 
> Signed-off-by: Alex Zeffertt <alex.zeffertt@xxxxxxxxxxxxx>
> Signed-off-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>

Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx> (modulo Jan's comment
about ordering in xlat.lst)

BTW, since Alex and Diego have subsequently left Citrix, you could take
my Acked-bys in this series as Signed-off-by on behalf of Citrix. I've
no idea whether that's necessary, though; I expect not.

Ian.
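
For reference, the builder-side flow the commit message describes would
look roughly like the sketch below. This is a minimal sketch, not the
patched toolstack: memory_op(), map_foreign_gpfn() and
unmap_foreign_gpfn() are assumed stand-ins for the builder's hypercall
and foreign-mapping primitives, and the scratch gpfn is any slot known
to be free in the new HVM guest's physmap; only the two XENMEM_*
commands and their argument structs come from the public interface.

    #include <errno.h>
    #include <xen/memory.h>       /* xen_add_to_physmap, xen_remove_from_physmap */
    #include <xen/grant_table.h>  /* grant_entry_v1_t, GNTTAB_RESERVED_XENSTORE */

    /* Assumed helpers, not part of this patch. */
    extern int memory_op(int cmd, void *arg);
    extern void *map_foreign_gpfn(domid_t domid, xen_pfn_t gpfn);
    extern void unmap_foreign_gpfn(void *va);

    static int seed_grant_table(domid_t domu, domid_t xenstored_domid,
                                xen_pfn_t scratch_gpfn, xen_pfn_t xenbus_gpfn)
    {
        struct xen_add_to_physmap xatp = {
            .domid = domu,
            .space = XENMAPSPACE_grant_table,
            .idx   = 0,               /* first grant-table frame */
            .gpfn  = scratch_gpfn,    /* temporary slot in the guest physmap */
        };
        struct xen_remove_from_physmap xrfp = {
            .domid = domu,
            .gpfn  = scratch_gpfn,
        };
        grant_entry_v1_t *gnttab;
        int rc;

        /* 1. Temporarily give the grant table a guest-physical address,
         *    so there is a gpfn the builder can name. */
        rc = memory_op(XENMEM_add_to_physmap, &xatp);
        if ( rc )
            return rc;

        /* 2. Map that gpfn into the builder and write the reserved
         *    xenstore entry, granting the xenstored stub domain access
         *    to the new domU's xenbus page. */
        gnttab = map_foreign_gpfn(domu, scratch_gpfn);
        if ( !gnttab )
            return -ENOMEM;
        gnttab[GNTTAB_RESERVED_XENSTORE].domid = xenstored_domid;
        gnttab[GNTTAB_RESERVED_XENSTORE].frame = xenbus_gpfn;
        gnttab[GNTTAB_RESERVED_XENSTORE].flags = GTF_permit_access;
        unmap_foreign_gpfn(gnttab);

        /* 3. Undo the temporary mapping: the reinstated hypercall. */
        return memory_op(XENMEM_remove_from_physmap, &xrfp);
    }

In the PV case the grant-table mfns can be named directly, so only the
HVM path needs the temporary add/remove pair.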

> ---
>  xen/arch/ia64/xen/mm.c          |   34 ++++++++++++++++++++++++++++++++++
>  xen/arch/x86/mm.c               |   35 +++++++++++++++++++++++++++++++++++
>  xen/arch/x86/x86_64/compat/mm.c |   14 ++++++++++++++
>  xen/include/public/memory.h     |   16 ++++++++++++++++
>  xen/include/xlat.lst            |    1 +
>  xen/include/xsm/xsm.h           |    6 ++++++
>  xen/xsm/dummy.c                 |    6 ++++++
>  xen/xsm/flask/hooks.c           |    6 ++++++
>  8 files changed, 118 insertions(+), 0 deletions(-)
> 
> diff --git a/xen/arch/ia64/xen/mm.c b/xen/arch/ia64/xen/mm.c
> index 84f6a61..a40f96a 100644
> --- a/xen/arch/ia64/xen/mm.c
> +++ b/xen/arch/ia64/xen/mm.c
> @@ -3448,6 +3448,40 @@ arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
>          break;
>      }
>  
> +    case XENMEM_remove_from_physmap:
> +    {
> +        struct xen_remove_from_physmap xrfp;
> +        unsigned long mfn;
> +        struct domain *d;
> +
> +        if ( copy_from_guest(&xrfp, arg, 1) )
> +            return -EFAULT;
> +
> +        rc = rcu_lock_target_domain_by_id(xrfp.domid, &d);
> +        if ( rc != 0 )
> +            return rc;
> +
> +        if ( xsm_remove_from_physmap(current->domain, d) )
> +        {
> +            rcu_unlock_domain(d);
> +            return -EPERM;
> +        }
> +
> +        domain_lock(d);
> +
> +        mfn = gmfn_to_mfn(d, xrfp.gpfn);
> +
> +        if ( mfn_valid(mfn) )
> +            guest_physmap_remove_page(d, xrfp.gpfn, mfn, 0);
> +
> +        domain_unlock(d);
> +
> +        rcu_unlock_domain(d);
> +
> +        break;
> +    }
> +
> +
>      case XENMEM_machine_memory_map:
>      {
>          struct xen_memory_map memmap;
> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> index 1f996e0..39cc3c0 100644
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -4871,6 +4871,41 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
>          return rc;
>      }
>  
> +    case XENMEM_remove_from_physmap:
> +    {
> +        struct xen_remove_from_physmap xrfp;
> +        unsigned long mfn;
> +        struct domain *d;
> +
> +        if ( copy_from_guest(&xrfp, arg, 1) )
> +            return -EFAULT;
> +
> +        rc = rcu_lock_target_domain_by_id(xrfp.domid, &d);
> +        if ( rc != 0 )
> +            return rc;
> +
> +        if ( xsm_remove_from_physmap(current->domain, d) )
> +        {
> +            rcu_unlock_domain(d);
> +            return -EPERM;
> +        }
> +
> +        domain_lock(d);
> +
> +        mfn = get_gfn_untyped(d, xrfp.gpfn);
> +
> +        if ( mfn_valid(mfn) )
> +            guest_physmap_remove_page(d, xrfp.gpfn, mfn, PAGE_ORDER_4K);
> +
> +        put_gfn(d, xrfp.gpfn);
> +
> +        domain_unlock(d);
> +
> +        rcu_unlock_domain(d);
> +
> +        break;
> +    }
> +
>      case XENMEM_set_memory_map:
>      {
>          struct xen_foreign_memory_map fmap;
> diff --git a/xen/arch/x86/x86_64/compat/mm.c b/xen/arch/x86/x86_64/compat/mm.c
> index bea94fe..dde5430 100644
> --- a/xen/arch/x86/x86_64/compat/mm.c
> +++ b/xen/arch/x86/x86_64/compat/mm.c
> @@ -82,6 +82,20 @@ int compat_arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
>          break;
>      }
>  
> +    case XENMEM_remove_from_physmap:
> +    {
> +        struct compat_remove_from_physmap cmp;
> +        struct xen_remove_from_physmap *nat = (void *)COMPAT_ARG_XLAT_VIRT_BASE;
> +
> +        if ( copy_from_guest(&cmp, arg, 1) )
> +            return -EFAULT;
> +
> +        XLAT_remove_from_physmap(nat, &cmp);
> +        rc = arch_memory_op(op, guest_handle_from_ptr(nat, void));
> +
> +        break;
> +    }
> +
>      case XENMEM_set_memory_map:
>      {
>          struct compat_foreign_memory_map cmp;
> diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
> index c5b78a8..308deff 100644
> --- a/xen/include/public/memory.h
> +++ b/xen/include/public/memory.h
> @@ -229,6 +229,22 @@ struct xen_add_to_physmap {
>  typedef struct xen_add_to_physmap xen_add_to_physmap_t;
>  DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
>  
> +/*
> + * Unmaps the page appearing at a particular GPFN from the specified guest's
> + * pseudophysical address space.
> + * arg == addr of xen_remove_from_physmap_t.
> + */
> +#define XENMEM_remove_from_physmap      15
> +struct xen_remove_from_physmap {
> +    /* Which domain to change the mapping for. */
> +    domid_t domid;
> +
> +    /* GPFN of the current mapping of the page. */
> +    xen_pfn_t     gpfn;
> +};
> +typedef struct xen_remove_from_physmap xen_remove_from_physmap_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_remove_from_physmap_t);
> +
>  /*** REMOVED ***/
>  /*#define XENMEM_translate_gpfn_list  8*/
>  
> diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
> index 3d92175..ee1772c 100644
> --- a/xen/include/xlat.lst
> +++ b/xen/include/xlat.lst
> @@ -81,6 +81,7 @@
>  !    processor_power                 platform.h
>  ?    processor_px                    platform.h
>  !    psd_package                     platform.h
> +!    remove_from_physmap             memory.h
>  ?    xenpf_pcpuinfo                  platform.h
>  ?    xenpf_pcpu_version              platform.h
>  !    sched_poll                      sched.h
> diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
> index df6cec2..566c808 100644
> --- a/xen/include/xsm/xsm.h
> +++ b/xen/include/xsm/xsm.h
> @@ -169,6 +169,7 @@ struct xsm_operations {
>      int (*update_va_mapping) (struct domain *d, struct domain *f, 
>                                                              l1_pgentry_t pte);
>      int (*add_to_physmap) (struct domain *d1, struct domain *d2);
> +    int (*remove_from_physmap) (struct domain *d1, struct domain *d2);
>      int (*sendtrigger) (struct domain *d);
>      int (*bind_pt_irq) (struct domain *d, struct xen_domctl_bind_pt_irq *bind);
>      int (*unbind_pt_irq) (struct domain *d);
> @@ -738,6 +739,11 @@ static inline int xsm_add_to_physmap(struct domain *d1, struct domain *d2)
>      return xsm_call(add_to_physmap(d1, d2));
>  }
>  
> +static inline int xsm_remove_from_physmap(struct domain *d1, struct domain *d2)
> +{
> +    return xsm_call(remove_from_physmap(d1, d2));
> +}
> +
>  static inline int xsm_sendtrigger(struct domain *d)
>  {
>      return xsm_call(sendtrigger(d));
> diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
> index 4bbfbff..65daa4e 100644
> --- a/xen/xsm/dummy.c
> +++ b/xen/xsm/dummy.c
> @@ -529,6 +529,11 @@ static int dummy_add_to_physmap (struct domain *d1, struct domain *d2)
>      return 0;
>  }
>  
> +static int dummy_remove_from_physmap (struct domain *d1, struct domain *d2)
> +{
> +    return 0;
> +}
> +
>  static int dummy_sendtrigger (struct domain *d)
>  {
>      return 0;
> @@ -690,6 +695,7 @@ void xsm_fixup_ops (struct xsm_operations *ops)
>      set_to_dummy_if_null(ops, mmu_machphys_update);
>      set_to_dummy_if_null(ops, update_va_mapping);
>      set_to_dummy_if_null(ops, add_to_physmap);
> +    set_to_dummy_if_null(ops, remove_from_physmap);
>      set_to_dummy_if_null(ops, sendtrigger);
>      set_to_dummy_if_null(ops, bind_pt_irq);
>      set_to_dummy_if_null(ops, pin_mem_cacheattr);
> diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
> index 0d35767..a2020a9 100644
> --- a/xen/xsm/flask/hooks.c
> +++ b/xen/xsm/flask/hooks.c
> @@ -1283,6 +1283,11 @@ static int flask_add_to_physmap(struct domain *d1, struct domain *d2)
>      return domain_has_perm(d1, d2, SECCLASS_MMU, MMU__PHYSMAP);
>  }
>  
> +static int flask_remove_from_physmap(struct domain *d1, struct domain *d2)
> +{
> +    return domain_has_perm(d1, d2, SECCLASS_MMU, MMU__PHYSMAP);
> +}
> +
>  static int flask_sendtrigger(struct domain *d)
>  {
>      return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, DOMAIN__TRIGGER);
> @@ -1550,6 +1555,7 @@ static struct xsm_operations flask_ops = {
>      .mmu_machphys_update = flask_mmu_machphys_update,
>      .update_va_mapping = flask_update_va_mapping,
>      .add_to_physmap = flask_add_to_physmap,
> +    .remove_from_physmap = flask_remove_from_physmap,
>      .sendtrigger = flask_sendtrigger,
>      .get_device_group = flask_get_device_group,
>      .test_assign_device = flask_test_assign_device,
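
A note for policy writers: the FLASK hook above reuses MMU__PHYSMAP, so
a policy that already lets the builder domain use XENMEM_add_to_physmap
on its target also covers the reinstated call. A fragment along these
lines would grant it (the dom0_t/domU_t type names are illustrative,
not from this patch):

    allow dom0_t domU_t:mmu physmap;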


