
Re: [Xen-devel] [PATCH v6 02/10] xsm bits for vNUMA hypercalls



On Fri, Jul 18, 2014 at 01:50:01AM -0400, Elena Ufimtseva wrote:
> Define the xsm_get_vnumainfo hook for the XENMEM_getvnumainfo
> subop, used by domains that wish to receive their vNUMA
> topology. Add an xsm hook for XEN_DOMCTL_setvnumainfo. Also
> add basic FLASK policies.

CC-ing Daniel - the XSM maintainer.

> 
> Signed-off-by: Elena Ufimtseva <ufimtseva@xxxxxxxxx>
> ---
>  xen/common/memory.c                 |    7 +++++++
>  xen/include/xsm/dummy.h             |    6 ++++++
>  xen/include/xsm/xsm.h               |    7 +++++++
>  xen/xsm/dummy.c                     |    1 +
>  xen/xsm/flask/hooks.c               |   10 ++++++++++
>  xen/xsm/flask/policy/access_vectors |    4 ++++
>  6 files changed, 35 insertions(+)
> 
> diff --git a/xen/common/memory.c b/xen/common/memory.c
> index 925b9fc..9a87aa8 100644
> --- a/xen/common/memory.c
> +++ b/xen/common/memory.c
> @@ -988,6 +988,13 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
>          if ( (d = rcu_lock_domain_by_any_id(topology.domid)) == NULL )
>              return -ESRCH;
>  
> +        rc = xsm_get_vnumainfo(XSM_TARGET, d);
> +        if ( rc )
> +        {
> +            rcu_unlock_domain(d);
> +            return rc;
> +        }
> +
>          rc = -EOPNOTSUPP;
>          if ( d->vnuma == NULL )
>              goto vnumainfo_out;
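
The check above sits at the top of the XENMEM_getvnumainfo handler, so a
guest reaches it with an ordinary memory_op hypercall. A hedged guest-side
sketch for context (the vnuma_topology_info layout is defined earlier in
this series, so the struct and field names below are assumptions, not
quotes from this patch):

    /* Hypothetical guest-side caller; layout assumed from the public
     * header added earlier in the series. */
    struct vnuma_topology_info topo = { .domid = DOMID_SELF };
    /* ... fill in the counts and output buffer handles here ... */
    long rc = HYPERVISOR_memory_op(XENMEM_getvnumainfo, &topo);
    if ( rc == -EOPNOTSUPP )
        ; /* hypervisor holds no vNUMA topology for this domain */

With XSM_TARGET, the dummy policy lets a domain run this on itself, which
matches the use case described in the commit message.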
> diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
> index c5aa316..4262fd8 100644
> --- a/xen/include/xsm/dummy.h
> +++ b/xen/include/xsm/dummy.h
> @@ -317,6 +317,12 @@ static XSM_INLINE int xsm_set_pod_target(XSM_DEFAULT_ARG struct domain *d)
>      return xsm_default_action(action, current->domain, d);
>  }
>  
> +static XSM_INLINE int xsm_get_vnumainfo(XSM_DEFAULT_ARG struct domain *d)
> +{
> +    XSM_ASSERT_ACTION(XSM_TARGET);
> +    return xsm_default_action(action, current->domain, d);
> +}
> +
>  #if defined(HAS_PASSTHROUGH) && defined(HAS_PCI)
>  static XSM_INLINE int xsm_get_device_group(XSM_DEFAULT_ARG uint32_t machine_bdf)
>  {
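
The XSM_TARGET default above is what decides the outcome when FLASK is not
enabled. Paraphrasing the dispatch in xsm_default_action() from this same
header (a simplified sketch, not a verbatim quote -- the real switch also
handles XSM_DM_PRIV):

    switch ( action )
    {
    case XSM_HOOK:                 /* no restriction */
        return 0;
    case XSM_TARGET:               /* a domain may act on itself... */
        if ( src == target )
            return 0;
        /* fall through */
    case XSM_PRIV:                 /* ...otherwise privilege is required */
        return src->is_privileged ? 0 : -EPERM;
    default:
        return -EPERM;
    }

So a guest can fetch its own topology and dom0 can fetch anyone's.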
> diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
> index a85045d..c7ec562 100644
> --- a/xen/include/xsm/xsm.h
> +++ b/xen/include/xsm/xsm.h
> @@ -169,6 +169,7 @@ struct xsm_operations {
>      int (*unbind_pt_irq) (struct domain *d, struct xen_domctl_bind_pt_irq *bind);
>      int (*ioport_permission) (struct domain *d, uint32_t s, uint32_t e, uint8_t allow);
>      int (*ioport_mapping) (struct domain *d, uint32_t s, uint32_t e, uint8_t allow);
> +    int (*get_vnumainfo) (struct domain *d);
>  #endif
>  };
>  
> @@ -653,6 +654,12 @@ static inline int xsm_ioport_mapping (xsm_default_t def, struct domain *d, uint3
>  {
>      return xsm_ops->ioport_mapping(d, s, e, allow);
>  }
> +
> +static inline int xsm_get_vnumainfo (xsm_default_t def, struct domain *d)
> +{
> +    return xsm_ops->get_vnumainfo(d);
> +}
> +
>  #endif /* CONFIG_X86 */
>  
>  #endif /* XSM_NO_WRAPPERS */
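
One placement concern: XENMEM_getvnumainfo is handled in common code
(xen/common/memory.c above), yet both the ops-table slot and this inline
wrapper land inside the CONFIG_X86 section, which would break a non-x86
build of common/memory.c. A sketch of the fix -- hoist both declarations
above the #ifdef CONFIG_X86 block:

    /* In struct xsm_operations, outside any CONFIG_X86 guard: */
    int (*get_vnumainfo) (struct domain *d);

    /* And the wrapper, likewise in the unconditional section: */
    static inline int xsm_get_vnumainfo (xsm_default_t def, struct domain *d)
    {
        return xsm_ops->get_vnumainfo(d);
    }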
> diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
> index c95c803..0826a8b 100644
> --- a/xen/xsm/dummy.c
> +++ b/xen/xsm/dummy.c
> @@ -85,6 +85,7 @@ void xsm_fixup_ops (struct xsm_operations *ops)
>      set_to_dummy_if_null(ops, iomem_permission);
>      set_to_dummy_if_null(ops, iomem_mapping);
>      set_to_dummy_if_null(ops, pci_config_permission);
> +    set_to_dummy_if_null(ops, get_vnumainfo);
>  
>  #if defined(HAS_PASSTHROUGH) && defined(HAS_PCI)
>      set_to_dummy_if_null(ops, get_device_group);
> diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
> index f2f59ea..00efba1 100644
> --- a/xen/xsm/flask/hooks.c
> +++ b/xen/xsm/flask/hooks.c
> @@ -404,6 +404,11 @@ static int flask_claim_pages(struct domain *d)
>      return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__SETCLAIM);
>  }
>  
> +static int flask_get_vnumainfo(struct domain *d)
> +{
> +    return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__GET_VNUMAINFO);
> +}
> +
>  static int flask_console_io(struct domain *d, int cmd)
>  {
>      u32 perm;
> @@ -715,6 +720,9 @@ static int flask_domctl(struct domain *d, int cmd)
>      case XEN_DOMCTL_cacheflush:
>          return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__CACHEFLUSH);
>  
> +    case XEN_DOMCTL_setvnumainfo:
> +        return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__SET_VNUMAINFO);
> +
>      default:
>          printk("flask_domctl: Unknown op %d\n", cmd);
>          return -EPERM;
> @@ -1552,6 +1560,8 @@ static struct xsm_operations flask_ops = {
>      .hvm_param_nested = flask_hvm_param_nested,
>  
>      .do_xsm_op = do_flask_op,
> +    .get_vnumainfo = flask_get_vnumainfo,
> +
>  #ifdef CONFIG_COMPAT
>      .do_compat_op = compat_flask_op,
>  #endif
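
Both hooks funnel through current_has_perm(), so the calling domain's SID is
the source and d's SID is the target of the access vector cache lookup. A
paraphrase of that helper for context (simplified from hooks.c, not a
verbatim quote):

    /* Resolve the two security IDs, then ask the AVC whether the
     * requested permission bits are granted. */
    static int current_has_perm(struct domain *d, u16 class, u32 perms)
    {
        struct domain_security_struct *csec = current->domain->ssid;
        struct domain_security_struct *tsec = d->ssid;

        return avc_has_perm(csec->sid, tsec->sid, class, perms, NULL);
    }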
> diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors
> index 32371a9..d279841 100644
> --- a/xen/xsm/flask/policy/access_vectors
> +++ b/xen/xsm/flask/policy/access_vectors
> @@ -200,6 +200,10 @@ class domain2
>      cacheflush
>  # Creation of the hardware domain when it is not dom0
>      create_hardware_domain
> +# XEN_DOMCTL_setvnumainfo
> +    set_vnumainfo
> +# XENMEM_getvnumainfo
> +    get_vnumainfo
>  }
>  
>  # Similar to class domain, but primarily contains domctls related to HVM domains
> -- 
> 1.7.10.4
> 
