
Re: [Xen-devel] [PATCH 1/2] xen/xsm: add hooks for claim and debug port



On Thu, 2013-04-11 at 16:35 +0100, Daniel De Graaf wrote:
> This replaces the IS_PRIV checks on these newly introduced operations

grep finds 33 instances of IS_PRIV (which includes IS_PRIV_FOR, the
#defines, and the ones you are removing here). Should we be working to
remove all of those and replace them with XSM checks? They aren't all
hypercall permission checks, so I guess not?
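
(To be concrete, the conversion pattern in this patch, sketched below
with the patch's own names from the memory.c hunk:

    /* before: open-coded privilege check in the hypercall handler */
    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    /* after: mediate through an XSM hook; with the dummy module,
     * XSM_PRIV keeps the old IS_PRIV behaviour, while FLASK consults
     * its policy via the new access vectors */
    rc = xsm_claim_pages(XSM_PRIV, d);
    if ( !rc )
        rc = domain_set_outstanding_pages(d, reservation.nr_extents);

By "replace with xsm" I mean exactly this mechanical substitution.)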

I just fear you are going to face a never-ending trickle of people
adding new uses of IS_PRIV; perhaps a quick rename might help?
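Something along these lines would do it (sketch only, name hypothetical,
and assuming IS_PRIV is still just the ->is_privileged test):

    /* keep the semantics but lose the inviting name */
    #define IS_PRIV_LEGACY(_d) ((_d)->is_privileged)
    /* ...then convert or rename the remaining callers and delete
     * IS_PRIV itself, so any new use fails to compile and has to be
     * an explicit, reviewable choice. */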

> with equivalent XSM hooks, and adds FLASK access vectors for them.
> 
> Signed-off-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
> Cc: Jan Beulich <jbeulich@xxxxxxxx>
> Cc: Keir Fraser <keir@xxxxxxx>
> Cc: Dan Magenheimer <dan.magenheimer@xxxxxxxxxx>
> ---
>  tools/flask/policy/policy/modules/xen/xen.if |  2 +-
>  tools/flask/policy/policy/modules/xen/xen.te |  3 +++
>  xen/arch/x86/physdev.c                       | 15 +++++++++------
>  xen/common/memory.c                          | 15 ++++++++-------
>  xen/include/xsm/dummy.h                      | 18 ++++++++++++++++++
>  xen/include/xsm/xsm.h                        | 18 ++++++++++++++++++
>  xen/xsm/dummy.c                              |  3 +++
>  xen/xsm/flask/hooks.c                        | 20 ++++++++++++++++++++
>  xen/xsm/flask/policy/access_vectors          | 18 ++++++++++++++----
>  xen/xsm/flask/policy/security_classes        |  1 +
>  10 files changed, 95 insertions(+), 18 deletions(-)
> 
> diff --git a/tools/flask/policy/policy/modules/xen/xen.if b/tools/flask/policy/policy/modules/xen/xen.if
> index 2ce2212..fbb329d 100644
> --- a/tools/flask/policy/policy/modules/xen/xen.if
> +++ b/tools/flask/policy/policy/modules/xen/xen.if
> @@ -49,7 +49,7 @@ define(`create_domain_common', `
>                         getdomaininfo hypercall setvcpucontext setextvcpucontext
>                         getscheduler getvcpuinfo getvcpuextstate getaddrsize
>                         getvcpuaffinity setvcpuaffinity };
> -       allow $1 $2:domain2 { set_cpuid settsc setscheduler };
> +       allow $1 $2:domain2 { set_cpuid settsc setscheduler setclaim };
>         allow $1 $2:security check_context;
>         allow $1 $2:shadow enable;
>         allow $1 $2:mmu { map_read map_write adjust memorymap physmap pinpage mmuext_op };
> diff --git a/tools/flask/policy/policy/modules/xen/xen.te b/tools/flask/policy/policy/modules/xen/xen.te
> index 454e27e..c852214 100644
> --- a/tools/flask/policy/policy/modules/xen/xen.te
> +++ b/tools/flask/policy/policy/modules/xen/xen.te
> @@ -64,6 +64,9 @@ allow dom0_t xen_t:xen {
>         getidle debug getcpuinfo heap pm_op mca_op lockprof cpupool_op tmem_op
>         tmem_control getscheduler setscheduler
>  };
> +allow dom0_t xen_t:xen2 {
> +       debug_port_op
> +};
>  allow dom0_t xen_t:mmu memorymap;
> 
>  # Allow dom0 to use these domctls on itself. For domctls acting on other
> diff --git a/xen/arch/x86/physdev.c b/xen/arch/x86/physdev.c
> index 876ac9d..13296cc 100644
> --- a/xen/arch/x86/physdev.c
> +++ b/xen/arch/x86/physdev.c
> @@ -691,12 +691,15 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
>      case PHYSDEVOP_dbgp_op: {
>          struct physdev_dbgp_op op;
> 
> -        if ( !IS_PRIV(v->domain) )
> -            ret = -EPERM;
> -        else if ( copy_from_guest(&op, arg, 1) )
> -            ret = -EFAULT;
> -        else
> -            ret = dbgp_op(&op);
> +        ret = -EFAULT;
> +        if ( copy_from_guest(&op, arg, 1) )
> +            break;
> +
> +        ret = xsm_physdev_dbgp_op(XSM_PRIV, &op);
> +        if ( ret )
> +            break;
> +
> +        ret = dbgp_op(&op);
>          break;
>      }
> 
> diff --git a/xen/common/memory.c b/xen/common/memory.c
> index 68501d1..3239d53 100644
> --- a/xen/common/memory.c
> +++ b/xen/common/memory.c
> @@ -712,9 +712,6 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
>      }
> 
>      case XENMEM_claim_pages:
> -        if ( !IS_PRIV(current->domain) )
> -            return -EPERM;
> -
>          if ( copy_from_guest(&reservation, arg, 1) )
>              return -EFAULT;
> 
> @@ -731,17 +728,21 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
>          if ( d == NULL )
>              return -EINVAL;
> 
> -        rc = domain_set_outstanding_pages(d, reservation.nr_extents);
> +        rc = xsm_claim_pages(XSM_PRIV, d);
> +
> +        if ( !rc )
> +            rc = domain_set_outstanding_pages(d, reservation.nr_extents);
> 
>          rcu_unlock_domain(d);
> 
>          break;
> 
>      case XENMEM_get_outstanding_pages:
> -        if ( !IS_PRIV(current->domain) )
> -            return -EPERM;
> +        rc = xsm_xenmem_get_outstanding_pages(XSM_PRIV);
> +
> +        if ( !rc )
> +            rc = get_outstanding_claims();
> 
> -        rc = get_outstanding_claims();
>          break;
> 
>      default:
> diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
> index 025936a..7986cda 100644
> --- a/xen/include/xsm/dummy.h
> +++ b/xen/include/xsm/dummy.h
> @@ -247,6 +247,24 @@ static XSM_INLINE int xsm_memory_pin_page(XSM_DEFAULT_ARG struct domain *d1, str
>      return xsm_default_action(action, d1, d2);
>  }
> 
> +static XSM_INLINE int xsm_claim_pages(XSM_DEFAULT_ARG struct domain *d)
> +{
> +    XSM_ASSERT_ACTION(XSM_PRIV);
> +    return xsm_default_action(action, current->domain, d);
> +}
> +
> +static XSM_INLINE int xsm_xenmem_get_outstanding_pages(XSM_DEFAULT_VOID)
> +{
> +    XSM_ASSERT_ACTION(XSM_PRIV);
> +    return xsm_default_action(action, current->domain, NULL);
> +}
> +
> +static XSM_INLINE int xsm_physdev_dbgp_op(XSM_DEFAULT_ARG void *op)
> +{
> +    XSM_ASSERT_ACTION(XSM_PRIV);
> +    return xsm_default_action(action, current->domain, NULL);
> +}
> +
>  static XSM_INLINE int xsm_evtchn_unbound(XSM_DEFAULT_ARG struct domain *d, struct evtchn *chn,
>                                           domid_t id2)
>  {
> diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
> index cba744c..d8ee0c8 100644
> --- a/xen/include/xsm/xsm.h
> +++ b/xen/include/xsm/xsm.h
> @@ -92,6 +92,9 @@ struct xsm_operations {
>      int (*memory_pin_page) (struct domain *d1, struct domain *d2, struct page_info *page);
>      int (*add_to_physmap) (struct domain *d1, struct domain *d2);
>      int (*remove_from_physmap) (struct domain *d1, struct domain *d2);
> +    int (*claim_pages) (struct domain *d);
> +    int (*xenmem_get_outstanding_pages) (void);
> +    int (*physdev_dbgp_op) (void *op);
> 
>      int (*console_io) (struct domain *d, int cmd);
> 
> @@ -348,6 +351,21 @@ static inline int xsm_remove_from_physmap(xsm_default_t def, struct domain *d1,
>      return xsm_ops->remove_from_physmap(d1, d2);
>  }
> 
> +static inline int xsm_claim_pages(xsm_default_t def, struct domain *d)
> +{
> +    return xsm_ops->claim_pages(d);
> +}
> +
> +static inline int xsm_xenmem_get_outstanding_pages(xsm_default_t def)
> +{
> +    return xsm_ops->xenmem_get_outstanding_pages();
> +}
> +
> +static inline int xsm_physdev_dbgp_op(xsm_default_t def, void *op)
> +{
> +    return xsm_ops->physdev_dbgp_op(op);
> +}
> +
>  static inline int xsm_console_io (xsm_default_t def, struct domain *d, int cmd)
>  {
>      return xsm_ops->console_io(d, cmd);
> diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
> index 6f1e0b4..7235d82 100644
> --- a/xen/xsm/dummy.c
> +++ b/xen/xsm/dummy.c
> @@ -66,6 +66,9 @@ void xsm_fixup_ops (struct xsm_operations *ops)
>      set_to_dummy_if_null(ops, memory_adjust_reservation);
>      set_to_dummy_if_null(ops, memory_stat_reservation);
>      set_to_dummy_if_null(ops, memory_pin_page);
> +    set_to_dummy_if_null(ops, claim_pages);
> +    set_to_dummy_if_null(ops, xenmem_get_outstanding_pages);
> +    set_to_dummy_if_null(ops, physdev_dbgp_op);
> 
>      set_to_dummy_if_null(ops, console_io);
> 
> diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
> index 29a78dd..ca42891 100644
> --- a/xen/xsm/flask/hooks.c
> +++ b/xen/xsm/flask/hooks.c
> @@ -417,6 +417,23 @@ static int flask_memory_pin_page(struct domain *d1, struct domain *d2,
>      return domain_has_perm(d1, d2, SECCLASS_MMU, MMU__PINPAGE);
>  }
> 
> +static int flask_claim_pages(struct domain *d)
> +{
> +    return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__SETCLAIM);
> +}
> +
> +static int flask_xenmem_get_outstanding_pages(void)
> +{
> +    return avc_current_has_perm(SECINITSID_XEN, SECCLASS_XEN,
> +                                XEN__HEAP, NULL);
> +}
> +
> +static int flask_physdev_dbgp_op(void *op)
> +{
> +    return avc_current_has_perm(SECINITSID_XEN, SECCLASS_XEN2,
> +                                XEN2__DEBUG_PORT_OP, NULL);
> +}
> +
>  static int flask_console_io(struct domain *d, int cmd)
>  {
>      u32 perm;
> @@ -1473,6 +1490,9 @@ static struct xsm_operations flask_ops = {
>      .memory_adjust_reservation = flask_memory_adjust_reservation,
>      .memory_stat_reservation = flask_memory_stat_reservation,
>      .memory_pin_page = flask_memory_pin_page,
> +    .claim_pages = flask_claim_pages,
> +    .xenmem_get_outstanding_pages = flask_xenmem_get_outstanding_pages,
> +    .physdev_dbgp_op = flask_physdev_dbgp_op,
> 
>      .console_io = flask_console_io,
> 
> diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors
> index 36cbacf..05bb52e 100644
> --- a/xen/xsm/flask/policy/access_vectors
> +++ b/xen/xsm/flask/policy/access_vectors
> @@ -3,9 +3,9 @@
>  #
>  # class class_name { permission_name ... }
> 
> -# Class xen consists of dom0-only operations dealing with the hypervisor itself.
> -# Unless otherwise specified, the source is the domain executing the hypercall,
> -# and the target is the xen initial sid (type xen_t).
> +# Classes xen and xen2 consist of dom0-only operations dealing with the
> +# hypervisor itself.  Unless otherwise specified, the source is the domain
> +# executing the hypercall, and the target is the xen initial sid (type xen_t).
>  class xen
>  {
>  # XENPF_settime
> @@ -54,7 +54,7 @@ class xen
>      debug
>  # XEN_SYSCTL_getcpuinfo, XENPF_get_cpu_version, XENPF_get_cpuinfo
>      getcpuinfo
> -# XEN_SYSCTL_availheap
> +# XEN_SYSCTL_availheap, XENMEM_get_outstanding_pages
>      heap
>  # XEN_SYSCTL_get_pmstat, XEN_SYSCTL_pm_op, XENPF_set_processor_pminfo,
>  # XENPF_core_parking
> @@ -75,6 +75,14 @@ class xen
>      setscheduler
>  }
> 
> +# This is a continuation of class xen, since only 32 permissions can be defined
> +# per class
> +class xen2
> +{
> +# PHYSDEVOP_dbgp_op
> +    debug_port_op
> +}
> +
>  # Classes domain and domain2 consist of operations that a domain performs on
>  # another domain or on itself.  Unless otherwise specified, the source is the
>  # domain executing the hypercall, and the target is the domain being operated on
> @@ -190,6 +198,8 @@ class domain2
>      settsc
>  # XEN_DOMCTL_scheduler_op with XEN_DOMCTL_SCHEDOP_putinfo
>      setscheduler
> +# XENMEM_claim_pages
> +    setclaim
>  }
> 
>  # Similar to class domain, but primarily contains domctls related to HVM domains
> diff --git a/xen/xsm/flask/policy/security_classes b/xen/xsm/flask/policy/security_classes
> index ef134a7..ca191db 100644
> --- a/xen/xsm/flask/policy/security_classes
> +++ b/xen/xsm/flask/policy/security_classes
> @@ -8,6 +8,7 @@
>  # for userspace object managers
> 
>  class xen
> +class xen2
>  class domain
>  class domain2
>  class hvm
> --
> 1.8.1.4
> 
> 



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel