Re: [Xen-devel] [PATCH 09/11] xen: Add DOMCTL to limit the number of event channels a domain may use
>>> On 04.10.13 at 13:56, David Vrabel <david.vrabel@xxxxxxxxxx> wrote:
> On 02/10/13 18:06, David Vrabel wrote:
>> On 02/10/13 17:35, David Vrabel wrote:
>>>
>>> --- a/xen/xsm/flask/hooks.c
>>> +++ b/xen/xsm/flask/hooks.c
>>> @@ -727,6 +727,9 @@ static int flask_domctl(struct domain *d, int cmd)
>>> case XEN_DOMCTL_audit_p2m:
>>> return current_has_perm(d, SECCLASS_HVM, HVM__AUDIT_P2M);
>>>
>>> + case XEN_DOMCTL_set_max_evtchn:
>>> + return current_has_perm(d, SECCLASS_DOMAIN, DOMAIN__SET_MAX_EVTCHN);
>>
>> Sorry, I forgot to try a build with XSM and FLASK enabled. This should
>> have been SECCLASS_DOMAIN2 and DOMAIN2__SET_MAX_EVTCHN.
>
> And here's a fixed version of the patch.
>
> Daniel, can you review the XSM parts of this, please?
>
> 8<-----------------------------------
> xen: Add DOMCTL to limit the number of event channels a domain may use
>
> Add XEN_DOMCTL_set_max_evtchn which may be used during domain creation to
> set the maximum event channel port a domain may use. This may be used to
> limit the amount of Xen resources (global mapping space and xenheap) that
> a domain may use for event channels.
>
> A domain that does not have a limit set may use all the event channels
> supported by the event channel ABI in use.
>
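For context, this limit is meant to be applied by the toolstack at domain creation time, before the guest starts binding event channels. The fragment below is a minimal sketch of such a caller; it assumes a libxc-style wrapper, here called xc_domain_set_max_evtchn(xch, domid, max_port), that fills xen_domctl.u.set_max_evtchn.max_port and issues XEN_DOMCTL_set_max_evtchn. The wrapper's name and signature are assumptions, not part of this patch.

    #include <stdint.h>
    #include <stdio.h>
    #include <xenctrl.h>

    /*
     * Illustrative only: cap a freshly created domain at 1023 as the
     * highest event channel port it may be assigned.  The wrapper used
     * here is assumed to pack max_port into xen_domctl.u.set_max_evtchn
     * and issue XEN_DOMCTL_set_max_evtchn; it is not part of this patch.
     */
    static int limit_evtchns(xc_interface *xch, uint32_t domid)
    {
        uint32_t max_port = 1023;   /* inclusive upper bound on port numbers */
        int rc = xc_domain_set_max_evtchn(xch, domid, max_port);

        if ( rc )
            fprintf(stderr, "set_max_evtchn failed for dom%u: %d\n", domid, rc);
        return rc;
    }

A domain with no limit set keeps the default of INT_MAX, i.e. it is bounded only by the event channel ABI in use.
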
> Signed-off-by: David Vrabel <david.vrabel@xxxxxxxxxx>
> Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
Just to clarify once more: This is only for the non-XSM parts; I'm
relying on Daniel to do the review on that front.
Jan
> Cc: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
> ---
>  tools/flask/policy/policy/mls                |    2 +-
>  tools/flask/policy/policy/modules/xen/xen.if |    2 +-
>  tools/flask/policy/policy/modules/xen/xen.te |    2 +-
>  xen/common/domctl.c                          |    8 ++++++++
>  xen/common/event_channel.c                   |    7 ++++++-
>  xen/include/public/domctl.h                  |   13 +++++++++++++
>  xen/include/xen/sched.h                      |    1 +
>  xen/xsm/flask/hooks.c                        |    3 +++
>  xen/xsm/flask/policy/access_vectors          |    2 ++
>  9 files changed, 36 insertions(+), 4 deletions(-)
>
> diff --git a/tools/flask/policy/policy/mls b/tools/flask/policy/policy/mls
> index 9290a76..fb603cd 100644
> --- a/tools/flask/policy/policy/mls
> +++ b/tools/flask/policy/policy/mls
> @@ -74,7 +74,7 @@ mlsconstrain domain { getaffinity getdomaininfo getvcpuinfo getvcpucontext getad
> ((l1 dom l2) or (t1 == mls_priv));
>
> # all the domain "write" ops
> -mlsconstrain domain { setvcpucontext pause unpause resume create max_vcpus destroy setaffinity scheduler setdomainmaxmem setdomainhandle setdebugging hypercall settime set_target shutdown setaddrsize trigger setextvcpucontext }
> +mlsconstrain domain { setvcpucontext pause unpause resume create max_vcpus destroy setaffinity scheduler setdomainmaxmem setdomainhandle setdebugging hypercall settime set_target shutdown setaddrsize trigger setextvcpucontext set_max_evtchn }
> ((l1 eq l2) or (t1 == mls_priv));
>
> # This is incomplete - similar constraints must be written for all classes
> diff --git a/tools/flask/policy/policy/modules/xen/xen.if b/tools/flask/policy/policy/modules/xen/xen.if
> index 97af0a8..63e40f0 100644
> --- a/tools/flask/policy/policy/modules/xen/xen.if
> +++ b/tools/flask/policy/policy/modules/xen/xen.if
> @@ -48,7 +48,7 @@ define(`create_domain_common', `
> allow $1 $2:domain { create max_vcpus setdomainmaxmem setaddrsize
> getdomaininfo hypercall setvcpucontext setextvcpucontext
> getscheduler getvcpuinfo getvcpuextstate getaddrsize
> - getaffinity setaffinity };
> + getaffinity setaffinity set_max_evtchn };
> allow $1 $2:domain2 { set_cpuid settsc setscheduler setclaim };
> allow $1 $2:security check_context;
> allow $1 $2:shadow enable;
> diff --git a/tools/flask/policy/policy/modules/xen/xen.te b/tools/flask/policy/policy/modules/xen/xen.te
> index c89ce28..5f9de5c 100644
> --- a/tools/flask/policy/policy/modules/xen/xen.te
> +++ b/tools/flask/policy/policy/modules/xen/xen.te
> @@ -73,7 +73,7 @@ allow dom0_t dom0_t:domain {
> getdomaininfo getvcpuinfo getvcpucontext setdomainmaxmem setdomainhandle
> setdebugging hypercall settime setaddrsize getaddrsize trigger
> getextvcpucontext setextvcpucontext getvcpuextstate setvcpuextstate
> - getpodtarget setpodtarget set_misc_info set_virq_handler
> + getpodtarget setpodtarget set_misc_info set_virq_handler set_max_evtchn
> };
> allow dom0_t dom0_t:domain2 {
> set_cpuid gettsc settsc setscheduler
> diff --git a/xen/common/domctl.c b/xen/common/domctl.c
> index 9760d50..870eef1 100644
> --- a/xen/common/domctl.c
> +++ b/xen/common/domctl.c
> @@ -863,6 +863,14 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
> }
> break;
>
> + case XEN_DOMCTL_set_max_evtchn:
> + {
> + d->max_evtchn_port = min_t(unsigned int,
> + op->u.set_max_evtchn.max_port,
> + INT_MAX);
> + }
> + break;
> +
> default:
> ret = arch_do_domctl(op, d, u_domctl);
> break;
> diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
> index 0c0bbe4..34efd24 100644
> --- a/xen/common/event_channel.c
> +++ b/xen/common/event_channel.c
> @@ -168,10 +168,14 @@ static int get_free_port(struct domain *d)
> return -EINVAL;
>
> for ( port = 0; port_is_valid(d, port); port++ )
> + {
> + if ( port > d->max_evtchn_port )
> + return -ENOSPC;
> if ( evtchn_from_port(d, port)->state == ECS_FREE )
> return port;
> + }
>
> - if ( port == d->max_evtchns )
> + if ( port == d->max_evtchns || port > d->max_evtchn_port )
> return -ENOSPC;
>
> if ( !group_from_port(d, port) )
> @@ -1230,6 +1234,7 @@ void evtchn_check_pollers(struct domain *d, unsigned int port)
> int evtchn_init(struct domain *d)
> {
> evtchn_2l_init(d);
> + d->max_evtchn_port = INT_MAX;
>
> d->evtchn = alloc_evtchn_bucket(d, 0);
> if ( !d->evtchn )
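Two different fields now bound allocation: max_evtchns is the hard limit imposed by the event channel ABI in use, while the new max_evtchn_port is an inclusive cap on the port number itself and defaults to INT_MAX. The standalone sketch below models the bound checks in the patched get_free_port() with plain arrays; next_free_port() and the in_use[] flags are illustrative stand-ins for port_is_valid()/ECS_FREE, not Xen code.

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Illustrative model of the patched get_free_port(): ports
     * 0..max_evtchn_port are allocatable; anything beyond that, or
     * beyond the ABI limit max_evtchns, fails with -ENOSPC.
     */
    static int next_free_port(const bool *in_use, unsigned int valid_ports,
                              unsigned int max_evtchns,
                              unsigned int max_evtchn_port)
    {
        unsigned int port;

        for ( port = 0; port < valid_ports; port++ )
        {
            if ( port > max_evtchn_port )
                return -ENOSPC;
            if ( !in_use[port] )
                return port;
        }

        if ( port == max_evtchns || port > max_evtchn_port )
            return -ENOSPC;

        return port; /* would trigger allocation of a new bucket */
    }

    int main(void)
    {
        bool in_use[4] = { true, true, true, true };

        printf("%d\n", next_free_port(in_use, 4, 1024, 3)); /* -ENOSPC: port 4 exceeds the cap of 3 */
        printf("%d\n", next_free_port(in_use, 4, 1024, 8)); /* 4: next port within the cap */
        return 0;
    }
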
> diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
> index 4c5b2bb..d4e479f 100644
> --- a/xen/include/public/domctl.h
> +++ b/xen/include/public/domctl.h
> @@ -852,6 +852,17 @@ struct xen_domctl_set_broken_page_p2m {
> typedef struct xen_domctl_set_broken_page_p2m xen_domctl_set_broken_page_p2m_t;
> DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_broken_page_p2m_t);
>
> +/*
> + * XEN_DOMCTL_set_max_evtchn: sets the maximum event channel port
> + * number the guest may use.  Use this to limit the amount of resources
> + * (global mapping space, xenheap) a guest may use for event channels.
> + */
> +struct xen_domctl_set_max_evtchn {
> + uint32_t max_port;
> +};
> +typedef struct xen_domctl_set_max_evtchn xen_domctl_set_max_evtchn_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_max_evtchn_t);
> +
> struct xen_domctl {
> uint32_t cmd;
> #define XEN_DOMCTL_createdomain 1
> @@ -920,6 +931,7 @@ struct xen_domctl {
> #define XEN_DOMCTL_set_broken_page_p2m 67
> #define XEN_DOMCTL_setnodeaffinity 68
> #define XEN_DOMCTL_getnodeaffinity 69
> +#define XEN_DOMCTL_set_max_evtchn 70
> #define XEN_DOMCTL_gdbsx_guestmemio 1000
> #define XEN_DOMCTL_gdbsx_pausevcpu 1001
> #define XEN_DOMCTL_gdbsx_unpausevcpu 1002
> @@ -975,6 +987,7 @@ struct xen_domctl {
> struct xen_domctl_set_access_required access_required;
> struct xen_domctl_audit_p2m audit_p2m;
> struct xen_domctl_set_virq_handler set_virq_handler;
> + struct xen_domctl_set_max_evtchn set_max_evtchn;
> struct xen_domctl_gdbsx_memio gdbsx_guest_memio;
> struct xen_domctl_set_broken_page_p2m set_broken_page_p2m;
> struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
> diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
> index ab7be82..0da0096 100644
> --- a/xen/include/xen/sched.h
> +++ b/xen/include/xen/sched.h
> @@ -291,6 +291,7 @@ struct domain
> struct evtchn *evtchn; /* first bucket only */
> struct evtchn **evtchn_group[NR_EVTCHN_GROUPS]; /* all other buckets */
> unsigned int max_evtchns;
> + unsigned int max_evtchn_port;
> spinlock_t event_lock;
> const struct evtchn_port_ops *evtchn_port_ops;
> struct evtchn_fifo_domain *evtchn_fifo;
> diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
> index fa0589a..b1e2593 100644
> --- a/xen/xsm/flask/hooks.c
> +++ b/xen/xsm/flask/hooks.c
> @@ -727,6 +727,9 @@ static int flask_domctl(struct domain *d, int cmd)
> case XEN_DOMCTL_audit_p2m:
> return current_has_perm(d, SECCLASS_HVM, HVM__AUDIT_P2M);
>
> + case XEN_DOMCTL_set_max_evtchn:
> + return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__SET_MAX_EVTCHN);
> +
> default:
> printk("flask_domctl: Unknown op %d\n", cmd);
> return -EPERM;
> diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors
> index 5dfe13b..1fbe241 100644
> --- a/xen/xsm/flask/policy/access_vectors
> +++ b/xen/xsm/flask/policy/access_vectors
> @@ -194,6 +194,8 @@ class domain2
> setscheduler
> # XENMEM_claim_pages
> setclaim
> +# XEN_DOMCTL_set_max_evtchn
> + set_max_evtchn
> }
>
> # Similar to class domain, but primarily contains domctls related to HVM domains
> --
> 1.7.2.5
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel