[Xen-devel] [PATCH 09/11] xen: Add DOMCTL to limit the number of event channels a domain may use
From: David Vrabel <david.vrabel@xxxxxxxxxx>

Add XEN_DOMCTL_set_max_evtchn, which may be used during domain creation
to set the maximum event channel port a domain may use.  This may be
used to limit the amount of Xen resources (global mapping space and
xenheap) that a domain may use for event channels.

A domain that does not have a limit set may use all the event channels
supported by the event channel ABI in use.

Signed-off-by: David Vrabel <david.vrabel@xxxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
---
 tools/flask/policy/policy/modules/xen/xen.if |    2 +-
 tools/flask/policy/policy/modules/xen/xen.te |    2 +-
 xen/common/domctl.c                          |    8 ++++++++
 xen/common/event_channel.c                   |    7 ++++++-
 xen/include/public/domctl.h                  |   13 +++++++++++++
 xen/include/xen/sched.h                      |    1 +
 xen/xsm/flask/hooks.c                        |    3 +++
 xen/xsm/flask/policy/access_vectors          |    2 ++
 8 files changed, 35 insertions(+), 3 deletions(-)

diff --git a/tools/flask/policy/policy/modules/xen/xen.if b/tools/flask/policy/policy/modules/xen/xen.if
index 97af0a8..dedc035 100644
--- a/tools/flask/policy/policy/modules/xen/xen.if
+++ b/tools/flask/policy/policy/modules/xen/xen.if
@@ -49,7 +49,7 @@ define(`create_domain_common', `
 			getdomaininfo hypercall setvcpucontext setextvcpucontext
 			getscheduler getvcpuinfo getvcpuextstate getaddrsize
 			getaffinity setaffinity };
-	allow $1 $2:domain2 { set_cpuid settsc setscheduler setclaim };
+	allow $1 $2:domain2 { set_cpuid settsc setscheduler setclaim set_max_evtchn };
 	allow $1 $2:security check_context;
 	allow $1 $2:shadow enable;
 	allow $1 $2:mmu { map_read map_write adjust memorymap physmap pinpage mmuext_op };
diff --git a/tools/flask/policy/policy/modules/xen/xen.te b/tools/flask/policy/policy/modules/xen/xen.te
index c89ce28..bb59fe8 100644
--- a/tools/flask/policy/policy/modules/xen/xen.te
+++ b/tools/flask/policy/policy/modules/xen/xen.te
@@ -76,7 +76,7 @@ allow dom0_t dom0_t:domain {
 	getpodtarget setpodtarget set_misc_info set_virq_handler
 };
 allow dom0_t dom0_t:domain2 {
-	set_cpuid gettsc settsc setscheduler
+	set_cpuid gettsc settsc setscheduler set_max_evtchn
 };
 
 allow dom0_t dom0_t:resource { add remove };
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index 9760d50..870eef1 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -863,6 +863,14 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
     }
     break;
 
+    case XEN_DOMCTL_set_max_evtchn:
+    {
+        d->max_evtchn_port = min_t(unsigned int,
+                                   op->u.set_max_evtchn.max_port,
+                                   INT_MAX);
+    }
+    break;
+
     default:
         ret = arch_do_domctl(op, d, u_domctl);
         break;
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 0c0bbe4..34efd24 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -168,10 +168,14 @@ static int get_free_port(struct domain *d)
         return -EINVAL;
 
     for ( port = 0; port_is_valid(d, port); port++ )
+    {
+        if ( port > d->max_evtchn_port )
+            return -ENOSPC;
         if ( evtchn_from_port(d, port)->state == ECS_FREE )
             return port;
+    }
 
-    if ( port == d->max_evtchns )
+    if ( port == d->max_evtchns || port > d->max_evtchn_port )
         return -ENOSPC;
 
     if ( !group_from_port(d, port) )
@@ -1230,6 +1234,7 @@ void evtchn_check_pollers(struct domain *d, unsigned int port)
 int evtchn_init(struct domain *d)
 {
     evtchn_2l_init(d);
+    d->max_evtchn_port = INT_MAX;
 
     d->evtchn = alloc_evtchn_bucket(d, 0);
     if ( !d->evtchn )
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 4c5b2bb..d4e479f 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -852,6 +852,17 @@ struct xen_domctl_set_broken_page_p2m {
 typedef struct xen_domctl_set_broken_page_p2m xen_domctl_set_broken_page_p2m_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_broken_page_p2m_t);
 
+/*
+ * XEN_DOMCTL_set_max_evtchn: sets the maximum event channel port
+ * number the guest may use.  Use this to limit the amount of resources
+ * (global mapping space, xenheap) a guest may use for event channels.
+ */
+struct xen_domctl_set_max_evtchn {
+    uint32_t max_port;
+};
+typedef struct xen_domctl_set_max_evtchn xen_domctl_set_max_evtchn_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_max_evtchn_t);
+
 struct xen_domctl {
     uint32_t cmd;
 #define XEN_DOMCTL_createdomain                   1
@@ -920,6 +931,7 @@ struct xen_domctl {
 #define XEN_DOMCTL_set_broken_page_p2m           67
 #define XEN_DOMCTL_setnodeaffinity               68
 #define XEN_DOMCTL_getnodeaffinity               69
+#define XEN_DOMCTL_set_max_evtchn                70
 #define XEN_DOMCTL_gdbsx_guestmemio            1000
 #define XEN_DOMCTL_gdbsx_pausevcpu             1001
 #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
@@ -975,6 +987,7 @@ struct xen_domctl {
         struct xen_domctl_set_access_required access_required;
         struct xen_domctl_audit_p2m           audit_p2m;
         struct xen_domctl_set_virq_handler    set_virq_handler;
+        struct xen_domctl_set_max_evtchn      set_max_evtchn;
         struct xen_domctl_gdbsx_memio         gdbsx_guest_memio;
         struct xen_domctl_set_broken_page_p2m set_broken_page_p2m;
         struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index c9c3bc2..8c7d963 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -291,6 +291,7 @@ struct domain
     struct evtchn   *evtchn;                         /* first bucket only */
     struct evtchn  **evtchn_group[NR_EVTCHN_GROUPS]; /* all other buckets */
     unsigned int     max_evtchns;
+    unsigned int     max_evtchn_port;
     spinlock_t       event_lock;
     const struct evtchn_port_ops *evtchn_port_ops;
     struct evtchn_fifo_domain *evtchn_fifo;
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index fa0589a..b1e2593 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -727,6 +727,9 @@ static int flask_domctl(struct domain *d, int cmd)
     case XEN_DOMCTL_audit_p2m:
         return current_has_perm(d, SECCLASS_HVM, HVM__AUDIT_P2M);
 
+    case XEN_DOMCTL_set_max_evtchn:
+        return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__SET_MAX_EVTCHN);
+
     default:
         printk("flask_domctl: Unknown op %d\n", cmd);
         return -EPERM;
diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors
index 5dfe13b..1fbe241 100644
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -194,6 +194,8 @@ class domain2
     setscheduler
 # XENMEM_claim_pages
     setclaim
+# XEN_DOMCTL_set_max_evtchn
+    set_max_evtchn
 }
 
 # Similar to class domain, but primarily contains domctls related to HVM domains
-- 
1.7.2.5
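
[Editorial note: for context, a toolstack would drive this domctl during
domain creation roughly as in the sketch below.  This is illustrative only:
it uses the xen_domctl interface added by this patch, but issue_domctl()
and limit_domain_evtchns() are hypothetical names standing in for whatever
wrapper the toolstack uses to make the raw __HYPERVISOR_domctl hypercall.]

    /* Minimal sketch: cap a new domain at event channel ports
     * 0 .. max_port.  issue_domctl() is hypothetical; it stands in
     * for the toolstack's raw __HYPERVISOR_domctl wrapper. */
    #include <string.h>
    #include <xen/domctl.h>

    int issue_domctl(struct xen_domctl *domctl);  /* hypothetical */

    static int limit_domain_evtchns(domid_t domid, uint32_t max_port)
    {
        struct xen_domctl domctl;

        memset(&domctl, 0, sizeof(domctl));
        domctl.cmd = XEN_DOMCTL_set_max_evtchn;   /* op 70, added above */
        domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
        domctl.domain = domid;
        domctl.u.set_max_evtchn.max_port = max_port;

        return issue_domctl(&domctl);
    }

Note the defaulting on both sides of the interface: evtchn_init()
initialises max_evtchn_port to INT_MAX and do_domctl() clamps the supplied
value to INT_MAX, so a domain that never receives this domctl is bounded
only by the max_evtchns of the event channel ABI it uses.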