|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH 6/7] xen: Allow hardware domain != dom0
>>> On 18.03.14 at 22:34, Daniel De Graaf <dgdegra@xxxxxxxxxxxxx> wrote:
> This adds a hypervisor command line option "hardware_dom=" which takes a
> domain ID. When the domain with this ID is created, it will be used
> as the hardware domain.
>
> This is intended to be used when domain 0 is a dedicated stub domain for
> domain building, allowing the hardware domain to be de-privileged and
> act only as a driver domain.
>
> Signed-off-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
> Cc: Keir Fraser <keir@xxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
(assuming the LATE_HWDOM_ENABLE -> CONFIG_LATE_HWDOM
will get done)
> ---
> xen/arch/x86/domain_build.c | 4 +++-
> xen/arch/x86/setup.c | 2 ++
> xen/common/domain.c | 3 ++-
> xen/common/domctl.c | 40
> +++++++++++++++++++++++++++++++++++++
> xen/common/rangeset.c | 40
> +++++++++++++++++++++++++++++++++++++
> xen/include/xen/rangeset.h | 3 +++
> xen/include/xen/sched.h | 3 ++-
> xen/include/xsm/dummy.h | 6 ++++++
> xen/include/xsm/xsm.h | 6 ++++++
> xen/xsm/dummy.c | 2 ++
> xen/xsm/flask/hooks.c | 6 ++++++
> xen/xsm/flask/policy/access_vectors | 2 ++
> 12 files changed, 114 insertions(+), 3 deletions(-)
>
> diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
> index f75f6e7..a554d3b 100644
> --- a/xen/arch/x86/domain_build.c
> +++ b/xen/arch/x86/domain_build.c
> @@ -1150,7 +1150,9 @@ int __init construct_dom0(
> printk(" Xen warning: dom0 kernel broken ELF: %s\n",
> elf_check_broken(&elf));
>
> - iommu_hwdom_init(hardware_domain);
> + if ( is_hardware_domain(d) )
> + iommu_hwdom_init(d);
> +
> return 0;
>
> out:
> diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
> index 75cf212..f246ac3 100644
> --- a/xen/arch/x86/setup.c
> +++ b/xen/arch/x86/setup.c
> @@ -84,6 +84,8 @@ unsigned long __initdata highmem_start;
> size_param("highmem-start", highmem_start);
> #endif
>
> +integer_param("hardware_dom", hardware_domid);
> +
> cpumask_t __read_mostly cpu_present_map;
>
> unsigned long __read_mostly xen_phys_start;
> diff --git a/xen/common/domain.c b/xen/common/domain.c
> index c8414ed..a77f8af 100644
> --- a/xen/common/domain.c
> +++ b/xen/common/domain.c
> @@ -59,7 +59,8 @@ DEFINE_RCU_READ_LOCK(domlist_read_lock);
> static struct domain *domain_hash[DOMAIN_HASH_SIZE];
> struct domain *domain_list;
>
> -struct domain *hardware_domain;
> +struct domain *hardware_domain __read_mostly;
> +domid_t hardware_domid __read_mostly;
>
> struct vcpu *idle_vcpu[NR_CPUS] __read_mostly;
>
> diff --git a/xen/common/domctl.c b/xen/common/domctl.c
> index eebeee7..9af24bf 100644
> --- a/xen/common/domctl.c
> +++ b/xen/common/domctl.c
> @@ -472,6 +472,46 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t)
> u_domctl)
> break;
> }
>
> + /*
> + * Late initialization of the hardware domain is only supported on
> x86,
> + * so only check for it there.
> + */
> +#ifdef LATE_HWDOM_ENABLE
> + if ( is_hardware_domain(d) )
> + {
> + struct domain *dom0 = hardware_domain;
> + ASSERT(dom0->domain_id == 0);
> +
> + ret = xsm_init_hardware_domain(XSM_HOOK, d);
> + if ( ret )
> + {
> + domain_kill(d);
> + d = NULL;
> + break;
> + }
> +
> + printk("Initialising hardware domain %d\n", hardware_domid);
> + hardware_domain = d;
> +
> + /* Hardware resource ranges for domain 0 have been set up from
> + * various sources intended to restrict the hardware domain's
> + * access. Apply these ranges to the actual hardware domain.
> + *
> + * Because the lists are being swapped, a side effect of this
> + * operation is that Domain 0's rangesets are cleared. Since
> domain
> + * 0 should not be accessing the hardware when it constructs a
> + * hardware domain, this should not be a problem. Both lists
> should
> + * be modified after this hypercall returns if a more complex
> + * device model is desired.
> + */
> + rangeset_swap(d->irq_caps, dom0->irq_caps);
> + rangeset_swap(d->iomem_caps, dom0->iomem_caps);
> + rangeset_swap(d->arch.ioport_caps, dom0->arch.ioport_caps);
> +
> + iommu_hwdom_init(d);
> + }
> +#endif
> +
> ret = 0;
>
> memcpy(d->handle, op->u.createdomain.handle,
> diff --git a/xen/common/rangeset.c b/xen/common/rangeset.c
> index f09c0c4..52fae1f 100644
> --- a/xen/common/rangeset.c
> +++ b/xen/common/rangeset.c
> @@ -438,3 +438,43 @@ void rangeset_domain_printk(
>
> spin_unlock(&d->rangesets_lock);
> }
> +
> +void rangeset_swap(struct rangeset *a, struct rangeset *b)
> +{
> + struct list_head tmp;
> + if ( &a < &b )
> + {
> + spin_lock(&a->lock);
> + spin_lock(&b->lock);
> + }
> + else
> + {
> + spin_lock(&b->lock);
> + spin_lock(&a->lock);
> + }
> + memcpy(&tmp, &a->range_list, sizeof(tmp));
> + memcpy(&a->range_list, &b->range_list, sizeof(tmp));
> + memcpy(&b->range_list, &tmp, sizeof(tmp));
> + if ( a->range_list.next == &b->range_list )
> + {
> + a->range_list.next = &a->range_list;
> + a->range_list.prev = &a->range_list;
> + }
> + else
> + {
> + a->range_list.next->prev = &a->range_list;
> + a->range_list.prev->next = &a->range_list;
> + }
> + if ( b->range_list.next == &a->range_list )
> + {
> + b->range_list.next = &b->range_list;
> + b->range_list.prev = &b->range_list;
> + }
> + else
> + {
> + b->range_list.next->prev = &b->range_list;
> + b->range_list.prev->next = &b->range_list;
> + }
> + spin_unlock(&a->lock);
> + spin_unlock(&b->lock);
> +}
> diff --git a/xen/include/xen/rangeset.h b/xen/include/xen/rangeset.h
> index 1e16a6b..805ebde 100644
> --- a/xen/include/xen/rangeset.h
> +++ b/xen/include/xen/rangeset.h
> @@ -73,4 +73,7 @@ void rangeset_printk(
> void rangeset_domain_printk(
> struct domain *d);
>
> +/* swap contents */
> +void rangeset_swap(struct rangeset *a, struct rangeset *b);
> +
> #endif /* __XEN_RANGESET_H__ */
> diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
> index 146514d..ce37c77 100644
> --- a/xen/include/xen/sched.h
> +++ b/xen/include/xen/sched.h
> @@ -45,6 +45,7 @@ DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_compat_t);
>
> /* A global pointer to the hardware domain (usually DOM0). */
> extern struct domain *hardware_domain;
> +extern domid_t hardware_domid;
>
> #ifndef CONFIG_COMPAT
> #define BITS_PER_EVTCHN_WORD(d) BITS_PER_XEN_ULONG
> @@ -778,7 +779,7 @@ void watchdog_domain_destroy(struct domain *d);
> * (that is, this would not be suitable for a driver domain)
> * - There is never a reason to deny dom0 access to this
> */
> -#define is_hardware_domain(_d) ((_d)->domain_id == 0)
> +#define is_hardware_domain(d) ((d)->domain_id == hardware_domid)
>
> /* This check is for functionality specific to a control domain */
> #define is_control_domain(_d) ((_d)->is_privileged)
> diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
> index 3bcd941..180cc88 100644
> --- a/xen/include/xsm/dummy.h
> +++ b/xen/include/xsm/dummy.h
> @@ -305,6 +305,12 @@ static XSM_INLINE char *xsm_show_security_evtchn(struct
> domain *d, const struct
> return NULL;
> }
>
> +static XSM_INLINE int xsm_init_hardware_domain(XSM_DEFAULT_ARG struct
> domain *d)
> +{
> + XSM_ASSERT_ACTION(XSM_HOOK);
> + return xsm_default_action(action, current->domain, d);
> +}
> +
> static XSM_INLINE int xsm_get_pod_target(XSM_DEFAULT_ARG struct domain *d)
> {
> XSM_ASSERT_ACTION(XSM_PRIV);
> diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
> index de9cf86..6ab9ed1 100644
> --- a/xen/include/xsm/xsm.h
> +++ b/xen/include/xsm/xsm.h
> @@ -83,6 +83,7 @@ struct xsm_operations {
> int (*alloc_security_evtchn) (struct evtchn *chn);
> void (*free_security_evtchn) (struct evtchn *chn);
> char *(*show_security_evtchn) (struct domain *d, const struct evtchn
> *chn);
> + int (*init_hardware_domain) (struct domain *d);
>
> int (*get_pod_target) (struct domain *d);
> int (*set_pod_target) (struct domain *d);
> @@ -314,6 +315,11 @@ static inline char *xsm_show_security_evtchn (struct
> domain *d, const struct evt
> return xsm_ops->show_security_evtchn(d, chn);
> }
>
> +static inline int xsm_init_hardware_domain (xsm_default_t def, struct
> domain *d)
> +{
> + return xsm_ops->init_hardware_domain(d);
> +}
> +
> static inline int xsm_get_pod_target (xsm_default_t def, struct domain *d)
> {
> return xsm_ops->get_pod_target(d);
> diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
> index 3fe4c59..689af3d 100644
> --- a/xen/xsm/dummy.c
> +++ b/xen/xsm/dummy.c
> @@ -59,6 +59,8 @@ void xsm_fixup_ops (struct xsm_operations *ops)
> set_to_dummy_if_null(ops, alloc_security_evtchn);
> set_to_dummy_if_null(ops, free_security_evtchn);
> set_to_dummy_if_null(ops, show_security_evtchn);
> + set_to_dummy_if_null(ops, init_hardware_domain);
> +
> set_to_dummy_if_null(ops, get_pod_target);
> set_to_dummy_if_null(ops, set_pod_target);
>
> diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
> index 96276ac..5b906d6 100644
> --- a/xen/xsm/flask/hooks.c
> +++ b/xen/xsm/flask/hooks.c
> @@ -344,6 +344,11 @@ static char *flask_show_security_evtchn(struct domain
> *d, const struct evtchn *c
> return ctx;
> }
>
> +static int flask_init_hardware_domain(struct domain *d)
> +{
> + return current_has_perm(d, SECCLASS_DOMAIN2,
> DOMAIN2__CREATE_HARDWARE_DOMAIN);
> +}
> +
> static int flask_grant_mapref(struct domain *d1, struct domain *d2,
> uint32_t flags)
> {
> @@ -1491,6 +1496,7 @@ static struct xsm_operations flask_ops = {
> .alloc_security_evtchn = flask_alloc_security_evtchn,
> .free_security_evtchn = flask_free_security_evtchn,
> .show_security_evtchn = flask_show_security_evtchn,
> + .init_hardware_domain = flask_init_hardware_domain,
>
> .get_pod_target = flask_get_pod_target,
> .set_pod_target = flask_set_pod_target,
> diff --git a/xen/xsm/flask/policy/access_vectors
> b/xen/xsm/flask/policy/access_vectors
> index a0ed13d..32371a9 100644
> --- a/xen/xsm/flask/policy/access_vectors
> +++ b/xen/xsm/flask/policy/access_vectors
> @@ -198,6 +198,8 @@ class domain2
> set_max_evtchn
> # XEN_DOMCTL_cacheflush
> cacheflush
> +# Creation of the hardware domain when it is not dom0
> + create_hardware_domain
> }
>
> # Similar to class domain, but primarily contains domctls related to HVM
> domains
> --
> 1.8.5.3
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.