|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 6/7] xen: Allow hardware domain != dom0
This adds a hypervisor command line option "hardware_dom=" which takes a
domain ID. When the domain with this ID is created, it will be used
as the hardware domain.
This is intended to be used when domain 0 is a dedicated stub domain for
domain building, allowing the hardware domain to be de-privileged and
act only as a driver domain.
Signed-off-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
---
xen/arch/x86/domain_build.c | 4 +++-
xen/arch/x86/setup.c | 2 ++
xen/common/domain.c | 1 +
xen/common/domctl.c | 41 +++++++++++++++++++++++++++++++++++++
xen/common/rangeset.c | 40 ++++++++++++++++++++++++++++++++++++
xen/include/xen/rangeset.h | 3 +++
xen/include/xen/sched.h | 3 ++-
xen/include/xsm/dummy.h | 6 ++++++
xen/include/xsm/xsm.h | 6 ++++++
xen/xsm/dummy.c | 2 ++
xen/xsm/flask/hooks.c | 6 ++++++
xen/xsm/flask/policy/access_vectors | 2 ++
12 files changed, 114 insertions(+), 2 deletions(-)
diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index f75f6e7..a554d3b 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -1150,7 +1150,9 @@ int __init construct_dom0(
printk(" Xen warning: dom0 kernel broken ELF: %s\n",
elf_check_broken(&elf));
- iommu_hwdom_init(hardware_domain);
+ if ( is_hardware_domain(d) )
+ iommu_hwdom_init(d);
+
return 0;
out:
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 75cf212..f246ac3 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -84,6 +84,8 @@ unsigned long __initdata highmem_start;
size_param("highmem-start", highmem_start);
#endif
+integer_param("hardware_dom", hardware_domid);
+
cpumask_t __read_mostly cpu_present_map;
unsigned long __read_mostly xen_phys_start;
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 6a282a2..a77f8af 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -60,6 +60,7 @@ static struct domain *domain_hash[DOMAIN_HASH_SIZE];
struct domain *domain_list;
struct domain *hardware_domain __read_mostly;
+domid_t hardware_domid __read_mostly;
struct vcpu *idle_vcpu[NR_CPUS] __read_mostly;
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index eebeee7..57bb026 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -472,6 +472,47 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t)
u_domctl)
break;
}
+#ifdef CONFIG_LATE_HWDOM
+ if ( is_hardware_domain(d) )
+ {
+ struct domain *dom0 = hardware_domain;
+ ASSERT(dom0->domain_id == 0);
+
+ ret = xsm_init_hardware_domain(XSM_HOOK, d);
+ if ( ret )
+ {
+ domain_kill(d);
+ d = NULL;
+ break;
+ }
+
+ printk("Initialising hardware domain %d\n", hardware_domid);
+ hardware_domain = d;
+
+ /*
+ * Hardware resource ranges for domain 0 have been set up from
+ * various sources intended to restrict the hardware domain's
+ * access. Apply these ranges to the actual hardware domain.
+ *
+ * Because the lists are being swapped, a side effect of this
+ * operation is that Domain 0's rangesets are cleared. Since
+ * domain 0 should not be accessing the hardware when it constructs
+ * a hardware domain, this should not be a problem. Both lists
+ * may be modified after this hypercall returns if a more complex
+ * device model is desired.
+ *
+ * Since late hardware domain initialization is only supported on
+ * x86, the reference to arch.ioport_caps does not need its own
+ * preprocessor conditional.
+ */
+ rangeset_swap(d->irq_caps, dom0->irq_caps);
+ rangeset_swap(d->iomem_caps, dom0->iomem_caps);
+ rangeset_swap(d->arch.ioport_caps, dom0->arch.ioport_caps);
+
+ iommu_hwdom_init(d);
+ }
+#endif
+
ret = 0;
memcpy(d->handle, op->u.createdomain.handle,
diff --git a/xen/common/rangeset.c b/xen/common/rangeset.c
index f09c0c4..52fae1f 100644
--- a/xen/common/rangeset.c
+++ b/xen/common/rangeset.c
@@ -438,3 +438,43 @@ void rangeset_domain_printk(
spin_unlock(&d->rangesets_lock);
}
+
+/*
+ * Swap the contents (range lists) of two rangesets.
+ *
+ * Both locks are acquired before touching either list.  To make concurrent
+ * swaps of the same pair deadlock-free, the locks are always taken in a
+ * fixed global order: lowest rangeset address first.  Note that we must
+ * compare the rangeset pointers themselves (a < b), NOT the addresses of
+ * the local parameter variables (&a < &b) -- the latter would compare two
+ * stack slots and impose no ordering on the locks at all.
+ */
+void rangeset_swap(struct rangeset *a, struct rangeset *b)
+{
+    struct list_head tmp;
+
+    if ( a < b )
+    {
+        spin_lock(&a->lock);
+        spin_lock(&b->lock);
+    }
+    else
+    {
+        spin_lock(&b->lock);
+        spin_lock(&a->lock);
+    }
+
+    /* Exchange the two list heads wholesale. */
+    memcpy(&tmp, &a->range_list, sizeof(tmp));
+    memcpy(&a->range_list, &b->range_list, sizeof(tmp));
+    memcpy(&b->range_list, &tmp, sizeof(tmp));
+
+    /*
+     * Each head's neighbours still point at the *old* head.  Re-aim the
+     * first/last elements at their new head; an empty list (whose copied
+     * next still points at the other rangeset's head) must instead be
+     * re-initialised to point at itself.
+     */
+    if ( a->range_list.next == &b->range_list )
+    {
+        a->range_list.next = &a->range_list;
+        a->range_list.prev = &a->range_list;
+    }
+    else
+    {
+        a->range_list.next->prev = &a->range_list;
+        a->range_list.prev->next = &a->range_list;
+    }
+
+    if ( b->range_list.next == &a->range_list )
+    {
+        b->range_list.next = &b->range_list;
+        b->range_list.prev = &b->range_list;
+    }
+    else
+    {
+        b->range_list.next->prev = &b->range_list;
+        b->range_list.prev->next = &b->range_list;
+    }
+
+    spin_unlock(&a->lock);
+    spin_unlock(&b->lock);
+}
diff --git a/xen/include/xen/rangeset.h b/xen/include/xen/rangeset.h
index 1e16a6b..805ebde 100644
--- a/xen/include/xen/rangeset.h
+++ b/xen/include/xen/rangeset.h
@@ -73,4 +73,7 @@ void rangeset_printk(
void rangeset_domain_printk(
struct domain *d);
+/* swap contents */
+void rangeset_swap(struct rangeset *a, struct rangeset *b);
+
#endif /* __XEN_RANGESET_H__ */
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 91adc8c..8b28f79 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -45,6 +45,7 @@ DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_compat_t);
/* A global pointer to the hardware domain (usually DOM0). */
extern struct domain *hardware_domain;
+extern domid_t hardware_domid;
#ifndef CONFIG_COMPAT
#define BITS_PER_EVTCHN_WORD(d) BITS_PER_XEN_ULONG
@@ -794,7 +795,7 @@ void watchdog_domain_destroy(struct domain *d);
* (that is, this would not be suitable for a driver domain)
* - There is never a reason to deny dom0 access to this
*/
-#define is_hardware_domain(_d) ((_d)->domain_id == 0)
+#define is_hardware_domain(d) ((d)->domain_id == hardware_domid)
/* This check is for functionality specific to a control domain */
#define is_control_domain(_d) ((_d)->is_privileged)
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index e722155..8ca1117 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -299,6 +299,12 @@ static XSM_INLINE char *xsm_show_security_evtchn(struct
domain *d, const struct
return NULL;
}
+/*
+ * Dummy-policy hook for late hardware domain creation: uses the XSM_HOOK
+ * default action, i.e. permitted unless a loaded policy module overrides it.
+ */
+static XSM_INLINE int xsm_init_hardware_domain(XSM_DEFAULT_ARG struct domain
*d)
+{
+ XSM_ASSERT_ACTION(XSM_HOOK);
+ return xsm_default_action(action, current->domain, d);
+}
+
+
static XSM_INLINE int xsm_get_pod_target(XSM_DEFAULT_ARG struct domain *d)
{
XSM_ASSERT_ACTION(XSM_PRIV);
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index 15acb3b..bff0574 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -82,6 +82,7 @@ struct xsm_operations {
int (*alloc_security_evtchn) (struct evtchn *chn);
void (*free_security_evtchn) (struct evtchn *chn);
char *(*show_security_evtchn) (struct domain *d, const struct evtchn *chn);
+ int (*init_hardware_domain) (struct domain *d);
int (*get_pod_target) (struct domain *d);
int (*set_pod_target) (struct domain *d);
@@ -311,6 +312,11 @@ static inline char *xsm_show_security_evtchn (struct
domain *d, const struct evt
return xsm_ops->show_security_evtchn(d, chn);
}
+/* Dispatch to the active XSM module's init_hardware_domain hook. */
+static inline int xsm_init_hardware_domain (xsm_default_t def, struct domain
*d)
+{
+ return xsm_ops->init_hardware_domain(d);
+}
+
+
static inline int xsm_get_pod_target (xsm_default_t def, struct domain *d)
{
return xsm_ops->get_pod_target(d);
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index b79e10f..c2804f2 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -58,6 +58,8 @@ void xsm_fixup_ops (struct xsm_operations *ops)
set_to_dummy_if_null(ops, alloc_security_evtchn);
set_to_dummy_if_null(ops, free_security_evtchn);
set_to_dummy_if_null(ops, show_security_evtchn);
+ set_to_dummy_if_null(ops, init_hardware_domain);
+
set_to_dummy_if_null(ops, get_pod_target);
set_to_dummy_if_null(ops, set_pod_target);
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index 4ce31c9..f1a4a2d 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -327,6 +327,11 @@ static char *flask_show_security_evtchn(struct domain *d,
const struct evtchn *c
return ctx;
}
+/*
+ * FLASK hook: the current domain needs domain2.create_hardware_domain
+ * permission on d before d may be promoted to hardware domain.
+ */
+static int flask_init_hardware_domain(struct domain *d)
+{
+ return current_has_perm(d, SECCLASS_DOMAIN2,
DOMAIN2__CREATE_HARDWARE_DOMAIN);
+}
+
+
static int flask_grant_mapref(struct domain *d1, struct domain *d2,
uint32_t flags)
{
@@ -1500,6 +1505,7 @@ static struct xsm_operations flask_ops = {
.alloc_security_evtchn = flask_alloc_security_evtchn,
.free_security_evtchn = flask_free_security_evtchn,
.show_security_evtchn = flask_show_security_evtchn,
+ .init_hardware_domain = flask_init_hardware_domain,
.get_pod_target = flask_get_pod_target,
.set_pod_target = flask_set_pod_target,
diff --git a/xen/xsm/flask/policy/access_vectors
b/xen/xsm/flask/policy/access_vectors
index a0ed13d..32371a9 100644
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -198,6 +198,8 @@ class domain2
set_max_evtchn
# XEN_DOMCTL_cacheflush
cacheflush
+# Creation of the hardware domain when it is not dom0
+ create_hardware_domain
}
# Similar to class domain, but primarily contains domctls related to HVM
domains
--
1.8.5.3
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |