Re: [Xen-devel] [PATCH for Xen 4.5] xen/arm: Add support for GICv3 for domU
On Thu, 30 Oct 2014, Julien Grall wrote:
> The vGIC will emulate the same version as the hardware. The toolstack has
> to retrieve the version of the vGIC in order to be able to create the
> corresponding device tree node.
>
> A new DOMCTL has been introduced for ARM to configure the domain. For now
> it only allows the toolstack to retrieve the version of the vGIC.
> This DOMCTL will be extended later to let the user choose the version of the
> emulated GIC.
>
> Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>
> Signed-off-by: Julien Grall <julien.grall@xxxxxxxxxx>
> Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
> Cc: Jan Beulich <jbeulich@xxxxxxxx>
> Cc: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>

Thanks for acting quickly on this patch.

> This patch is based on Vijay's GICv3 guest support patch [1] and my patch to
> defer the initialization of the vGIC [2].
>
> Xen 4.5 already has support for the hardware GICv3. While it's possible to
> boot and use DOM0, there is no guest support. The first version of this patch
> was sent a couple of months ago, but it never reached unstable for various
> reasons (it was based on the deferred series, lack of review at that time...).
> Without it, platforms with GICv3 won't be able to boot any guest.
>
> The patch has been reworked to make it acceptable for Xen 4.5. Except for the
> new DOMCTL to retrieve the GIC version and one check, the code is purely
> GICv3.
>
> Features such as adding a new config option to let the user choose the GIC
> version are deferred to Xen 4.6.
>
> It has been tested on both GICv2 and GICv3 platforms, and build tested for
> x86.
>
> [1] http://lists.xen.org/archives/html/xen-devel/2014-10/msg00583.html
> [2] https://patches.linaro.org/34664/
> ---
>  tools/flask/policy/policy/modules/xen/xen.if |   2 +-
>  tools/libxc/include/xenctrl.h                |   6 ++
>  tools/libxc/xc_domain.c                      |  19 +++++
>  tools/libxl/libxl_arch.h                     |   4 +
>  tools/libxl/libxl_arm.c                      | 111 ++++++++++++++++++++++++--
>  tools/libxl/libxl_dom.c                      |   6 +-
>  tools/libxl/libxl_internal.h                 |   2 +
>  tools/libxl/libxl_x86.c                      |   8 ++
>  xen/arch/arm/domctl.c                        |  35 ++++++++
>  xen/arch/arm/gic-v3.c                        |  16 +++-
>  xen/include/asm-arm/domain.h                 |   1 -
>  xen/include/public/arch-arm.h                |  16 ++++
>  xen/include/public/domctl.h                  |  17 ++++
>  xen/xsm/flask/hooks.c                        |   3 +
>  xen/xsm/flask/policy/access_vectors          |   2 +
>  15 files changed, 238 insertions(+), 10 deletions(-)
>
> diff --git a/tools/flask/policy/policy/modules/xen/xen.if b/tools/flask/policy/policy/modules/xen/xen.if
> index 641c797..fa69c9d 100644
> --- a/tools/flask/policy/policy/modules/xen/xen.if
> +++ b/tools/flask/policy/policy/modules/xen/xen.if
> @@ -49,7 +49,7 @@ define(`create_domain_common', `
>  			getdomaininfo hypercall setvcpucontext setextvcpucontext
>  			getscheduler getvcpuinfo getvcpuextstate getaddrsize
>  			getaffinity setaffinity };
> -	allow $1 $2:domain2 { set_cpuid settsc setscheduler setclaim set_max_evtchn set_vnumainfo get_vnumainfo psr_cmt_op };
> +	allow $1 $2:domain2 { set_cpuid settsc setscheduler setclaim set_max_evtchn set_vnumainfo get_vnumainfo psr_cmt_op configure_domain };
>  	allow $1 $2:security check_context;
>  	allow $1 $2:shadow enable;
>  	allow $1 $2:mmu { map_read map_write adjust memorymap physmap pinpage mmuext_op };
> diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
> index 564e187..e813a64 100644
> --- a/tools/libxc/include/xenctrl.h
> +++ b/tools/libxc/include/xenctrl.h
> @@ -483,6 +483,12 @@ int xc_domain_create(xc_interface *xch,
>                       uint32_t flags,
>                       uint32_t *pdomid);
>
> +#if defined(__arm__) || defined(__aarch64__)
> +typedef xen_domctl_configuredomain_t xc_domain_configuration_t;
> +
> +int xc_domain_configure(xc_interface *xch, uint32_t domid,
> +                        xc_domain_configuration_t *config);
> +#endif
>
>  /* Functions to produce a dump of a given domain
>   *  xc_domain_dumpcore - produces a dump to a specified file
> diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
> index a9bcd4a..7cba5df 100644
> --- a/tools/libxc/xc_domain.c
> +++ b/tools/libxc/xc_domain.c
> @@ -48,6 +48,25 @@ int xc_domain_create(xc_interface *xch,
>      return 0;
>  }
>
> +#if defined(__arm__) || defined(__aarch64__)
> +int xc_domain_configure(xc_interface *xch, uint32_t domid,
> +                        xc_domain_configuration_t *config)
> +{
> +    int rc;
> +    DECLARE_DOMCTL;
> +
> +    domctl.cmd = XEN_DOMCTL_configure_domain;
> +    domctl.domain = (domid_t)domid;
> +    domctl.u.configuredomain.gic_version = config->gic_version;
> +
> +    rc = do_domctl(xch, &domctl);
> +    if ( !rc )
> +        config->gic_version = domctl.u.configuredomain.gic_version;
> +
> +    return rc;
> +}
> +#endif
> +
>  int xc_domain_cacheflush(xc_interface *xch, uint32_t domid,
>                           xen_pfn_t start_pfn, xen_pfn_t nr_pfns)
>  {
> diff --git a/tools/libxl/libxl_arch.h b/tools/libxl/libxl_arch.h
> index d3bc136..0783c13 100644
> --- a/tools/libxl/libxl_arch.h
> +++ b/tools/libxl/libxl_arch.h
> @@ -16,12 +16,16 @@
>  #define LIBXL_ARCH_H
>
>  /* arch specific internal domain creation function */
> +int libxl__arch_domain_create_pre(libxl__gc *gc, libxl_domain_config *d_config,
> +                                  libxl__domain_build_state *state,
> +                                  uint32_t domid);
>  int libxl__arch_domain_create(libxl__gc *gc, libxl_domain_config *d_config,
>                                uint32_t domid);
>
>  /* setup arch specific hardware description, i.e. DTB on ARM */
>  int libxl__arch_domain_init_hw_description(libxl__gc *gc,
>                                             libxl_domain_build_info *info,
> +                                           libxl__domain_build_state *state,
>                                             struct xc_dom_image *dom);
>  /* finalize arch specific hardware description. */
>  int libxl__arch_domain_finalise_hw_description(libxl__gc *gc,
> diff --git a/tools/libxl/libxl_arm.c b/tools/libxl/libxl_arm.c
> index a122e4a..686a420 100644
> --- a/tools/libxl/libxl_arm.c
> +++ b/tools/libxl/libxl_arm.c
> @@ -23,6 +23,40 @@
>  #define DT_IRQ_TYPE_LEVEL_HIGH     0x00000004
>  #define DT_IRQ_TYPE_LEVEL_LOW      0x00000008
>
> +static const char *gicv_to_string(uint8_t gic_version)
> +{
> +    switch (gic_version) {
> +    case XEN_DOMCTL_CONFIG_GIC_V2:
> +        return "V2";
> +    case XEN_DOMCTL_CONFIG_GIC_V3:
> +        return "V3";
> +    default:
> +        return "unknown";
> +    }
> +}
> +
> +int libxl__arch_domain_create_pre(libxl__gc *gc, libxl_domain_config *d_config,
> +                                  libxl__domain_build_state *state,
> +                                  uint32_t domid)
> +{
> +    xc_domain_configuration_t config;
> +
> +    LOG(DEBUG, "Configure the domain");
> +    config.gic_version = XEN_DOMCTL_CONFIG_GIC_DEFAULT;
> +
> +    if (xc_domain_configure(CTX->xch, domid, &config) != 0) {
> +        LOG(ERROR, "Couldn't configure the domain");
> +        return ERROR_FAIL;
> +    }
> +
> +    LOG(DEBUG, " - vGIC version: %s", gicv_to_string(config.gic_version));
> +
> +    state->gic_version = config.gic_version;
> +
> +    return 0;
> +}
> +
> +
>  int libxl__arch_domain_create(libxl__gc *gc, libxl_domain_config *d_config,
>                                uint32_t domid)
>  {
> @@ -307,9 +341,9 @@ static int make_memory_nodes(libxl__gc *gc, void *fdt,
>      return 0;
>  }
>
> -static int make_intc_node(libxl__gc *gc, void *fdt,
> -                          uint64_t gicd_base, uint64_t gicd_size,
> -                          uint64_t gicc_base, uint64_t gicc_size)
> +static int make_gicv2_node(libxl__gc *gc, void *fdt,
> +                           uint64_t gicd_base, uint64_t gicd_size,
> +                           uint64_t gicc_base, uint64_t gicc_size)
>  {
>      int res;
>      const char *name = GCSPRINTF("interrupt-controller@%"PRIx64, gicd_base);
> @@ -350,6 +384,57 @@ static int make_intc_node(libxl__gc *gc, void *fdt,
>      return 0;
>  }
>
> +static int make_gicv3_node(libxl__gc *gc, void *fdt)
> +{
> +    int res;
> +    const uint64_t gicd_base = GUEST_GICV3_GICD_BASE;
> +    const uint64_t gicd_size = GUEST_GICV3_GICD_SIZE;
> +    const uint64_t gicr0_base = GUEST_GICV3_GICR0_BASE;
> +    const uint64_t gicr0_size = GUEST_GICV3_GICR0_SIZE;
> +    const char *name = GCSPRINTF("interrupt-controller@%"PRIx64, gicd_base);
> +
> +    res = fdt_begin_node(fdt, name);
> +    if (res) return res;
> +
> +    res = fdt_property_compat(gc, fdt, 1,
> +                              "arm,gic-v3");
> +    if (res) return res;
> +
> +    res = fdt_property_cell(fdt, "#interrupt-cells", 3);
> +    if (res) return res;
> +
> +    res = fdt_property_cell(fdt, "#address-cells", 0);
> +    if (res) return res;
> +
> +    res = fdt_property(fdt, "interrupt-controller", NULL, 0);
> +    if (res) return res;
> +
> +    res = fdt_property_cell(fdt, "redistributor-stride",
> +                            GUEST_GICV3_RDIST_STRIDE);
> +    if (res) return res;
> +
> +    res = fdt_property_cell(fdt, "#redistributor-regions",
> +                            GUEST_GICV3_RDIST_REGIONS);
> +    if (res) return res;
> +
> +    res = fdt_property_regs(gc, fdt, ROOT_ADDRESS_CELLS, ROOT_SIZE_CELLS,
> +                            2,
> +                            gicd_base, gicd_size,
> +                            gicr0_base, gicr0_size);
> +    if (res) return res;
> +
> +    res = fdt_property_cell(fdt, "linux,phandle", PHANDLE_GIC);
> +    if (res) return res;
> +
> +    res = fdt_property_cell(fdt, "phandle", PHANDLE_GIC);
> +    if (res) return res;
> +
> +    res = fdt_end_node(fdt);
> +    if (res) return res;
> +
> +    return 0;
> +}
> +
>  static int make_timer_node(libxl__gc *gc, void *fdt, const struct arch_info *ainfo)
>  {
>      int res;
> @@ -454,6 +539,7 @@ out:
>
>  int libxl__arch_domain_init_hw_description(libxl__gc *gc,
>                                             libxl_domain_build_info *info,
> +                                           libxl__domain_build_state *state,
>                                             struct xc_dom_image *dom)
>  {
>      void *fdt = NULL;

Can't you just call xc_domain_configure from here? Why do you need to
introduce libxl__arch_domain_create_pre?
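For illustration, an untested sketch of that alternative (it assumes
dom->guest_domid is already usable at this point, and reuses the new
xc_domain_configure() added by this patch):

    int libxl__arch_domain_init_hw_description(libxl__gc *gc,
                                               libxl_domain_build_info *info,
                                               struct xc_dom_image *dom)
    {
        xc_domain_configuration_t config;

        /* Ask Xen which vGIC it emulates for this domain. */
        config.gic_version = XEN_DOMCTL_CONFIG_GIC_DEFAULT;
        if (xc_domain_configure(CTX->xch, dom->guest_domid, &config) != 0) {
            LOG(ERROR, "Couldn't configure the domain");
            return ERROR_FAIL;
        }

        /* ... existing DTB generation below, choosing make_gicv2_node() or
         * make_gicv3_node() based on config.gic_version instead of
         * state->gic_version ... */
    }

That would avoid both the new libxl__arch_domain_create_pre hook and the
gic_version field in libxl__domain_build_state.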
> @@ -520,9 +606,22 @@ next_resize:
>      FDT( make_psci_node(gc, fdt) );
>
>      FDT( make_memory_nodes(gc, fdt, dom) );
> -    FDT( make_intc_node(gc, fdt,
> -                        GUEST_GICD_BASE, GUEST_GICD_SIZE,
> -                        GUEST_GICC_BASE, GUEST_GICD_SIZE) );
> +
> +    switch (state->gic_version) {
> +    case XEN_DOMCTL_CONFIG_GIC_V2:
> +        FDT( make_gicv2_node(gc, fdt,
> +                             GUEST_GICD_BASE, GUEST_GICD_SIZE,
> +                             GUEST_GICC_BASE, GUEST_GICC_SIZE) );
> +        break;
> +    case XEN_DOMCTL_CONFIG_GIC_V3:
> +        FDT( make_gicv3_node(gc, fdt) );
> +        break;
> +    default:
> +        LOG(ERROR, "Unknown how to create the DT node for GIC %s\n",
> +            gicv_to_string(state->gic_version));
> +        rc = ERROR_FAIL;
> +        goto out;
> +    }
>
>      FDT( make_timer_node(gc, fdt, ainfo) );
>      FDT( make_hypervisor_node(gc, fdt, vers) );
> diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
> index 74ea84b..4669b62 100644
> --- a/tools/libxl/libxl_dom.c
> +++ b/tools/libxl/libxl_dom.c
> @@ -307,6 +307,10 @@ int libxl__build_pre(libxl__gc *gc, uint32_t domid,
>      char *xs_domid, *con_domid;
>      int rc;
>
> +    rc = libxl__arch_domain_create_pre(gc, d_config, state, domid);
> +    if (rc != 0)
> +        return rc;
> +
>      if (xc_domain_max_vcpus(ctx->xch, domid, info->max_vcpus) != 0) {
>          LOG(ERROR, "Couldn't set max vcpu count");
>          return ERROR_FAIL;
> @@ -583,7 +587,7 @@ int libxl__build_pv(libxl__gc *gc, uint32_t domid,
>          LOGE(ERROR, "xc_dom_parse_image failed");
>          goto out;
>      }
> -    if ( (ret = libxl__arch_domain_init_hw_description(gc, info, dom)) != 0 ) {
> +    if ( (ret = libxl__arch_domain_init_hw_description(gc, info, state, dom)) != 0 ) {
>          LOGE(ERROR, "libxl__arch_domain_init_hw_description failed");
>          goto out;
>      }
> diff --git a/tools/libxl/libxl_internal.h b/tools/libxl/libxl_internal.h
> index 4361421..e7068bc 100644
> --- a/tools/libxl/libxl_internal.h
> +++ b/tools/libxl/libxl_internal.h
> @@ -971,6 +971,8 @@ typedef struct {
>      libxl__file_reference pv_ramdisk;
>      const char * pv_cmdline;
>      bool pvh_enabled;
> +
> +    uint8_t gic_version;
>  } libxl__domain_build_state;
>
>  _hidden int libxl__build_pre(libxl__gc *gc, uint32_t domid,
> diff --git a/tools/libxl/libxl_x86.c b/tools/libxl/libxl_x86.c
> index 7589060..6a35694 100644
> --- a/tools/libxl/libxl_x86.c
> +++ b/tools/libxl/libxl_x86.c
> @@ -244,6 +244,13 @@ static int libxl__e820_alloc(libxl__gc *gc, uint32_t domid,
>      return 0;
>  }
>
> +int libxl__arch_domain_create_pre(libxl__gc *gc, libxl_domain_config *d_config,
> +                                  libxl__domain_build_state *state,
> +                                  uint32_t domid)
> +{
> +    return 0;
> +}
> +
>  int libxl__arch_domain_create(libxl__gc *gc, libxl_domain_config *d_config,
>                                uint32_t domid)
>  {
> @@ -313,6 +320,7 @@ int libxl__arch_domain_create(libxl__gc *gc, libxl_domain_config *d_config,
>
>  int libxl__arch_domain_init_hw_description(libxl__gc *gc,
>                                             libxl_domain_build_info *info,
> +                                           libxl__domain_build_state *state,
>                                             struct xc_dom_image *dom)
>  {
>      return 0;
> diff --git a/xen/arch/arm/domctl.c b/xen/arch/arm/domctl.c
> index 45974e7..a7e5b5f 100644
> --- a/xen/arch/arm/domctl.c
> +++ b/xen/arch/arm/domctl.c
> @@ -10,6 +10,8 @@
>  #include <xen/errno.h>
>  #include <xen/sched.h>
>  #include <xen/hypercall.h>
> +#include <asm/gic.h>
> +#include <xen/guest_access.h>
>  #include <public/domctl.h>
>
>  long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
> @@ -30,6 +32,39 @@ long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
>
>          return p2m_cache_flush(d, s, e);
>      }
> +    case XEN_DOMCTL_configure_domain:
> +    {
> +        uint8_t gic_version;
> +
> +        /*
> +         * Xen 4.5: The vGIC is emulating the same version of the
> +         * hardware GIC. Only the value XEN_DOMCTL_CONFIG_GIC_DEFAULT
> +         * is allowed. The DOMCTL will return the actual version of the
> +         * GIC.
> +         */
> +        if ( domctl->u.configuredomain.gic_version != XEN_DOMCTL_CONFIG_GIC_DEFAULT )
> +            return -EINVAL;

-EOPNOTSUPP is a better choice I think
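For example (illustrative only, the rest of the hunk unchanged):

        if ( domctl->u.configuredomain.gic_version != XEN_DOMCTL_CONFIG_GIC_DEFAULT )
            return -EOPNOTSUPP;

-EOPNOTSUPP tells the caller that selecting a specific GIC version is simply
not supported yet, rather than being a malformed request.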
> +        switch ( gic_hw_version() )
> +        {
> +        case GIC_V3:
> +            gic_version = XEN_DOMCTL_CONFIG_GIC_V3;
> +            break;
> +        case GIC_V2:
> +            gic_version = XEN_DOMCTL_CONFIG_GIC_V2;
> +            break;
> +        default:
> +            BUG();
> +        }
> +
> +        domctl->u.configuredomain.gic_version = gic_version;
> +
> +        /* TODO: Make the copy generic for all ARCH domctl */
> +        if ( __copy_to_guest(u_domctl, domctl, 1) )
> +            return -EFAULT;
> +
> +        return 0;
> +    }
>
>      default:
>          return subarch_do_domctl(domctl, d, u_domctl);
> diff --git a/xen/arch/arm/gic-v3.c b/xen/arch/arm/gic-v3.c
> index 91161a2..076aa62 100644
> --- a/xen/arch/arm/gic-v3.c
> +++ b/xen/arch/arm/gic-v3.c
> @@ -906,7 +906,21 @@ static int gicv_v3_init(struct domain *d)
>          d->arch.vgic.rdist_count = gicv3.rdist_count;
>      }
>      else
> -        d->arch.vgic.dbase = GUEST_GICD_BASE;
> +    {
> +        d->arch.vgic.dbase = GUEST_GICV3_GICD_BASE;
> +        d->arch.vgic.dbase_size = GUEST_GICV3_GICD_SIZE;
> +
> +        /* XXX: Only one Re-distributor region mapped for the guest */
> +        BUILD_BUG_ON(GUEST_GICV3_RDIST_REGIONS != 1);

I think that the comment is enough: GUEST_GICV3_RDIST_REGIONS is already
#define to be 1.
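i.e. just something like (sketch only):

        /* Only one re-distributor region is mapped for the guest:
         * GUEST_GICV3_RDIST_REGIONS is #defined to 1. */
        d->arch.vgic.rdist_count = GUEST_GICV3_RDIST_REGIONS;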
> +        d->arch.vgic.rdist_count = GUEST_GICV3_RDIST_REGIONS;
> +        d->arch.vgic.rdist_stride = GUEST_GICV3_RDIST_STRIDE;
> +
> +        /* The first redistributor should contain enough space for all CPUs */
> +        BUILD_BUG_ON((GUEST_GICV3_GICR0_SIZE / GUEST_GICV3_RDIST_STRIDE) < MAX_VIRT_CPUS);
> +        d->arch.vgic.rbase[0] = GUEST_GICV3_GICR0_BASE;
> +        d->arch.vgic.rbase_size[0] = GUEST_GICV3_GICR0_SIZE;
> +    }
>
>      d->arch.vgic.nr_lines = 0;
>
> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
> index 787e93c..4b81e70 100644
> --- a/xen/include/asm-arm/domain.h
> +++ b/xen/include/asm-arm/domain.h
> @@ -47,7 +47,6 @@ struct arch_domain
>  #ifdef CONFIG_ARM_64
>      enum domain_type type;
>  #endif
> -
>      /* Virtual MMU */
>      struct p2m_domain p2m;
>      uint64_t vttbr;

spurious change

> diff --git a/xen/include/public/arch-arm.h b/xen/include/public/arch-arm.h
> index eecc561..d7df274 100644
> --- a/xen/include/public/arch-arm.h
> +++ b/xen/include/public/arch-arm.h
> @@ -373,11 +373,27 @@ typedef uint64_t xen_callback_t;
>   */
>
>  /* Physical Address Space */
> +
> +/* vGIC mappings: Only one set of mapping is used by the guest.
> + * Therefore they can overlap.
> + */
> +
> +/* vGIC v2 mappings */
>  #define GUEST_GICD_BASE   0x03001000ULL
>  #define GUEST_GICD_SIZE   0x00001000ULL
>  #define GUEST_GICC_BASE   0x03002000ULL
>  #define GUEST_GICC_SIZE   0x00000100ULL
>
> +/* vGIC v3 mappings */
> +#define GUEST_GICV3_GICD_BASE      0x03001000ULL
> +#define GUEST_GICV3_GICD_SIZE      0x00010000ULL
> +
> +#define GUEST_GICV3_RDIST_STRIDE   0x20000ULL
> +#define GUEST_GICV3_RDIST_REGIONS  1
> +
> +#define GUEST_GICV3_GICR0_BASE     0x03020000ULL    /* vCPU0 - vCPU15 */
> +#define GUEST_GICV3_GICR0_SIZE     0x00200000ULL
> +
>  /* 16MB == 4096 pages reserved for guest to use as a region to map its
>   * grant table in.
>   */
> diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
> index 58b19e7..8e9545d3 100644
> --- a/xen/include/public/domctl.h
> +++ b/xen/include/public/domctl.h
> @@ -68,6 +68,19 @@ struct xen_domctl_createdomain {
>  typedef struct xen_domctl_createdomain xen_domctl_createdomain_t;
>  DEFINE_XEN_GUEST_HANDLE(xen_domctl_createdomain_t);
>
> +#if defined(__arm__) || defined(__aarch64__)
> +#define XEN_DOMCTL_CONFIG_GIC_DEFAULT   0
> +#define XEN_DOMCTL_CONFIG_GIC_V2        1
> +#define XEN_DOMCTL_CONFIG_GIC_V3        2
> +/* XEN_DOMCTL_configure_domain */
> +struct xen_domctl_configuredomain {
> +    /* IN/OUT parameters */
> +    uint8_t gic_version;
> +};
> +typedef struct xen_domctl_configuredomain xen_domctl_configuredomain_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_domctl_configuredomain_t);
> +#endif
> +
>  /* XEN_DOMCTL_getdomaininfo */
>  struct xen_domctl_getdomaininfo {
>      /* OUT variables. */
> @@ -1056,6 +1069,7 @@ struct xen_domctl {
>  #define XEN_DOMCTL_set_vcpu_msrs                 73
>  #define XEN_DOMCTL_setvnumainfo                  74
>  #define XEN_DOMCTL_psr_cmt_op                    75
> +#define XEN_DOMCTL_configure_domain              76
>  #define XEN_DOMCTL_gdbsx_guestmemio            1000
>  #define XEN_DOMCTL_gdbsx_pausevcpu             1001
>  #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
> @@ -1064,6 +1078,9 @@ struct xen_domctl {
>      domid_t  domain;
>      union {
>          struct xen_domctl_createdomain      createdomain;
> +#if defined(__arm__) || defined(__aarch64__)
> +        struct xen_domctl_configuredomain   configuredomain;
> +#endif
>          struct xen_domctl_getdomaininfo     getdomaininfo;
>          struct xen_domctl_getmemlist        getmemlist;
>          struct xen_domctl_getpageframeinfo  getpageframeinfo;
> diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
> index 6d0fe72..846cf88 100644
> --- a/xen/xsm/flask/hooks.c
> +++ b/xen/xsm/flask/hooks.c
> @@ -727,6 +727,9 @@ static int flask_domctl(struct domain *d, int cmd)
>      case XEN_DOMCTL_psr_cmt_op:
>          return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__PSR_CMT_OP);
>
> +    case XEN_DOMCTL_configure_domain:
> +        return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__CONFIGURE_DOMAIN);
> +
>      default:
>          printk("flask_domctl: Unknown op %d\n", cmd);
>          return -EPERM;
> diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors
> index de0c707..bfe2fa5 100644
> --- a/xen/xsm/flask/policy/access_vectors
> +++ b/xen/xsm/flask/policy/access_vectors
> @@ -216,6 +216,8 @@ class domain2
>      get_vnumainfo
>  # XEN_DOMCTL_psr_cmt_op
>      psr_cmt_op
> +# XEN_DOMCTL_configure_domain
> +    configure_domain
>  }
>
>  # Similar to class domain, but primarily contains domctls related to HVM domains
> --
> 1.7.10.4
>

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel