[Xen-devel] [PATCH v2 15/21] xen/arm: generate vpl011 node on device tree for domU
Introduce vpl011 support to guests started from Xen: it provides a
simple way to print output from a guest, as most guests come with a
pl011 driver. It is also able to provide a working console with
interrupt support.

The UART exposed to the guest is an SBSA compatible UART, not a PL011:
the SBSA UART is a subset of PL011 r1p5. A full PL011 implementation in
Xen would just be too difficult, so guests may require some driver
changes.

Enable vpl011 conditionally, if the user requested it.

Make set_interrupt_ppi able to handle non-PPI interrupts and rename it
to set_interrupt.

Signed-off-by: Stefano Stabellini <stefanos@xxxxxxxxxx>
---
Changes in v2:
- code style fixes
- make set_interrupt_ppi generic
- rename set_interrupt_ppi to set_interrupt
- only make the vpl011 node if the option was enabled
---
 xen/arch/arm/domain_build.c | 90 +++++++++++++++++++++++++++++++++++++--------
 1 file changed, 75 insertions(+), 15 deletions(-)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 48a91ad..718be48 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -519,17 +519,17 @@ static int write_properties(struct domain *d, struct kernel_info *kinfo,
 
 typedef __be32 gic_interrupt_t[3];
 
-static void set_interrupt_ppi(gic_interrupt_t interrupt, unsigned int irq,
-                              unsigned int cpumask, unsigned int level)
+static void set_interrupt(gic_interrupt_t interrupt, unsigned int irq,
+                          unsigned int cpumask, unsigned int level)
 {
     __be32 *cells = interrupt;
+    int is_ppi = (irq < 32);
 
-    BUG_ON(irq < 16);
-    BUG_ON(irq >= 32);
+    irq -= (is_ppi) ? 16: 32; /* PPIs start at 16, SPIs at 32 */
 
     /* See linux Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt */
-    dt_set_cell(&cells, 1, 1); /* is a PPI */
-    dt_set_cell(&cells, 1, irq - 16); /* PPIs start at 16 */
+    dt_set_cell(&cells, 1, is_ppi); /* is a PPI? */
+    dt_set_cell(&cells, 1, irq);
     dt_set_cell(&cells, 1, (cpumask << 8) | level);
 }
 
@@ -648,7 +648,7 @@ static int make_hypervisor_node(struct domain *d,
      *  - All CPUs
      *  TODO: Handle properly the cpumask;
      */
-    set_interrupt_ppi(intr, d->arch.evtchn_irq, 0xf, DT_IRQ_TYPE_LEVEL_LOW);
+    set_interrupt(intr, d->arch.evtchn_irq, 0xf, DT_IRQ_TYPE_LEVEL_LOW);
     res = fdt_property_interrupts(fdt, &intr, 1);
     if ( res )
         return res;
@@ -924,15 +924,15 @@ static int make_timer_node(const struct domain *d, void *fdt,
 
     irq = timer_get_irq(TIMER_PHYS_SECURE_PPI);
     dt_dprintk("  Secure interrupt %u\n", irq);
-    set_interrupt_ppi(intrs[0], irq, 0xf, DT_IRQ_TYPE_LEVEL_LOW);
+    set_interrupt(intrs[0], irq, 0xf, DT_IRQ_TYPE_LEVEL_LOW);
 
     irq = timer_get_irq(TIMER_PHYS_NONSECURE_PPI);
     dt_dprintk("  Non secure interrupt %u\n", irq);
-    set_interrupt_ppi(intrs[1], irq, 0xf, DT_IRQ_TYPE_LEVEL_LOW);
+    set_interrupt(intrs[1], irq, 0xf, DT_IRQ_TYPE_LEVEL_LOW);
 
     irq = timer_get_irq(TIMER_VIRT_PPI);
     dt_dprintk("  Virt interrupt %u\n", irq);
-    set_interrupt_ppi(intrs[2], irq, 0xf, DT_IRQ_TYPE_LEVEL_LOW);
+    set_interrupt(intrs[2], irq, 0xf, DT_IRQ_TYPE_LEVEL_LOW);
 
     res = fdt_property_interrupts(fdt, intrs, 3);
     if ( res )
@@ -1503,9 +1503,9 @@ static int make_timer_domU_node(const struct domain *d, void *fdt)
             return res;
     }
 
-    set_interrupt_ppi(intrs[0], GUEST_TIMER_PHYS_S_PPI, 0xf, DT_IRQ_TYPE_LEVEL_LOW);
-    set_interrupt_ppi(intrs[1], GUEST_TIMER_PHYS_NS_PPI, 0xf, DT_IRQ_TYPE_LEVEL_LOW);
-    set_interrupt_ppi(intrs[2], GUEST_TIMER_VIRT_PPI, 0xf, DT_IRQ_TYPE_LEVEL_LOW);
+    set_interrupt(intrs[0], GUEST_TIMER_PHYS_S_PPI, 0xf, DT_IRQ_TYPE_LEVEL_LOW);
+    set_interrupt(intrs[1], GUEST_TIMER_PHYS_NS_PPI, 0xf, DT_IRQ_TYPE_LEVEL_LOW);
+    set_interrupt(intrs[2], GUEST_TIMER_VIRT_PPI, 0xf, DT_IRQ_TYPE_LEVEL_LOW);
 
     res = fdt_property(fdt, "interrupts", intrs, sizeof (intrs[0]) * 3);
     if ( res )
@@ -1520,12 +1520,63 @@ static int make_timer_domU_node(const struct domain *d, void *fdt)
     return res;
 }
 
+#ifdef CONFIG_SBSA_VUART_CONSOLE
+static int make_vpl011_uart_node(const struct domain *d, void *fdt,
+                                 int addrcells, int sizecells)
+{
+    int res;
+    gic_interrupt_t intr;
+    int reg_size = addrcells + sizecells;
+    int nr_cells = reg_size;
+    __be32 reg[nr_cells];
+    __be32 *cells;
+
+    res = fdt_begin_node(fdt, "sbsa-pl011");
+    if ( res )
+        return res;
+
+    res = fdt_property_string(fdt, "compatible", "arm,sbsa-uart");
+    if ( res )
+        return res;
+
+    cells = &reg[0];
+    dt_child_set_range(&cells, addrcells, sizecells, GUEST_PL011_BASE,
+                       GUEST_PL011_SIZE);
+    if ( res )
+        return res;
+    res = fdt_property(fdt, "reg", reg, sizeof(reg));
+    if ( res )
+        return res;
+
+    set_interrupt(intr, GUEST_VPL011_SPI, 0xf, DT_IRQ_TYPE_LEVEL_HIGH);
+
+    res = fdt_property(fdt, "interrupts", intr, sizeof (intr));
+    if ( res )
+        return res;
+
+    res = fdt_property_cell(fdt, "interrupt-parent",
+                            GUEST_PHANDLE_GIC);
+    if ( res )
+        return res;
+
+    /* Use a default baud rate of 115200. */
+    fdt_property_u32(fdt, "current-speed", 115200);
+
+    res = fdt_end_node(fdt);
+    if ( res )
+        return res;
+
+    return 0;
+}
+#endif
+
 /*
  * The max size for DT is 2MB. However, the generated DT is small, 4KB
  * are enough for now, but we might have to increase it in the feature.
  */
 #define DOMU_DTB_SIZE 4096
 
-static int prepare_dtb_domU(struct domain *d, struct kernel_info *kinfo)
+static int prepare_dtb_domU(struct domain *d, struct kernel_info *kinfo,
+                            bool vpl011)
 {
     int addrcells, sizecells;
     int ret;
@@ -1585,6 +1636,15 @@ static int prepare_dtb_domU(struct domain *d, struct kernel_info *kinfo)
     if ( ret )
         goto err;
 
+#ifdef CONFIG_SBSA_VUART_CONSOLE
+    if ( vpl011 )
+    {
+        ret = make_vpl011_uart_node(d, kinfo->fdt, addrcells, sizecells);
+        if ( ret )
+            goto err;
+    }
+#endif
+
     ret = fdt_end_node(kinfo->fdt);
     if ( ret < 0 )
         goto err;
@@ -2467,7 +2527,7 @@ static int __init construct_domU(struct domain *d, struct dt_device_node *node)
     d->arch.type = kinfo.type;
 
     allocate_memory(d, &kinfo);
 
-    rc = prepare_dtb_domU(d, &kinfo);
+    rc = prepare_dtb_domU(d, &kinfo, vpl011);
     if ( rc < 0 )
         return rc;
-- 
1.9.1

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
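
For readers who want to check the encoding by hand, below is a minimal,
standalone C sketch (not part of the patch, and not Xen code) of the GIC
interrupt-specifier layout that the generalized set_interrupt() writes into
the "interrupts" property, following the Linux arm,gic.txt binding. The IRQ
numbers are illustrative assumptions (27 for the virtual timer PPI, 32 for
the virtual UART SPI), and the real code emits big-endian cells via
dt_set_cell() rather than host-endian integers.

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* Trigger types as used in the binding (assumed to match DT_IRQ_TYPE_*). */
#define IRQ_TYPE_LEVEL_HIGH 0x4
#define IRQ_TYPE_LEVEL_LOW  0x8

/*
 * Host-endian sketch of the three-cell GIC interrupt specifier that
 * set_interrupt() produces.
 */
static void encode_gic_irq(uint32_t cells[3], unsigned int irq,
                           unsigned int cpumask, unsigned int level)
{
    int is_ppi = (irq < 32);             /* PPIs are IDs 16-31, SPIs start at 32 */

    cells[0] = is_ppi;                   /* cell 0: 1 = PPI, 0 = SPI */
    cells[1] = irq - (is_ppi ? 16 : 32); /* cell 1: number relative to the type's base */
    cells[2] = (cpumask << 8) | level;   /* cell 2: CPU mask in bits 15:8, trigger in bits 3:0 */
}

int main(void)
{
    uint32_t intr[3];

    /* An SPI, e.g. the virtual UART interrupt (ID 32 is an assumed example value). */
    encode_gic_irq(intr, 32, 0xf, IRQ_TYPE_LEVEL_HIGH);
    printf("SPI 32 -> <%" PRIu32 " %" PRIu32 " 0x%" PRIx32 ">\n",
           intr[0], intr[1], intr[2]);

    /* A PPI, e.g. the virtual timer interrupt (ID 27). */
    encode_gic_irq(intr, 27, 0xf, IRQ_TYPE_LEVEL_LOW);
    printf("PPI 27 -> <%" PRIu32 " %" PRIu32 " 0x%" PRIx32 ">\n",
           intr[0], intr[1], intr[2]);

    return 0;
}

Under those assumptions the SPI case prints <0 0 0xf04>, which is the shape
of the "interrupts" property the generated sbsa-pl011 node would carry: first
cell 0 for an SPI, second cell the SPI number minus 32, third cell the CPU
mask together with the active-high level trigger.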