[xen master] xen/riscv: implementation of aplic and imsic operations
commit d4676a1398bc50717e1ef1f39da95b885b404c3e
Author:     Oleksii Kurochko <oleksii.kurochko@xxxxxxxxx>
AuthorDate: Thu Jul 10 13:39:53 2025 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu Jul 10 13:39:53 2025 +0200

    xen/riscv: implementation of aplic and imsic operations

    Introduce an interrupt controller descriptor for the host APLIC to
    describe the low-level hardware. It includes implementation of the
    following functions:
    - aplic_irq_startup()
    - aplic_irq_enable()
    - aplic_irq_disable()
    - aplic_set_irq_affinity()

    As the APLIC is used in MSI mode, interrupts have to be enabled and
    disabled not only in the APLIC but also in the IMSIC. Therefore,
    imsic_irq_{enable,disable}() is introduced for the purpose of
    aplic_irq_{enable,disable}(). For the purpose of
    aplic_set_irq_affinity(), aplic_get_cpu_from_mask() is introduced to
    get the hart id.

    Also, introduce an additional interrupt controller h/w operation and
    host_irq_type for the APLIC:
    - aplic_host_irq_type

    The patch is based on the code from [1].

    [1] https://gitlab.com/xen-project/people/olkur/xen/-/commit/7390e2365828b83e27ead56b03114a56e3699dd5

    Co-developed-by: Romain Caritey <Romain.Caritey@xxxxxxxxxxxxx>
    Signed-off-by: Oleksii Kurochko <oleksii.kurochko@xxxxxxxxx>
    Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/riscv/aplic-priv.h        |   4 ++
 xen/arch/riscv/aplic.c             | 123 ++++++++++++++++++++++++++++++++++++-
 xen/arch/riscv/imsic.c             | 122 +++++++++++++++++++++++++++++++++++-
 xen/arch/riscv/include/asm/aplic.h |   2 +
 xen/arch/riscv/include/asm/imsic.h |  18 ++++++
 5 files changed, 267 insertions(+), 2 deletions(-)

diff --git a/xen/arch/riscv/aplic-priv.h b/xen/arch/riscv/aplic-priv.h
index 551747a8b4..85e0d028d1 100644
--- a/xen/arch/riscv/aplic-priv.h
+++ b/xen/arch/riscv/aplic-priv.h
@@ -14,6 +14,7 @@
 #ifndef ASM_RISCV_APLIC_PRIV_H
 #define ASM_RISCV_APLIC_PRIV_H
 
+#include <xen/spinlock.h>
 #include <xen/types.h>
 
 #include <asm/aplic.h>
@@ -26,6 +27,9 @@ struct aplic_priv {
     /* Registers */
     volatile struct aplic_regs __iomem *regs;
 
+    /* Lock to protect access to APLIC's registers */
+    spinlock_t lock;
+
     /* IMSIC configuration */
     const struct imsic_config *imsic_cfg;
 };
diff --git a/xen/arch/riscv/aplic.c b/xen/arch/riscv/aplic.c
index 0b0101ebc1..edf4db3113 100644
--- a/xen/arch/riscv/aplic.c
+++ b/xen/arch/riscv/aplic.c
@@ -15,6 +15,7 @@
 #include <xen/irq.h>
 #include <xen/mm.h>
 #include <xen/sections.h>
+#include <xen/spinlock.h>
 #include <xen/types.h>
 #include <xen/vmap.h>
 
@@ -23,11 +24,14 @@
 #include <asm/device.h>
 #include <asm/imsic.h>
 #include <asm/intc.h>
+#include <asm/io.h>
 #include <asm/riscv_encoding.h>
 
 #define APLIC_DEFAULT_PRIORITY 1
 
-static struct aplic_priv aplic;
+static struct aplic_priv aplic = {
+    .lock = SPIN_LOCK_UNLOCKED,
+};
 
 static struct intc_info __ro_after_init aplic_info = {
     .hw_version = INTC_APLIC,
@@ -116,9 +120,126 @@ static int __init cf_check aplic_init(void)
     return 0;
 }
 
+static void cf_check aplic_irq_enable(struct irq_desc *desc)
+{
+    /*
+     * TODO: Currently, APLIC is supported only with MSI interrupts.
+     * If APLIC without MSI interrupts is required in the future,
+     * this function will need to be updated accordingly.
+     */
+    ASSERT(readl(&aplic.regs->domaincfg) & APLIC_DOMAINCFG_DM);
+
+    ASSERT(spin_is_locked(&desc->lock));
+
+    spin_lock(&aplic.lock);
+
+    /* Enable interrupt in IMSIC */
+    imsic_irq_enable(desc->irq);
+
+    /* Enable interrupt in APLIC */
+    writel(desc->irq, &aplic.regs->setienum);
+
+    spin_unlock(&aplic.lock);
+}
+
+static void cf_check aplic_irq_disable(struct irq_desc *desc)
+{
+    /*
+     * TODO: Currently, APLIC is supported only with MSI interrupts.
+     * If APLIC without MSI interrupts is required in the future,
+     * this function will need to be updated accordingly.
+     */
+    ASSERT(readl(&aplic.regs->domaincfg) & APLIC_DOMAINCFG_DM);
+
+    ASSERT(spin_is_locked(&desc->lock));
+
+    spin_lock(&aplic.lock);
+
+    /* Disable interrupt in APLIC */
+    writel(desc->irq, &aplic.regs->clrienum);
+
+    /* Disable interrupt in IMSIC */
+    imsic_irq_disable(desc->irq);
+
+    spin_unlock(&aplic.lock);
+}
+
+static unsigned int cf_check aplic_irq_startup(struct irq_desc *desc)
+{
+    aplic_irq_enable(desc);
+
+    return 0;
+}
+
+static unsigned int aplic_get_cpu_from_mask(const cpumask_t *cpumask)
+{
+    cpumask_t mask;
+
+    cpumask_and(&mask, cpumask, &cpu_online_map);
+
+    return cpumask_any(&mask);
+}
+
+static void cf_check aplic_set_irq_affinity(struct irq_desc *desc, const cpumask_t *mask)
+{
+    unsigned int cpu;
+    uint64_t group_index, base_ppn;
+    uint32_t hhxw, lhxw, hhxs, value;
+    const struct imsic_config *imsic = aplic.imsic_cfg;
+
+    /*
+     * TODO: Currently, APLIC is supported only with MSI interrupts.
+     * If APLIC without MSI interrupts is required in the future,
+     * this function will need to be updated accordingly.
+     */
+    ASSERT(readl(&aplic.regs->domaincfg) & APLIC_DOMAINCFG_DM);
+
+    ASSERT(!cpumask_empty(mask));
+
+    ASSERT(spin_is_locked(&desc->lock));
+
+    cpu = cpuid_to_hartid(aplic_get_cpu_from_mask(mask));
+    hhxw = imsic->group_index_bits;
+    lhxw = imsic->hart_index_bits;
+    /*
+     * Although this variable is used only once in the calculation of
+     * group_index, and it might seem that hhxs could be defined as:
+     * hhxs = imsic->group_index_shift - IMSIC_MMIO_PAGE_SHIFT;
+     * and then the addition of IMSIC_MMIO_PAGE_SHIFT could be omitted
+     * when calculating the group index.
+     * It was done intentionally this way to follow the formula from
+     * the AIA specification for calculating the MSI address.
+     */
+    hhxs = imsic->group_index_shift - IMSIC_MMIO_PAGE_SHIFT * 2;
+    base_ppn = imsic->msi[cpu].base_addr >> IMSIC_MMIO_PAGE_SHIFT;
+
+    /* Update hart and EEID in the target register */
+    group_index = (base_ppn >> (hhxs + IMSIC_MMIO_PAGE_SHIFT)) &
+                  (BIT(hhxw, UL) - 1);
+    value = desc->irq;
+    value |= cpu << APLIC_TARGET_HART_IDX_SHIFT;
+    value |= group_index << (lhxw + APLIC_TARGET_HART_IDX_SHIFT);
+
+    spin_lock(&aplic.lock);
+
+    writel(value, &aplic.regs->target[desc->irq - 1]);
+
+    spin_unlock(&aplic.lock);
+}
+
+static const hw_irq_controller aplic_xen_irq_type = {
+    .typename = "aplic",
+    .startup = aplic_irq_startup,
+    .shutdown = aplic_irq_disable,
+    .enable = aplic_irq_enable,
+    .disable = aplic_irq_disable,
+    .set_affinity = aplic_set_irq_affinity,
+};
+
 static const struct intc_hw_operations aplic_ops = {
     .info = &aplic_info,
     .init = aplic_init,
+    .host_irq_type = &aplic_xen_irq_type,
 };
 
 static int cf_check aplic_irq_xlate(const uint32_t *intspec,
diff --git a/xen/arch/riscv/imsic.c b/xen/arch/riscv/imsic.c
index 63f4233035..a4460576f6 100644
--- a/xen/arch/riscv/imsic.c
+++ b/xen/arch/riscv/imsic.c
@@ -29,7 +29,124 @@ struct imsic_mmios {
     unsigned long size;
 };
 
-static struct imsic_config imsic_cfg;
+static struct imsic_config imsic_cfg = {
+    .lock = SPIN_LOCK_UNLOCKED,
+};
+
+#define IMSIC_DISABLE_EIDELIVERY    0
+#define IMSIC_ENABLE_EIDELIVERY     1
+#define IMSIC_DISABLE_EITHRESHOLD   1
+#define IMSIC_ENABLE_EITHRESHOLD    0
+
+#define imsic_csr_write(c, v)   \
+do {                            \
+    csr_write(CSR_SISELECT, c); \
+    csr_write(CSR_SIREG, v);    \
+} while (0)
+
+#define imsic_csr_set(c, v)     \
+do {                            \
+    csr_write(CSR_SISELECT, c); \
+    csr_set(CSR_SIREG, v);      \
+} while (0)
+
+#define imsic_csr_clear(c, v)   \
+do {                            \
+    csr_write(CSR_SISELECT, c); \
+    csr_clear(CSR_SIREG, v);    \
+} while (0)
+
+void __init imsic_ids_local_delivery(bool enable)
+{
+    if ( enable )
+    {
+        imsic_csr_write(IMSIC_EITHRESHOLD, IMSIC_ENABLE_EITHRESHOLD);
+        imsic_csr_write(IMSIC_EIDELIVERY, IMSIC_ENABLE_EIDELIVERY);
+    }
+    else
+    {
+        imsic_csr_write(IMSIC_EITHRESHOLD, IMSIC_DISABLE_EITHRESHOLD);
+        imsic_csr_write(IMSIC_EIDELIVERY, IMSIC_DISABLE_EIDELIVERY);
+    }
+}
+
+static void imsic_local_eix_update(unsigned long base_id, unsigned long num_id,
+                                   bool pend, bool val)
+{
+    unsigned long id = base_id, last_id = base_id + num_id;
+
+    while ( id < last_id )
+    {
+        unsigned long isel, ireg;
+        unsigned long start_id = id & (__riscv_xlen - 1);
+        unsigned long chunk = __riscv_xlen - start_id;
+        unsigned long count = min(last_id - id, chunk);
+
+        isel = id / __riscv_xlen;
+        isel *= __riscv_xlen / IMSIC_EIPx_BITS;
+        isel += pend ? IMSIC_EIP0 : IMSIC_EIE0;
+
+        ireg = GENMASK(start_id + count - 1, start_id);
+
+        id += count;
+
+        if ( val )
+            imsic_csr_set(isel, ireg);
+        else
+            imsic_csr_clear(isel, ireg);
+    }
+}
+
+void imsic_irq_enable(unsigned int irq)
+{
+    /*
+     * The only caller of imsic_irq_enable() is aplic_irq_enable(), which
+     * already runs with IRQs disabled. Therefore, there's no need to use
+     * spin_lock_irqsave() in this function.
+     *
+     * This ASSERT is added as a safeguard: if imsic_irq_enable() is ever
+     * called from a context where IRQs are not disabled,
+     * spin_lock_irqsave() should be used instead of spin_lock().
+     */
+    ASSERT(!local_irq_is_enabled());
+
+    spin_lock(&imsic_cfg.lock);
+    /*
+     * There is no irq - 1 here (look at aplic_set_irq_type()) because:
+     * From the spec:
+     * When an interrupt file supports distinct interrupt identities,
+     * valid identity numbers are between 1 and inclusive. The identity
+     * numbers within this range are said to be implemented by the interrupt
+     * file; numbers outside this range are not implemented. The number zero
+     * is never a valid interrupt identity.
+     * ...
+     * Bit positions in a valid eiek register that don't correspond to a
+     * supported interrupt identity (such as bit 0 of eie0) are read-only zeros.
+     *
+     * So in EIx registers interrupt i corresponds to bit i in comparison with
+     * APLIC's sourcecfg which starts from 0.
+     */
+    imsic_local_eix_update(irq, 1, false, true);
+    spin_unlock(&imsic_cfg.lock);
+}
+
+void imsic_irq_disable(unsigned int irq)
+{
+    /*
+     * The only caller of imsic_irq_disable() is aplic_irq_disable(), which
+     * already runs with IRQs disabled. Therefore, there's no need to use
+     * spin_lock_irqsave() in this function.
+     *
+     * This ASSERT is added as a safeguard: if imsic_irq_disable() is ever
+     * called from a context where IRQs are not disabled,
+     * spin_lock_irqsave() should be used instead of spin_lock().
+     */
+    ASSERT(!local_irq_is_enabled());
+
+    spin_lock(&imsic_cfg.lock);
+    imsic_local_eix_update(irq, 1, false, false);
+    spin_unlock(&imsic_cfg.lock);
+}
 
 /* Callers aren't intended to changed imsic_cfg so return const. */
 const struct imsic_config *imsic_get_config(void)
@@ -355,6 +472,9 @@ int __init imsic_init(const struct dt_device_node *node)
         goto imsic_init_err;
     }
 
+    /* Enable local interrupt delivery */
+    imsic_ids_local_delivery(true);
+
     imsic_cfg.msi = msi;
 
     xvfree(mmios);
diff --git a/xen/arch/riscv/include/asm/aplic.h b/xen/arch/riscv/include/asm/aplic.h
index 1388a977e6..7d811d3522 100644
--- a/xen/arch/riscv/include/asm/aplic.h
+++ b/xen/arch/riscv/include/asm/aplic.h
@@ -18,6 +18,8 @@
 #define APLIC_DOMAINCFG_IE BIT(8, U)
 #define APLIC_DOMAINCFG_DM BIT(2, U)
 
+#define APLIC_TARGET_HART_IDX_SHIFT 18
+
 struct aplic_regs {
     uint32_t domaincfg;         /* 0x0000 */
     uint32_t sourcecfg[1023];   /* 0x0004 */
diff --git a/xen/arch/riscv/include/asm/imsic.h b/xen/arch/riscv/include/asm/imsic.h
index 9cd12365b1..378e49d933 100644
--- a/xen/arch/riscv/include/asm/imsic.h
+++ b/xen/arch/riscv/include/asm/imsic.h
@@ -11,6 +11,7 @@
 #ifndef ASM_RISCV_IMSIC_H
 #define ASM_RISCV_IMSIC_H
 
+#include <xen/spinlock.h>
 #include <xen/types.h>
 
 #define IMSIC_MMIO_PAGE_SHIFT 12
@@ -19,6 +20,15 @@
 #define IMSIC_MIN_ID 63
 #define IMSIC_MAX_ID 2047
 
+#define IMSIC_EIDELIVERY 0x70
+
+#define IMSIC_EITHRESHOLD 0x72
+
+#define IMSIC_EIP0 0x80
+#define IMSIC_EIPx_BITS 32
+
+#define IMSIC_EIE0 0xC0
+
 struct imsic_msi {
     paddr_t base_addr;
     unsigned long offset;
@@ -45,6 +55,9 @@ struct imsic_config {
 
     /* MSI */
     const struct imsic_msi *msi;
+
+    /* Lock to protect access to IMSIC's stuff */
+    spinlock_t lock;
 };
 
 struct dt_device_node;
@@ -52,4 +65,9 @@ int imsic_init(const struct dt_device_node *node);
 
 const struct imsic_config *imsic_get_config(void);
 
+void imsic_irq_enable(unsigned int hwirq);
+void imsic_irq_disable(unsigned int hwirq);
+
+void imsic_ids_local_delivery(bool enable);
+
 #endif /* ASM_RISCV_IMSIC_H */
--
generated by git-patchbot for /home/xen/git/xen.git#master
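For readers who want to see the AIA MSI-address arithmetic used by aplic_set_irq_affinity() with concrete numbers, here is a small standalone C sketch. Every concrete value in it (group_index_bits, hart_index_bits, group_index_shift, the interrupt-file base address, and the hart and interrupt numbers) is a made-up example, not taken from the patch or from any real platform; only the shift-and-mask structure mirrors the hunk above.

/* Hypothetical, self-contained example; not part of the patch. */
#include <stdint.h>
#include <stdio.h>

#define IMSIC_MMIO_PAGE_SHIFT       12
#define APLIC_TARGET_HART_IDX_SHIFT 18

int main(void)
{
    /* Example IMSIC layout and target (all values invented). */
    uint64_t base_addr = 0x28001000;    /* hart's interrupt file address */
    unsigned int hhxw = 2;              /* group_index_bits */
    unsigned int lhxw = 4;              /* hart_index_bits */
    unsigned int group_index_shift = 24;
    unsigned int hart = 1;              /* chosen hart index */
    unsigned int irq = 5;               /* APLIC source number / EIID */

    /* hhxs as in the AIA MSI address formula (hence the "* 2"). */
    unsigned int hhxs = group_index_shift - IMSIC_MMIO_PAGE_SHIFT * 2;
    uint64_t base_ppn = base_addr >> IMSIC_MMIO_PAGE_SHIFT;

    /* The group index is extracted from the PPN of the hart's interrupt file. */
    uint64_t group_index = (base_ppn >> (hhxs + IMSIC_MMIO_PAGE_SHIFT)) &
                           ((1ULL << hhxw) - 1);

    /* EIID in the low bits, hart index and group index above it. */
    uint32_t value = irq;
    value |= hart << APLIC_TARGET_HART_IDX_SHIFT;
    value |= (uint32_t)(group_index << (lhxw + APLIC_TARGET_HART_IDX_SHIFT));

    /* The patch writes such a value to aplic.regs->target[irq - 1]. */
    printf("target[%u] = 0x%08x\n", irq - 1, (unsigned int)value);

    return 0;
}

With these example numbers the group index works out to 0, so the result is simply (1 << 18) | 5 = 0x40005; in the actual code the corresponding parameters come from the IMSIC configuration parsed out of the device tree.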