[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH v3 12/23] xen/arm: vsmmuv3: Add support for event queue and global error


  • To: Milan Djokic <milan_djokic@xxxxxxxx>
  • From: Luca Fancellu <Luca.Fancellu@xxxxxxx>
  • Date: Mon, 13 Apr 2026 11:06:33 +0000
  • Accept-language: en-GB, en-US
  • Arc-authentication-results: i=2; mx.microsoft.com 1; spf=pass (sender ip is 4.158.2.129) smtp.rcpttodomain=epam.com smtp.mailfrom=arm.com; dmarc=pass (p=none sp=none pct=100) action=none header.from=arm.com; dkim=pass (signature was verified) header.d=arm.com; arc=pass (0 oda=1 ltdi=1 spf=[1,1,smtp.mailfrom=arm.com] dkim=[1,1,header.d=arm.com] dmarc=[1,1,header.from=arm.com])
  • Arc-authentication-results: i=1; mx.microsoft.com 1; spf=pass smtp.mailfrom=arm.com; dmarc=pass action=none header.from=arm.com; dkim=pass header.d=arm.com; arc=none
  • Arc-message-signature: i=2; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector10001; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1; bh=PKm9WY0FDXfeIXUUjwNFNFIYohAbgJ+9fFMaoRS1vww=; b=MO4qNy7wJXk87VVBuhmzOtrm3mT7i66V+kHw+nJDOFEyDIivNEnBCkOnml++Nhls/8I1Myu11fqPyRcKIsMGxf5nrdBowK1xdPszU8oQsH/j1xdYJEbbkH5MFefhrTy71p17eS3U7EldTxW2TA5q2AW5n2nUgGwnrSxbDCfz7L0JsjUYPsZMjYt7I8j0fs5jiOpJBzsp8liMDDBYW4OSotm7pmPwU6ogqVhLM1+PKOoFjfX2KtPe68PUKDWowJloLDyfNy4EMkEYJx8AEF+BbxbQk3wKfuuDxLmQJQclfdElLAIrr/2VQhVCmwsapZwCY3uSdgpTtpQImOaDZ3DxCQ==
  • Arc-message-signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector10001; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1; bh=PKm9WY0FDXfeIXUUjwNFNFIYohAbgJ+9fFMaoRS1vww=; b=ZsRj6vASN0FxKSG0IsNS9XdtDc33OytSVqWAcmnLcYOOI/iZt4d2j3fmEAoYDqZyntWLQcrluGkVDzbLZIF6sT67/Yg8+hsxzN7R/NItnESJRutAd04vsBqF8NK7EVULWDK72JC+SEuaZPAcV31mJGgHiB7JqPpVqFyszuY3Nuf5Oed2+HDe/HCVzk/fFJP7FXb0GXx1E1PeMFTIY6E90FsQrYaMHlUkq020KndigNdW1tR9TUyslwl20RPyfjHxRY5B7xa9Fc+5gZu98rZ6qZZ7ycLDo+C6gIb1rIE1yQ1kJnviquMNajIGDRoaxImeIq9fc6ohDMN8gNO9n5yQXg==
  • Arc-seal: i=2; a=rsa-sha256; s=arcselector10001; d=microsoft.com; cv=pass; b=ysuCD9PJCly+U3YTbYpjP4pVaZ3CAdwWz0h1XSMMj2mBfbfnpDMo3ulJl9FfRwnxVx+hn8rTY1Rz6DVH5TqoJdI43DHUVoVN2tfXwerUwZC0F2Hg1t71P2Rt/pV/pBKfgUFrKF70dVeUyYoq9W9w8ONzFR4rcGdOdCbSLa5Kuiykin1heZO4px9m3j0iFEygPDjV5JGLe8RlkCL6D/Y3BYLyL8rRYsRFfPTG0bq2h4JD7KC9aB1a183q1gs/ALkJAqborhqSWLg54KzdCXYf4HKesdCiXj/aR8s4jR8m+vV5k90ztJfsRUWabOmLgtcL1V5aWxUXci5QsZhdlYZlyA==
  • Arc-seal: i=1; a=rsa-sha256; s=arcselector10001; d=microsoft.com; cv=none; b=V5LhpyeoEw3rzihf9K+w3mQXrP3ZN17AkanExxjQouaoLJI1BjcSJBB0tcYqx9D/AF4Z1nvxMdeuxtIkQC22INhN80Iak4F1P7Do74EeAGdNMK7vQuJgrqNj28ukyBL5RQBorF+Tw2NOY0sSmZ16HOm6Q8VvLGDg3dw7Morw6dP1wvzJuQk0nYSyqGT0ILFQLesCpj/gKHvcyff7bN/p9ium7v/VEvvr8Q0kXPT/qqyOW+8rQIZJZZyvC9cP20iH0zp8Hb+MObuOjBSFeaFPncOL3alj9c7WoW7GwQvsd6PPkeAJ2AWyi58v7i+YwEcAM8f2wghBXYpYLoDFP5H4vg==
  • Authentication-results: eu.smtp.expurgate.cloud; dkim=pass header.s=selector1 header.d=arm.com header.i="@arm.com" header.h="From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck"; dkim=pass header.s=selector1 header.d=arm.com header.i="@arm.com" header.h="From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck"
  • Authentication-results-original: dkim=none (message not signed) header.d=none;dmarc=none action=none header.from=arm.com;
  • Cc: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>, Rahul Singh <Rahul.Singh@xxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, Julien Grall <julien@xxxxxxx>, Bertrand Marquis <Bertrand.Marquis@xxxxxxx>, Michal Orzel <michal.orzel@xxxxxxx>, Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>
  • Delivery-date: Mon, 13 Apr 2026 11:07:57 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Nodisclaimer: true
  • Thread-index: AQHcyzWWF4JjVjjX1ESgGXK5vaYBUA==
  • Thread-topic: [PATCH v3 12/23] xen/arm: vsmmuv3: Add support for event queue and global error

Hi Milan,

> diff --git a/xen/drivers/passthrough/arm/vsmmu-v3.c 
> b/xen/drivers/passthrough/arm/vsmmu-v3.c
> index 6d3636b18b..7a6c18df53 100644
> --- a/xen/drivers/passthrough/arm/vsmmu-v3.c
> +++ b/xen/drivers/passthrough/arm/vsmmu-v3.c
> @@ -44,6 +44,7 @@ extern const struct viommu_desc __read_mostly *cur_viommu;
> 
> /* Helper Macros */
> #define smmu_get_cmdq_enabled(x)    FIELD_GET(CR0_CMDQEN, x)
> +#define smmu_get_evtq_enabled(x)    FIELD_GET(CR0_EVTQEN, x)
> #define smmu_cmd_get_command(x)     FIELD_GET(CMDQ_0_OP, x)
> #define smmu_cmd_get_sid(x)         FIELD_GET(CMDQ_PREFETCH_0_SID, x)
> #define smmu_get_ste_s1cdmax(x)     FIELD_GET(STRTAB_STE_0_S1CDMAX, x)
> @@ -52,6 +53,35 @@ extern const struct viommu_desc __read_mostly *cur_viommu;
> #define smmu_get_ste_s1ctxptr(x)    FIELD_PREP(STRTAB_STE_0_S1CTXPTR_MASK, \
>                                     FIELD_GET(STRTAB_STE_0_S1CTXPTR_MASK, x))
> 
> +/* event queue entry */
> +struct arm_smmu_evtq_ent {
> +    /* Common fields */
> +    uint8_t     opcode;
> +    uint32_t    sid;
> +
> +    /* Event-specific fields */
> +    union {
> +        struct {
> +            uint32_t ssid;
> +            bool ssv;
> +        } c_bad_ste_streamid;
> +
> +        struct {
> +            bool stall;
> +            uint16_t stag;
> +            uint32_t ssid;
> +            bool ssv;
> +            bool s2;
> +            uint64_t addr;
> +            bool rnw;
> +            bool pnu;
> +            bool ind;
> +            uint8_t class;
> +            uint64_t addr2;
> +        } f_translation;
> +    };
> +};
> +
> /* stage-1 translation configuration */
> struct arm_vsmmu_s1_trans_cfg {
>     paddr_t s1ctxptr;
> @@ -82,6 +112,7 @@ struct virt_smmu {
>     uint32_t    strtab_base_cfg;
>     uint64_t    strtab_base;
>     uint32_t    irq_ctrl;
> +    uint32_t    virq;
>     uint64_t    gerror_irq_cfg0;
>     uint64_t    evtq_irq_cfg0;
>     struct      arm_vsmmu_queue evtq, cmdq;
> @@ -89,6 +120,12 @@ struct virt_smmu {
> };
> 
> /* Queue manipulation functions */
> +static bool queue_full(struct arm_vsmmu_queue *q)
> +{
> +    return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
> +           Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
> +}
> +
> static bool queue_empty(struct arm_vsmmu_queue *q)
> {
>     return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
> @@ -101,11 +138,105 @@ static void queue_inc_cons(struct arm_vsmmu_queue *q)
>     q->cons = Q_OVF(q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
> }
> 
> +static void queue_inc_prod(struct arm_vsmmu_queue *q)
> +{
> +    u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1;
> +    q->prod = Q_OVF(q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
> +}
> +
> static void dump_smmu_command(uint64_t *command)
> {
>     gdprintk(XENLOG_ERR, "cmd 0x%02llx: %016lx %016lx\n",
>              smmu_cmd_get_command(command[0]), command[0], command[1]);
> }
> +
> +static void arm_vsmmu_inject_irq(struct virt_smmu *smmu, bool is_gerror,
> +                                uint32_t gerror_err)
> +{
> +    uint32_t new_gerrors, pending;
> +
> +    if ( is_gerror )
> +    {
> +        /* trigger global error irq to guest */
> +        pending = smmu->gerror ^ smmu->gerrorn;
> +        new_gerrors = ~pending & gerror_err;
> +
> +        /* only toggle non pending errors */
> +        if (!new_gerrors)

NIT: coding style — missing spaces inside the parentheses; Xen style wants `if ( !new_gerrors )`

> +            return;
> +
> +        smmu->gerror ^= new_gerrors;
> +    }
> +
> +    vgic_inject_irq(smmu->d, NULL, smmu->virq, true);

I don’t understand this: shouldn’t the IRQ level be driven high or low depending
on whether an event-queue or global-error condition is pending? Also, this doesn’t
take into account whether the corresponding IRQ is enabled
(IRQ_CTRL_EVTQ_IRQEN / IRQ_CTRL_GERROR_IRQEN).

> +}
> +
> +static int arm_vsmmu_write_evtq(struct virt_smmu *smmu, uint64_t *evt)
> +{
> +    struct arm_vsmmu_queue *q = &smmu->evtq;
> +    struct domain *d = smmu->d;
> +    paddr_t addr;
> +    int ret;
> +
> +    if ( !smmu_get_evtq_enabled(smmu->cr[0]) )
> +        return -EINVAL;
> +
> +    if ( queue_full(q) )
> +        return -EINVAL;
> +
> +    addr = Q_PROD_ENT(q);
> +    ret = access_guest_memory_by_gpa(d, addr, evt,
> +                                     sizeof(*evt) * EVTQ_ENT_DWORDS, true);
> +    if ( ret )
> +        return ret;
> +
> +    queue_inc_prod(q);
> +
> +    /* trigger eventq irq to guest */
> +    if ( !queue_empty(q) )
> +        arm_vsmmu_inject_irq(smmu, false, 0);
> +
> +    return 0;
> +}
> +
> +void arm_vsmmu_send_event(struct virt_smmu *smmu,
> +                          struct arm_smmu_evtq_ent *ent)
> +{
> +    uint64_t evt[EVTQ_ENT_DWORDS];
> +    int ret;
> +
> +    memset(evt, 0, 1 << EVTQ_ENT_SZ_SHIFT);
> +
> +    if ( !smmu_get_evtq_enabled(smmu->cr[0]) )
> +        return;
> +
> +    evt[0] |= FIELD_PREP(EVTQ_0_ID, ent->opcode);
> +    evt[0] |= FIELD_PREP(EVTQ_0_SID, ent->sid);
> +
> +    switch (ent->opcode)

NIT: coding style — missing spaces inside the parentheses; Xen style wants `switch ( ent->opcode )`

> +    {
> +    case EVT_ID_BAD_STREAMID:
> +    case EVT_ID_BAD_STE:
> +        evt[0] |= FIELD_PREP(EVTQ_0_SSID, ent->c_bad_ste_streamid.ssid);
> +        evt[0] |= FIELD_PREP(EVTQ_0_SSV, ent->c_bad_ste_streamid.ssv);
> +        break;
> +    case EVT_ID_TRANSLATION_FAULT:
> +    case EVT_ID_ADDR_SIZE_FAULT:
> +    case EVT_ID_ACCESS_FAULT:
> +    case EVT_ID_PERMISSION_FAULT:
> +        break;
> +    default:
> +        gdprintk(XENLOG_WARNING, "vSMMUv3: event opcode is bad\n");
> +        break;
> +    }
> +
> +    ret = arm_vsmmu_write_evtq(smmu, evt);
> +    if ( ret )
> +        arm_vsmmu_inject_irq(smmu, true, GERROR_EVTQ_ABT_ERR);
> +
> +    return;
> +}
> +
> static int arm_vsmmu_find_ste(struct virt_smmu *smmu, uint32_t sid,
>                               uint64_t *ste)
> {
> @@ -114,11 +245,22 @@ static int arm_vsmmu_find_ste(struct virt_smmu *smmu, 
> uint32_t sid,
>     uint32_t log2size;
>     int strtab_size_shift;
>     int ret;
> +    struct arm_smmu_evtq_ent ent = {
> +        .sid = sid,
> +        .c_bad_ste_streamid = {
> +            .ssid = 0,
> +            .ssv = false,
> +        },
> +    };
> 
>     log2size = FIELD_GET(STRTAB_BASE_CFG_LOG2SIZE, smmu->strtab_base_cfg);
> 
>     if ( sid >= (1 << MIN(log2size, SMMU_IDR1_SIDSIZE)) )
> +    {
> +        ent.opcode = EVT_ID_BAD_STE;
> +        arm_vsmmu_send_event(smmu, &ent);
>         return -EINVAL;
> +    }
> 
>     if ( smmu->features & STRTAB_BASE_CFG_FMT_2LVL )
>     {
> @@ -156,6 +298,8 @@ static int arm_vsmmu_find_ste(struct virt_smmu *smmu, 
> uint32_t sid,
>         {
>             gdprintk(XENLOG_ERR, "idx=%d > max_l2_ste=%d\n",
>                      idx, max_l2_ste);
> +            ent.opcode = EVT_ID_BAD_STREAMID;
> +            arm_vsmmu_send_event(smmu, &ent);
>             return -EINVAL;
>         }
>         addr = l2ptr + idx * sizeof(*ste) * STRTAB_STE_DWORDS;
> @@ -183,6 +327,14 @@ static int arm_vsmmu_decode_ste(struct virt_smmu *smmu, 
> uint32_t sid,
>                                 uint64_t *ste)
> {
>     uint64_t val = ste[0];
> +    struct arm_smmu_evtq_ent ent = {
> +        .opcode = EVT_ID_BAD_STE,
> +        .sid = sid,
> +        .c_bad_ste_streamid = {
> +            .ssid = 0,
> +            .ssv = false,
> +        },
> +    };
> 
>     if ( !(val & STRTAB_STE_0_V) )
>         return -EAGAIN;
> @@ -217,6 +369,7 @@ static int arm_vsmmu_decode_ste(struct virt_smmu *smmu, 
> uint32_t sid,
>     return 0;
> 
> bad_ste:
> +    arm_vsmmu_send_event(smmu, &ent);
>     return -EINVAL;
> }
> 
> @@ -577,7 +730,8 @@ static const struct mmio_handler_ops vsmmuv3_mmio_handler 
> = {
>     .write = vsmmuv3_mmio_write,
> };
> 
> -static int vsmmuv3_init_single(struct domain *d, paddr_t addr, paddr_t size)
> +static int vsmmuv3_init_single(struct domain *d, paddr_t addr,
> +                               paddr_t size, uint32_t virq)
> {
>     struct virt_smmu *smmu;
> 
> @@ -586,6 +740,7 @@ static int vsmmuv3_init_single(struct domain *d, paddr_t 
> addr, paddr_t size)

Since this patch starts injecting IRQs, we should also vgic_reserve_virq() the
virq here, as is done in the patch
“xen/arm: vsmmuv3: Alloc virq for virtual SMMUv3” — so maybe it would be better
to move those changes into this patch.

>         return -ENOMEM;
> 
>     smmu->d = d;
> +    smmu->virq = virq;
>     smmu->cmdq.q_base = FIELD_PREP(Q_BASE_LOG2SIZE, SMMU_CMDQS);
>     smmu->cmdq.ent_size = CMDQ_ENT_DWORDS * DWORDS_BYTES;
>     smmu->evtq.q_base = FIELD_PREP(Q_BASE_LOG2SIZE, SMMU_EVTQS);
> @@ -612,14 +767,16 @@ int domain_vsmmuv3_init(struct domain *d)
> 
>         list_for_each_entry(hw_iommu, &host_iommu_list, entry)
>         {
> -            ret = vsmmuv3_init_single(d, hw_iommu->addr, hw_iommu->size);
> +            ret = vsmmuv3_init_single(d, hw_iommu->addr, hw_iommu->size,
> +                                      hw_iommu->irq);
>             if ( ret )
>                 return ret;
>         }
>     }
>     else
>     {
> -        ret = vsmmuv3_init_single(d, GUEST_VSMMUV3_BASE, GUEST_VSMMUV3_SIZE);
> +        ret = vsmmuv3_init_single(d, GUEST_VSMMUV3_BASE, GUEST_VSMMUV3_SIZE,
> +                                  GUEST_VSMMU_SPI);
>         if ( ret )
>             return ret;
>     }
> diff --git a/xen/include/public/arch-arm.h b/xen/include/public/arch-arm.h
> index ebac02ed63..1b606e20fd 100644
> --- a/xen/include/public/arch-arm.h
> +++ b/xen/include/public/arch-arm.h
> @@ -527,9 +527,10 @@ typedef uint64_t xen_callback_t;
> #define GUEST_EVTCHN_PPI        31
> 
> #define GUEST_VPL011_SPI        32
> +#define GUEST_VSMMU_SPI         33

This change alone breaks an already-built toolstack; can we instead define
GUEST_VSMMU_SPI as 44 and leave GUEST_VIRTIO_MMIO_SPI_FIRST and
GUEST_VIRTIO_MMIO_SPI_LAST as they are?

> 
> -#define GUEST_VIRTIO_MMIO_SPI_FIRST   33
> -#define GUEST_VIRTIO_MMIO_SPI_LAST    43
> +#define GUEST_VIRTIO_MMIO_SPI_FIRST   34
> +#define GUEST_VIRTIO_MMIO_SPI_LAST    44
> 
> /*
>  * SGI is the preferred delivery mechanism of FF-A pending notifications or
> 

Cheers,
Luca


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.