[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH v3 18/23] xen/arm: vsmmuv3: Add support to send stage-1 event to guest


  • To: Milan Djokic <milan_djokic@xxxxxxxx>
  • From: Luca Fancellu <Luca.Fancellu@xxxxxxx>
  • Date: Mon, 13 Apr 2026 14:15:08 +0000
  • Accept-language: en-GB, en-US
  • Arc-authentication-results: i=2; mx.microsoft.com 1; spf=pass (sender ip is 4.158.2.129) smtp.rcpttodomain=epam.com smtp.mailfrom=arm.com; dmarc=pass (p=none sp=none pct=100) action=none header.from=arm.com; dkim=pass (signature was verified) header.d=arm.com; arc=pass (0 oda=1 ltdi=1 spf=[1,1,smtp.mailfrom=arm.com] dkim=[1,1,header.d=arm.com] dmarc=[1,1,header.from=arm.com])
  • Arc-authentication-results: i=1; mx.microsoft.com 1; spf=pass smtp.mailfrom=arm.com; dmarc=pass action=none header.from=arm.com; dkim=pass header.d=arm.com; arc=none
  • Arc-message-signature: i=2; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector10001; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1; bh=QZLZQdTkKwWkK4TbcgQ8zuEaVJVDCFbrEW8fwYaWNY0=; b=lCK0PdwyJQq9rYnw6T907Tg7/t/grsP2/tRyCPJScwCr36n/brzB2zlx4djb4vzEbLz6cbJqWyAsvFzhT7NyUaRlzeATGQ0bu59wxibaA41mNcLH9MqdGGaMkkCDciHQ3FWnLpyCSOf+6seAnh3ClmP5AibJVf5v+7V3lQk2pXw034nOy2WXBTKUO32r1VQANhw+DrZ5ueH/PvpwexBXjw9MYbW96sIXJ3DClI3ecMPuj+DDMKDFUNwnL08PTuw2nKaHNqXWfmwXFQRjunCBPGQBvg0Z/qn+I6P+8wYFoOuON6F2QpUr8Hdu1PZ1gm3wlyf3DjXM4lDdkETHjtBemg==
  • Arc-message-signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector10001; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1; bh=QZLZQdTkKwWkK4TbcgQ8zuEaVJVDCFbrEW8fwYaWNY0=; b=AWpZ2tpdUV/c73l4px/O/z+HINgC2VIBGz0HBSZ/SXaaq4/zzO5ZAnM6/Oa7v4g0V6ZI64N3x5FXte4VNgszsZd2Omqo1SN8In1V5HVsK/055yBTXJkf3AKYs54WXuFziWI68NYfASUc31p/THO9sQBkgtrYdzSz9iCRfggSMypQLe+jCxC+oZcys15/6jzkcHrQg/su3qWVon+ogSF2WZ1ERKusuc6ztzjrvTLhAk4zY3xH5E5FIu185TipTjHiVsUeCDNVAf/DprrBxmi47Wpc+FoRl2y2YFEZMOaaW5Ef1vRtsiY64nLGUmn7w9lFp9vOrsYe2zHpUrfk8wthdA==
  • Arc-seal: i=2; a=rsa-sha256; s=arcselector10001; d=microsoft.com; cv=pass; b=VeggpSkADjoJX9UgixV1BSOLxoVxpB7xcR6tKCfTvWqt6QqJiomxBz90XTk/7Soa0LA+J3vp0DnwQBuXVD5aJE8V05rJsnjt+WTcBnWyTBuZQHSeuTRxUt/Jt67mg40stdOcr/gZAGWwDlfm+k1N0/wasR0QmIJ/m67Q9b4/PmOFUGwBl/ep8IPg+0zEj13zqA516sDmgGQWxMp1NWXeXKr1lp9SD0ME9ImSz4PSnIWo6FcPQQ0e+wQhGc2RJJZVInd5QDGI0uFFehVsqhgd7CLwJmmf0PIrdO0Ro7yNVlawoBj7fkY2dfHIqur4eQwcG9mXEMJaKI1CC31LNH4AAA==
  • Arc-seal: i=1; a=rsa-sha256; s=arcselector10001; d=microsoft.com; cv=none; b=WrNcMBrxCTjIHd+u+51j78ue4sI9+RIU4k53WwB69ektqSJdaHZAk9Nq2nSZTlNQ+glG/iGGS6Uv5NYZ98ADdRo+Op2D3X0AE6vTKrNGMosfq4R+iPkhpfddN5aWwZ/yruyYoirgCCqgBtCiiBJ+rioN9gqRXz6G/j6OjQyXASCZkj6bT+WV5lnzMj7EZT5j/SqrE8E0ZqLHagm/bydd1kQyGMnVyQftSXFXAzC5sfZZ57u30jZOeasieUBkxpk6lDUGP/jiqLofp8mvSBU8aI2NWA9Riw/h4l8AednsqRMTXRGg/HuDVZ4rOEkJW0+z/BVwts4nzx4bTIXIAY6B/w==
  • Authentication-results: eu.smtp.expurgate.cloud; dkim=pass header.s=selector1 header.d=arm.com header.i="@arm.com" header.h="From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck"; dkim=pass header.s=selector1 header.d=arm.com header.i="@arm.com" header.h="From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck"
  • Authentication-results-original: dkim=none (message not signed) header.d=none;dmarc=none action=none header.from=arm.com;
  • Cc: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>, Rahul Singh <Rahul.Singh@xxxxxxx>, Bertrand Marquis <Bertrand.Marquis@xxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, Julien Grall <julien@xxxxxxx>, Michal Orzel <michal.orzel@xxxxxxx>, Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>
  • Delivery-date: Mon, 13 Apr 2026 14:16:24 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Nodisclaimer: true
  • Thread-index: AQHcy0/uQpbP6nU2r020PGJaL0V6Mg==
  • Thread-topic: [PATCH v3 18/23] xen/arm: vsmmuv3: Add support to send stage-1 event to guest

Hi Milan,

> On 31 Mar 2026, at 02:52, Milan Djokic <milan_djokic@xxxxxxxx> wrote:
> 
> From: Rahul Singh <rahul.singh@xxxxxxx>
> 
> Stage-1 translation is handled by guest, therefore stage-1 fault has to
> be forwarded to guest.
> 
> Signed-off-by: Rahul Singh <rahul.singh@xxxxxxx>
> Signed-off-by: Milan Djokic <milan_djokic@xxxxxxxx>
> ---
> xen/drivers/passthrough/arm/smmu-v3.c  | 48 ++++++++++++++++++++++++--
> xen/drivers/passthrough/arm/vsmmu-v3.c | 45 ++++++++++++++++++++++++
> xen/drivers/passthrough/arm/vsmmu-v3.h | 12 +++++++
> 3 files changed, 103 insertions(+), 2 deletions(-)
> 
> diff --git a/xen/drivers/passthrough/arm/smmu-v3.c 
> b/xen/drivers/passthrough/arm/smmu-v3.c
> index cf8f638a49..4c1951d753 100644
> --- a/xen/drivers/passthrough/arm/smmu-v3.c
> +++ b/xen/drivers/passthrough/arm/smmu-v3.c
> @@ -869,7 +869,6 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device 
> *smmu, u32 sid)
> return 0;
> }
> 
> -__maybe_unused
> static struct arm_smmu_master *
> arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid)
> {
> @@ -890,10 +889,51 @@ arm_smmu_find_master(struct arm_smmu_device *smmu, u32 
> sid)
> return NULL;
> }
> 
> +static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
> +{
> + int ret;
> + struct arm_smmu_master *master;
> + u32 sid = FIELD_GET(EVTQ_0_SID, evt[0]);
> +
> + switch (FIELD_GET(EVTQ_0_ID, evt[0])) {
> + case EVT_ID_TRANSLATION_FAULT:
> + break;
> + case EVT_ID_ADDR_SIZE_FAULT:
> + break;
> + case EVT_ID_ACCESS_FAULT:
> + break;
> + case EVT_ID_PERMISSION_FAULT:
> + break;
> + default:
> + return -EOPNOTSUPP;
> + }
> +
> + /* Stage-2 event */
> + if (evt[1] & EVTQ_1_S2)
> + return -EFAULT;
> +
> + mutex_lock(&smmu->streams_mutex);
> + master = arm_smmu_find_master(smmu, sid);
> + if (!master) {
> + ret = -EINVAL;
> + goto out_unlock;
> + }
> +
> + ret = arm_vsmmu_handle_evt(master->domain->d, smmu->dev, evt);
> + if (ret) {
> + ret = -EINVAL;
> + goto out_unlock;
> + }
> +
> +out_unlock:
> + mutex_unlock(&smmu->streams_mutex);
> + return ret;
> +}
> +
> /* IRQ and event handlers */
> static void arm_smmu_evtq_tasklet(void *dev)
> {
> - int i;
> + int i, ret;
> struct arm_smmu_device *smmu = dev;
> struct arm_smmu_queue *q = &smmu->evtq.q;
> struct arm_smmu_ll_queue *llq = &q->llq;
> @@ -903,6 +943,10 @@ static void arm_smmu_evtq_tasklet(void *dev)
> while (!queue_remove_raw(q, evt)) {
> u8 id = FIELD_GET(EVTQ_0_ID, evt[0]);
> 
> + ret = arm_smmu_handle_evt(smmu, evt);
> + if (!ret)
> + continue;
> +
> dev_info(smmu->dev, "event 0x%02x received:\n", id);
> for (i = 0; i < ARRAY_SIZE(evt); ++i)
> dev_info(smmu->dev, "\t0x%016llx\n",
> diff --git a/xen/drivers/passthrough/arm/vsmmu-v3.c 
> b/xen/drivers/passthrough/arm/vsmmu-v3.c
> index a5b9700369..5d0dabd2b2 100644
> --- a/xen/drivers/passthrough/arm/vsmmu-v3.c
> +++ b/xen/drivers/passthrough/arm/vsmmu-v3.c
> @@ -103,6 +103,7 @@ struct arm_vsmmu_queue {
> struct virt_smmu {
>     struct      domain *d;
>     struct      list_head viommu_list;
> +    paddr_t     addr;
>     uint8_t     sid_split;
>     uint32_t    features;
>     uint32_t    cr[3];
> @@ -237,6 +238,49 @@ void arm_vsmmu_send_event(struct virt_smmu *smmu,
>     return;
> }
> 
> +static struct virt_smmu *vsmmuv3_find_by_addr(struct domain *d, paddr_t 
> paddr)
> +{
> +    struct virt_smmu *smmu;
> +
> +    list_for_each_entry( smmu, &d->arch.viommu_list, viommu_list )
> +    {
> +        if ( smmu->addr == paddr )
> +            return smmu;
> +    }
> +
> +    return NULL;
> +}
> +
> +int arm_vsmmu_handle_evt(struct domain *d, struct device *dev, uint64_t *evt)
> +{
> +    int ret;
> +    struct virt_smmu *smmu;
> +
> +    if ( is_hardware_domain(d) )
> +    {
> +        paddr_t paddr;
> +        /* Base address */
> +        ret = dt_device_get_address(dev_to_dt(dev), 0, &paddr, NULL);
> +        if ( ret )
> +            return -EINVAL;
> +
> +        smmu = vsmmuv3_find_by_addr(d, paddr);
> +        if ( !smmu )
> +            return -ENODEV;
> +    }
> +    else
> +    {
> +        smmu = list_entry(d->arch.viommu_list.next,
> +                          struct virt_smmu, viommu_list);
> +    }
> +
> +    ret = arm_vsmmu_write_evtq(smmu, evt);
> +    if ( ret )
> +        arm_vsmmu_inject_irq(smmu, true, GERROR_EVTQ_ABT_ERR);
> +
> +    return 0;
> +}
> +
> static int arm_vsmmu_find_ste(struct virt_smmu *smmu, uint32_t sid,
>                               uint64_t *ste)
> {
> @@ -742,6 +786,7 @@ static int vsmmuv3_init_single(struct domain *d, paddr_t 
> addr,
> 
>     smmu->d = d;
>     smmu->virq = virq;
> +    smmu->addr = addr;
>     smmu->cmdq.q_base = FIELD_PREP(Q_BASE_LOG2SIZE, SMMU_CMDQS);
>     smmu->cmdq.ent_size = CMDQ_ENT_DWORDS * DWORDS_BYTES;
>     smmu->evtq.q_base = FIELD_PREP(Q_BASE_LOG2SIZE, SMMU_EVTQS);
> diff --git a/xen/drivers/passthrough/arm/vsmmu-v3.h 
> b/xen/drivers/passthrough/arm/vsmmu-v3.h
> index e11f85b431..c7bfd3fb59 100644
> --- a/xen/drivers/passthrough/arm/vsmmu-v3.h
> +++ b/xen/drivers/passthrough/arm/vsmmu-v3.h
> @@ -8,6 +8,12 @@
> 
> void vsmmuv3_set_type(void);
> 
> +static inline int arm_vsmmu_handle_evt(struct domain *d,
> +                                       struct device *dev, uint64_t *evt)
> +{
> +    return -EINVAL;
> +}

Shouldn’t this be the prototype of arm_vsmmu_handle_evt() instead of a stub 
implementation?

> +
> #else
> 
> static inline void vsmmuv3_set_type(void)
> @@ -15,6 +21,12 @@ static inline void vsmmuv3_set_type(void)
>     return;
> }
> 
> +static inline int arm_vsmmu_handle_evt(struct domain *d,
> +                                       struct device *dev, uint64_t *evt)
> +{
> +    return -EINVAL;
> +}
> +
> #endif /* CONFIG_VIRTUAL_ARM_SMMU_V3 */
> 
> #endif /* __ARCH_ARM_VSMMU_V3_H__ */
> 

Cheers,
Luca


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.