[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH v8 09/13] arm/smmu-v3: add suspend/resume handlers


  • To: Mykola Kvach <xakep.amatop@xxxxxxxxx>
  • From: Luca Fancellu <Luca.Fancellu@xxxxxxx>
  • Date: Mon, 27 Apr 2026 14:02:47 +0000
  • Accept-language: en-GB, en-US
  • Arc-authentication-results: i=2; mx.microsoft.com 1; spf=pass (sender ip is 4.158.2.129) smtp.rcpttodomain=gmail.com smtp.mailfrom=arm.com; dmarc=pass (p=none sp=none pct=100) action=none header.from=arm.com; dkim=pass (signature was verified) header.d=arm.com; arc=pass (0 oda=1 ltdi=1 spf=[1,1,smtp.mailfrom=arm.com] dkim=[1,1,header.d=arm.com] dmarc=[1,1,header.from=arm.com])
  • Arc-authentication-results: i=1; mx.microsoft.com 1; spf=pass smtp.mailfrom=arm.com; dmarc=pass action=none header.from=arm.com; dkim=pass header.d=arm.com; arc=none
  • Arc-message-signature: i=2; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector10001; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1; bh=zCYR6XQdZ+UWKfkuUQcgAMb8DAMnLPe/FWJmgYaUOcw=; b=qVLDrqeErkNJrQ9y7uSwxuRshzdj68K4HFgmU6rJvdoPzSQfcAe8agmpq1O8ANT1wHyM+4v+PCW224pVj3pLLCG5J3Q/nBSvIM+GDu99FOau5UkL99CURYW98jstEw1iiR4i2axXQUiPUQrDX9XlYDXxZEVRIi31p4mrCYQ/X/NXhuVOhXq0EUjyWlia/awAuNBP88cBh/KKKWeDiKRW6YNtYiiNrozrPkAskOCyzHjdB4ZOUUYGtYKqoTUdN53/tThPc1tQYTqnrrp3rhlqd4JH8uJqyV2EClNIQ53nypLljUnkeEgKQPvvaBPcdosmZuA14Omj9pWDOdNnPhqmeg==
  • Arc-message-signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector10001; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1; bh=zCYR6XQdZ+UWKfkuUQcgAMb8DAMnLPe/FWJmgYaUOcw=; b=xXidDk5YXTu4g05ysTSQtKV8OFJyKt57ko+woXaaP7HPpqHvoO35arJXUqL+f52wrxq+W3KPSrLMeOQZF5/r6v9fUUmdXF+6bi+w8XylXQOq5Pv9khtP0viSJ+CwHp+EuvhZib3bqE7qlzwcvyPSPJhVpOZmr6HW9090m+n6tOGRhlWYZjDr8y4WPFU1tP56nyV95DGpFFxs21rcccpTyHNfuooHC4vtixAxupyjjMTTDRN0urh0FAQIQNUK5MDtF59AEXyTmk9EJnOea40yzA3Kcj7+gAZzEEDbj5VT6z+dExpuDo4WdX4QTA3MQk/4S32tUNTlQhO3jOQX9n/c3Q==
  • Arc-seal: i=2; a=rsa-sha256; s=arcselector10001; d=microsoft.com; cv=pass; b=T9n4w8k1abBPEDf93FDhNmlGR805H2ZbgJOdcEv7yeRIDicFjqNZzL5yTyuIKPGZr7VOE/C0EdDHHGtHuhyfBhF7A4OBRm/AKqUpXVZdAAdm5jRjEFZu5sIwM/8+KYIwmHTWbV53mupWE2EjdW/O59xV5TXK/cywIu9FvKrQhMWEd5bHp4HH4E0KkDYRVHFBYSyyUZ4ZPfdsBPBZ7QA+CrpEYoOAPjFMsYy0BmxiKI55SR1skeUpNj8Dp46aZJBu/47N016q0AtLBllwpJduC9cfDp265Xh5S6UEROWflCyz8nfaOD/senJjeZ1m4b15jdEuZn5kJyk46LRbTEldBA==
  • Arc-seal: i=1; a=rsa-sha256; s=arcselector10001; d=microsoft.com; cv=none; b=DZmb6oXEpIAJLojyHhWDtIMkno8q8RSs7c5HSWp+P8dNCpUw2j2Waw2oAJzpzxToIPCAa3u6CKLsVb7zVqPerb+iXGFvJHYbuDCpuDY29Uzum9M+Tba2Vh4xmfgsY8YRwbiUatBCPe5ILce8gPy49Rn2nVxX69d8CF+DU4LCWWO/FPJDsoqUsnmd2t6VBHg83KbreYxVOBHfqfyJTAdU7sVIKpfDVvmrPE/a7yYydsFxLXVs3nBrrqIKsArNuIsDfH5AE0vhXSbBEfKcSVh7wLL4KWSc7H69Z+ASN/C3ohTjZj0pcuknnJlSFymrXKa8xtYyykzs4BiPzvA0YdJR/Q==
  • Authentication-results: eu.smtp.expurgate.cloud; dkim=pass header.s=selector1 header.d=arm.com header.i="@arm.com" header.h="From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck"; dkim=pass header.s=selector1 header.d=arm.com header.i="@arm.com" header.h="From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck"
  • Authentication-results-original: dkim=none (message not signed) header.d=none;dmarc=none action=none header.from=arm.com;
  • Cc: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>, Mykola Kvach <mykola_kvach@xxxxxxxx>, Bertrand Marquis <Bertrand.Marquis@xxxxxxx>, Rahul Singh <Rahul.Singh@xxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, Julien Grall <julien@xxxxxxx>, Michal Orzel <michal.orzel@xxxxxxx>, Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>
  • Delivery-date: Mon, 27 Apr 2026 14:04:07 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Nodisclaimer: true
  • Thread-index: AQHc1k50BMsvUkAtf0yXvEOre8XAaw==
  • Thread-topic: [PATCH v8 09/13] arm/smmu-v3: add suspend/resume handlers

Hi Mykola,

> 
> diff --git a/xen/drivers/passthrough/arm/smmu-v3.c 
> b/xen/drivers/passthrough/arm/smmu-v3.c
> index bf153227db..7607ffc9ca 100644
> --- a/xen/drivers/passthrough/arm/smmu-v3.c
> +++ b/xen/drivers/passthrough/arm/smmu-v3.c
> @@ -1814,8 +1814,7 @@ static int arm_smmu_write_reg_sync(struct 
> arm_smmu_device *smmu, u32 val,
> }
> 
> /* GBPA is "special" */
> -static int __init arm_smmu_update_gbpa(struct arm_smmu_device *smmu,
> -                                       u32 set, u32 clr)
> +static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 
> clr)
> {
> int ret;
> u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA;
> @@ -1995,10 +1994,29 @@ err_free_evtq_irq:
> return ret;
> }
> 
> +static int arm_smmu_enable_irqs(struct arm_smmu_device *smmu)
> +{
> + int ret;
> + u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;
> +
> + if ( smmu->features & ARM_SMMU_FEAT_PRI )
> + irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
> +
> + /* Enable interrupt generation on the SMMU */
> + ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
> +      ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
> + if ( ret )
> + {
> + dev_warn(smmu->dev, "failed to enable irqs\n");
> + return ret;
> + }
> +
> + return 0;
> +}
> +
> static int __init arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
> {
> int ret, irq;
> - u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;
> 
> /* Disable IRQs first */
> ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
> @@ -2028,22 +2046,7 @@ static int __init arm_smmu_setup_irqs(struct 
> arm_smmu_device *smmu)
> }
> }
> 
> - if (smmu->features & ARM_SMMU_FEAT_PRI)
> - irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
> -
> - /* Enable interrupt generation on the SMMU */
> - ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
> -      ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
> - if (ret) {
> - dev_warn(smmu->dev, "failed to enable irqs\n");
> - goto err_free_irqs;
> - }
> -
> return 0;
> -
> -err_free_irqs:
> - arm_smmu_free_irqs(smmu);
> - return ret;
> }
> 
> static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
> @@ -2057,7 +2060,7 @@ static int arm_smmu_device_disable(struct 
> arm_smmu_device *smmu)
> return ret;
> }
> 
> -static int __init arm_smmu_device_reset(struct arm_smmu_device *smmu)
> +static int arm_smmu_device_reset(struct arm_smmu_device *smmu)
> {
> int ret;
> u32 reg, enables;
> @@ -2163,17 +2166,9 @@ static int __init arm_smmu_device_reset(struct 
> arm_smmu_device *smmu)
> }
> }
> 
> - ret = arm_smmu_setup_irqs(smmu);
> - if (ret) {
> - dev_err(smmu->dev, "failed to setup irqs\n");

We are moving this one to the probe and ...

> + ret = arm_smmu_enable_irqs(smmu);
> + if ( ret )

replacing it with this one. However, arm_smmu_setup_irqs() also calls
arm_smmu_setup_unique_irqs(), which in turn calls arm_smmu_setup_msis().
Are we sure that on resume we will get the same state?

> return ret;
> - }
> -
> - /* Initialize tasklets for threaded IRQs*/
> - tasklet_init(&smmu->evtq_irq_tasklet, arm_smmu_evtq_tasklet, smmu);
> - tasklet_init(&smmu->priq_irq_tasklet, arm_smmu_priq_tasklet, smmu);
> - tasklet_init(&smmu->combined_irq_tasklet, arm_smmu_combined_irq_tasklet,
> - smmu);
> 
> /* Enable the SMMU interface, or ensure bypass */
> if (disable_bypass) {
> @@ -2181,20 +2176,16 @@ static int __init arm_smmu_device_reset(struct 
> arm_smmu_device *smmu)
> } else {
> ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
> if (ret)
> - goto err_free_irqs;
> + return ret;
> }
> ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
>     ARM_SMMU_CR0ACK);
> if (ret) {
> dev_err(smmu->dev, "failed to enable SMMU interface\n");
> - goto err_free_irqs;
> + return ret;
> }
> 
> return 0;
> -
> -err_free_irqs:
> - arm_smmu_free_irqs(smmu);
> - return ret;
> }
> 
> static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
> @@ -2558,10 +2549,23 @@ static int __init arm_smmu_device_probe(struct 
> platform_device *pdev)
> if (ret)
> goto out_free;
> 
> + ret = arm_smmu_setup_irqs(smmu);
> + if ( ret )
> + {
> + dev_err(smmu->dev, "failed to setup irqs\n");
> + goto out_free;
> + }
> +
> + /* Initialize tasklets for threaded IRQs*/
> + tasklet_init(&smmu->evtq_irq_tasklet, arm_smmu_evtq_tasklet, smmu);
> + tasklet_init(&smmu->priq_irq_tasklet, arm_smmu_priq_tasklet, smmu);
> + tasklet_init(&smmu->combined_irq_tasklet, arm_smmu_combined_irq_tasklet,
> + smmu);
> +
> /* Reset the device */
> ret = arm_smmu_device_reset(smmu);
> if (ret)
> - goto out_free;
> + goto out_free_irqs;
> 
> /*
> * Keep a list of all probed devices. This will be used to query
> @@ -2575,6 +2579,8 @@ static int __init arm_smmu_device_probe(struct 
> platform_device *pdev)
> 
> return 0;
> 
> +out_free_irqs:
> + arm_smmu_free_irqs(smmu);
> 
> out_free:
> arm_smmu_free_structures(smmu);
> @@ -2855,6 +2861,96 @@ static void arm_smmu_iommu_xen_domain_teardown(struct 
> domain *d)
> xfree(xen_domain);
> }
> 
> +#ifdef CONFIG_SYSTEM_SUSPEND
> +
> +static void arm_smmu_reset_for_suspend_rollback(struct arm_smmu_device *smmu)
> +{
> + int ret = arm_smmu_device_reset(smmu);
> +
> + if ( ret )
> + dev_err(smmu->dev, "Failed to reset during suspend rollback: %d\n",
> + ret);
> +}
> +
> +static int arm_smmu_suspend(void)
> +{
> + struct arm_smmu_device *smmu;
> + int ret = 0;
> +
> + list_for_each_entry(smmu, &arm_smmu_devices, devices)
> + {
> + bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
> +
> + /* Abort all transactions before disable to avoid spurious bypass */
> + ret = arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
> + if ( ret )
> + goto fail;
> +
> + /* Disable the SMMU via CR0.EN and all queues except CMDQ */
> + ret = arm_smmu_write_reg_sync(smmu, CR0_CMDQEN, ARM_SMMU_CR0,
> + ARM_SMMU_CR0ACK);
> + if ( ret )
> + {
> + dev_err(smmu->dev, "Timed-out while disabling smmu\n");
> + goto fail;
> + }
> +
> + /*
> + * At this point the SMMU is completely disabled and won't access
> + * any translation/config structures, even speculative accesses
> + * aren't performed as per the IHI0070 spec (section 6.3.9.6).
> + */
> +
> + /* Wait for the CMDQs to be drained to flush any pending commands */
> + ret = queue_poll_cons(&smmu->cmdq.q, true, wfe);
> + if ( ret )
> + {
> + dev_err(smmu->dev, "Draining queues timed-out\n");
> + goto fail;
> + }

Polling the queue doesn't guarantee that all prior commands have actually
completed. I would use arm_smmu_cmdq_issue_sync() for that, instead of the
above:

ret = arm_smmu_cmdq_issue_sync(smmu);
if ( ret )
   goto fail;

> +
> + /* Disable everything */
> + ret = arm_smmu_device_disable(smmu);
> + if ( ret )
> + goto fail;
> +
> + dev_dbg(smmu->dev, "Suspended smmu\n");
> + }
> +
> + return 0;
> +
> + fail:
> + /* Reset the device that failed as well as any already-suspended ones. */
> + arm_smmu_reset_for_suspend_rollback(smmu);
> +
> + list_for_each_entry_continue_reverse(smmu, &arm_smmu_devices, devices)
> + arm_smmu_reset_for_suspend_rollback(smmu);
> +
> + return ret;
> +}
> +

Cheers,
Luca



 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.