|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [PATCH v2 6/6] xen/arm: ffa: Deliver VM-to-VM notifications locally
Hi Bertrand,
On Wed, Apr 29, 2026 at 7:44 AM Bertrand Marquis
<bertrand.marquis@xxxxxxx> wrote:
>
> VM notification binding and pending tracking exist for non-secure
> endpoints, but FFA_NOTIFICATION_SET still only forwards secure
> destinations to the SPMC. Non-secure VMs therefore cannot receive
> notifications from other VMs. Local NPI delivery also needs explicit
> re-arm tracking so repeated raises are not lost while the interrupt is
> already pending.
>
> Add a local VM notification delivery path for non-secure destinations.
> notification_set_vm() resolves the destination endpoint, verifies that
> every requested bit is bound to the sender, sets the receiver's
> vm_pending bitmap under notif_lock, and raises an NPI only when local
> pending state is not already armed.
>
> Track whether a local NPI is already armed with notif_irq_raised,
> clear that state once both VM and hypervisor pending bitmaps are
> drained, and keep notif_lock held across the VM notification injection
> attempt. If no destination vCPU is online, leave the pending bits set
> and keep notif_irq_raised clear so delivery can be retried later.
> Also expose firmware notification availability so FFA_FEATURES only
> advertises notification support when it is actually provided by the
> firmware or by CONFIG_FFA_VM_TO_VM.
>
> Functional impact: when CONFIG_FFA_VM_TO_VM is enabled, non-secure
> FFA_NOTIFICATION_SET delivers VM-to-VM notifications locally and keeps
> NPI delivery reliable across repeated raises.
>
> Signed-off-by: Bertrand Marquis <bertrand.marquis@xxxxxxx>
> ---
> Changes since v1:
> - serialize notification_set_vm() state updates with the NPI attempt
> - keep pending VM notifications set when local injection fails
> ---
> xen/arch/arm/tee/ffa.c | 24 ++++++++--
> xen/arch/arm/tee/ffa_notif.c | 82 ++++++++++++++++++++++++++++++++--
> xen/arch/arm/tee/ffa_private.h | 17 ++++---
> 3 files changed, 107 insertions(+), 16 deletions(-)
>
> diff --git a/xen/arch/arm/tee/ffa.c b/xen/arch/arm/tee/ffa.c
> index 1fe33f26454a..7fe021049cba 100644
> --- a/xen/arch/arm/tee/ffa.c
> +++ b/xen/arch/arm/tee/ffa.c
> @@ -39,8 +39,13 @@
> * o FFA_MSG_SEND_DIRECT_REQ:
> * - only supported from a VM to an SP
> * o FFA_NOTIFICATION_*:
> + * - only supported when firmware notifications are enabled or VM-to-VM
> + * support is built in
> * - only supports global notifications, that is, per vCPU notifications
> - * are not supported
> + * are not supported and secure per-vCPU notification information is
> + * not forwarded
> + * - the source endpoint ID reported for a notification may no longer
> + * exist by the time the receiver consumes it
> * - doesn't support signalling the secondary scheduler of pending
> * notification for secure partitions
> * - doesn't support notifications for Xen itself
> @@ -245,6 +250,8 @@ static void handle_features(struct cpu_user_regs *regs)
> uint32_t a1 = get_user_reg(regs, 1);
> struct domain *d = current->domain;
> struct ffa_ctx *ctx = d->arch.tee;
> + bool notif_supported = IS_ENABLED(CONFIG_FFA_VM_TO_VM) ||
> + ffa_notif_fw_enabled();
>
> /*
> * FFA_FEATURES defines w2 as input properties only for specific
> @@ -343,10 +350,16 @@ static void handle_features(struct cpu_user_regs *regs)
>
> break;
> case FFA_FEATURE_NOTIF_PEND_INTR:
> - ffa_set_regs_success(regs, GUEST_FFA_NOTIF_PEND_INTR_ID, 0);
> + if ( notif_supported )
> + ffa_set_regs_success(regs, GUEST_FFA_NOTIF_PEND_INTR_ID, 0);
> + else
> + ffa_set_regs_error(regs, FFA_RET_NOT_SUPPORTED);
> break;
> case FFA_FEATURE_SCHEDULE_RECV_INTR:
> - ffa_set_regs_success(regs, GUEST_FFA_SCHEDULE_RECV_INTR_ID, 0);
> + if ( notif_supported )
> + ffa_set_regs_success(regs, GUEST_FFA_SCHEDULE_RECV_INTR_ID, 0);
> + else
> + ffa_set_regs_error(regs, FFA_RET_NOT_SUPPORTED);
> break;
> case FFA_PARTITION_INFO_GET_REGS:
> if ( ACCESS_ONCE(ctx->guest_vers) >= FFA_VERSION_1_2 )
> @@ -361,7 +374,10 @@ static void handle_features(struct cpu_user_regs *regs)
> case FFA_NOTIFICATION_SET:
> case FFA_NOTIFICATION_INFO_GET_32:
> case FFA_NOTIFICATION_INFO_GET_64:
> - ffa_set_regs_success(regs, 0, 0);
> + if ( notif_supported )
> + ffa_set_regs_success(regs, 0, 0);
> + else
> + ffa_set_regs_error(regs, FFA_RET_NOT_SUPPORTED);
> break;
> default:
> ffa_set_regs_error(regs, FFA_RET_NOT_SUPPORTED);
> diff --git a/xen/arch/arm/tee/ffa_notif.c b/xen/arch/arm/tee/ffa_notif.c
> index a841c8f8d747..b29d948a7110 100644
> --- a/xen/arch/arm/tee/ffa_notif.c
> +++ b/xen/arch/arm/tee/ffa_notif.c
> @@ -21,6 +21,11 @@ static bool __ro_after_init fw_notif_enabled;
> static unsigned int __ro_after_init notif_sri_irq;
> static DEFINE_SPINLOCK(notif_info_lock);
>
> +bool ffa_notif_fw_enabled(void)
> +{
> + return fw_notif_enabled;
> +}
> +
> static bool inject_notif_pending(struct domain *d)
> {
> struct vcpu *v;
> @@ -107,6 +112,55 @@ out_unlock:
> return ret;
> }
>
> +/*
> + * Deliver a VM-to-VM notification. ctx->notif.notif_lock protects
> + * vm_bind/vm_pending so callers must not hold it already.
> + */
> +static int32_t notification_set_vm(uint16_t dst_id, uint16_t src_id,
> + uint32_t flags, uint64_t bitmap)
> +{
> + struct domain *dst_d;
> + struct ffa_ctx *dst_ctx;
> + unsigned int id;
> + int32_t ret;
> +
> + if ( flags )
> + return FFA_RET_INVALID_PARAMETERS;
> +
> + ret = ffa_endpoint_domain_lookup(dst_id, &dst_d, &dst_ctx);
> + if ( ret )
> + return ret;
> +
> + ret = FFA_RET_OK;
> +
> + spin_lock(&dst_ctx->notif.notif_lock);
> +
> + for ( id = 0; id < FFA_NUM_VM_NOTIF; id++ )
> + {
> + if ( !(bitmap & BIT(id, ULL)) )
> + continue;
> +
> + if ( dst_ctx->notif.vm_bind[id] != src_id )
> + {
> + ret = FFA_RET_DENIED;
> + goto out_unlock;
> + }
> + }
> +
> + dst_ctx->notif.vm_pending |= bitmap;
> + if ( !dst_ctx->notif.notif_irq_raised &&
> + (dst_ctx->notif.vm_pending || dst_ctx->notif.hyp_pending) &&
> + inject_notif_pending(dst_d) )
> + dst_ctx->notif.notif_irq_raised = true;
> +
> +out_unlock:
> + spin_unlock(&dst_ctx->notif.notif_lock);
> +
> + rcu_unlock_domain(dst_d);
> +
> + return ret;
> +}
> +
> int32_t ffa_handle_notification_bind(struct cpu_user_regs *regs)
> {
> struct domain *d = current->domain;
> @@ -288,6 +342,8 @@ void ffa_handle_notification_get(struct cpu_user_regs *regs)
>
> if ( IS_ENABLED(CONFIG_FFA_VM_TO_VM) )
> {
> + bool pending;
> +
> spin_lock(&ctx->notif.notif_lock);
>
> if ( (flags & FFA_NOTIF_FLAG_BITMAP_HYP) && ctx->notif.hyp_pending )
> @@ -298,6 +354,18 @@ void ffa_handle_notification_get(struct cpu_user_regs *regs)
> ctx->notif.notif_irq_raised = false;
> }
>
> + if ( (flags & FFA_NOTIF_FLAG_BITMAP_VM) && ctx->notif.vm_pending )
> + {
> + w4 = (uint32_t)(ctx->notif.vm_pending & GENMASK(31, 0));
> + w5 = (uint32_t)((ctx->notif.vm_pending >> 32) & GENMASK(31, 0));
> + ctx->notif.vm_pending = 0;
> + }
> +
> + pending = (ctx->notif.hyp_pending != 0) ||
> + (ctx->notif.vm_pending != 0);
> + if ( !pending )
> + ctx->notif.notif_irq_raised = false;
This seems to take care of clearing notif_irq_raised for all cases. Do
we still need the one just above this block (copied here):
if ( !ctx->notif.vm_pending )
ctx->notif.notif_irq_raised = false;
?
Cheers,
Jens
> +
> spin_unlock(&ctx->notif.notif_lock);
> }
>
> @@ -323,9 +391,17 @@ int32_t ffa_handle_notification_set(struct cpu_user_regs *regs)
> if ( flags )
> return FFA_RET_INVALID_PARAMETERS;
>
> - if ( FFA_ID_IS_SECURE(dest_id) && fw_notif_enabled )
> -    return ffa_simple_call(FFA_NOTIFICATION_SET, src_dst, flags, bitmap_lo,
> -                           bitmap_hi);
> + if ( FFA_ID_IS_SECURE(dest_id) )
> + {
> + if ( fw_notif_enabled )
> + return ffa_simple_call(FFA_NOTIFICATION_SET, src_dst, flags,
> + bitmap_lo, bitmap_hi);
> + }
> + else if ( IS_ENABLED(CONFIG_FFA_VM_TO_VM) )
> + {
> + return notification_set_vm(dest_id, caller_id, flags,
> + ((uint64_t)bitmap_hi << 32) | bitmap_lo);
> + }
>
> return FFA_RET_NOT_SUPPORTED;
> }
> diff --git a/xen/arch/arm/tee/ffa_private.h b/xen/arch/arm/tee/ffa_private.h
> index 78a0a9815d56..923a071a9d7c 100644
> --- a/xen/arch/arm/tee/ffa_private.h
> +++ b/xen/arch/arm/tee/ffa_private.h
> @@ -340,20 +340,18 @@ struct ffa_ctx_notif {
> uint64_t vm_pending;
>
> /*
> - * Source endpoint bound to each VM notification ID (0 means unbound).
> + * Tracks whether an NPI has been raised for local pending notifications.
> + * Protected by notif_lock.
> */
> - uint16_t vm_bind[FFA_NUM_VM_NOTIF];
> + bool notif_irq_raised;
>
> /*
> - * Lock protecting the hypervisor-managed notification state.
> + * Source endpoint bound to each VM notification ID (0 means unbound).
> */
> - spinlock_t notif_lock;
> + uint16_t vm_bind[FFA_NUM_VM_NOTIF];
>
> - /*
> - * Tracks whether a local notification pending interrupt was raised.
> - * Protected by notif_lock.
> - */
> - bool notif_irq_raised;
> + /* Lock protecting local notification state. */
> + spinlock_t notif_lock;
>
> /*
> * Bitmap of pending hypervisor notifications (for HYP bitmap queries).
> @@ -495,6 +493,7 @@ void ffa_notif_init(void);
> void ffa_notif_init_interrupt(void);
> int ffa_notif_domain_init(struct domain *d);
> void ffa_notif_domain_destroy(struct domain *d);
> +bool ffa_notif_fw_enabled(void);
>
> int32_t ffa_handle_notification_bind(struct cpu_user_regs *regs);
> int32_t ffa_handle_notification_unbind(struct cpu_user_regs *regs);
> --
> 2.53.0
>
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support. |