
Re: [PATCH v4 4/5] xen/arm: ffa: Add indirect message between VM



Hi Bertrand,

On Mon, Mar 24, 2025 at 2:53 PM Bertrand Marquis
<bertrand.marquis@xxxxxxx> wrote:
>
> Add support for indirect messages between VMs.
> This is only enabled if CONFIG_FFA_VM_TO_VM is selected.
>
> Signed-off-by: Bertrand Marquis <bertrand.marquis@xxxxxxx>
> ---
> Changes in v4:
> - Use a local copy of the message header to prevent a possible TOC/TOU
>   issue when using the payload size
> Changes in v3:
> - Move VM-to-VM indirect message handling into a sub-function to simplify
>   lock handling and make the implementation easier to read
> Changes in v2:
> - Switch ifdef to IS_ENABLED
> ---
>  xen/arch/arm/tee/ffa_msg.c | 110 ++++++++++++++++++++++++++++++++-----
>  1 file changed, 96 insertions(+), 14 deletions(-)

Reviewed-by: Jens Wiklander <jens.wiklander@xxxxxxxxxx>

Cheers,
Jens
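
The v4 note above about taking a local copy of the message header is a
classic time-of-check/time-of-use fix: the TX buffer stays writable by the
guest, so any field read once for validation and read again for the copy can
change in between. Below is a minimal standalone sketch of the two patterns
(illustrative only, not Xen code; the struct and function names are invented
for the example, and the payload is assumed to start right after the header
for brevity):

#include <stdint.h>
#include <string.h>

struct msg_hdr {
    uint32_t msg_offset;
    uint32_t msg_size;
};

/* Racy pattern: both the check and the use read the guest-writable header. */
static int copy_payload_racy(void *dst, size_t dst_len,
                             const struct msg_hdr *shared)
{
    if ( shared->msg_size > dst_len )          /* time of check */
        return -1;
    /* The guest may have grown msg_size since the check above. */
    memcpy(dst, shared + 1, shared->msg_size); /* time of use */
    return 0;
}

/* Safe pattern (what the patch does): snapshot the header once, then check
 * and use the same local copy. */
static int copy_payload_safe(void *dst, size_t dst_len,
                             const struct msg_hdr *shared)
{
    struct msg_hdr hdr;

    memcpy(&hdr, shared, sizeof(hdr));
    if ( hdr.msg_size > dst_len )
        return -1;
    memcpy(dst, shared + 1, hdr.msg_size);
    return 0;
}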

>
> diff --git a/xen/arch/arm/tee/ffa_msg.c b/xen/arch/arm/tee/ffa_msg.c
> index ee594e737fc7..7c36e3aac47c 100644
> --- a/xen/arch/arm/tee/ffa_msg.c
> +++ b/xen/arch/arm/tee/ffa_msg.c
> @@ -88,43 +88,125 @@ out:
>                   resp.a7 & mask);
>  }
>
> +static int32_t ffa_msg_send2_vm(uint16_t dst_id,
> +                                struct ffa_part_msg_rxtx *src_msg)
> +{
> +    struct domain *dst_d;
> +    struct ffa_ctx *dst_ctx;
> +    struct ffa_part_msg_rxtx *dst_msg;
> +    int err;
> +    int32_t ret;
> +
> +    if ( dst_id == 0 )
> +        /* FF-A ID 0 is the hypervisor, this is not valid */
> +        return FFA_RET_INVALID_PARAMETERS;
> +
> +    /* This is also checking that dest is not src */
> +    err = rcu_lock_live_remote_domain_by_id(dst_id - 1, &dst_d);
> +    if ( err )
> +        return FFA_RET_INVALID_PARAMETERS;
> +
> +    if ( dst_d->arch.tee == NULL )
> +    {
> +        ret = FFA_RET_INVALID_PARAMETERS;
> +        goto out_unlock;
> +    }
> +
> +    dst_ctx = dst_d->arch.tee;
> +    if ( !dst_ctx->guest_vers )
> +    {
> +        ret = FFA_RET_INVALID_PARAMETERS;
> +        goto out_unlock;
> +    }
> +
> +    /* we need to have enough space in the destination buffer */
> +    if ( dst_ctx->page_count * FFA_PAGE_SIZE <
> +            (sizeof(struct ffa_part_msg_rxtx) + src_msg->msg_size) )
> +    {
> +        ret = FFA_RET_NO_MEMORY;
> +        goto out_unlock;
> +    }
> +
> +    /* This also checks that destination has set a Rx buffer */
> +    ret = ffa_rx_acquire(dst_d);
> +    if ( ret )
> +        goto out_unlock;
> +
> +    dst_msg = dst_ctx->rx;
> +
> +    /* prepare destination header */
> +    dst_msg->flags = 0;
> +    dst_msg->reserved = 0;
> +    dst_msg->msg_offset = sizeof(struct ffa_part_msg_rxtx);
> +    dst_msg->send_recv_id = src_msg->send_recv_id;
> +    dst_msg->msg_size = src_msg->msg_size;
> +
> +    memcpy(dst_ctx->rx + sizeof(struct ffa_part_msg_rxtx),
> +           ((void *)src_msg) + src_msg->msg_offset, src_msg->msg_size);
> +
> +    /* receiver rx buffer will be released by the receiver */
> +
> +out_unlock:
> +    rcu_unlock_domain(dst_d);
> +    if ( !ret )
> +        ffa_raise_rx_buffer_full(dst_d);
> +
> +    return ret;
> +}
> +
>  int32_t ffa_handle_msg_send2(struct cpu_user_regs *regs)
>  {
>      struct domain *src_d = current->domain;
>      struct ffa_ctx *src_ctx = src_d->arch.tee;
> -    const struct ffa_part_msg_rxtx *src_msg;
> +    struct ffa_part_msg_rxtx src_msg;
>      uint16_t dst_id, src_id;
>      int32_t ret;
>
> -    if ( !ffa_fw_supports_fid(FFA_MSG_SEND2) )
> -        return FFA_RET_NOT_SUPPORTED;
> -
>      if ( !spin_trylock(&src_ctx->tx_lock) )
>          return FFA_RET_BUSY;
>
> -    src_msg = src_ctx->tx;
> -    src_id = src_msg->send_recv_id >> 16;
> -    dst_id = src_msg->send_recv_id & GENMASK(15,0);
> +    /* create a copy of the message header */
> +    memcpy(&src_msg, src_ctx->tx, sizeof(src_msg));
> +
> +    src_id = src_msg.send_recv_id >> 16;
> +    dst_id = src_msg.send_recv_id & GENMASK(15,0);
>
> -    if ( src_id != ffa_get_vm_id(src_d) || !FFA_ID_IS_SECURE(dst_id) )
> +    if ( src_id != ffa_get_vm_id(src_d) )
>      {
>          ret = FFA_RET_INVALID_PARAMETERS;
> -        goto out_unlock_tx;
> +        goto out;
>      }
>
>      /* check source message fits in buffer */
>      if ( src_ctx->page_count * FFA_PAGE_SIZE <
> -         src_msg->msg_offset + src_msg->msg_size ||
> -         src_msg->msg_offset < sizeof(struct ffa_part_msg_rxtx) )
> +         src_msg.msg_offset + src_msg.msg_size ||
> +         src_msg.msg_offset < sizeof(struct ffa_part_msg_rxtx) )
>      {
>          ret = FFA_RET_INVALID_PARAMETERS;
> -        goto out_unlock_tx;
> +        goto out;
>      }
>
> -    ret = ffa_simple_call(FFA_MSG_SEND2,
> +    if ( FFA_ID_IS_SECURE(dst_id) )
> +    {
> +        /* Message for a secure partition */
> +        if ( !ffa_fw_supports_fid(FFA_MSG_SEND2) )
> +        {
> +            ret = FFA_RET_NOT_SUPPORTED;
> +            goto out;
> +        }
> +
> +        ret = ffa_simple_call(FFA_MSG_SEND2,
>                            ((uint32_t)ffa_get_vm_id(src_d)) << 16, 0, 0, 0);
> +    }
> +    else if ( IS_ENABLED(CONFIG_FFA_VM_TO_VM) )
> +    {
> +        /* Message for a VM */
> +        ret = ffa_msg_send2_vm(dst_id, &src_msg);
> +    }
> +    else
> +        ret = FFA_RET_INVALID_PARAMETERS;
>
> -out_unlock_tx:
> +out:
>      spin_unlock(&src_ctx->tx_lock);
>      return ret;
>  }
> --
> 2.47.1
>
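
For reference, the sender-side view of the flow the patch implements looks
roughly like this: the VM fills a partition message header followed by the
payload in its TX buffer, then invokes FFA_MSG_SEND2 with its own ID in bits
[31:16] of w1. The sketch below is illustrative only; the header layout
mirrors the fields ffa_msg_send2_vm() fills in, while the function ID value
and the ffa_conduit() helper are assumptions for illustration, not a real
guest API.

#include <stdint.h>
#include <string.h>

/* Assumed FF-A v1.1 function ID for FFA_MSG_SEND2; check the spec/headers. */
#define FFA_MSG_SEND2_FID   0x84000086U

/* Partition message header, mirroring the fields used in the patch. */
struct ffa_part_msg_rxtx {
    uint32_t flags;
    uint32_t reserved;
    uint32_t msg_offset;    /* payload offset from the start of the buffer */
    uint32_t send_recv_id;  /* sender ID in bits [31:16], receiver in [15:0] */
    uint32_t msg_size;      /* payload size in bytes */
};

/* Placeholder for the guest's SMC/HVC conduit; a real guest would trap to
 * the hypervisor here and return the status from w0. */
static int32_t ffa_conduit(uint32_t fid, uint32_t a1, uint32_t a2,
                           uint32_t a3, uint32_t a4)
{
    (void)fid; (void)a1; (void)a2; (void)a3; (void)a4;
    return 0;
}

/* Compose the indirect message in the TX buffer and ask the hypervisor to
 * deliver it to dst_id. */
static int32_t send_indirect_msg(void *tx_buf, uint16_t src_id,
                                 uint16_t dst_id, const void *payload,
                                 uint32_t len)
{
    struct ffa_part_msg_rxtx hdr = {
        .msg_offset = sizeof(hdr),
        .send_recv_id = ((uint32_t)src_id << 16) | dst_id,
        .msg_size = len,
    };

    memcpy(tx_buf, &hdr, sizeof(hdr));
    memcpy((uint8_t *)tx_buf + sizeof(hdr), payload, len);

    /* Sender ID goes in bits [31:16] of w1, matching the forwarding call
     * in ffa_handle_msg_send2(). */
    return ffa_conduit(FFA_MSG_SEND2_FID, (uint32_t)src_id << 16, 0, 0, 0);
}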
