
Re: [PATCH 07/10] xen/arm: ffa: Transmit RXTX buffers to the SPMC


  • To: Jens Wiklander <jens.wiklander@xxxxxxxxxx>
  • From: Bertrand Marquis <Bertrand.Marquis@xxxxxxx>
  • Date: Thu, 26 Sep 2024 06:56:06 +0000
  • Accept-language: en-GB, en-US
  • Cc: Xen-devel <xen-devel@xxxxxxxxxxxxxxxxxxxx>, Volodymyr Babchuk <volodymyr_babchuk@xxxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, Julien Grall <julien@xxxxxxx>, Michal Orzel <michal.orzel@xxxxxxx>
  • Delivery-date: Thu, 26 Sep 2024 06:56:45 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Thread-topic: [PATCH 07/10] xen/arm: ffa: Transmit RXTX buffers to the SPMC

Hi Jens,

> On 23 Sep 2024, at 19:36, Jens Wiklander <jens.wiklander@xxxxxxxxxx> wrote:
> 
> Hi,
> 
> On Thu, Sep 19, 2024 at 2:20 PM Bertrand Marquis
> <bertrand.marquis@xxxxxxx> wrote:
>> 
>> When an RXTX buffer is mapped by a VM, transmit it to the SPMC when it
>> supports RX_ACQUIRE.
>> As a consequence, we must acquire the RX buffer of a VM from the
>> SPMC when we want to use it:
>> - create generic acquire and release functions to get the rx buffer of
>>  a VM, fetching it from the SPMC when supported
>> - rename rx_acquire to hyp_rx_acquire to remove confusion
>> - rework the rx_lock to only protect access to rx_is_free and only allow
>>  usage of the rx buffer by the one who managed to acquire it, thus
>>  removing the trylock and returning busy if rx_is_free is false
>> 
>> As part of this change, move some structure definitions from ffa_shm to
>> ffa_private as those are needed for the MAP call to the SPMC.
>> 
>> Signed-off-by: Bertrand Marquis <bertrand.marquis@xxxxxxx>
>> ---
>> xen/arch/arm/tee/ffa.c          |   2 +-
>> xen/arch/arm/tee/ffa_partinfo.c |  28 +++----
>> xen/arch/arm/tee/ffa_private.h  |  22 +++++-
>> xen/arch/arm/tee/ffa_rxtx.c     | 126 ++++++++++++++++++++++++++++----
>> xen/arch/arm/tee/ffa_shm.c      |  15 ----
>> 5 files changed, 142 insertions(+), 51 deletions(-)
>> 
>> diff --git a/xen/arch/arm/tee/ffa.c b/xen/arch/arm/tee/ffa.c
>> index 45f9c1db8a6e..4a769e20007b 100644
>> --- a/xen/arch/arm/tee/ffa.c
>> +++ b/xen/arch/arm/tee/ffa.c
>> @@ -344,7 +344,7 @@ static bool ffa_handle_call(struct cpu_user_regs *regs)
>>         ffa_handle_partition_info_get(regs);
>>         return true;
>>     case FFA_RX_RELEASE:
>> -        e = ffa_handle_rx_release();
>> +        e = ffa_rx_release(d);
>>         break;
>>     case FFA_MSG_SEND_DIRECT_REQ_32:
>>     case FFA_MSG_SEND_DIRECT_REQ_64:
>> diff --git a/xen/arch/arm/tee/ffa_partinfo.c b/xen/arch/arm/tee/ffa_partinfo.c
>> index b391b1adf9f2..fde187dba4e5 100644
>> --- a/xen/arch/arm/tee/ffa_partinfo.c
>> +++ b/xen/arch/arm/tee/ffa_partinfo.c
>> @@ -121,11 +121,9 @@ void ffa_handle_partition_info_get(struct cpu_user_regs *regs)
>>         goto out;
>>     }
>> 
>> -    if ( !spin_trylock(&ctx->rx_lock) )
>> -    {
>> -        ret = FFA_RET_BUSY;
>> +    ret = ffa_rx_acquire(d);
>> +    if ( ret != FFA_RET_OK )
>>         goto out;
>> -    }
>> 
>>     dst_buf = ctx->rx;
>> 
>> @@ -135,22 +133,16 @@ void ffa_handle_partition_info_get(struct cpu_user_regs *regs)
>>         goto out_rx_release;
>>     }
>> 
>> -    if ( !ctx->page_count || !ctx->rx_is_free )
>> -    {
>> -        ret = FFA_RET_DENIED;
>> -        goto out_rx_release;
>> -    }
>> -
>>     spin_lock(&ffa_rx_buffer_lock);
>> 
>>     ret = ffa_partition_info_get(uuid, 0, &ffa_sp_count, &src_size);
>> 
>>     if ( ret )
>> -        goto out_rx_buf_unlock;
>> +        goto out_rx_hyp_unlock;
>> 
>>     /*
>>      * ffa_partition_info_get() succeeded so we now own the RX buffer we
>> -     * share with the SPMC. We must give it back using ffa_rx_release()
>> +     * share with the SPMC. We must give it back using ffa_hyp_rx_release()
>>      * once we've copied the content.
>>      */
>> 
>> @@ -193,15 +185,13 @@ void ffa_handle_partition_info_get(struct cpu_user_regs *regs)
>>         }
>>     }
>> 
>> -    ctx->rx_is_free = false;
>> -
>> out_rx_hyp_release:
>> -    ffa_rx_release();
>> -out_rx_buf_unlock:
>> +    ffa_hyp_rx_release();
>> +out_rx_hyp_unlock:
>>     spin_unlock(&ffa_rx_buffer_lock);
>> out_rx_release:
>> -    spin_unlock(&ctx->rx_lock);
>> -
>> +    if ( ret != FFA_RET_OK )
>> +        ffa_rx_release(d);
>> out:
>>     if ( ret )
>>         ffa_set_regs_error(regs, ret);
>> @@ -368,7 +358,7 @@ bool ffa_partinfo_init(void)
>>     ret = init_subscribers(count, fpi_size);
>> 
>> out:
>> -    ffa_rx_release();
>> +    ffa_hyp_rx_release();
>>     return ret;
>> }
>> 
>> diff --git a/xen/arch/arm/tee/ffa_private.h b/xen/arch/arm/tee/ffa_private.h
>> index c6903e335489..84b0f866a71e 100644
>> --- a/xen/arch/arm/tee/ffa_private.h
>> +++ b/xen/arch/arm/tee/ffa_private.h
>> @@ -263,6 +263,21 @@
>>                                     FFA_FUNC_ID(FFA_FUNC_MIN)) << 1 | \
>>                                     FFA_FUNC_CONV(id))
>> 
>> +/* Constituent memory region descriptor */
>> +struct ffa_address_range {
>> +    uint64_t address;
>> +    uint32_t page_count;
>> +    uint32_t reserved;
>> +};
>> +
>> +/* Composite memory region descriptor */
>> +struct ffa_mem_region {
>> +    uint32_t total_page_count;
>> +    uint32_t address_range_count;
>> +    uint64_t reserved;
>> +    struct ffa_address_range address_range_array[];
>> +};
>> +
>> struct ffa_ctx_notif {
>>     bool enabled;
>> 
>> @@ -290,7 +305,7 @@ struct ffa_ctx {
>>     struct ffa_ctx_notif notif;
>>     /*
>>      * tx_lock is used to serialize access to tx
>> -     * rx_lock is used to serialize access to rx
>> +     * rx_lock is used to serialize access to rx_is_free
>>      * lock is used for the rest in this struct
>>      */
>>     spinlock_t tx_lock;
>> @@ -329,7 +344,8 @@ void ffa_rxtx_domain_destroy(struct domain *d);
>> uint32_t ffa_handle_rxtx_map(uint32_t fid, register_t tx_addr,
>>                             register_t rx_addr, uint32_t page_count);
>> uint32_t ffa_handle_rxtx_unmap(void);
>> -int32_t ffa_handle_rx_release(void);
>> +int32_t ffa_rx_acquire(struct domain *d);
>> +int32_t ffa_rx_release(struct domain *d);
>> 
>> void ffa_notif_init(void);
>> void ffa_notif_init_interrupt(void);
>> @@ -418,7 +434,7 @@ static inline int32_t ffa_simple_call(uint32_t fid, register_t a1,
>>     return ffa_get_ret_code(&resp);
>> }
>> 
>> -static inline int32_t ffa_rx_release(void)
>> +static inline int32_t ffa_hyp_rx_release(void)
>> {
>>     return ffa_simple_call(FFA_RX_RELEASE, 0, 0, 0, 0);
>> }
>> diff --git a/xen/arch/arm/tee/ffa_rxtx.c b/xen/arch/arm/tee/ffa_rxtx.c
>> index cb54c76911fd..dacf33cc9efc 100644
>> --- a/xen/arch/arm/tee/ffa_rxtx.c
>> +++ b/xen/arch/arm/tee/ffa_rxtx.c
>> @@ -30,6 +30,17 @@ struct ffa_endpoint_rxtx_descriptor_1_1 {
>>     uint32_t tx_region_offs;
>> };
>> 
>> +static int32_t ffa_rxtx_map(paddr_t tx_addr, paddr_t rx_addr,
>> +                            uint32_t page_count)
>> +{
>> +    return ffa_simple_call(FFA_RXTX_MAP_64, tx_addr, rx_addr, page_count, 0);
>> +}
>> +
>> +static int32_t ffa_rxtx_unmap(void)
>> +{
>> +    return ffa_simple_call(FFA_RXTX_UNMAP, 0, 0, 0, 0);
>> +}
>> +
>> uint32_t ffa_handle_rxtx_map(uint32_t fid, register_t tx_addr,
>>                             register_t rx_addr, uint32_t page_count)
>> {
>> @@ -42,6 +53,9 @@ uint32_t ffa_handle_rxtx_map(uint32_t fid, register_t tx_addr,
>>     void *rx;
>>     void *tx;
>> 
>> +    /* The code is considering that we only get one page for now */
>> +    BUILD_BUG_ON(FFA_MAX_RXTX_PAGE_COUNT != 1);
>> +
>>     if ( !smccc_is_conv_64(fid) )
>>     {
>>         /*
>> @@ -87,6 +101,65 @@ uint32_t ffa_handle_rxtx_map(uint32_t fid, register_t tx_addr,
>>     if ( !rx )
>>         goto err_unmap_tx;
>> 
>> +    /*
>> +     * Transmit the RX/TX buffer information to the SPM if acquire is supported
>> +     * as the spec says that if not there is not need to acquire/release/map
>> +     * rxtx buffers from the SPMC
>> +     */
>> +    if ( ffa_fw_supports_fid(FFA_RX_ACQUIRE) )
>> +    {
>> +        struct ffa_endpoint_rxtx_descriptor_1_1 *rxtx_desc;
>> +        struct ffa_mem_region *mem_reg;
>> +
>> +        /* All must fit in our TX buffer */
>> +        BUILD_BUG_ON((sizeof(*rxtx_desc) + sizeof(*mem_reg)*2 +
>> +                      sizeof(struct ffa_address_range)*2) > FFA_PAGE_SIZE);
>> +
>> +        spin_lock(&ffa_tx_buffer_lock);
>> +        rxtx_desc = ffa_tx;
>> +
>> +        /*
>> +         * We have only one page for each so we pack everything:
>> +         * - rx region descriptor
>> +         * - rx region range
>> +         * - tx region descriptor
>> +         * - tx region range
>> +         */
>> +        rxtx_desc->sender_id = ffa_get_vm_id(d);
>> +        rxtx_desc->reserved = 0;
>> +        rxtx_desc->rx_region_offs = sizeof(*rxtx_desc);
>> +        rxtx_desc->tx_region_offs = sizeof(*rxtx_desc) +
>> +                                    offsetof(struct ffa_mem_region,
>> +                                             address_range_array[1]);
>> +
>> +        /* rx buffer */
>> +        mem_reg = ffa_tx + sizeof(*rxtx_desc);
>> +        mem_reg->total_page_count = 1;
>> +        mem_reg->address_range_count = 1;
>> +        mem_reg->reserved = 0;
>> +
>> +        mem_reg->address_range_array[0].address = page_to_maddr(rx_pg);
>> +        mem_reg->address_range_array[0].page_count = 1;
>> +        mem_reg->address_range_array[0].reserved = 0;
>> +
>> +        /* tx buffer */
>> +        mem_reg = ffa_tx + rxtx_desc->tx_region_offs;
>> +        mem_reg->total_page_count = 1;
>> +        mem_reg->address_range_count = 1;
>> +        mem_reg->reserved = 0;
>> +
>> +        mem_reg->address_range_array[0].address = page_to_maddr(tx_pg);
>> +        mem_reg->address_range_array[0].page_count = 1;
>> +        mem_reg->address_range_array[0].reserved = 0;
>> +
>> +        ret = ffa_rxtx_map(0, 0, 1);
> 
> I can't find the corresponding ffa_rxtx_unmap() for a VM's rxtx buffer.

Right, I will fix that in the next version.
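
For reference, something along these lines is what I have in mind (a rough
sketch only, assuming FF-A v1.1 where the hypervisor passes the VM endpoint
ID in bits [31:16] of w1 of FFA_RXTX_UNMAP; the exact encoding and the call
sites still need to be checked against the spec):

    /*
     * Hypothetical helper, not part of this patch: forward the unmap of a
     * VM's RXTX buffers to the SPMC. Like the map path, this would only be
     * done when FFA_RX_ACQUIRE is supported by the firmware.
     */
    static int32_t ffa_rxtx_unmap_spmc(uint16_t vm_id)
    {
        /* VM endpoint ID goes in bits [31:16] of the first argument */
        return ffa_simple_call(FFA_RXTX_UNMAP, ((register_t)vm_id) << 16,
                               0, 0, 0);
    }

It would be called from ffa_handle_rxtx_unmap() and from
ffa_rxtx_domain_destroy() once the hypervisor-side mappings are torn down.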

Cheers
Bertrand

> 
> Cheers,
> Jens
> 
>> +
>> +        spin_unlock(&ffa_tx_buffer_lock);
>> +
>> +        if ( ret != FFA_RET_OK )
>> +            goto err_unmap_tx;
>> +    }
>> +
>>     ctx->rx = rx;
>>     ctx->tx = tx;
>>     ctx->rx_pg = rx_pg;
>> @@ -132,34 +205,61 @@ uint32_t ffa_handle_rxtx_unmap(void)
>>     return FFA_RET_OK;
>> }
>> 
>> -int32_t ffa_handle_rx_release(void)
>> +int32_t ffa_rx_acquire(struct domain *d)
>> {
>>     int32_t ret = FFA_RET_DENIED;
>> -    struct domain *d = current->domain;
>>     struct ffa_ctx *ctx = d->arch.tee;
>> 
>> -    if ( !spin_trylock(&ctx->rx_lock) )
>> -        return FFA_RET_BUSY;
>> +    spin_lock(&ctx->rx_lock);
>> 
>> -    if ( !ctx->page_count || ctx->rx_is_free )
>> +    if ( !ctx->page_count )
>> +    {
>> +        ret = FFA_RET_DENIED;
>> +        goto out;
>> +    }
>> +
>> +    if ( !ctx->rx_is_free )
>> +    {
>> +        ret = FFA_RET_BUSY;
>>         goto out;
>> +    }
>> +
>> +    if ( ffa_fw_supports_fid(FFA_RX_ACQUIRE) )
>> +    {
>> +        ret = ffa_simple_call(FFA_RX_ACQUIRE, ffa_get_vm_id(d), 0, 0, 0);
>> +        if ( ret != FFA_RET_OK )
>> +            goto out;
>> +    }
>>     ret = FFA_RET_OK;
>> -    ctx->rx_is_free = true;
>> +    ctx->rx_is_free = false;
>> out:
>>     spin_unlock(&ctx->rx_lock);
>> 
>>     return ret;
>> }
>> 
>> -static int32_t ffa_rxtx_map(paddr_t tx_addr, paddr_t rx_addr,
>> -                            uint32_t page_count)
>> +int32_t ffa_rx_release(struct domain *d)
>> {
>> -    return ffa_simple_call(FFA_RXTX_MAP_64, tx_addr, rx_addr, page_count, 0);
>> -}
>> +    int32_t ret = FFA_RET_DENIED;
>> +    struct ffa_ctx *ctx = d->arch.tee;
>> 
>> -static int32_t ffa_rxtx_unmap(void)
>> -{
>> -    return ffa_simple_call(FFA_RXTX_UNMAP, 0, 0, 0, 0);
>> +    spin_lock(&ctx->rx_lock);
>> +
>> +    if ( !ctx->page_count || ctx->rx_is_free )
>> +        goto out;
>> +
>> +    if ( ffa_fw_supports_fid(FFA_RX_ACQUIRE) )
>> +    {
>> +        ret = ffa_simple_call(FFA_RX_RELEASE, ffa_get_vm_id(d), 0, 0, 0);
>> +        if ( ret != FFA_RET_OK )
>> +            goto out;
>> +    }
>> +    ret = FFA_RET_OK;
>> +    ctx->rx_is_free = true;
>> +out:
>> +    spin_unlock(&ctx->rx_lock);
>> +
>> +    return ret;
>> }
>> 
>> void ffa_rxtx_domain_destroy(struct domain *d)
>> diff --git a/xen/arch/arm/tee/ffa_shm.c b/xen/arch/arm/tee/ffa_shm.c
>> index 29675f9ba3f7..d628c1b70609 100644
>> --- a/xen/arch/arm/tee/ffa_shm.c
>> +++ b/xen/arch/arm/tee/ffa_shm.c
>> @@ -16,21 +16,6 @@
>> 
>> #include "ffa_private.h"
>> 
>> -/* Constituent memory region descriptor */
>> -struct ffa_address_range {
>> -    uint64_t address;
>> -    uint32_t page_count;
>> -    uint32_t reserved;
>> -};
>> -
>> -/* Composite memory region descriptor */
>> -struct ffa_mem_region {
>> -    uint32_t total_page_count;
>> -    uint32_t address_range_count;
>> -    uint64_t reserved;
>> -    struct ffa_address_range address_range_array[];
>> -};
>> -
>> /* Memory access permissions descriptor */
>> struct ffa_mem_access_perm {
>>     uint16_t endpoint_id;
>> --
>> 2.39.5 (Apple Git-154)
>> 
>> 


 

