
Re: [XEN PATCH v7 14/20] xen/arm: ffa: support guest FFA_PARTITION_INFO_GET


  • To: Jens Wiklander <jens.wiklander@xxxxxxxxxx>
  • From: Bertrand Marquis <Bertrand.Marquis@xxxxxxx>
  • Date: Fri, 3 Mar 2023 13:50:05 +0000
  • Cc: Xen-devel <xen-devel@xxxxxxxxxxxxxxxxxxxx>, Marc Bonnici <Marc.Bonnici@xxxxxxx>, Achin Gupta <Achin.Gupta@xxxxxxx>, Volodymyr Babchuk <volodymyr_babchuk@xxxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, Julien Grall <julien@xxxxxxx>
  • Delivery-date: Fri, 03 Mar 2023 13:50:45 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>


> On 3 Mar 2023, at 14:17, Jens Wiklander <jens.wiklander@xxxxxxxxxx> wrote:
> 
> Hi Bertrand,
> 
> On Fri, Mar 3, 2023 at 10:51 AM Bertrand Marquis
> <Bertrand.Marquis@xxxxxxx> wrote:
>> 
>> Hi Jens,
>> 
>>> On 22 Feb 2023, at 16:33, Jens Wiklander <jens.wiklander@xxxxxxxxxx> wrote:
>>> 
>>> Adds support in the mediator to handle FFA_PARTITION_INFO_GET requests
>>> from a guest. The requests are forwarded to the SPMC and the response is
>>> translated according to the FF-A version in use by the guest.
>>> 
>>> Using FFA_PARTITION_INFO_GET changes the owner of the RX buffer to the
>>> caller (the guest in this case), so once it is done with the buffer it
>>> must be released using FFA_RX_RELEASE before another call can be made.
>>> 
>>> Signed-off-by: Jens Wiklander <jens.wiklander@xxxxxxxxxx>
>>> ---
>>> xen/arch/arm/tee/ffa.c | 126 ++++++++++++++++++++++++++++++++++++++++-
>>> 1 file changed, 124 insertions(+), 2 deletions(-)
>>> 
>>> diff --git a/xen/arch/arm/tee/ffa.c b/xen/arch/arm/tee/ffa.c
>>> index 953b6dfd5eca..3571817c0bcd 100644
>>> --- a/xen/arch/arm/tee/ffa.c
>>> +++ b/xen/arch/arm/tee/ffa.c
>>> @@ -141,6 +141,12 @@
>>> #define FFA_MSG_POLL                    0x8400006AU
>>> 
>>> /* Partition information descriptor */
>>> +struct ffa_partition_info_1_0 {
>>> +    uint16_t id;
>>> +    uint16_t execution_context;
>>> +    uint32_t partition_properties;
>>> +};
>>> +
>>> struct ffa_partition_info_1_1 {
>>>    uint16_t id;
>>>    uint16_t execution_context;
>>> @@ -157,9 +163,8 @@ struct ffa_ctx {
>>>    uint32_t guest_vers;
>>>    bool tx_is_mine;
>>>    bool interrupted;
>>> +    spinlock_t lock;
>>> };
>>> -
>>> -
>> 
>> This removes two empty lines (a previous patch wrongly added one), but
>> one empty line is required here.
> 
> I'll fix it.
> 
>> 
>>> /* Negotiated FF-A version to use with the SPMC */
>>> static uint32_t ffa_version __ro_after_init;
>>> 
>>> @@ -173,10 +178,16 @@ static unsigned int subscr_vm_destroyed_count __read_mostly;
>>> * Our rx/tx buffers shared with the SPMC.
>>> *
>>> * ffa_page_count is the number of pages used in each of these buffers.
>>> + *
>>> + * The RX buffer is protected from concurrent usage with ffa_rx_buffer_lock.
>>> + * Note that the SPMC is also tracking the ownership of our RX buffer so
>>> + * for calls which use our RX buffer to deliver a result we must call
>>> + * ffa_rx_release() to let the SPMC know that we're done with the buffer.
>>> */
>>> static void *ffa_rx __read_mostly;
>>> static void *ffa_tx __read_mostly;
>>> static unsigned int ffa_page_count __read_mostly;
>>> +static DEFINE_SPINLOCK(ffa_rx_buffer_lock);
>>> 
>>> static bool ffa_get_version(uint32_t *vers)
>>> {
>>> @@ -463,6 +474,98 @@ static uint32_t handle_rxtx_unmap(void)
>>>    return FFA_RET_OK;
>>> }
>>> 
>>> +static uint32_t handle_partition_info_get(uint32_t w1, uint32_t w2, uint32_t w3,
>>> +                                          uint32_t w4, uint32_t w5,
>>> +                                          uint32_t *count)
>>> +{
>>> +    bool query_count_only = w5 & FFA_PARTITION_INFO_GET_COUNT_FLAG;
>>> +    uint32_t w5_mask = 0;
>>> +    uint32_t ret = FFA_RET_DENIED;
>>> +    struct domain *d = current->domain;
>>> +    struct ffa_ctx *ctx = d->arch.tee;
>>> +
>>> +    /*
>>> +     * FF-A v1.0 has w5 MBZ while v1.1 allows
>>> +     * FFA_PARTITION_INFO_GET_COUNT_FLAG to be non-zero.
>>> +     */
>>> +    if ( ctx->guest_vers == FFA_VERSION_1_1 )
>>> +        w5_mask = FFA_PARTITION_INFO_GET_COUNT_FLAG;
>>> +    if ( w5 & ~w5_mask )
>>> +        return FFA_RET_INVALID_PARAMETERS;
>>> +
>>> +    if ( query_count_only )
>>> +        return ffa_partition_info_get(w1, w2, w3, w4, w5, count);
>> 
>> This code seems a bit too complex.
>> I would suggest the following:
>> 
>> if ( w5 & FFA_PARTITION_INFO_GET_COUNT_FLAG )
>> {
>>     if ( ctx->guest_vers == FFA_VERSION_1_1 )
>>         return ffa_partition_info_get(w1, w2, w3, w4, w5, count);
>>     else
>>         return FFA_RET_INVALID_PARAMETERS;
>> }
> 
> OK, I can use that. I'll have to add a
> 
> if ( w5 )
>     return FFA_RET_INVALID_PARAMETERS;
> 
> check as well, since the rest of the bits must be zero.

ok
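
Combined, the whole check could look something like this (an untested
sketch merging your extra w5 check with my suggestion, using the names
from the patch):

    /* Only the count flag may ever be set in w5. */
    if ( w5 & ~FFA_PARTITION_INFO_GET_COUNT_FLAG )
        return FFA_RET_INVALID_PARAMETERS;

    if ( w5 & FFA_PARTITION_INFO_GET_COUNT_FLAG )
    {
        /* Only v1.1 guests may ask for the count alone. */
        if ( ctx->guest_vers == FFA_VERSION_1_1 )
            return ffa_partition_info_get(w1, w2, w3, w4, w5, count);
        else
            return FFA_RET_INVALID_PARAMETERS;
    }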

> 
>> 
>>> +
>>> +    if ( !ffa_page_count )
>>> +        return FFA_RET_DENIED;
>>> +
>>> +    spin_lock(&ctx->lock);
>>> +    spin_lock(&ffa_rx_buffer_lock);
>>> +    if ( !ctx->page_count || !ctx->tx_is_mine )
>> 
>> If I understand correctly, tx_is_mine protects the guest RX buffer
>> until FFA_RX_RELEASE is called by the guest, so that we do not write
>> to it before the guest has retrieved the data from it.
>> 
>> The name is very misleading; maybe rx_is_writable or rx_is_free would
>> be better?
> 
> The FF-A specification talks about ownership of the TX buffer (from
> the VM's point of view), hence the name.
> I'll change it to rx_is_free to be more intuitive without the spec.

Yes, but in the code it is quite unclear, so it is better to rename it here.
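
i.e. something like this (illustration only, using the rx_is_free name
you propose):

    struct ffa_ctx {
        ...
        /*
         * True when the guest has released its RX buffer with
         * FFA_RX_RELEASE, i.e. when we may write to it again.
         */
        bool rx_is_free;
        ...
    };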

> 
>> 
>> Also, it would be more efficient to test it before taking ffa_rx_buffer_lock.
> 
> Yes, I'll change that.
> 
>> 
>> 
>>> +        goto out;
>>> +    ret = ffa_partition_info_get(w1, w2, w3, w4, w5, count);
>>> +    if ( ret )
>>> +        goto out;
>>> +
>>> +    if ( ctx->guest_vers == FFA_VERSION_1_0 )
>>> +    {
>>> +        size_t n;
>>> +        struct ffa_partition_info_1_1 *src = ffa_rx;
>>> +        struct ffa_partition_info_1_0 *dst = ctx->rx;
>>> +
>>> +        if ( ctx->page_count * FFA_PAGE_SIZE < *count * sizeof(*dst) )
>>> +        {
>>> +            ret = FFA_RET_NO_MEMORY;
>>> +            goto out_rx_release;
>>> +        }
>>> +
>>> +        for ( n = 0; n < *count; n++ )
>>> +        {
>>> +            dst[n].id = src[n].id;
>>> +            dst[n].execution_context = src[n].execution_context;
>>> +            dst[n].partition_properties = src[n].partition_properties;
>>> +        }
>>> +    }
>>> +    else
>>> +    {
>>> +        size_t sz = *count * sizeof(struct ffa_partition_info_1_1);
>>> +
>>> +        if ( ctx->page_count * FFA_PAGE_SIZE < sz )
>>> +        {
>>> +            ret = FFA_RET_NO_MEMORY;
>>> +            goto out_rx_release;
>>> +        }
>>> +
>>> +
>>> +        memcpy(ctx->rx, ffa_rx, sz);
>>> +    }
>>> +    ctx->tx_is_mine = false;
>> 
>> at this point we have no reason to hold ctx->lock
> 
> ctx->lock is special: we're never supposed to have contention on that
> lock. I believe that we could in principle use spin_trylock() instead
> and return FFA_RET_BUSY if it fails, but that might be a bit too much.
> The VM is supposed to synchronize calls that use the RXTX buffers, so
> unlocking ctx->lock early gains nothing for well-behaved guests. I'd
> prefer to keep things simple and unlock in reverse order, if you
> don't mind. I'll add a comment.

Please add a comment and a TODO: we have very big locked sections here,
and since Xen is not preemptible, having one core blocked here by
another core doing something is a concern.

If a VM is expected to synchronize its own calls, then we might want to
switch the ctx lock to use trylock and return FFA_RET_BUSY.
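
Roughly (a sketch only, assuming FFA_RET_BUSY gets a define per the
FF-A spec):

    if ( !spin_trylock(&ctx->lock) )
        return FFA_RET_BUSY;

A guest that serializes its own RXTX calls would never see the BUSY,
while a misbehaving one could no longer keep a Xen core spinning here.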

> 
>> 
>>> +out_rx_release:
>>> +    ffa_rx_release();
>> 
>> There should be no case where we do the release without unlocking.
>> 
>> It might be cleaner to have 2 functions ffa_rx_get and ffa_rx_release
>> handling both the lock and the rx_release message.
> 
> I'd like to keep ffa_rx_release() as a dumb wrapper. ffa_rx_release()
> is also used in init_subscribers(), where we don't use any locks.
> ffa_rx_release() is called after a successful call to
> ffa_partition_info_get(), where we also gained ownership of our RX
> buffer. Things might be a bit too intertwined for further abstraction.
> I'll add a comment explaining the relationship between
> ffa_partition_info_get() and ffa_rx_release().

Taking the lock where it is not already done would not create any
problem, and it would make the implementation a bit more robust.

If at some point some FF-A services are used on different cores during
the boot phase, it might make sense to take the lock even in situations
where there should be no concurrency, just to make the code safer.

Please tell me what you think here.
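
To make it concrete, I was thinking of something along these lines (a
hypothetical sketch; ffa_rx_put is only a placeholder name so it does
not clash with the existing ffa_rx_release() wrapper):

    static void *ffa_rx_get(void)
    {
        spin_lock(&ffa_rx_buffer_lock);
        return ffa_rx;
    }

    static void ffa_rx_put(void)
    {
        /* Hand our RX buffer back to the SPMC, then drop the lock. */
        ffa_rx_release();
        spin_unlock(&ffa_rx_buffer_lock);
    }

With such a pairing there is no way to do the release without also
unlocking.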

> 
>> 
>>> +out:
>>> +    spin_unlock(&ffa_rx_buffer_lock);
>> 
>> This should stay with ffa_rx_release().
> 
> That depends on whether you accept my explanation above.


Cheers
Bertrand

> 
> Thanks,
> Jens
> 
>> 
>> Cheers
>> Bertrand
>> 
>>> +    spin_unlock(&ctx->lock);
>>> +
>>> +    return ret;
>>> +}
>>> +
>>> +static uint32_t handle_rx_release(void)
>>> +{
>>> +    uint32_t ret = FFA_RET_DENIED;
>>> +    struct domain *d = current->domain;
>>> +    struct ffa_ctx *ctx = d->arch.tee;
>>> +
>>> +    spin_lock(&ctx->lock);
>>> +    if ( !ctx->page_count || ctx->tx_is_mine )
>>> +        goto out;
>>> +    ret = FFA_RET_OK;
>>> +    ctx->tx_is_mine = true;
>>> +out:
>>> +    spin_unlock(&ctx->lock);
>>> +
>>> +    return ret;
>>> +}
>>> +
>>> static void handle_msg_send_direct_req(struct cpu_user_regs *regs, uint32_t fid)
>>> {
>>>    struct arm_smccc_1_2_regs arg = { .a0 = fid, };
>>> @@ -528,6 +631,7 @@ static bool ffa_handle_call(struct cpu_user_regs *regs)
>>>    uint32_t fid = get_user_reg(regs, 0);
>>>    struct domain *d = current->domain;
>>>    struct ffa_ctx *ctx = d->arch.tee;
>>> +    uint32_t count;
>>>    int e;
>>> 
>>>    if ( !ctx )
>>> @@ -559,6 +663,24 @@ static bool ffa_handle_call(struct cpu_user_regs *regs)
>>>        else
>>>            set_regs_success(regs, 0, 0);
>>>        return true;
>>> +    case FFA_PARTITION_INFO_GET:
>>> +        e = handle_partition_info_get(get_user_reg(regs, 1),
>>> +                                      get_user_reg(regs, 2),
>>> +                                      get_user_reg(regs, 3),
>>> +                                      get_user_reg(regs, 4),
>>> +                                      get_user_reg(regs, 5), &count);
>>> +        if ( e )
>>> +            set_regs_error(regs, e);
>>> +        else
>>> +            set_regs_success(regs, count, 0);
>>> +        return true;
>>> +    case FFA_RX_RELEASE:
>>> +        e = handle_rx_release();
>>> +        if ( e )
>>> +            set_regs_error(regs, e);
>>> +        else
>>> +            set_regs_success(regs, 0, 0);
>>> +        return true;
>>>    case FFA_MSG_SEND_DIRECT_REQ_32:
>>> #ifdef CONFIG_ARM_64
>>>    case FFA_MSG_SEND_DIRECT_REQ_64:
>>> --
>>> 2.34.1
