Re: [XEN PATCH v10 13/24] xen/arm: ffa: support mapping guest RX/TX buffers
Hi Bertrand,
On Tue, Jul 18, 2023 at 12:10 PM Bertrand Marquis
<Bertrand.Marquis@xxxxxxx> wrote:
>
> Hi Jens,
>
> > On 17 Jul 2023, at 09:20, Jens Wiklander <jens.wiklander@xxxxxxxxxx> wrote:
> >
> > Adds support in the mediator to map and unmap the RX and TX buffers
> > provided by the guest using the two FF-A functions FFA_RXTX_MAP and
> > FFA_RXTX_UNMAP.
> >
> > These buffers are later used to transmit data that cannot be passed in
> > registers only.
> >
> > Signed-off-by: Jens Wiklander <jens.wiklander@xxxxxxxxxx>
> > ---
> > xen/arch/arm/tee/ffa.c | 138 +++++++++++++++++++++++++++++++++++++++++
> > 1 file changed, 138 insertions(+)
> >
> > diff --git a/xen/arch/arm/tee/ffa.c b/xen/arch/arm/tee/ffa.c
> > index d755363de686..ffabb5ed0a80 100644
> > --- a/xen/arch/arm/tee/ffa.c
> > +++ b/xen/arch/arm/tee/ffa.c
> > @@ -74,6 +74,12 @@
> > */
> > #define FFA_RXTX_PAGE_COUNT 1
> >
> > +/*
> > + * Limits the number of pages RX/TX buffers guests can map.
>
> Typo: s/Limits/Limit/
OK
>
> > + * TODO support a larger number.
> > + */
> > +#define FFA_MAX_RXTX_PAGE_COUNT 1
> > +
> > /*
> > * Flags and field values used for the MSG_SEND_DIRECT_REQ/RESP:
> > * BIT(31): Framework or partition message
> > @@ -169,6 +175,12 @@ struct ffa_partition_info_1_1 {
> > };
> >
> > struct ffa_ctx {
> > + void *rx;
> > + const void *tx;
> > + struct page_info *rx_pg;
> > + struct page_info *tx_pg;
> > + /* Number of 4kB pages in each of rx/rx_pg and tx/tx_pg */
> > + unsigned int page_count;
> > /* FF-A version used by the guest */
> > uint32_t guest_vers;
> > /*
> > @@ -176,6 +188,7 @@ struct ffa_ctx {
> > * ffa_domain_teardown() to know which SPs need to be signalled.
> > */
> > uint16_t create_signal_count;
> > + bool rx_is_free;
> > };
> >
> > /* Negotiated FF-A version to use with the SPMC */
> > @@ -371,6 +384,11 @@ static void set_regs(struct cpu_user_regs *regs, register_t v0, register_t v1,
> > set_user_reg(regs, 7, v7);
> > }
> >
> > +static void set_regs_error(struct cpu_user_regs *regs, uint32_t error_code)
> > +{
> > + set_regs(regs, FFA_ERROR, 0, error_code, 0, 0, 0, 0, 0);
> > +}
> > +
> > static void set_regs_success(struct cpu_user_regs *regs, uint32_t w2,
> > uint32_t w3)
> > {
> > @@ -392,6 +410,106 @@ static void handle_version(struct cpu_user_regs *regs)
> > set_regs(regs, vers, 0, 0, 0, 0, 0, 0, 0);
> > }
> >
> > +static uint32_t handle_rxtx_map(uint32_t fid, register_t tx_addr,
> > + register_t rx_addr, uint32_t page_count)
> > +{
> > + uint32_t ret = FFA_RET_INVALID_PARAMETERS;
> > + struct domain *d = current->domain;
> > + struct ffa_ctx *ctx = d->arch.tee;
> > + struct page_info *tx_pg;
> > + struct page_info *rx_pg;
> > + p2m_type_t t;
> > + void *rx;
> > + void *tx;
> > +
> > + if ( !smccc_is_conv_64(fid) )
> > + {
> > + /*
> > + * Calls using the 32-bit calling convention must ignore the upper
> > + * 32 bits in the argument registers.
> > + */
> > + tx_addr &= UINT32_MAX;
> > + rx_addr &= UINT32_MAX;
> > + }
> > +
> > + if ( page_count > FFA_MAX_RXTX_PAGE_COUNT )
> > + {
> > + printk(XENLOG_ERR "ffa: RXTX_MAP: error: %u pages requested (limit %u)\n",
> > + page_count, FFA_MAX_RXTX_PAGE_COUNT);
> > + return FFA_RET_NOT_SUPPORTED;
> > + }
> > +
> > + /* Already mapped */
> > + if ( ctx->rx )
> > + return FFA_RET_DENIED;
> > +
> > + tx_pg = get_page_from_gfn(d, gfn_x(gaddr_to_gfn(tx_addr)), &t, P2M_ALLOC);
> > + if ( !tx_pg )
> > + return FFA_RET_INVALID_PARAMETERS;
>
> Please add a newline here
OK
>
> > + /* Only normal RW RAM for now */
> > + if ( t != p2m_ram_rw )
> > + goto err_put_tx_pg;
> > +
> > + rx_pg = get_page_from_gfn(d, gfn_x(gaddr_to_gfn(rx_addr)), &t, P2M_ALLOC);
> > + if ( !rx_pg )
> > + goto err_put_tx_pg;
>
> Please add a newline here
OK
Thanks,
Jens
>
> > + /* Only normal RW RAM for now */
> > + if ( t != p2m_ram_rw )
> > + goto err_put_rx_pg;
> > +
> > + tx = __map_domain_page_global(tx_pg);
> > + if ( !tx )
> > + goto err_put_rx_pg;
> > +
> > + rx = __map_domain_page_global(rx_pg);
> > + if ( !rx )
> > + goto err_unmap_tx;
> > +
> > + ctx->rx = rx;
> > + ctx->tx = tx;
> > + ctx->rx_pg = rx_pg;
> > + ctx->tx_pg = tx_pg;
> > + ctx->page_count = page_count;
> > + ctx->rx_is_free = true;
> > + return FFA_RET_OK;
> > +
> > +err_unmap_tx:
> > + unmap_domain_page_global(tx);
> > +err_put_rx_pg:
> > + put_page(rx_pg);
> > +err_put_tx_pg:
> > + put_page(tx_pg);
> > +
> > + return ret;
> > +}
> > +
> > +static void rxtx_unmap(struct ffa_ctx *ctx)
> > +{
> > + unmap_domain_page_global(ctx->rx);
> > + unmap_domain_page_global(ctx->tx);
> > + put_page(ctx->rx_pg);
> > + put_page(ctx->tx_pg);
> > + ctx->rx = NULL;
> > + ctx->tx = NULL;
> > + ctx->rx_pg = NULL;
> > + ctx->tx_pg = NULL;
> > + ctx->page_count = 0;
> > + ctx->rx_is_free = false;
> > +}
> > +
> > +static uint32_t handle_rxtx_unmap(void)
> > +{
> > + struct domain *d = current->domain;
> > + struct ffa_ctx *ctx = d->arch.tee;
> > +
> > + if ( !ctx->rx )
> > + return FFA_RET_INVALID_PARAMETERS;
> > +
> > + rxtx_unmap(ctx);
> > +
> > + return FFA_RET_OK;
> > +}
> > +
> > static void handle_msg_send_direct_req(struct cpu_user_regs *regs, uint32_t fid)
> > {
> > struct arm_smccc_1_2_regs arg = { .a0 = fid, };
> > @@ -448,6 +566,7 @@ static bool ffa_handle_call(struct cpu_user_regs *regs)
> > uint32_t fid = get_user_reg(regs, 0);
> > struct domain *d = current->domain;
> > struct ffa_ctx *ctx = d->arch.tee;
> > + int e;
> >
> > if ( !ctx )
> > return false;
> > @@ -460,6 +579,22 @@ static bool ffa_handle_call(struct cpu_user_regs *regs)
> > case FFA_ID_GET:
> > set_regs_success(regs, get_vm_id(d), 0);
> > return true;
> > + case FFA_RXTX_MAP_32:
> > + case FFA_RXTX_MAP_64:
> > + e = handle_rxtx_map(fid, get_user_reg(regs, 1), get_user_reg(regs, 2),
> > + get_user_reg(regs, 3));
> > + if ( e )
> > + set_regs_error(regs, e);
> > + else
> > + set_regs_success(regs, 0, 0);
> > + return true;
> > + case FFA_RXTX_UNMAP:
> > + e = handle_rxtx_unmap();
> > + if ( e )
> > + set_regs_error(regs, e);
> > + else
> > + set_regs_success(regs, 0, 0);
> > + return true;
> > case FFA_MSG_SEND_DIRECT_REQ_32:
> > case FFA_MSG_SEND_DIRECT_REQ_64:
> > handle_msg_send_direct_req(regs, fid);
> > @@ -551,6 +686,9 @@ static int ffa_domain_teardown(struct domain *d)
> > get_vm_id(d), subscr_vm_destroyed[n], res);
> > }
> >
> > + if ( ctx->rx )
> > + rxtx_unmap(ctx);
> > +
> > XFREE(d->arch.tee);
> >
> > return 0;
> > --
> > 2.34.1
> >
>
> Cheers
> Bertrand
>
>
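
For readers following the ABI rather than the patch mechanics, below is a minimal, illustrative guest-side sketch of the two calls as the handler above consumes them: w1 carries the TX buffer address, w2 the RX buffer address, w3 the page count, and on failure w0 holds FFA_ERROR with the error code in w2. The ffa_smc() helper and struct ffa_regs are assumptions made purely for this example and are not part of the patch; the FFA_* names refer to constants already defined earlier in ffa.c.

#include <stdint.h>

/*
 * Illustrative only: ffa_smc() is an assumed helper that issues an SMC with
 * w0-w3 loaded from its arguments and returns w0-w3 of the result.
 */
struct ffa_regs { uint64_t w0, w1, w2, w3; };
struct ffa_regs ffa_smc(uint64_t w0, uint64_t w1, uint64_t w2, uint64_t w3);

static int guest_map_rxtx(void *tx, void *rx)
{
    /* w1 = TX buffer base, w2 = RX buffer base, w3 = number of 4kB pages */
    struct ffa_regs r = ffa_smc(FFA_RXTX_MAP_64, (uintptr_t)tx, (uintptr_t)rx, 1);

    if ( r.w0 == FFA_ERROR )
        return (int)r.w2;    /* e.g. FFA_RET_NOT_SUPPORTED if w3 > 1 */
    return 0;
}

static int guest_unmap_rxtx(void)
{
    struct ffa_regs r = ffa_smc(FFA_RXTX_UNMAP, 0, 0, 0);

    /* FFA_RET_INVALID_PARAMETERS if no buffers are currently mapped */
    return r.w0 == FFA_ERROR ? (int)r.w2 : 0;
}

Per the handler in the patch, a second map attempt while buffers are already mapped returns FFA_RET_DENIED, and only normal p2m_ram_rw pages are accepted for now.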