
Re: [PATCH 03/12] xen/arm: ffa: Harden shm page parsing


  • To: Bertrand Marquis <bertrand.marquis@xxxxxxx>
  • From: Jens Wiklander <jens.wiklander@xxxxxxxxxx>
  • Date: Mon, 9 Feb 2026 10:31:15 +0100
  • Cc: xen-devel@xxxxxxxxxxxxxxxxxxxx, Volodymyr Babchuk <volodymyr_babchuk@xxxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, Julien Grall <julien@xxxxxxx>, Michal Orzel <michal.orzel@xxxxxxx>
  • Delivery-date: Mon, 09 Feb 2026 09:31:46 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Hi Bertrand,

On Tue, Feb 3, 2026 at 6:38 PM Bertrand Marquis
<bertrand.marquis@xxxxxxx> wrote:
>
> get_shm_pages() uses unchecked address arithmetic and does not enforce
> alignment, so a malformed descriptor can wrap an address around or slip
> a misaligned range through validation. The reclaim path also open-codes
> the handle-to-register-pair conversion in several places.
>
> Harden page parsing and reclaim handling:
> - add ffa_safe_addr_add() and use it to detect address overflows
> - enforce alignment checks in get_shm_pages() and return FF-A errors
> - introduce ffa_secure_reclaim() and use it for MEM_RECLAIM and teardown
> - simplify ffa_mem_share() argument handling and allow max page count
>
> Functional impact: invalid or misaligned memory ranges now fail earlier
> with proper error codes; behavior for valid descriptors is unchanged.
>
> Signed-off-by: Bertrand Marquis <bertrand.marquis@xxxxxxx>
> ---
>  xen/arch/arm/tee/ffa_private.h | 11 +++++++
>  xen/arch/arm/tee/ffa_shm.c     | 57 +++++++++++++++++-----------------
>  2 files changed, 40 insertions(+), 28 deletions(-)
>
> diff --git a/xen/arch/arm/tee/ffa_private.h b/xen/arch/arm/tee/ffa_private.h
> index b625f1c72914..58562d8e733c 100644
> --- a/xen/arch/arm/tee/ffa_private.h
> +++ b/xen/arch/arm/tee/ffa_private.h
> @@ -632,4 +632,15 @@ static inline void ffa_uuid_set(struct ffa_uuid *id, uint32_t val0,
>      id->val[1] = ((uint64_t)val3 << 32U) | val2;
>  }
>
> +/*
> + * Common overflow-safe helper to verify that adding a number of pages to an
> + * address will not wrap around.
> + */
> +static inline bool ffa_safe_addr_add(uint64_t addr, uint64_t pages)
> +{
> +    uint64_t off = pages * FFA_PAGE_SIZE;
> +
> +    return (off / FFA_PAGE_SIZE) == pages && addr <= UINT64_MAX - off;
> +}
> +
>  #endif /*__FFA_PRIVATE_H__*/
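
The division-then-compare guards the multiplication and the subtraction
guards the addition, so both wrap cases are covered. For anyone reading
along, a quick standalone illustration of the helper's behavior (my own
sketch, not part of the patch, taking FFA_PAGE_SIZE as the 4 KiB FF-A
granule):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FFA_PAGE_SIZE 4096ULL

    static bool ffa_safe_addr_add(uint64_t addr, uint64_t pages)
    {
        uint64_t off = pages * FFA_PAGE_SIZE;

        return (off / FFA_PAGE_SIZE) == pages && addr <= UINT64_MAX - off;
    }

    int main(void)
    {
        /* pages * FFA_PAGE_SIZE wraps uint64_t -> rejected (prints 0) */
        printf("%d\n", ffa_safe_addr_add(0, UINT64_MAX / 2));
        /* addr + off wraps uint64_t -> rejected (prints 0) */
        printf("%d\n", ffa_safe_addr_add(UINT64_MAX - FFA_PAGE_SIZE, 2));
        /* well-formed range -> accepted (prints 1) */
        printf("%d\n", ffa_safe_addr_add(0x80000000ULL, 16));
        return 0;
    }
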
> diff --git a/xen/arch/arm/tee/ffa_shm.c b/xen/arch/arm/tee/ffa_shm.c
> index 90800e44a86a..4c0b45cde6ee 100644
> --- a/xen/arch/arm/tee/ffa_shm.c
> +++ b/xen/arch/arm/tee/ffa_shm.c
> @@ -96,16 +96,14 @@ struct ffa_shm_mem {
>      struct page_info *pages[];
>  };
>
> -static int32_t ffa_mem_share(uint32_t tot_len, uint32_t frag_len,
> -                             register_t addr, uint32_t pg_count,
> -                             uint64_t *handle)
> +static int32_t ffa_mem_share(uint32_t tot_len, uint64_t *handle)
>  {
>      struct arm_smccc_1_2_regs arg = {
>          .a0 = FFA_MEM_SHARE_64,
>          .a1 = tot_len,
> -        .a2 = frag_len,
> -        .a3 = addr,
> -        .a4 = pg_count,
> +        .a2 = tot_len,
> +        .a3 = 0,
> +        .a4 = 0,
>      };
>      struct arm_smccc_1_2_regs resp;
>
> @@ -131,12 +129,16 @@ static int32_t ffa_mem_share(uint32_t tot_len, uint32_t frag_len,
>      }
>  }
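
The doubled tot_len in .a1/.a2 is correct for a single-fragment
transmission, but at first glance it reads like a typo. Maybe worth a
short comment in the code; something like (suggestion only):

        .a1 = tot_len,
        .a2 = tot_len,  /* single fragment: fragment length == total length */
        .a3 = 0,        /* MBZ when the descriptor is in the TX buffer */
        .a4 = 0,        /* MBZ when the descriptor is in the TX buffer */
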
>
> -static int32_t ffa_mem_reclaim(uint32_t handle_lo, uint32_t handle_hi,
> -                               uint32_t flags)
> +static int32_t ffa_secure_reclaim(struct ffa_shm_mem *shm, uint32_t flags)

I agree with moving the uint64_to_regpair() call into this function,
but I'm puzzled by the new name. What's secure?

>  {
> +    register_t handle_hi;
> +    register_t handle_lo;
> +
>      if ( !ffa_fw_supports_fid(FFA_MEM_RECLAIM) )
>          return FFA_RET_NOT_SUPPORTED;
>
> +    uint64_to_regpair(&handle_hi, &handle_lo, shm->handle);
> +
>      return ffa_simple_call(FFA_MEM_RECLAIM, handle_lo, handle_hi, flags, 0);
>  }
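
For context, uint64_to_regpair() just decomposes the 64-bit handle into
the two halves FFA_MEM_RECLAIM expects (handle low in the first call
argument, handle high in the second); roughly (my paraphrase, not the
actual helper):

    *reg_hi = val >> 32;        /* upper 32 bits of the handle */
    *reg_lo = (uint32_t)val;    /* lower 32 bits of the handle */
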
>
> @@ -145,7 +147,7 @@ static int32_t ffa_mem_reclaim(uint32_t handle_lo, uint32_t handle_hi,
>   * this function fails then the caller is still expected to call
>   * put_shm_pages() as a cleanup.
>   */
> -static int get_shm_pages(struct domain *d, struct ffa_shm_mem *shm,
> +static int32_t get_shm_pages(struct domain *d, struct ffa_shm_mem *shm,
>                           const struct ffa_address_range *range,
>                           uint32_t range_count)
>  {
> @@ -156,17 +158,26 @@ static int get_shm_pages(struct domain *d, struct ffa_shm_mem *shm,
>      p2m_type_t t;
>      uint64_t addr;
>      uint64_t page_count;
> +    uint64_t gaddr;
>
>      for ( n = 0; n < range_count; n++ )
>      {
>          page_count = ACCESS_ONCE(range[n].page_count);
>          addr = ACCESS_ONCE(range[n].address);
> +
> +        if ( !IS_ALIGNED(addr, FFA_PAGE_SIZE) )
> +            return FFA_RET_INVALID_PARAMETERS;
> +
>          for ( m = 0; m < page_count; m++ )
>          {
>              if ( pg_idx >= shm->page_count )
>                  return FFA_RET_INVALID_PARAMETERS;
>
> -            gfn = gaddr_to_gfn(addr + m * FFA_PAGE_SIZE);
> +            if ( !ffa_safe_addr_add(addr, m) )
> +                return FFA_RET_INVALID_PARAMETERS;
> +
> +            gaddr = addr + m * FFA_PAGE_SIZE;
> +            gfn = gaddr_to_gfn(gaddr);
>              shm->pages[pg_idx] = get_page_from_gfn(d, gfn_x(gfn), &t,
>                                                    P2M_ALLOC);
>              if ( !shm->pages[pg_idx] )
> @@ -180,7 +191,7 @@ static int get_shm_pages(struct domain *d, struct ffa_shm_mem *shm,
>
>      /* The ranges must add up */
>      if ( pg_idx < shm->page_count )
> -            return FFA_RET_INVALID_PARAMETERS;
> +        return FFA_RET_INVALID_PARAMETERS;
>
>      return FFA_RET_OK;
>  }
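
One more thought on this loop: since m is always strictly less than
page_count, a single ffa_safe_addr_add(addr, page_count) next to the
alignment check would cover every iteration and keep the per-page loop
lean. Untested sketch of what I mean:

    if ( !IS_ALIGNED(addr, FFA_PAGE_SIZE) ||
         !ffa_safe_addr_add(addr, page_count) )
        return FFA_RET_INVALID_PARAMETERS;

Not a blocker, just avoids redoing the same check page_count times.
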
> @@ -198,15 +209,11 @@ static void put_shm_pages(struct ffa_shm_mem *shm)
>
>  static bool inc_ctx_shm_count(struct domain *d, struct ffa_ctx *ctx)
>  {
> -    bool ret = true;
> +    bool ret = false;
>
>      spin_lock(&ctx->lock);
>
> -    if ( ctx->shm_count >= FFA_MAX_SHM_COUNT )
> -    {
> -        ret = false;
> -    }
> -    else
> +    if ( ctx->shm_count < FFA_MAX_SHM_COUNT )
>      {
>          /*
>           * If this is the first shm added, increase the domain reference
> @@ -217,6 +224,7 @@ static bool inc_ctx_shm_count(struct domain *d, struct ffa_ctx *ctx)
>              get_knownalive_domain(d);
>
>          ctx->shm_count++;
> +        ret = true;
>      }
>
>      spin_unlock(&ctx->lock);
> @@ -251,7 +259,7 @@ static struct ffa_shm_mem *alloc_ffa_shm_mem(struct domain *d,
>      struct ffa_ctx *ctx = d->arch.tee;
>      struct ffa_shm_mem *shm;
>
> -    if ( page_count >= FFA_MAX_SHM_PAGE_COUNT )
> +    if ( page_count > FFA_MAX_SHM_PAGE_COUNT )
>          return NULL;
>      if ( !inc_ctx_shm_count(d, ctx) )
>          return NULL;
> @@ -367,7 +375,7 @@ static int share_shm(struct ffa_shm_mem *shm)
>          init_range(addr_range, pa);
>      }
>
> -    ret = ffa_mem_share(tot_len, frag_len, 0, 0, &shm->handle);

Please remove frag_len from share_shm() since it's not needed any longer.

Cheers,
Jens

> +    ret = ffa_mem_share(tot_len, &shm->handle);
>
>  out:
>      ffa_rxtx_spmc_tx_release();
> @@ -637,8 +645,6 @@ int32_t ffa_handle_mem_reclaim(uint64_t handle, uint32_t flags)
>      struct domain *d = current->domain;
>      struct ffa_ctx *ctx = d->arch.tee;
>      struct ffa_shm_mem *shm;
> -    register_t handle_hi;
> -    register_t handle_lo;
>      int32_t ret;
>
>      if ( !ffa_fw_supports_fid(FFA_MEM_RECLAIM) )
> @@ -652,8 +658,7 @@ int32_t ffa_handle_mem_reclaim(uint64_t handle, uint32_t flags)
>      if ( !shm )
>          return FFA_RET_INVALID_PARAMETERS;
>
> -    uint64_to_regpair(&handle_hi, &handle_lo, handle);
> -    ret = ffa_mem_reclaim(handle_lo, handle_hi, flags);
> +    ret = ffa_secure_reclaim(shm, flags);
>
>      if ( ret )
>      {
> @@ -677,11 +682,7 @@ bool ffa_shm_domain_destroy(struct domain *d)
>
>      list_for_each_entry_safe(shm, tmp, &ctx->shm_list, list)
>      {
> -        register_t handle_hi;
> -        register_t handle_lo;
> -
> -        uint64_to_regpair(&handle_hi, &handle_lo, shm->handle);
> -        res = ffa_mem_reclaim(handle_lo, handle_hi, 0);
> +        res = ffa_secure_reclaim(shm, 0);
>          switch ( res ) {
>          case FFA_RET_OK:
>              printk(XENLOG_G_DEBUG "%pd: ffa: Reclaimed handle %#lx\n",
> --
> 2.50.1 (Apple Git-155)
>



 

