
Re: [PATCH for-4.19???] x86/physdev: replace physdev_{,un}map_pirq() checking against DOMID_SELF



On Wed, 2024-06-12 at 10:44 +0200, Jan Beulich wrote:
> It's hardly ever correct to check for just DOMID_SELF, as guests have
> ways to figure out their domain IDs and hence could instead use those
> as inputs to respective hypercalls. Note, however, that for ordinary
> DomU-s the adjustment is relaxing things rather than tightening them,
> since - as a result of XSA-237 - the respective XSM checks would have
> rejected self (un)mapping attempts for other than the control domain.
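
As an aside, here is a minimal guest-side sketch of why the two
spellings have to be treated identically (hypothetical code, not part
of the patch; 'my_domid' is assumed to have been read from the guest's
"domid" xenstore node, and 'gsi' to be a GSI the guest is permitted to
map):

    struct physdev_map_pirq map = {
        .domid = my_domid,          /* explicit ID instead of DOMID_SELF */
        .type  = MAP_PIRQ_TYPE_GSI,
        .index = gsi,
        .pirq  = -1,                /* let Xen pick a pirq */
    };
    int rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map);

A guest issuing this with .domid = DOMID_SELF and one issuing it with
its own numeric ID must see the same behaviour, which the pointer
comparison against current->domain in the patch expresses directly.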
> 
> Since in physdev_map_pirq() handling overall is a little easier this
> way, move obtaining of the domain pointer into the caller. Doing the
> same for physdev_unmap_pirq() is just to keep both consistent in this
> regard. For both this has the advantage that it is now provable (by
> the build not failing) that there are no DOMID_SELF checks left (and
> none could easily be re-added).
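
To illustrate the "provable by the build" point: with the handlers now
taking a struct domain * rather than a domid_t, re-adding a DOMID_SELF
check wouldn't even compile, since no domid is in scope any more. A
sketch, condensed from the physdev_unmap_pirq() hunk further down:

    int physdev_unmap_pirq(struct domain *d, int pirq)
    {
        int ret = 0;

        /* if ( domid == DOMID_SELF )  <- would no longer compile:
         *                                'domid' is undeclared here */
        if ( d != current->domain || !is_hvm_domain(d) || !has_pirq(d) )
            ret = xsm_unmap_domain_pirq(XSM_DM_PRIV, d);

        return ret;
    }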
> 
> Fixes: 0b469cd68708 ("Interrupt remapping to PIRQs in HVM guests")
> Fixes: 9e1a3415b773 ("x86: fixes after emuirq changes")
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Release-Acked-By: Oleksii Kurochko <oleksii.kurochko@xxxxxxxxx>

~ Oleksii
> ---
> Note that the moving of rcu_lock_domain_by_any_id() is also going to
> help https://lists.xen.org/archives/html/xen-devel/2024-06/msg00206.html.
> 
> --- a/xen/arch/x86/physdev.c
> +++ b/xen/arch/x86/physdev.c
> @@ -18,9 +18,9 @@
>  #include <xsm/xsm.h>
>  #include <asm/p2m.h>
>  
> -int physdev_map_pirq(domid_t domid, int type, int *index, int *pirq_p,
> +int physdev_map_pirq(struct domain *d, int type, int *index, int *pirq_p,
>                       struct msi_info *msi);
> -int physdev_unmap_pirq(domid_t domid, int pirq);
> +int physdev_unmap_pirq(struct domain *d, int pirq);
>  
>  #include "x86_64/mmconfig.h"
>  
> @@ -88,13 +88,12 @@ static int physdev_hvm_map_pirq(
>      return ret;
>  }
>  
> -int physdev_map_pirq(domid_t domid, int type, int *index, int *pirq_p,
> +int physdev_map_pirq(struct domain *d, int type, int *index, int *pirq_p,
>                       struct msi_info *msi)
>  {
> -    struct domain *d = current->domain;
>      int ret;
>  
> -    if ( domid == DOMID_SELF && is_hvm_domain(d) && has_pirq(d) )
> +    if ( d == current->domain && is_hvm_domain(d) && has_pirq(d) )
>      {
>          /*
>           * Only makes sense for vector-based callback, else HVM-IRQ logic
> @@ -106,13 +105,9 @@ int physdev_map_pirq(domid_t domid, int
>          return physdev_hvm_map_pirq(d, type, index, pirq_p);
>      }
>  
> -    d = rcu_lock_domain_by_any_id(domid);
> -    if ( d == NULL )
> -        return -ESRCH;
> -
>      ret = xsm_map_domain_pirq(XSM_DM_PRIV, d);
>      if ( ret )
> -        goto free_domain;
> +        return ret;
>  
>      /* Verify or get irq. */
>      switch ( type )
> @@ -135,24 +130,17 @@ int physdev_map_pirq(domid_t domid, int
>          break;
>      }
>  
> - free_domain:
> -    rcu_unlock_domain(d);
>      return ret;
>  }
>  
> -int physdev_unmap_pirq(domid_t domid, int pirq)
> +int physdev_unmap_pirq(struct domain *d, int pirq)
>  {
> -    struct domain *d;
>      int ret = 0;
>  
> -    d = rcu_lock_domain_by_any_id(domid);
> -    if ( d == NULL )
> -        return -ESRCH;
> -
> -    if ( domid != DOMID_SELF || !is_hvm_domain(d) || !has_pirq(d) )
> +    if ( d != current->domain || !is_hvm_domain(d) || !has_pirq(d) )
>          ret = xsm_unmap_domain_pirq(XSM_DM_PRIV, d);
>      if ( ret )
> -        goto free_domain;
> +        return ret;
>  
>      if ( is_hvm_domain(d) && has_pirq(d) )
>      {
> @@ -160,8 +148,8 @@ int physdev_unmap_pirq(domid_t domid, in
>          if ( domain_pirq_to_emuirq(d, pirq) != IRQ_UNBOUND )
>              ret = unmap_domain_pirq_emuirq(d, pirq);
>          write_unlock(&d->event_lock);
> -        if ( domid == DOMID_SELF || ret )
> -            goto free_domain;
> +        if ( d == current->domain || ret )
> +            return ret;
>      }
>  
>      pcidevs_lock();
> @@ -170,8 +158,6 @@ int physdev_unmap_pirq(domid_t domid, in
>      write_unlock(&d->event_lock);
>      pcidevs_unlock();
>  
> - free_domain:
> -    rcu_unlock_domain(d);
>      return ret;
>  }
>  #endif /* COMPAT */
> @@ -184,6 +170,8 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
>  
>      switch ( cmd )
>      {
> +        struct domain *d;
> +
>      case PHYSDEVOP_eoi: {
>          struct physdev_eoi eoi;
>          struct pirq *pirq;
> @@ -331,8 +319,15 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
>          msi.sbdf.devfn = map.devfn;
>          msi.entry_nr = map.entry_nr;
>          msi.table_base = map.table_base;
> -        ret = physdev_map_pirq(map.domid, map.type, &map.index, &map.pirq,
> -                               &msi);
> +
> +        d = rcu_lock_domain_by_any_id(map.domid);
> +        ret = -ESRCH;
> +        if ( !d )
> +            break;
> +
> +        ret = physdev_map_pirq(d, map.type, &map.index, &map.pirq, &msi);
> +
> +        rcu_unlock_domain(d);
>  
>          if ( map.type == MAP_PIRQ_TYPE_MULTI_MSI )
>              map.entry_nr = msi.entry_nr;
> @@ -348,7 +343,15 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
>          if ( copy_from_guest(&unmap, arg, 1) != 0 )
>              break;
>  
> -        ret = physdev_unmap_pirq(unmap.domid, unmap.pirq);
> +        d = rcu_lock_domain_by_any_id(unmap.domid);
> +        ret = -ESRCH;
> +        if ( !d )
> +            break;
> +
> +        ret = physdev_unmap_pirq(d, unmap.pirq);
> +
> +        rcu_unlock_domain(d);
> +
>          break;
>      }
>  


 

