
Re: [PATCH v2 8/8] vpci/msix: Add function to clean MSIX resources



On Wed, Apr 09, 2025 at 02:45:28PM +0800, Jiqian Chen wrote:
> When init_msix() fails, it needs to clean all MSIX resources.
> So, add a new function fini_msix() to do that.
> 
> And add a new function to unregister the mmio handler backed by
> vpci_msix_table_ops.
> 
> Signed-off-by: Jiqian Chen <Jiqian.Chen@xxxxxxx>
> ---
> cc: Jan Beulich <jbeulich@xxxxxxxx>
> cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> cc: "Roger Pau Monné" <roger.pau@xxxxxxxxxx>
> ---
> v1->v2 changes:
> new patch.
> 
> Best regards,
> Jiqian Chen.
> ---
>  xen/arch/x86/hvm/intercept.c      | 44 ++++++++++++++++++++++
>  xen/arch/x86/include/asm/hvm/io.h |  3 ++
>  xen/drivers/vpci/msix.c           | 61 ++++++++++++++++++++++++++++---
>  3 files changed, 103 insertions(+), 5 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/intercept.c b/xen/arch/x86/hvm/intercept.c
> index da22c386763e..5eacf51d4d2c 100644
> --- a/xen/arch/x86/hvm/intercept.c
> +++ b/xen/arch/x86/hvm/intercept.c
> @@ -276,6 +276,50 @@ void register_mmio_handler(struct domain *d,
>      handler->mmio.ops = ops;
>  }
>  
> +void unregister_mmio_handler(struct domain *d,
> +                             const struct hvm_mmio_ops *ops)
> +{
> +    unsigned int i, count = d->arch.hvm.io_handler_count;
> +
> +    ASSERT(d->arch.hvm.io_handler);
> +
> +    if ( !count )
> +        return;
> +
> +    for ( i = 0; i < count; i++ )
> +        if ( d->arch.hvm.io_handler[i].type == IOREQ_TYPE_COPY &&
> +             d->arch.hvm.io_handler[i].mmio.ops == ops )
> +            break;
> +
> +    if ( i == count )
> +        return;
> +
> +    for ( ; i < count - 1; i++ )
> +    {
> +        struct hvm_io_handler *curr = &d->arch.hvm.io_handler[i];
> +        struct hvm_io_handler *next = &d->arch.hvm.io_handler[i + 1];
> +
> +        curr->type = next->type;
> +        curr->ops = next->ops;
> +        if ( next->type == IOREQ_TYPE_COPY )
> +        {
> +            curr->portio.port = 0;
> +            curr->portio.size = 0;
> +            curr->portio.action = 0;
> +            curr->mmio.ops = next->mmio.ops;
> +        }
> +        else
> +        {
> +            curr->mmio.ops = 0;
> +            curr->portio.port = next->portio.port;
> +            curr->portio.size = next->portio.size;
> +            curr->portio.action = next->portio.action;
> +        }
> +    }

Can't you use memmove() instead of a for loop?

memmove(&d->arch.hvm.io_handler[i], &d->arch.hvm.io_handler[i + 1],
        sizeof(d->arch.hvm.io_handler[0]) * (count - i - 1));
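
For reference, a sketch of how the whole removal path could then
collapse (untested, reusing the field and helper names from the patch
above; a whole-struct copy is fine here since each array slot travels
as a unit, so the by-hand zeroing of the unused union member becomes
unnecessary):

void unregister_mmio_handler(struct domain *d,
                             const struct hvm_mmio_ops *ops)
{
    unsigned int i, count = d->arch.hvm.io_handler_count;

    ASSERT(d->arch.hvm.io_handler);

    /* Find the matching MMIO handler, if any. */
    for ( i = 0; i < count; i++ )
        if ( d->arch.hvm.io_handler[i].type == IOREQ_TYPE_COPY &&
             d->arch.hvm.io_handler[i].mmio.ops == ops )
            break;

    if ( i == count )
        return;

    /* Close the gap by shifting the remaining entries down one slot. */
    memmove(&d->arch.hvm.io_handler[i], &d->arch.hvm.io_handler[i + 1],
            sizeof(d->arch.hvm.io_handler[0]) * (count - i - 1));

    d->arch.hvm.io_handler_count--;
}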

> +
> +    d->arch.hvm.io_handler_count--;
> +}
> +
>  void register_portio_handler(struct domain *d, unsigned int port,
>                               unsigned int size, portio_action_t action)
>  {
> diff --git a/xen/arch/x86/include/asm/hvm/io.h b/xen/arch/x86/include/asm/hvm/io.h
> index 565bad300d20..018d2745fd99 100644
> --- a/xen/arch/x86/include/asm/hvm/io.h
> +++ b/xen/arch/x86/include/asm/hvm/io.h
> @@ -75,6 +75,9 @@ bool hvm_mmio_internal(paddr_t gpa);
>  void register_mmio_handler(struct domain *d,
>                             const struct hvm_mmio_ops *ops);
>  
> +void unregister_mmio_handler(struct domain *d,
> +                             const struct hvm_mmio_ops *ops);
> +
>  void register_portio_handler(
>      struct domain *d, unsigned int port, unsigned int size,
>      portio_action_t action);
> diff --git a/xen/drivers/vpci/msix.c b/xen/drivers/vpci/msix.c
> index 6537374c79a0..60654d4f6d0b 100644
> --- a/xen/drivers/vpci/msix.c
> +++ b/xen/drivers/vpci/msix.c
> @@ -703,6 +703,54 @@ int vpci_make_msix_hole(const struct pci_dev *pdev)
>      return 0;
>  }
>  
> +static void fini_msix(struct pci_dev *pdev)
> +{
> +    struct vpci *vpci = pdev->vpci;
> +
> +    if ( !vpci->msix )
> +        return;
> +
> +    list_del(&vpci->msix->next);
> +    if ( list_empty(&pdev->domain->arch.hvm.msix_tables) )
> +        unregister_mmio_handler(pdev->domain, &vpci_msix_table_ops);

At the point the MMIO handler is added, the capability initialization
cannot fail, so arguably if the MSI-X handler is registered there will
always be at least one functional MSI-X capability that requires it.

IOW: you can likely drop the addition of unregister_mmio_handler() and
avoid the removal of the MMIO handler.  Worst case a domain will end
up with a dummy handler that does nothing, but it won't cause
malfunctions.
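
(For context, the registration side in init_msix() is roughly the
mirror of the list handling below; sketched from memory, not
verbatim:

    if ( list_empty(&pdev->domain->arch.hvm.msix_tables) )
        register_mmio_handler(pdev->domain, &vpci_msix_table_ops);
    list_add(&pdev->vpci->msix->next, &pdev->domain->arch.hvm.msix_tables);

so once the handler is in place it simply stays for the lifetime of
the domain.)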

> +
> +    /* Remove any MSIX regions if present. */
> +    for ( unsigned int i = 0;
> +          vpci->msix && i < ARRAY_SIZE(vpci->msix->tables);
> +          i++ )
> +    {
> +        unsigned long start = PFN_DOWN(vmsix_table_addr(pdev->vpci, i));
> +        unsigned long end = PFN_DOWN(vmsix_table_addr(pdev->vpci, i) +
> +                                     vmsix_table_size(pdev->vpci, i) - 1);
> +
> +        for ( unsigned int j = 0; j < ARRAY_SIZE(vpci->header.bars); j++ )
> +        {
> +            int rc;
> +            const struct vpci_bar *bar = &vpci->header.bars[j];
> +
> +            if ( rangeset_is_empty(bar->mem) )
> +                continue;
> +
> +            rc = rangeset_remove_range(bar->mem, start, end);
> +            if ( rc )
> +            {
> +                gprintk(XENLOG_WARNING,
> +                       "%pp: failed to remove MSIX table [%lx, %lx]: %d\n",
> +                        &pdev->sbdf, start, end, rc);
> +                return;
> +            }
> +        }
> +    }

There's no need to do any of this rangeset manipulation.  The BAR
rangesets are re-created for any map/unmap request, and hence should
be empty unless there's a concurrent operation going on (which won't
be the case when initializing the capabilities).

> +
> +    for ( unsigned int i = 0; i < ARRAY_SIZE(vpci->msix->table); i++ )
> +        if ( vpci->msix->table[i] )
> +            iounmap(vpci->msix->table[i]);

The MSI-X init function never maps tables, so the code here (given
its current usage) will also never unmap anything.  However I would
also like to use this code for vPCI deassign, at which point the
logic will get used.
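
Putting the above together, the cleanup helper could then shrink to
something like (a sketch only, dropping the handler unregistration
and the rangeset manipulation per the comments above):

static void fini_msix(struct pci_dev *pdev)
{
    struct vpci *vpci = pdev->vpci;

    if ( !vpci->msix )
        return;

    /* Detach from the per-domain list; the MMIO handler stays put. */
    list_del(&vpci->msix->next);

    /* Unmap any tables, for the (future) deassign path. */
    for ( unsigned int i = 0; i < ARRAY_SIZE(vpci->msix->table); i++ )
        if ( vpci->msix->table[i] )
            iounmap(vpci->msix->table[i]);
}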

Thanks, Roger.