Re: [Xen-devel] [PATCH v6 11/11] vpci/msix: add MSI-X handlers

>>> On 19.09.17 at 17:29, <roger.pau@xxxxxxxxxx> wrote:
> --- a/xen/drivers/vpci/header.c
> +++ b/xen/drivers/vpci/header.c
> @@ -152,6 +152,7 @@ static int vpci_check_bar_overlap(const struct pci_dev *pdev,
>  static void vpci_modify_bars(const struct pci_dev *pdev, bool map)
>  {
>      struct vpci_header *header = &pdev->vpci->header;
> +    struct vpci_msix *msix = pdev->vpci->msix;

Make this const, and please fetch the value only right before you first need it.
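
I.e. something along these lines (just a sketch, keeping the names from
the hunk above; the rest of the function would be unchanged):

    /* In the scope where the value is actually needed: */
    {
        const struct vpci_msix *msix = pdev->vpci->msix;

        /* ... code actually consuming msix ... */
    }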

> --- a/xen/drivers/vpci/msi.c
> +++ b/xen/drivers/vpci/msi.c
> @@ -320,13 +320,17 @@ void vpci_dump_msi(void)
>          if ( !has_vpci(d) )
>              continue;
>  
> -        printk("vPCI MSI information for d%d\n", d->domain_id);
> +        printk("vPCI MSI/MSI-X information for d%d\n", d->domain_id);
>  
>          list_for_each_entry ( pdev, &d->arch.pdev_list, domain_list )
>          {
>              uint8_t seg = pdev->seg, bus = pdev->bus;
>              uint8_t slot = PCI_SLOT(pdev->devfn), func = PCI_FUNC(pdev->devfn);
>              const struct vpci_msi *msi = pdev->vpci->msi;
> +            const struct vpci_msix *msix = pdev->vpci->msix;
> +
> +            if ( msi || msix )
> +                printk("Device %04x:%02x:%02x.%u\n", seg, bus, slot, func);
>  
>              if ( !spin_trylock(&pdev->vpci->lock) )
>              {
> @@ -336,7 +340,7 @@ void vpci_dump_msi(void)
>  
>              if ( msi )
>              {
> -                printk("Device %04x:%02x:%02x.%u\n", seg, bus, slot, func);
> +                printk(" MSI\n");
>  
>                  printk("  Enabled: %u Supports masking: %u 64-bit addresses: %u\n",
>                         msi->enabled, msi->masking, msi->address64);
> @@ -349,6 +353,20 @@ void vpci_dump_msi(void)
>                      printk("  mask=%08x\n", msi->mask);
>              }
>  
> +            if ( msix )
> +            {
> +                unsigned int i;
> +
> +                printk(" MSI-X\n");
> +
> +                printk("  Max entries: %u maskall: %u enabled: %u\n",
> +                       msix->max_entries, msix->masked, msix->enabled);
> +
> +                printk("  Table entries:\n");
> +                for ( i = 0; i < msix->max_entries; i++ )
> +                    vpci_msix_arch_print_entry(&msix->entries[i]);
> +            }
> +

Again, please try to reduce the amount of overall output.
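
E.g. (a sketch only; the exact format is of course up to you) the two
MSI-X lines could be folded into one, and the per-entry dump could be
skipped while the table can't generate interrupts anyway:

    printk(" MSI-X: max entries: %u maskall: %u enabled: %u\n",
           msix->max_entries, msix->masked, msix->enabled);

    if ( msix->enabled && !msix->masked )
        for ( i = 0; i < msix->max_entries; i++ )
            vpci_msix_arch_print_entry(&msix->entries[i]);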

> +static void vpci_msix_control_write(const struct pci_dev *pdev,
> +                                    unsigned int reg, uint32_t val, void *data)
> +{
> +    uint8_t seg = pdev->seg, bus = pdev->bus;
> +    uint8_t slot = PCI_SLOT(pdev->devfn), func = PCI_FUNC(pdev->devfn);
> +    struct vpci_msix *msix = data;
> +    bool new_masked, new_enabled;
> +    unsigned int i;
> +    int rc;
> +
> +    new_masked = val & PCI_MSIX_FLAGS_MASKALL;
> +    new_enabled = val & PCI_MSIX_FLAGS_ENABLE;
> +
> +    /*
> +     * According to the PCI 3.0 specification, switching the enable bit
> +     * to 1 or the function mask bit to 0 should cause all the cached
> +     * addresses and data fields to be recalculated. Xen implements this
> +     * as disabling and enabling the entries.
> +     *
> +     * Note that the disable/enable sequence is only performed when the
> +     * guest has written to the entry (ie: updated field set) or MSIX is
> +     * enabled.
> +     */
> +    if ( new_enabled && !new_masked && (!msix->enabled || msix->masked) )
> +    {
> +        paddr_t table_base =
> +            pdev->vpci->header.bars[msix->mem[VPCI_MSIX_TABLE].bir].addr;
> +
> +        for ( i = 0; i < msix->max_entries; i++ )
> +        {
> +            if ( msix->entries[i].masked ||
> +                 (new_enabled && msix->enabled && !msix->entries[i].updated) )
> +                continue;

This doesn't seem to match up with the earlier comment.
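
If the comment is what's intended, I'd have expected the skip condition
to be something like (a sketch, using the names from the hunk above)

    if ( msix->entries[i].masked ||
         (!msix->entries[i].updated && !msix->enabled) )
        continue;

i.e. an entry gets processed when the guest has written to it or when
MSI-X was already enabled. Either the comment or the code wants
adjusting.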

> +static int vpci_msix_read(struct vcpu *v, unsigned long addr,
> +                          unsigned int len, unsigned long *data)
> +{
> +    struct domain *d = v->domain;

const?
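
I.e.:

    const struct domain *d = v->domain;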

> +    const struct vpci_bar *bars;
> +    struct vpci_msix *msix;
> +    const struct vpci_msix_entry *entry;
> +    unsigned int offset;
> +
> +    *data = ~0ul;
> +
> +    msix = vpci_msix_find(d, addr);
> +    if ( !msix || !vpci_msix_access_allowed(msix->pdev, addr, len) )
> +        return X86EMUL_OKAY;

In the !msix case I'm once again not convinced that returning OKAY is
correct.
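
A sketch of what I'd expect instead (assuming the device, and hence the
handler, can go away between the accept check and the actual access):

    msix = vpci_msix_find(d, addr);
    if ( !msix )
        /* Don't claim an access we can no longer handle. */
        return X86EMUL_RETRY;

    if ( !vpci_msix_access_allowed(msix->pdev, addr, len) )
        return X86EMUL_OKAY;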

> --- a/xen/include/xen/vpci.h
> +++ b/xen/include/xen/vpci.h
> @@ -100,6 +100,40 @@ struct vpci {
>          /* 64-bit address capable? */
>          bool address64;
>      } *msi;
> +
> +    /* MSI-X data. */
> +    struct vpci_msix {
> +        struct pci_dev *pdev;
> +        /* List link. */
> +        struct list_head next;
> +        /* Table information. */
> +        struct vpci_msix_mem {
> +            /* MSI-X table offset. */
> +            unsigned int offset;
> +            /* MSI-X table BIR. */
> +            unsigned int bir;
> +            /* Table size. */
> +            unsigned int size;
> +#define VPCI_MSIX_TABLE     0
> +#define VPCI_MSIX_PBA       1
> +#define VPCI_MSIX_MEM_NUM   2
> +        } mem[VPCI_MSIX_MEM_NUM];
> +        /* Maximum number of vectors supported by the device. */
> +        unsigned int max_entries;
> +        /* MSI-X enabled? */
> +        bool enabled;
> +        /* Masked? */
> +        bool masked;
> +        /* Entries. */
> +        struct vpci_msix_entry {
> +            uint64_t addr;
> +            uint32_t data;
> +            unsigned int nr;
> +            struct vpci_arch_msix_entry arch;
> +            bool masked;
> +            bool updated;
> +        } entries[];
> +    } *msix;

Same remark as for MSI regarding optimizing structure size.
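
As a sketch of what I mean (field widths derived from what MSI-X can
actually require: the BIR is 3 bits wide, and a table can have at most
2048 entries):

    struct vpci_msix_mem {
        uint32_t offset;
        uint32_t size;
        uint8_t bir;                  /* Only 3 bits in hardware. */
    };

    struct vpci_msix_entry {
        uint64_t addr;
        uint32_t data;
        uint16_t nr;                  /* At most 2048 entries. */
        bool masked;
        bool updated;
        struct vpci_arch_msix_entry arch;
    };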

Jan
