
Re: [Xen-devel] [PATCH,RFC 5/7] PCI multi-seg: AMD-IOMMU specific adjustments



>>> On 26.08.11 at 13:57, Wei Wang2 <wei.wang2@xxxxxxx> wrote:
> Reading the PCI segment from the IVHD block is fine. The value should be the
> same as in the IVHDR if both show up in the BIOS tables. The IVHDR also has a
> PCI segment number, but it is only used by IVRS revision 2. Actually, IVRS
> rev 2 allows device IDs to be chosen freely by the OS instead of by the
> firmware. Rev 2 might co-exist with rev 1 in the BIOS, but if software like
> Xen or Linux uses the device IDs supplied by the BIOS, it should just ignore
> the rev 2 tables.
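
For the IVHD side that is indeed what the patch below does. Just to spell out
my understanding (illustration only, not proposed code - the structure is cut
down to the two fields that matter here, and the rev-2 type check is a mere
placeholder, not taken from the spec):

#include <stdint.h>

/* Cut-down stand-in for an IVHD block header. */
struct ivhd_example {
    uint8_t  type;         /* IVRS sub-table type */
    uint16_t pci_segment;  /* what the patch reads as ivhd_block->pci_segment */
};

/* Placeholder predicate: "belongs to the IVRS rev 2 layout". */
int is_rev2_ivhd(const struct ivhd_example *b)
{
    return b->type >= 0x11;  /* illustrative threshold only */
}

/* Segment to use for a block, or -1 to have the caller skip the block,
 * i.e. ignore rev 2 tables when firmware-assigned device IDs are used. */
int ivhd_segment(const struct ivhd_example *b, int use_firmware_ids)
{
    if ( use_firmware_ids && is_rev2_ivhd(b) )
        return -1;
    return b->pci_segment;
}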

I don't really follow: the two cases where I can't spot where to get the
segment number from are register_exclusion_range_for_all_devices()
and register_exclusion_range_for_device(), both called while parsing an
IVMD block, i.e. with only a struct acpi_ivmd_block_header in hand
(which doesn't have a segment number, afaict).
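
Again just for illustration, not proposed code - a cut-down stand-in for the
rev 1 IVMD block, showing why there is nothing better than segment 0 to pass
down from those two call sites (the field list is abbreviated):

#include <stdint.h>

/* Rev 1 IVMD block, reduced to the point being made: it names a device ID
 * and an address range, but no PCI segment. */
struct ivmd_example {
    uint8_t  type;
    uint16_t device_id;
    uint64_t start_addr;
    uint64_t mem_length;
};

/* All the exclusion-range handlers can do for now, hence the "seg = 0"
 * XXX placeholders in the patch below. */
uint16_t ivmd_segment(const struct ivmd_example *ivmd)
{
    (void)ivmd;  /* nothing in here identifies a segment */
    return 0;
}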

Jan

> On Thursday 25 August 2011 16:58:18 Jan Beulich wrote:
>> There are two places here where it is entirely unclear to me where the
>> necessary PCI segment number should be taken from (as IVMD descriptors
>> don't have such, only IVHD ones do).
>>
>> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
>>
>> --- 2011-08-25.orig/xen/drivers/passthrough/amd/iommu_acpi.c    2011-06-28 09:41:39.000000000 +0200
>> +++ 2011-08-25/xen/drivers/passthrough/amd/iommu_acpi.c 2011-08-25 15:06:47.000000000 +0200
>> @@ -30,6 +30,7 @@ static unsigned short __initdata last_bd
>>  static void __init add_ivrs_mapping_entry(
>>      u16 bdf, u16 alias_id, u8 flags, struct amd_iommu *iommu)
>>  {
>> +    struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(iommu->seg);
>>      u8 sys_mgt, lint1_pass, lint0_pass, nmi_pass, ext_int_pass, init_pass;
>>      ASSERT( ivrs_mappings != NULL );
>>
>> @@ -118,9 +119,10 @@ static void __init reserve_iommu_exclusi
>>  }
>>
>>  static void __init reserve_unity_map_for_device(
>> -    u16 bdf, unsigned long base,
>> +    u16 seg, u16 bdf, unsigned long base,
>>      unsigned long length, u8 iw, u8 ir)
>>  {
>> +    struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(seg);
>>      unsigned long old_top, new_top;
>>
>>      /* need to extend unity-mapped range? */
>> @@ -147,6 +149,7 @@ static void __init reserve_unity_map_for
>>  static int __init register_exclusion_range_for_all_devices(
>>      unsigned long base, unsigned long limit, u8 iw, u8 ir)
>>  {
>> +    int seg = 0; /* XXX */
>>      unsigned long range_top, iommu_top, length;
>>      struct amd_iommu *iommu;
>>      u16 bdf;
>> @@ -163,7 +166,7 @@ static int __init register_exclusion_ran
>>          /* reserve r/w unity-mapped page entries for devices */
>>          /* note: these entries are part of the exclusion range */
>>          for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
>> -            reserve_unity_map_for_device(bdf, base, length, iw, ir);
>> +            reserve_unity_map_for_device(seg, bdf, base, length, iw, ir);
>>          /* push 'base' just outside of virtual address space */
>>          base = iommu_top;
>>      }
>> @@ -180,11 +183,13 @@ static int __init register_exclusion_ran
>>  static int __init register_exclusion_range_for_device(
>>      u16 bdf, unsigned long base, unsigned long limit, u8 iw, u8 ir)
>>  {
>> +    int seg = 0; /* XXX */
>> +    struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(seg);
>>      unsigned long range_top, iommu_top, length;
>>      struct amd_iommu *iommu;
>>      u16 req;
>>
>> -    iommu = find_iommu_for_device(bdf);
>> +    iommu = find_iommu_for_device(seg, bdf);
>>      if ( !iommu )
>>      {
>>          AMD_IOMMU_DEBUG("IVMD Error: No IOMMU for Dev_Id 0x%x!\n", bdf);
>> @@ -202,8 +207,8 @@ static int __init register_exclusion_ran
>>          length = range_top - base;
>>          /* reserve unity-mapped page entries for device */
>>          /* note: these entries are part of the exclusion range */
>> -        reserve_unity_map_for_device(bdf, base, length, iw, ir);
>> -        reserve_unity_map_for_device(req, base, length, iw, ir);
>> +        reserve_unity_map_for_device(seg, bdf, base, length, iw, ir);
>> +        reserve_unity_map_for_device(seg, req, base, length, iw, ir);
>>
>>          /* push 'base' just outside of virtual address space */
>>          base = iommu_top;
>> @@ -240,11 +245,13 @@ static int __init register_exclusion_ran
>>          /* note: these entries are part of the exclusion range */
>>          for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
>>          {
>> -            if ( iommu == find_iommu_for_device(bdf) )
>> +            if ( iommu == find_iommu_for_device(iommu->seg, bdf) )
>>              {
>> -                reserve_unity_map_for_device(bdf, base, length, iw, ir);
>> -                req = ivrs_mappings[bdf].dte_requestor_id;
>> -                reserve_unity_map_for_device(req, base, length, iw, ir);
>> +                reserve_unity_map_for_device(iommu->seg, bdf, base, length,
>> +                                             iw, ir);
>> +                req = get_ivrs_mappings(iommu->seg)[bdf].dte_requestor_id;
>> +                reserve_unity_map_for_device(iommu->seg, req, base, length,
>> +                                             iw, ir);
>>              }
>>          }
>>
>> @@ -627,7 +634,7 @@ static u16 __init parse_ivhd_device_exte
>>  }
>>
>>  static u16 __init parse_ivhd_device_special(
>> -    union acpi_ivhd_device *ivhd_device,
>> +    union acpi_ivhd_device *ivhd_device, u16 seg,
>>      u16 header_length, u16 block_length, struct amd_iommu *iommu)
>>  {
>>      u16 dev_length, bdf;
>> @@ -648,6 +655,7 @@ static u16 __init parse_ivhd_device_spec
>>
>>      add_ivrs_mapping_entry(bdf, bdf, ivhd_device->header.flags, iommu);
>>      /* set device id of ioapic */
>> +    ioapic_seg[ivhd_device->special.handle] = seg;
>>      ioapic_bdf[ivhd_device->special.handle] = bdf;
>>      return dev_length;
>>  }
>> @@ -729,7 +737,7 @@ static int __init parse_ivhd_block(struc
>>              break;
>>          case AMD_IOMMU_ACPI_IVHD_DEV_SPECIAL:
>>              dev_length = parse_ivhd_device_special(
>> -                ivhd_device,
>> +                ivhd_device, ivhd_block->pci_segment,
>>                  ivhd_block->header.length, block_length, iommu);
>>              break;
>>          default:
>> --- 2011-08-25.orig/xen/drivers/passthrough/amd/iommu_detect.c  2011-04-04 09:11:24.000000000 +0200
>> +++ 2011-08-25/xen/drivers/passthrough/amd/iommu_detect.c       2011-08-25 15:06:47.000000000 +0200
>> @@ -27,8 +27,8 @@
>>  #include <asm/hvm/svm/amd-iommu-proto.h>
>>  #include <asm/hvm/svm/amd-iommu-acpi.h>
>>
>> -static int __init get_iommu_msi_capabilities(u8 bus, u8 dev, u8 func,
>> -            struct amd_iommu *iommu)
>> +static int __init get_iommu_msi_capabilities(
>> +    u16 seg, u8 bus, u8 dev, u8 func, struct amd_iommu *iommu)
>>  {
>>      int cap_ptr, cap_id;
>>      u32 cap_header;
>> @@ -66,8 +66,8 @@ static int __init get_iommu_msi_capabili
>>      return 0;
>>  }
>>
>> -static int __init get_iommu_capabilities(u8 bus, u8 dev, u8 func, u8 cap_ptr,
>> -                                  struct amd_iommu *iommu)
>> +static int __init get_iommu_capabilities(
>> +    u16 seg, u8 bus, u8 dev, u8 func, u8 cap_ptr, struct amd_iommu *iommu)
>>  {
>>      u32 cap_header, cap_range, misc_info;
>>
>> @@ -121,6 +121,11 @@ int __init amd_iommu_detect_one_acpi(voi
>>
>>      spin_lock_init(&iommu->lock);
>>
>> +    iommu->seg = ivhd_block->pci_segment;
>> +    if (alloc_ivrs_mappings(ivhd_block->pci_segment)) {
>> +        xfree(iommu);
>> +        return -ENOMEM;
>> +    }
>>      iommu->bdf = ivhd_block->header.dev_id;
>>      iommu->cap_offset = ivhd_block->cap_offset;
>>      iommu->mmio_base_phys = ivhd_block->mmio_base;
>> @@ -147,8 +152,9 @@ int __init amd_iommu_detect_one_acpi(voi
>>      bus = iommu->bdf >> 8;
>>      dev = PCI_SLOT(iommu->bdf & 0xFF);
>>      func = PCI_FUNC(iommu->bdf & 0xFF);
>> -    get_iommu_capabilities(bus, dev, func, iommu->cap_offset, iommu);
>> -    get_iommu_msi_capabilities(bus, dev, func, iommu);
>> +    get_iommu_capabilities(iommu->seg, bus, dev, func,
>> +                           iommu->cap_offset, iommu);
>> +    get_iommu_msi_capabilities(iommu->seg, bus, dev, func, iommu);
>>
>>      list_add_tail(&iommu->list, &amd_iommu_head);
>>
>> --- 2011-08-25.orig/xen/drivers/passthrough/amd/iommu_init.c    2011-08-19 17:08:35.000000000 +0200
>> +++ 2011-08-25/xen/drivers/passthrough/amd/iommu_init.c 2011-08-25 15:06:47.000000000 +0200
>> @@ -33,7 +33,7 @@ static struct amd_iommu **__read_mostly
>>  static int __initdata nr_amd_iommus;
>>
>>  unsigned short ivrs_bdf_entries;
>> -struct ivrs_mappings *ivrs_mappings;
>> +static struct radix_tree_root ivrs_maps;
>>  struct list_head amd_iommu_head;
>>  struct table_struct device_table;
>>
>> @@ -697,7 +697,6 @@ error_out:
>>  static void __init amd_iommu_init_cleanup(void)
>>  {
>>      struct amd_iommu *iommu, *next;
>> -    int bdf;
>>
>>      /* free amd iommu list */
>>      list_for_each_entry_safe ( iommu, next, &amd_iommu_head, list )
>> @@ -713,21 +712,13 @@ static void __init amd_iommu_init_cleanu
>>      }
>>
>>      /* free interrupt remapping table */
>> -    for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
>> -    {
>> -        if ( ivrs_mappings[bdf].intremap_table )
>> -            amd_iommu_free_intremap_table(bdf);
>> -    }
>> +    iterate_ivrs_entries(amd_iommu_free_intremap_table);
>>
>>      /* free device table */
>>      deallocate_iommu_table_struct(&device_table);
>>
>>      /* free ivrs_mappings[] */
>> -    if ( ivrs_mappings )
>> -    {
>> -        xfree(ivrs_mappings);
>> -        ivrs_mappings = NULL;
>> -    }
>> +    radix_tree_destroy(&ivrs_maps, xfree);
>>
>>      /* free irq_to_iommu[] */
>>      if ( irq_to_iommu )
>> @@ -741,19 +732,71 @@ static void __init amd_iommu_init_cleanu
>>      iommu_intremap = 0;
>>  }
>>
>> -static int __init init_ivrs_mapping(void)
>> +/*
>> + * We allocate an extra array element to store the segment number
>> + * (and in the future perhaps other global information).
>> + */
>> +#define IVRS_MAPPINGS_SEG(m) m[ivrs_bdf_entries].dte_requestor_id
>> +
>> +struct ivrs_mappings *get_ivrs_mappings(u16 seg)
>> +{
>> +    return radix_tree_lookup(&ivrs_maps, seg);
>> +}
>> +
>> +int iterate_ivrs_mappings(int (*handler)(u16 seg, struct ivrs_mappings *))
>>  {
>> +    u16 seg = 0;
>> +    int rc = 0;
>> +
>> +    do {
>> +        struct ivrs_mappings *map;
>> +
>> +        if ( !radix_tree_gang_lookup(&ivrs_maps, (void **)&map, seg, 1) )
>> +            break;
>> +        seg = IVRS_MAPPINGS_SEG(map);
>> +        rc = handler(seg, map);
>> +    } while ( !rc && ++seg );
>> +
>> +    return rc;
>> +}
>> +
>> +int iterate_ivrs_entries(int (*handler)(u16 seg, struct ivrs_mappings *))
>> +{
>> +    u16 seg = 0;
>> +    int rc = 0;
>> +
>> +    do {
>> +        struct ivrs_mappings *map;
>> +        int bdf;
>> +
>> +        if ( !radix_tree_gang_lookup(&ivrs_maps, (void **)&map, seg, 1) )
>> +            break;
>> +        seg = IVRS_MAPPINGS_SEG(map);
>> +        for ( bdf = 0; !rc && bdf < ivrs_bdf_entries; ++bdf )
>> +            rc = handler(seg, map + bdf);
>> +    } while ( !rc && ++seg );
>> +
>> +    return rc;
>> +}
>> +
>> +int __init alloc_ivrs_mappings(u16 seg)
>> +{
>> +    struct ivrs_mappings *ivrs_mappings;
>>      int bdf;
>>
>>      BUG_ON( !ivrs_bdf_entries );
>>
>> -    ivrs_mappings = xmalloc_array( struct ivrs_mappings, ivrs_bdf_entries);
>> +    if ( get_ivrs_mappings(seg) )
>> +        return 0;
>> +
>> +    ivrs_mappings = xmalloc_array(struct ivrs_mappings, ivrs_bdf_entries + 1);
>>      if ( ivrs_mappings == NULL )
>>      {
>>          AMD_IOMMU_DEBUG("Error allocating IVRS Mappings table\n");
>>          return -ENOMEM;
>>      }
>>      memset(ivrs_mappings, 0, ivrs_bdf_entries * sizeof(struct ivrs_mappings));
>> +    IVRS_MAPPINGS_SEG(ivrs_mappings) = seg;
>>
>>      /* assign default values for device entries */
>>      for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
>> @@ -775,10 +818,14 @@ static int __init init_ivrs_mapping(void
>>          if ( amd_iommu_perdev_intremap )
>>              spin_lock_init(&ivrs_mappings[bdf].intremap_lock);
>>      }
>> +
>> +    radix_tree_insert(&ivrs_maps, seg, ivrs_mappings);
>> +
>>      return 0;
>>  }
>>
>> -static int __init amd_iommu_setup_device_table(void)
>> +static int __init amd_iommu_setup_device_table(
>> +    u16 seg, struct ivrs_mappings *ivrs_mappings)
>>  {
>>      int bdf;
>>      void *intr_tb, *dte;
>> @@ -849,7 +896,8 @@ int __init amd_iommu_init(void)
>>      if ( !ivrs_bdf_entries )
>>          goto error_out;
>>
>> -    if ( init_ivrs_mapping() != 0 )
>> +    radix_tree_init(&ivrs_maps);
>> +    if ( alloc_ivrs_mappings(0) != 0 )
>>          goto error_out;
>>
>>      if ( amd_iommu_update_ivrs_mapping_acpi() != 0 )
>> @@ -860,7 +908,7 @@ int __init amd_iommu_init(void)
>>          goto error_out;
>>
>>      /* allocate and initialize a global device table shared by all iommus */
>> -    if ( amd_iommu_setup_device_table() != 0 )
>> +    if ( iterate_ivrs_mappings(amd_iommu_setup_device_table) != 0 )
>>          goto error_out;
>>
>>      /* per iommu initialization  */
>> @@ -905,7 +953,8 @@ static void invalidate_all_domain_pages(
>>          amd_iommu_flush_all_pages(d);
>>  }
>>
>> -static void invalidate_all_devices(void)
>> +static int _invalidate_all_devices(
>> +    u16 seg, struct ivrs_mappings *ivrs_mappings)
>>  {
>>      int bdf, req_id;
>>      unsigned long flags;
>> @@ -913,7 +962,7 @@ static void invalidate_all_devices(void)
>>
>>      for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
>>      {
>> -        iommu = find_iommu_for_device(bdf);
>> +        iommu = find_iommu_for_device(seg, bdf);
>>          req_id = ivrs_mappings[bdf].dte_requestor_id;
>>          if ( iommu )
>>          {
>> @@ -924,6 +973,13 @@ static void invalidate_all_devices(void)
>>              spin_unlock_irqrestore(&iommu->lock, flags);
>>          }
>>      }
>> +
>> +    return 0;
>> +}
>> +
>> +static void invalidate_all_devices(void)
>> +{
>> +    iterate_ivrs_mappings(_invalidate_all_devices);
>>  }
>>
>>  void amd_iommu_suspend(void)
>> --- 2011-08-25.orig/xen/drivers/passthrough/amd/iommu_intr.c    2011-08-19 17:08:35.000000000 +0200
>> +++ 2011-08-25/xen/drivers/passthrough/amd/iommu_intr.c 2011-08-25 15:06:47.000000000 +0200
>> @@ -28,20 +28,21 @@
>>  #define INTREMAP_ENTRIES (1 << INTREMAP_LENGTH)
>>
>>  int ioapic_bdf[MAX_IO_APICS];
>> +u16 ioapic_seg[MAX_IO_APICS];
>>  void *shared_intremap_table;
>>  static DEFINE_SPINLOCK(shared_intremap_lock);
>>
>> -static spinlock_t* get_intremap_lock(int req_id)
>> +static spinlock_t* get_intremap_lock(int seg, int req_id)
>>  {
>>      return (amd_iommu_perdev_intremap ?
>> -           &ivrs_mappings[req_id].intremap_lock:
>> +           &get_ivrs_mappings(seg)[req_id].intremap_lock:
>>             &shared_intremap_lock);
>>  }
>>
>> -static int get_intremap_requestor_id(int bdf)
>> +static int get_intremap_requestor_id(int seg, int bdf)
>>  {
>>      ASSERT( bdf < ivrs_bdf_entries );
>> -    return ivrs_mappings[bdf].dte_requestor_id;
>> +    return get_ivrs_mappings(seg)[bdf].dte_requestor_id;
>>  }
>>
>>  static int get_intremap_offset(u8 vector, u8 dm)
>> @@ -53,20 +54,20 @@ static int get_intremap_offset(u8 vector
>>      return offset;
>>  }
>>
>> -static u8 *get_intremap_entry(int bdf, int offset)
>> +static u8 *get_intremap_entry(int seg, int bdf, int offset)
>>  {
>>      u8 *table;
>>
>> -    table = (u8*)ivrs_mappings[bdf].intremap_table;
>> +    table = (u8*)get_ivrs_mappings(seg)[bdf].intremap_table;
>>      ASSERT( (table != NULL) && (offset < INTREMAP_ENTRIES) );
>>
>>      return (u8*) (table + offset);
>>  }
>>
>> -static void free_intremap_entry(int bdf, int offset)
>> +static void free_intremap_entry(int seg, int bdf, int offset)
>>  {
>>      u32* entry;
>> -    entry = (u32*)get_intremap_entry(bdf, offset);
>> +    entry = (u32*)get_intremap_entry(seg, bdf, offset);
>>      memset(entry, 0, sizeof(u32));
>>  }
>>
>> @@ -125,8 +126,8 @@ static void update_intremap_entry_from_i
>>      spinlock_t *lock;
>>      int offset;
>>
>> -    req_id = get_intremap_requestor_id(bdf);
>> -    lock = get_intremap_lock(req_id);
>> +    req_id = get_intremap_requestor_id(iommu->seg, bdf);
>> +    lock = get_intremap_lock(iommu->seg, req_id);
>>
>>      delivery_mode = rte->delivery_mode;
>>      vector = rte->vector;
>> @@ -136,7 +137,7 @@ static void update_intremap_entry_from_i
>>      spin_lock_irqsave(lock, flags);
>>
>>      offset = get_intremap_offset(vector, delivery_mode);
>> -    entry = (u32*)get_intremap_entry(req_id, offset);
>> +    entry = (u32*)get_intremap_entry(iommu->seg, req_id, offset);
>>      update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
>>
>>      spin_unlock_irqrestore(lock, flags);
>> @@ -175,7 +176,7 @@ int __init amd_iommu_setup_ioapic_remapp
>>
>>              /* get device id of ioapic devices */
>>              bdf = ioapic_bdf[IO_APIC_ID(apic)];
>> -            iommu = find_iommu_for_device(bdf);
>> +            iommu = find_iommu_for_device(ioapic_seg[IO_APIC_ID(apic)], bdf);
>>              if ( !iommu )
>>              {
>>                  AMD_IOMMU_DEBUG("Fail to find iommu for ioapic "
>> @@ -183,8 +184,8 @@ int __init amd_iommu_setup_ioapic_remapp
>>                  continue;
>>              }
>>
>> -            req_id = get_intremap_requestor_id(bdf);
>> -            lock = get_intremap_lock(req_id);
>> +            req_id = get_intremap_requestor_id(iommu->seg, bdf);
>> +            lock = get_intremap_lock(iommu->seg, req_id);
>>
>>              delivery_mode = rte.delivery_mode;
>>              vector = rte.vector;
>> @@ -193,7 +194,7 @@ int __init amd_iommu_setup_ioapic_remapp
>>
>>              spin_lock_irqsave(lock, flags);
>>              offset = get_intremap_offset(vector, delivery_mode);
>> -            entry = (u32*)get_intremap_entry(req_id, offset);
>> +            entry = (u32*)get_intremap_entry(iommu->seg, req_id, offset);
>>              update_intremap_entry(entry, vector,
>>                                    delivery_mode, dest_mode, dest);
>>              spin_unlock_irqrestore(lock, flags);
>> @@ -227,7 +228,7 @@ void amd_iommu_ioapic_update_ire(
>>
>>      /* get device id of ioapic devices */
>>      bdf = ioapic_bdf[IO_APIC_ID(apic)];
>> -    iommu = find_iommu_for_device(bdf);
>> +    iommu = find_iommu_for_device(ioapic_seg[IO_APIC_ID(apic)], bdf);
>>      if ( !iommu )
>>      {
>>          AMD_IOMMU_DEBUG("Fail to find iommu for ioapic device id = 0x%x\n",
>> @@ -289,28 +290,28 @@ static void update_intremap_entry_from_m
>>      int offset;
>>
>>      bdf = (pdev->bus << 8) | pdev->devfn;
>> -    req_id = get_dma_requestor_id(bdf);
>> -    alias_id = get_intremap_requestor_id(bdf);
>> +    req_id = get_dma_requestor_id(pdev->seg, bdf);
>> +    alias_id = get_intremap_requestor_id(pdev->seg, bdf);
>>
>>      if ( msg == NULL )
>>      {
>> -        lock = get_intremap_lock(req_id);
>> +        lock = get_intremap_lock(iommu->seg, req_id);
>>          spin_lock_irqsave(lock, flags);
>> -        free_intremap_entry(req_id, msi_desc->remap_index);
>> +        free_intremap_entry(iommu->seg, req_id, msi_desc->remap_index);
>>          spin_unlock_irqrestore(lock, flags);
>>
>>          if ( ( req_id != alias_id ) &&
>> -            ivrs_mappings[alias_id].intremap_table != NULL )
>> +             get_ivrs_mappings(pdev->seg)[alias_id].intremap_table != NULL )
>>          {
>> -            lock = get_intremap_lock(alias_id);
>> +            lock = get_intremap_lock(iommu->seg, alias_id);
>>              spin_lock_irqsave(lock, flags);
>> -            free_intremap_entry(alias_id, msi_desc->remap_index);
>> +            free_intremap_entry(iommu->seg, alias_id, msi_desc->remap_index);
>>              spin_unlock_irqrestore(lock, flags);
>>          }
>>          goto done;
>>      }
>>
>> -    lock = get_intremap_lock(req_id);
>> +    lock = get_intremap_lock(iommu->seg, req_id);
>>
>>      spin_lock_irqsave(lock, flags);
>>      dest_mode = (msg->address_lo >> MSI_ADDR_DESTMODE_SHIFT) & 0x1;
>> @@ -320,7 +321,7 @@ static void update_intremap_entry_from_m
>>      offset = get_intremap_offset(vector, delivery_mode);
>>      msi_desc->remap_index = offset;
>>
>> -    entry = (u32*)get_intremap_entry(req_id, offset);
>> +    entry = (u32*)get_intremap_entry(iommu->seg, req_id, offset);
>>      update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
>>      spin_unlock_irqrestore(lock, flags);
>>
>> @@ -331,12 +332,12 @@ static void update_intremap_entry_from_m
>>       * devices.
>>       */
>>
>> -    lock = get_intremap_lock(alias_id);
>> +    lock = get_intremap_lock(iommu->seg, alias_id);
>>      if ( ( req_id != alias_id ) &&
>> -        ivrs_mappings[alias_id].intremap_table != NULL )
>> +         get_ivrs_mappings(pdev->seg)[alias_id].intremap_table != NULL )
>>      {
>>          spin_lock_irqsave(lock, flags);
>> -        entry = (u32*)get_intremap_entry(alias_id, offset);
>> +        entry = (u32*)get_intremap_entry(iommu->seg, alias_id, offset);
>>          update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
>>          spin_unlock_irqrestore(lock, flags);
>>      }
>> @@ -362,7 +363,7 @@ void amd_iommu_msi_msg_update_ire(
>>      if ( !iommu_intremap )
>>          return;
>>
>> -    iommu = find_iommu_for_device((pdev->bus << 8) | pdev->devfn);
>> +    iommu = find_iommu_for_device(pdev->seg, (pdev->bus << 8) | pdev->devfn);
>>
>>      if ( !iommu )
>>      {
>> @@ -379,15 +380,18 @@ void amd_iommu_read_msi_from_ire(
>>  {
>>  }
>>
>> -void __init amd_iommu_free_intremap_table(int bdf)
>> +int __init amd_iommu_free_intremap_table(
>> +    u16 seg, struct ivrs_mappings *ivrs_mapping)
>>  {
>> -    void *tb = ivrs_mappings[bdf].intremap_table;
>> +    void *tb = ivrs_mapping->intremap_table;
>>
>>      if ( tb )
>>      {
>>          __free_amd_iommu_tables(tb, INTREMAP_TABLE_ORDER);
>> -        ivrs_mappings[bdf].intremap_table = NULL;
>> +        ivrs_mapping->intremap_table = NULL;
>>      }
>> +
>> +    return 0;
>>  }
>>
>>  void* __init amd_iommu_alloc_intremap_table(void)
>> --- 2011-08-25.orig/xen/drivers/passthrough/amd/iommu_map.c     2011-08-25 08:21:53.000000000 +0200
>> +++ 2011-08-25/xen/drivers/passthrough/amd/iommu_map.c  2011-08-25 15:06:47.000000000 +0200
>> @@ -719,8 +719,8 @@ static int update_paging_mode(struct dom
>>          for_each_pdev( d, pdev )
>>          {
>>              bdf = (pdev->bus << 8) | pdev->devfn;
>> -            req_id = get_dma_requestor_id(bdf);
>> -            iommu = find_iommu_for_device(bdf);
>> +            req_id = get_dma_requestor_id(pdev->seg, bdf);
>> +            iommu = find_iommu_for_device(pdev->seg, bdf);
>>              if ( !iommu )
>>              {
>>                  AMD_IOMMU_DEBUG("%s Fail to find iommu.\n", __func__);
>> --- 2011-08-25.orig/xen/drivers/passthrough/amd/pci_amd_iommu.c 2011-08-25 15:06:40.000000000 +0200
>> +++ 2011-08-25/xen/drivers/passthrough/amd/pci_amd_iommu.c      2011-08-25 15:06:47.000000000 +0200
>> @@ -29,10 +29,12 @@
>>  extern bool_t __read_mostly opt_irq_perdev_vector_map;
>>  extern bool_t __read_mostly iommu_amd_perdev_vector_map;
>>
>> -struct amd_iommu *find_iommu_for_device(int bdf)
>> +struct amd_iommu *find_iommu_for_device(int seg, int bdf)
>>  {
>> +    struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(seg);
>> +
>>      BUG_ON ( bdf >= ivrs_bdf_entries );
>> -    return ivrs_mappings[bdf].iommu;
>> +    return ivrs_mappings ? ivrs_mappings[bdf].iommu : NULL;
>>  }
>>
>>  /*
>> @@ -43,8 +45,9 @@ struct amd_iommu *find_iommu_for_device(
>>   * Return original device id, if device has valid interrupt remapping
>>   * table setup for both select entry and alias entry.
>>   */
>> -int get_dma_requestor_id(u16 bdf)
>> +int get_dma_requestor_id(u16 seg, u16 bdf)
>>  {
>> +    struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(seg);
>>      int req_id;
>>
>>      BUG_ON ( bdf >= ivrs_bdf_entries );
>> @@ -95,7 +98,7 @@ static void amd_iommu_setup_domain_devic
>>          valid = 0;
>>
>>      /* get device-table entry */
>> -    req_id = get_dma_requestor_id(bdf);
>> +    req_id = get_dma_requestor_id(iommu->seg, bdf);
>>      dte = iommu->dev_table.buffer + (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
>>
>>      spin_lock_irqsave(&iommu->lock, flags);
>> @@ -139,7 +142,7 @@ static void __init amd_iommu_setup_dom0_
>>              list_add(&pdev->domain_list, &d->arch.pdev_list);
>>
>>              bdf = (bus << 8) | devfn;
>> -            iommu = find_iommu_for_device(bdf);
>> +            iommu = find_iommu_for_device(pdev->seg, bdf);
>>
>>              if ( likely(iommu != NULL) )
>>                  amd_iommu_setup_domain_device(d, iommu, bdf);
>> @@ -270,7 +273,7 @@ static void amd_iommu_disable_domain_dev
>>      int req_id;
>>
>>      BUG_ON ( iommu->dev_table.buffer == NULL );
>> -    req_id = get_dma_requestor_id(bdf);
>> +    req_id = get_dma_requestor_id(iommu->seg, bdf);
>>      dte = iommu->dev_table.buffer + (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
>>
>>      spin_lock_irqsave(&iommu->lock, flags);
>> @@ -301,12 +304,12 @@ static int reassign_device( struct domai
>>          return -ENODEV;
>>
>>      bdf = (bus << 8) | devfn;
>> -    iommu = find_iommu_for_device(bdf);
>> +    iommu = find_iommu_for_device(seg, bdf);
>>      if ( !iommu )
>>      {
>>          AMD_IOMMU_DEBUG("Fail to find iommu."
>> -                        " %02x:%x02.%x cannot be assigned to domain %d\n",
>> -                        bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
>> +                        " %04x:%02x:%x02.%x cannot be assigned to dom%d\n",
>> +                        seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
>>                          target->domain_id);
>>          return -ENODEV;
>>      }
>> @@ -322,8 +325,8 @@ static int reassign_device( struct domai
>>          allocate_domain_resources(t);
>>
>>      amd_iommu_setup_domain_device(target, iommu, bdf);
>> -    AMD_IOMMU_DEBUG("Re-assign %02x:%02x.%x from domain %d to domain %d\n",
>> -                    bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
>> +    AMD_IOMMU_DEBUG("Re-assign %04x:%02x:%02x.%u from dom%d to dom%d\n",
>> +                    seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
>>                      source->domain_id, target->domain_id);
>>
>>      return 0;
>> @@ -331,8 +334,9 @@ static int reassign_device( struct domai
>>
>>  static int amd_iommu_assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn)
>>  {
>> +    struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(seg);
>>      int bdf = (bus << 8) | devfn;
>> -    int req_id = get_dma_requestor_id(bdf);
>> +    int req_id = get_dma_requestor_id(seg, bdf);
>>
>>      if ( ivrs_mappings[req_id].unity_map_enable )
>>      {
>> @@ -422,12 +426,12 @@ static int amd_iommu_add_device(struct p
>>          return -EINVAL;
>>
>>      bdf = (pdev->bus << 8) | pdev->devfn;
>> -    iommu = find_iommu_for_device(bdf);
>> +    iommu = find_iommu_for_device(pdev->seg, bdf);
>>      if ( !iommu )
>>      {
>>          AMD_IOMMU_DEBUG("Fail to find iommu."
>> -                        " %02x:%02x.%x cannot be assigned to domain %d\n",
>> -                        pdev->bus, PCI_SLOT(pdev->devfn),
>> +                        " %04x:%02x:%02x.%u cannot be assigned to dom%d\n",
>> +                        pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
>>                          PCI_FUNC(pdev->devfn), pdev->domain->domain_id);
>>          return -ENODEV;
>>      }
>> @@ -444,12 +448,12 @@ static int amd_iommu_remove_device(struc
>>          return -EINVAL;
>>
>>      bdf = (pdev->bus << 8) | pdev->devfn;
>> -    iommu = find_iommu_for_device(bdf);
>> +    iommu = find_iommu_for_device(pdev->seg, bdf);
>>      if ( !iommu )
>>      {
>>          AMD_IOMMU_DEBUG("Fail to find iommu."
>> -                        " %02x:%02x.%x cannot be removed from domain %d\n",
>> -                        pdev->bus, PCI_SLOT(pdev->devfn),
>> +                        " %04x:%02x:%02x.%u cannot be removed from dom%d\n",
>> +                        pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
>>                          PCI_FUNC(pdev->devfn), pdev->domain->domain_id);
>>          return -ENODEV;
>>      }
>> @@ -463,7 +467,7 @@ static int amd_iommu_group_id(u16 seg, u
>>      int rt;
>>      int bdf = (bus << 8) | devfn;
>>      rt = ( bdf < ivrs_bdf_entries ) ?
>> -        get_dma_requestor_id(bdf) :
>> +        get_dma_requestor_id(seg, bdf) :
>>          bdf;
>>      return rt;
>>  }
>> --- 2011-08-25.orig/xen/include/asm-x86/amd-iommu.h     2011-06-16 09:21:02.000000000 +0200
>> +++ 2011-08-25/xen/include/asm-x86/amd-iommu.h  2011-08-25 15:06:47.000000000 +0200
>> @@ -40,6 +40,7 @@ struct amd_iommu {
>>      struct list_head list;
>>      spinlock_t lock; /* protect iommu */
>>
>> +    u16 seg;
>>      u16 bdf;
>>      u8  cap_offset;
>>      u8  revision;
>> @@ -101,6 +102,10 @@ struct ivrs_mappings {
>>  };
>>
>>  extern unsigned short ivrs_bdf_entries;
>> -extern struct ivrs_mappings *ivrs_mappings;
>> +
>> +int alloc_ivrs_mappings(u16 seg);
>> +struct ivrs_mappings *get_ivrs_mappings(u16 seg);
>> +int iterate_ivrs_mappings(int (*)(u16 seg, struct ivrs_mappings *));
>> +int iterate_ivrs_entries(int (*)(u16 seg, struct ivrs_mappings *));
>>
>>  #endif /* _ASM_X86_64_AMD_IOMMU_H */
>> --- 2011-08-25.orig/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h       2011-08-19 17:08:35.000000000 +0200
>> +++ 2011-08-25/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h    2011-08-25 15:06:47.000000000 +0200
>> @@ -65,7 +65,7 @@ int amd_iommu_reserve_domain_unity_map(s
>>  void amd_iommu_share_p2m(struct domain *d);
>>
>>  /* device table functions */
>> -int get_dma_requestor_id(u16 bdf);
>> +int get_dma_requestor_id(u16 seg, u16 bdf);
>>  void amd_iommu_add_dev_table_entry(
>>      u32 *dte, u8 sys_mgt, u8 dev_ex, u8 lint1_pass, u8 lint0_pass,
>>      u8 nmi_pass, u8 ext_int_pass, u8 init_pass);
>> @@ -80,12 +80,12 @@ int send_iommu_command(struct amd_iommu
>>  void flush_command_buffer(struct amd_iommu *iommu);
>>
>>  /* find iommu for bdf */
>> -struct amd_iommu *find_iommu_for_device(int bdf);
>> +struct amd_iommu *find_iommu_for_device(int seg, int bdf);
>>
>>  /* interrupt remapping */
>>  int amd_iommu_setup_ioapic_remapping(void);
>>  void *amd_iommu_alloc_intremap_table(void);
>> -void amd_iommu_free_intremap_table(int bdf);
>> +int amd_iommu_free_intremap_table(u16 seg, struct ivrs_mappings *);
>>  void invalidate_interrupt_table(struct amd_iommu *iommu, u16 device_id);
>>  void amd_iommu_ioapic_update_ire(
>>      unsigned int apic, unsigned int reg, unsigned int value);
>> @@ -95,6 +95,7 @@ void amd_iommu_read_msi_from_ire(
>>      struct msi_desc *msi_desc, struct msi_msg *msg);
>>
>>  extern int ioapic_bdf[MAX_IO_APICS];
>> +extern u16 ioapic_seg[MAX_IO_APICS];
>>  extern void *shared_intremap_table;
>>
>>  /* power management support */



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 

