[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [RFC] [PATCH] arm64-its: Add ITS support for ACPI dom0



Hello Manish,

On 30/05/17 07:07, Manish Jaggi wrote:
This patch is an RFC on top of Andre's v10 series.
https://www.mail-archive.com/xen-devel@xxxxxxxxxxxxx/msg109093.html

This patch deny's access to ITS region for the guest and also updates

s/deny's/denies/

the acpi tables for dom0.

This patch is doing more than supporting ITS in the hardware domain. It also allows support of ITS in Xen when booting using ACPI.


Signed-off-by: Manish Jaggi <mjaggi@xxxxxxxxxx>
---
 xen/arch/arm/gic-v3.c            | 49
++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/gic_v3_its.h |  1 +
 2 files changed, 50 insertions(+)

diff --git a/xen/arch/arm/gic-v3.c b/xen/arch/arm/gic-v3.c
index c927306..f496fc1 100644
--- a/xen/arch/arm/gic-v3.c
+++ b/xen/arch/arm/gic-v3.c
@@ -1301,6 +1301,7 @@ static int gicv3_iomem_deny_access(const struct
domain *d)
 {
     int rc, i;
     unsigned long mfn, nr;
+    const struct host_its *its_data;

     mfn = dbase >> PAGE_SHIFT;
     nr = DIV_ROUND_UP(SZ_64K, PAGE_SIZE);
@@ -1333,6 +1334,16 @@ static int gicv3_iomem_deny_access(const struct
domain *d)
         return iomem_deny_access(d, mfn, mfn + nr);
     }

If GICv2 is supported, the function will bail out as soon as the virtual
base region is denied (see just above).


+    /* deny for ITS as well */
+    list_for_each_entry(its_data, &host_its_list, entry)
+    {
+        mfn = its_data->addr >> PAGE_SHIFT;

Please don't open-code the shift; use paddr_to_pfn(...) instead.

+        nr = DIV_ROUND_UP(SZ_128K, PAGE_SIZE);

Please use PFN_UP rather than DIV_ROUND_UP(...).

Also, where does the SZ_128K come from?

+        rc = iomem_deny_access(d, mfn, mfn + nr);
+        if ( rc )
+            return rc;
+    }

No implementation of ITS specific code in the GICv3 driver please.
Instead introduce a helper for that.

+
     return 0;
 }

@@ -1357,8 +1368,10 @@ static int gicv3_make_hwdom_madt(const struct
domain *d, u32 offset)
     struct acpi_subtable_header *header;
     struct acpi_madt_generic_interrupt *host_gicc, *gicc;
     struct acpi_madt_generic_redistributor *gicr;
+    struct acpi_madt_generic_translator *gic_its;
     u8 *base_ptr = d->arch.efi_acpi_table + offset;
     u32 i, table_len = 0, size;
+    const struct host_its *its_data;

See my comment above regarding ITS specific code.

     /* Add Generic Interrupt */
     header =
acpi_table_get_entry_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, 0);
@@ -1374,6 +1387,7 @@ static int gicv3_make_hwdom_madt(const struct
domain *d, u32 offset)
     for ( i = 0; i < d->max_vcpus; i++ )
     {
         gicc = (struct acpi_madt_generic_interrupt *)(base_ptr + table_len);
+

Spurious change.

         ACPI_MEMCPY(gicc, host_gicc, size);
         gicc->cpu_interface_number = i;
         gicc->uid = i;
@@ -1399,6 +1413,18 @@ static int gicv3_make_hwdom_madt(const struct
domain *d, u32 offset)
         gicr->length = d->arch.vgic.rdist_regions[i].size;
         table_len += size;
     }
+
+    /* Update GIC ITS information in dom0 madt */

s/dom0/hardware domain/
s/madt/MADT/

Also, likely you want to make sure you have space in efi_acpi_table (see estimate_acpi_efi_size).

+    list_for_each_entry(its_data, &host_its_list, entry)
+    {
+        size = sizeof(struct acpi_madt_generic_translator);
+        gic_its = (struct acpi_madt_generic_translator *)(base_ptr +
table_len);
+        gic_its->header.type = ACPI_MADT_TYPE_GENERIC_TRANSLATOR;
+        gic_its->header.length = size;
+        gic_its->base_address = its_data->addr;
+        gic_its->translation_id = its_data->translation_id;

Please explain why you need to have the same ID as the host.

+        table_len +=  size;
+    }

     return table_len;
 }
@@ -1511,6 +1537,25 @@ gic_acpi_get_madt_redistributor_num(struct
acpi_subtable_header *header,
      */
     return 0;
 }

Newline here.

+#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
+
+int  gicv3_its_acpi_init(struct acpi_subtable_header *header, const unsigned 
long end)

Why is this not static?

+{

Same remark as above regarding ITS specific code.

+    struct acpi_madt_generic_translator *its_entry;
+    struct host_its *its_data;
+
+    its_data = xzalloc(struct host_its);

What if xzalloc fails?


+    its_entry = (struct acpi_madt_generic_translator *)header;
+    its_data->addr  = its_entry->base_address;
+    its_data->size = ACPI_GICV3_ITS_MEM_SIZE;
+
+    spin_lock_init(&its_data->cmd_lock);
+
+    printk("GICv3: Found ITS @0x%lx\n", its_data->addr);
+
+    list_add_tail(&its_data->entry, &host_its_list);

Likely you could factor out part of gicv3_its_dt_init and re-use it here, to avoid implementing the initialization twice.

Also newline.

+    return 0;
+}

 static void __init gicv3_acpi_init(void)
 {
@@ -1567,6 +1612,9 @@ static void __init gicv3_acpi_init(void)

     gicv3.rdist_stride = 0;

+    acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
+                          gicv3_its_acpi_init, 0);

acpi_table_parse_madt may return an error. Why is this not checked?

+
     /*
      * In ACPI, 0 is considered as the invalid address. However the rest
      * of the initialization rely on the invalid address to be
@@ -1585,6 +1633,7 @@ static void __init gicv3_acpi_init(void)
     else
         vsize = GUEST_GICC_SIZE;

+

Spurious line.

 }
 #else
 static void __init gicv3_acpi_init(void) { }
diff --git a/xen/include/asm-arm/gic_v3_its.h
b/xen/include/asm-arm/gic_v3_its.h
index d2a3e53..c92cdb9 100644
--- a/xen/include/asm-arm/gic_v3_its.h
+++ b/xen/include/asm-arm/gic_v3_its.h
@@ -125,6 +125,7 @@ struct host_its {
     spinlock_t cmd_lock;
     void *cmd_buf;
     unsigned int flags;
+    u32 translation_id;

Please document this field. From the name it does not make sense to only use it for ACPI.

 };



Regards,

--
Julien Grall

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.