[RFC PATCH 11/19] arm/its: Add ITS VM and VPE allocation/teardown
Do the necessary allocations for GICv4 vLPI injection.
When creating a domain, allocate the its_vm structure and the property table.
For each vCPU, allocate a vPE with a unique vPE ID and a separate pending table.
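The resulting ownership model is easiest to see as a toy sketch (a
hypothetical plain-C illustration; calloc/free stand in for Xen's
allocators, and all names and sizes are illustrative, not the patch's):

    #include <stdlib.h>

    /* A domain owns one "VM": a shared property table plus one vPE pointer
     * per vCPU.  Each vCPU's vPE owns a private pending table. */
    struct toy_vpe { unsigned int vpe_id; void *pendtable; };
    struct toy_vm  { void *proptable; struct toy_vpe **vpes; unsigned int nr_vpes; };

    static struct toy_vm *toy_vm_init(unsigned int nr_vcpus)
    {
        struct toy_vm *vm = calloc(1, sizeof(*vm));

        if ( !vm )
            return NULL;
        vm->vpes = calloc(nr_vcpus, sizeof(*vm->vpes));
        vm->proptable = calloc(1, 64 * 1024);    /* shared LPI configuration */
        if ( !vm->vpes || !vm->proptable )
        {
            free(vm->proptable);
            free(vm->vpes);
            free(vm);
            return NULL;
        }
        vm->nr_vpes = nr_vcpus;
        return vm;
    }

    static int toy_vpe_init(struct toy_vm *vm, unsigned int vcpu_id,
                            unsigned int vpe_id)
    {
        struct toy_vpe *vpe = calloc(1, sizeof(*vpe));

        if ( !vpe )
            return -1;
        vpe->vpe_id = vpe_id;                    /* unique, from a bitmap */
        vpe->pendtable = calloc(1, 64 * 1024);   /* private pending table */
        if ( !vpe->pendtable )
        {
            free(vpe);
            return -1;
        }
        vm->vpes[vcpu_id] = vpe;
        return 0;
    }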
Signed-off-by: Mykyta Poturai <mykyta_poturai@xxxxxxxx>
---
xen/arch/arm/gic-v3-its.c | 157 ++++++++++++----
xen/arch/arm/gic-v3-lpi.c | 61 +++++-
xen/arch/arm/gic-v3.c | 18 ++
xen/arch/arm/gic-v4-its.c | 259 ++++++++++++++++++++++++++
xen/arch/arm/include/asm/gic_v3_its.h | 17 ++
xen/arch/arm/include/asm/gic_v4_its.h | 1 +
xen/arch/arm/include/asm/vgic.h | 3 +
xen/arch/arm/vgic.c | 25 ++-
8 files changed, 496 insertions(+), 45 deletions(-)
diff --git a/xen/arch/arm/gic-v3-its.c b/xen/arch/arm/gic-v3-its.c
index 2328595a85..fb1d2709be 100644
--- a/xen/arch/arm/gic-v3-its.c
+++ b/xen/arch/arm/gic-v3-its.c
@@ -31,6 +31,8 @@
LIST_HEAD(host_its_list);
+unsigned int nvpeid = 16;
+
/*
* It is unlikely that a platform implements ITSes with different quirks,
* so assume they all share the same.
@@ -228,7 +230,7 @@ int gicv3_its_wait_commands(struct host_its *hw_its)
return -ETIMEDOUT;
}
-static uint64_t encode_rdbase(struct host_its *hw_its, unsigned int cpu,
+uint64_t encode_rdbase(struct host_its *hw_its, unsigned int cpu,
uint64_t reg)
{
reg &= ~GENMASK(51, 16);
@@ -443,6 +445,54 @@ struct its_baser *its_get_baser(struct host_its *hw_its, uint32_t type)
return NULL;
}
+bool its_alloc_table_entry(struct its_baser *baser, uint32_t id)
+{
+ uint64_t reg = baser->val;
+ bool indirect = reg & GITS_BASER_INDIRECT;
+ unsigned int idx;
+ __le64 *table;
+ unsigned int entry_size = GITS_BASER_ENTRY_SIZE(reg);
+
+ /* Don't allow id that exceeds single, flat table limit */
+ if ( !indirect )
+ return (id < (baser->table_size / entry_size));
+
+ /* Compute 1st level table index & check if that exceeds table limit */
+ idx = id / (baser->pagesz / entry_size);
+ if ( idx >= (baser->table_size / GITS_LVL1_ENTRY_SIZE) )
+ return false;
+
+ table = baser->base;
+
+ /* Allocate memory for 2nd level table */
+ if ( !table[idx] )
+ {
+ unsigned int page_size = baser->pagesz;
+ void *buffer;
+
+ buffer = alloc_xenheap_pages(get_order_from_bytes(page_size),
+ gicv3_its_get_memflags());
+ if ( !buffer )
+ return false;
+
+ /* Flush Lvl2 table to PoC if hw doesn't support coherency */
+ if ( gicv3_its_get_cacheability() <= GIC_BASER_CACHE_nC )
+ clean_and_invalidate_dcache_va_range(buffer, page_size);
+
+ table[idx] = cpu_to_le64(virt_to_maddr(buffer) | GITS_VALID_BIT);
+
+ /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
+ if ( gicv3_its_get_cacheability() <= GIC_BASER_CACHE_nC )
+ clean_and_invalidate_dcache_va_range(table + idx,
+ GITS_LVL1_ENTRY_SIZE);
+
+ /* Ensure updated table contents are visible to ITS hardware */
+ dsb(sy);
+ }
+
+ return true;
+}
+
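The indirect case above is a classic lazily populated two-level table: the
level-1 slot index is derived from the ID, and the level-2 page backing that
slot is only allocated on first use. A standalone sketch of the same idea
(hypothetical sizes; no cache maintenance or barriers):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define PAGESZ      4096u
    #define ENTRY_SIZE  8u                        /* bytes per level-2 entry */
    #define IDS_PER_L2  (PAGESZ / ENTRY_SIZE)     /* IDs covered by one L2 page */
    #define NR_L1       (PAGESZ / sizeof(void *)) /* slots in a one-page L1 table */

    static void *lvl1[NR_L1];

    static bool alloc_table_entry(uint32_t id)
    {
        uint32_t idx = id / IDS_PER_L2;           /* which L1 slot covers this ID */

        if ( idx >= NR_L1 )
            return false;                         /* ID beyond the table's reach */

        if ( !lvl1[idx] )                         /* populate L2 page on demand */
        {
            lvl1[idx] = calloc(1, PAGESZ);
            if ( !lvl1[idx] )
                return false;
        }

        return true;
    }

The real code additionally flushes the new level-2 page and the updated
level-1 entry to the point of coherency when the ITS is not cache-coherent,
and orders the update with a dsb before the ITS can observe it.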
static int its_map_baser(void __iomem *basereg, uint64_t regc,
unsigned int nr_items, struct its_baser *baser)
{
@@ -737,13 +787,75 @@ static int gicv3_its_map_host_events(struct host_its *its,
return ret;
}
- /* TODO: Consider using INVALL here. Didn't work on the model, though. */
+ return 0;
+}
+
+static bool its_alloc_device_table(struct host_its *hw_its, uint32_t dev_id)
+{
+ struct its_baser *baser;
+
+ baser = its_get_baser(hw_its, GITS_BASER_TYPE_DEVICE);
+ if ( !baser )
+ return false;
+
+ return its_alloc_table_entry(baser, dev_id);
+}
+
+struct its_device *its_create_device(struct host_its *hw_its,
+ uint32_t host_devid, uint64_t nr_events)
+{
+ void *itt_addr = NULL;
+ struct its_device *dev = NULL;
+ int ret;
+
+ /* Sanitise the provided hardware values against the host ITS. */
+ if ( host_devid >= BIT(hw_its->devid_bits, UL) )
+ return NULL;
+
+ dev = xzalloc(struct its_device);
+ if ( !dev )
+ return NULL;
+
+ /* An ITT must be 256-byte aligned; xenheap pages are page-aligned, so any order satisfies this. */
+ dev->itt_order = get_order_from_bytes(nr_events * hw_its->itte_size);
+ itt_addr = alloc_xenheap_pages(dev->itt_order, gicv3_its_get_memflags());
+ if ( !itt_addr )
+ goto fail_dev;
+
+ memset(itt_addr, 0, PAGE_SIZE << dev->itt_order);
+
+ clean_and_invalidate_dcache_va_range(itt_addr,
+ nr_events * hw_its->itte_size);
+
- ret = its_send_cmd_sync(its, 0);
+ if ( !its_alloc_device_table(hw_its, host_devid) )
+ goto fail_itt;
+
+ ret = its_send_cmd_mapd(hw_its, host_devid, max(fls(nr_events - 1), 1U),
+ virt_to_maddr(itt_addr), true);
if ( ret )
- return ret;
+ goto fail_itt;
- return gicv3_its_wait_commands(its);
+ dev->itt_addr = itt_addr;
+ dev->hw_its = hw_its;
+ dev->host_devid = host_devid;
+ dev->eventids = nr_events;
+
+ return dev;
+
+fail_itt:
+ free_xenheap_pages(itt_addr, dev->itt_order);
+fail_dev:
+ xfree(dev);
+
+ return NULL;
+}
+
+static void its_free_device(struct its_device *dev)
+{
+ xfree(dev->host_lpi_blocks);
+ if ( dev->itt_addr )
+ free_xenheap_pages(dev->itt_addr, dev->itt_order);
+ xfree(dev->pend_irqs);
+ xfree(dev);
}
/*
@@ -758,12 +870,10 @@ int gicv3_its_map_guest_device(struct domain *d,
paddr_t guest_doorbell, uint32_t guest_devid,
uint64_t nr_events, bool valid)
{
- void *itt_addr = NULL;
struct host_its *hw_its;
struct its_device *dev = NULL;
struct rb_node **new = &d->arch.vgic.its_devices.rb_node, *parent = NULL;
int i, ret = -ENOENT; /* "i" must be signed to check for >= 0 below. */
- unsigned int order;
hw_its = gicv3_its_find_by_doorbell(host_doorbell);
if ( !hw_its )
@@ -823,23 +933,12 @@ int gicv3_its_map_guest_device(struct domain *d,
if ( !valid )
goto out_unlock;
- ret = -ENOMEM;
-
- /* An Interrupt Translation Table needs to be 256-byte aligned. */
- order = get_order_from_bytes(max(nr_events * hw_its->itte_size, 256UL));
- itt_addr = alloc_xenheap_pages(order, gicv3_its_get_memflags());
- if ( !itt_addr )
- goto out_unlock;
-
- memset(itt_addr, 0, PAGE_SIZE << order);
-
- clean_and_invalidate_dcache_va_range(itt_addr,
- nr_events * hw_its->itte_size);
-
- dev = xzalloc(struct its_device);
+ dev = its_create_device(hw_its, host_devid, nr_events);
if ( !dev )
goto out_unlock;
+ ret = -ENOMEM;
+
/*
* Allocate the pending_irqs for each virtual LPI. They will be put
* into the domain's radix tree upon the guest's MAPTI command.
@@ -860,14 +959,6 @@ int gicv3_its_map_guest_device(struct domain *d,
if ( !dev->host_lpi_blocks )
goto out_unlock;
- ret = its_send_cmd_mapd(hw_its, host_devid, fls(nr_events - 1),
- virt_to_maddr(itt_addr), true);
- if ( ret )
- goto out_unlock;
-
- dev->itt_addr = itt_addr;
- dev->itt_order = order;
- dev->hw_its = hw_its;
dev->guest_doorbell = guest_doorbell;
dev->guest_devid = guest_devid;
dev->host_devid = host_devid;
@@ -920,13 +1011,7 @@ out_unlock:
out:
if ( dev )
- {
- xfree(dev->pend_irqs);
- xfree(dev->host_lpi_blocks);
- }
- if ( itt_addr )
- free_xenheap_pages(itt_addr, order);
- xfree(dev);
+ its_free_device(dev);
return ret;
}
diff --git a/xen/arch/arm/gic-v3-lpi.c b/xen/arch/arm/gic-v3-lpi.c
index c029d5d7a4..3c2649b695 100644
--- a/xen/arch/arm/gic-v3-lpi.c
+++ b/xen/arch/arm/gic-v3-lpi.c
@@ -58,6 +58,7 @@ static DEFINE_PER_CPU(struct lpi_redist_data, lpi_redist);
#define MAX_NR_HOST_LPIS (lpi_data.max_host_lpi_ids - LPI_OFFSET)
#define HOST_LPIS_PER_PAGE (PAGE_SIZE / sizeof(union host_lpi))
+uint32_t lpi_id_bits;
static union host_lpi *gic_get_host_lpi(uint32_t plpi)
{
@@ -202,14 +203,11 @@ void gicv3_lpi_update_host_entry(uint32_t host_lpi, int domain_id,
write_u64_atomic(&hlpip->data, hlpi.data);
}
-static int gicv3_lpi_allocate_pendtable(unsigned int cpu)
+struct page_info *lpi_allocate_pendtable(void)
{
void *pendtable;
unsigned int order;
- if ( per_cpu(lpi_redist, cpu).pending_table )
- return -EBUSY;
-
/*
* The pending table holds one bit per LPI and even covers bits for
* interrupt IDs below 8192, so we allocate the full range.
@@ -219,20 +217,34 @@ static int gicv3_lpi_allocate_pendtable(unsigned int cpu)
order = get_order_from_bytes(max(lpi_data.max_host_lpi_ids / 8, (unsigned long)SZ_64K));
pendtable = alloc_xenheap_pages(order, gicv3_its_get_memflags());
if ( !pendtable )
- return -ENOMEM;
+ return NULL;
memset(pendtable, 0, PAGE_SIZE << order);
/* Make sure the physical address can be encoded in the register. */
if ( virt_to_maddr(pendtable) & ~GENMASK(51, 16) )
{
free_xenheap_pages(pendtable, order);
- return -ERANGE;
+ return NULL;
}
clean_and_invalidate_dcache_va_range(pendtable,
lpi_data.max_host_lpi_ids / 8);
- per_cpu(lpi_redist, cpu).pending_table = pendtable;
+ return virt_to_page(pendtable);
+}
+
+static int gicv3_lpi_allocate_pendtable(unsigned int cpu)
+{
+ struct page_info *pendtable;
+
+ if ( per_cpu(lpi_redist, cpu).pending_table )
+ return -EBUSY;
+
+ pendtable = lpi_allocate_pendtable();
+ if ( !pendtable )
+ return -ENOMEM;
+ per_cpu(lpi_redist, cpu).pending_table = page_to_virt(pendtable);
+
return 0;
}
@@ -274,6 +286,38 @@ static int gicv3_lpi_set_pendtable(void __iomem *rdist_base)
return 0;
}
+void *lpi_allocate_proptable(void)
+{
+ void *table;
+ int order;
+
+ /* The property table holds one byte per LPI. */
+ order = get_order_from_bytes(lpi_data.max_host_lpi_ids);
+ table = alloc_xenheap_pages(order, gicv3_its_get_memflags());
+ if ( !table )
+ return NULL;
+
+ /* Make sure the physical address can be encoded in the register. */
+ if ( (virt_to_maddr(table) & ~GENMASK(51, 12)) )
+ {
+ free_xenheap_pages(table, order);
+ return NULL;
+ }
+ memset(table, GIC_PRI_IRQ | LPI_PROP_RES1, MAX_NR_HOST_LPIS);
+ clean_and_invalidate_dcache_va_range(table, MAX_NR_HOST_LPIS);
+
+ return table;
+}
+
+void lpi_free_proptable(void *vproptable)
+{
+ int order;
+
+ /* The property table holds one byte per LPI. */
+ order = get_order_from_bytes(lpi_data.max_host_lpi_ids);
+ free_xenheap_pages(vproptable, order);
+}
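For context, each property-table byte describes one LPI (GICv3 format:
bits [7:2] priority, bit 1 RES1, bit 0 enable), so the
GIC_PRI_IRQ | LPI_PROP_RES1 fill above leaves every LPI at the default
priority and disabled. A sketch of the per-byte encoding (helper name
hypothetical):

    #include <stdbool.h>
    #include <stdint.h>

    #define LPI_PROP_ENABLE  (1u << 0)   /* bit 0: LPI enabled */
    #define LPI_PROP_RES1    (1u << 1)   /* bit 1: reserved, must be one */

    /* Hypothetical helper: build one property byte for an LPI. */
    static inline uint8_t lpi_prop_byte(uint8_t priority, bool enabled)
    {
        return (priority & 0xfc) | LPI_PROP_RES1 |
               (enabled ? LPI_PROP_ENABLE : 0);
    }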
+
/*
* Tell a redistributor about the (shared) property table, allocating one
* if not already done.
@@ -314,7 +358,8 @@ static int gicv3_lpi_set_proptable(void __iomem * rdist_base)
}
/* Encode the number of bits needed, minus one */
- reg |= fls(lpi_data.max_host_lpi_ids - 1) - 1;
+ lpi_id_bits = fls(lpi_data.max_host_lpi_ids - 1);
+ reg |= lpi_id_bits - 1;
reg |= virt_to_maddr(lpi_data.lpi_property);
diff --git a/xen/arch/arm/gic-v3.c b/xen/arch/arm/gic-v3.c
index 14852d18c2..d4af332b0e 100644
--- a/xen/arch/arm/gic-v3.c
+++ b/xen/arch/arm/gic-v3.c
@@ -2083,6 +2083,22 @@ static bool gic_dist_supports_lpis(void)
return (readl_relaxed(GICD + GICD_TYPER) & GICD_TYPE_LPIS);
}
+#ifdef CONFIG_GICV4
+static void __init gicv4_init(void)
+{
+ gicv3_info.hw_version = GIC_V4;
+
+ gicv4_its_vpeid_allocator_init();
+}
+#else
+static void __init gicv4_init(void)
+{
+ ASSERT_UNREACHABLE();
+}
+#endif
+
/* Set up the GIC */
static int __init gicv3_init(void)
{
@@ -2157,6 +2173,8 @@ static int __init gicv3_init(void)
gicv3_hyp_init();
+ if ( gic_is_gicv4() )
+ gicv4_init();
out:
spin_unlock(&gicv3.lock);
diff --git a/xen/arch/arm/gic-v4-its.c b/xen/arch/arm/gic-v4-its.c
index 358d0bffb9..fac3b44a94 100644
--- a/xen/arch/arm/gic-v4-its.c
+++ b/xen/arch/arm/gic-v4-its.c
@@ -27,6 +27,83 @@
#include <asm/vgic.h>
+/*
+ * VPE ID is at most 16 bits.
+ * Using a bitmap here limits us to 65536 concurrent VPEs.
+ */
+static unsigned long *vpeid_mask;
+
+static spinlock_t vpeid_alloc_lock = SPIN_LOCK_UNLOCKED;
+
+void __init gicv4_its_vpeid_allocator_init(void)
+{
+ /* Allocate space for vpeid_mask based on MAX_VPEID */
+ vpeid_mask = xzalloc_array(unsigned long, BITS_TO_LONGS(MAX_VPEID));
+
+ if ( !vpeid_mask )
+ panic("Could not allocate VPEID bitmap space\n");
+}
+
+static int its_alloc_vpeid(struct its_vpe *vpe)
+{
+ int id;
+
+ spin_lock(&vpeid_alloc_lock);
+
+ id = find_first_zero_bit(vpeid_mask, MAX_VPEID);
+
+ if ( id >= MAX_VPEID )
+ {
+ id = -EBUSY;
+ printk(XENLOG_ERR "VPEID pool exhausted\n");
+ goto out;
+ }
+
+ set_bit(id, vpeid_mask);
+
+out:
+ spin_unlock(&vpeid_alloc_lock);
+
+ return id;
+}
+
+static void its_free_vpeid(uint32_t vpe_id)
+{
+ spin_lock(&vpeid_alloc_lock);
+
+ clear_bit(vpe_id, vpeid_mask);
+
+ spin_unlock(&vpeid_alloc_lock);
+}
+
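The vPE ID allocator above is a plain first-fit bitmap guarded by a lock; a
minimal userspace equivalent (hypothetical, with a pthread mutex standing in
for the Xen spinlock and a byte-array bitmap for find_first_zero_bit):

    #include <pthread.h>

    #define MAX_ID 65536

    static unsigned char used[MAX_ID / 8];
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static int alloc_id(void)
    {
        int id = -1, i;

        pthread_mutex_lock(&lock);
        for ( i = 0; i < MAX_ID; i++ )
        {
            if ( !(used[i / 8] & (1u << (i % 8))) )  /* first clear bit wins */
            {
                used[i / 8] |= 1u << (i % 8);
                id = i;
                break;
            }
        }
        pthread_mutex_unlock(&lock);

        return id;                          /* -1 when the pool is exhausted */
    }

    static void free_id(int id)
    {
        pthread_mutex_lock(&lock);
        used[id / 8] &= ~(1u << (id % 8));
        pthread_mutex_unlock(&lock);
    }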
+static bool its_alloc_vpe_entry(uint32_t vpe_id)
+{
+ struct host_its *hw_its;
+
+ /*
+ * Make sure the L2 tables are allocated on *all* v4 ITSs. We
+ * could try and only do it on ITSs corresponding to devices
+ * that have interrupts targeted at this VPE, but the
+ * complexity becomes crazy.
+ */
+ list_for_each_entry(hw_its, &host_its_list, entry)
+ {
+ struct its_baser *baser;
+
+ if ( !hw_its->is_v4 )
+ continue;
+
+ baser = its_get_baser(hw_its, GITS_BASER_TYPE_VCPU);
+ if ( !baser )
+ return false;
+
+ if ( !its_alloc_table_entry(baser, vpe_id) )
+ return false;
+ }
+
+ return true;
+}
+
static int its_send_cmd_vsync(struct host_its *its, uint16_t vpeid)
{
uint64_t cmd[4];
@@ -39,6 +116,188 @@ static int its_send_cmd_vsync(struct host_its *its, uint16_t vpeid)
return its_send_command(its, cmd);
}
+static int its_send_cmd_vmapp(struct host_its *its, struct its_vpe *vpe,
+ bool valid)
+{
+ uint64_t cmd[4];
+ uint16_t vpeid = vpe->vpe_id;
+ uint64_t vpt_addr;
+ int ret;
+
+ cmd[0] = GITS_CMD_VMAPP;
+ cmd[1] = (uint64_t)vpeid << 32;
+ cmd[2] = valid ? GITS_VALID_BIT : 0;
+ cmd[3] = 0;
+
+ /* Unmap command: no RDbase or VPT to encode */
+ if ( !valid )
+ goto out;
+
+ /* Target redistributor */
+ cmd[2] |= encode_rdbase(its, vpe->col_idx, 0x0);
+ vpt_addr = virt_to_maddr(vpe->vpendtable);
+ cmd[3] = (vpt_addr & GENMASK(51, 16)) |
+ ((HOST_LPIS_NRBITS - 1) & GENMASK(4, 0));
+
+ out:
+ ret = its_send_command(its, cmd);
+
+ return ret;
+}
+
+static int its_send_cmd_vinvall(struct host_its *its, struct its_vpe *vpe)
+{
+ uint64_t cmd[4];
+ uint16_t vpeid = vpe->vpe_id;
+
+ cmd[0] = GITS_CMD_VINVALL;
+ cmd[1] = (uint64_t)vpeid << 32;
+ cmd[2] = 0x00;
+ cmd[3] = 0x00;
+
+ return its_send_command(its, cmd);
+}
+
+static int its_map_vpe(struct host_its *its, struct its_vpe *vpe)
+{
+ int ret;
+
+ /*
+ * VMAPP command maps the vPE to the target RDbase, including an
+ * associated virtual LPI Pending table.
+ */
+ ret = its_send_cmd_vmapp(its, vpe, true);
+ if ( ret )
+ return ret;
+
+ ret = its_send_cmd_vinvall(its, vpe);
+ if ( ret )
+ return ret;
+
+ ret = its_send_cmd_vsync(its, vpe->vpe_id);
+ if ( ret )
+ return ret;
+
+ return 0;
+}
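For readers without the GICv4 spec at hand, its_send_cmd_vmapp above packs
the command's four doublewords roughly as sketched below (a standalone
illustration; 0x29 is the VMAPP opcode per the GICv4 command encoding, the
field placement follows the patch, and the helper name is hypothetical):

    #include <stdint.h>

    /* Sketch: build a VMAPP (V=1) command.  DW1[47:32] carries the vPE ID,
     * DW2 carries the valid bit and the encoded target RDbase, DW3[51:16]
     * the pending-table address and DW3[4:0] its ID width minus one. */
    static void pack_vmapp(uint64_t cmd[4], uint16_t vpeid,
                           uint64_t rdbase_enc, uint64_t vpt_pa,
                           unsigned int id_bits)
    {
        const uint64_t mask_51_16 = ((1ULL << 52) - 1) & ~((1ULL << 16) - 1);

        cmd[0] = 0x29;                        /* GITS_CMD_VMAPP */
        cmd[1] = (uint64_t)vpeid << 32;
        cmd[2] = (1ULL << 63) | rdbase_enc;   /* GITS_VALID_BIT | RDbase */
        cmd[3] = (vpt_pa & mask_51_16) | ((id_bits - 1) & 0x1f);
    }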
+
+static int its_vpe_init(struct its_vpe *vpe)
+{
+ int vpe_id, rc = -ENOMEM;
+ struct page_info *vpendtable;
+ struct host_its *hw_its;
+
+ /* Allocate vpe id */
+ vpe_id = its_alloc_vpeid(vpe);
+ if ( vpe_id < 0 )
+ return rc;
+
+ /* Allocate VPT */
+ vpendtable = lpi_allocate_pendtable();
+
+ if ( !vpendtable )
+ goto fail_vpt;
+
+ if ( !its_alloc_vpe_entry(vpe_id) )
+ goto fail_entry;
+
+ rwlock_init(&vpe->lock);
+ vpe->vpe_id = vpe_id;
+ vpe->vpendtable = page_to_virt(vpendtable);
+ /*
+ * We eagerly inform all the v4 ITSes and map the vPE to the first
+ * possible CPU.
+ */
+ vpe->col_idx = cpumask_first(&cpu_online_map);
+ list_for_each_entry(hw_its, &host_its_list, entry)
+ {
+ if ( !hw_its->is_v4 )
+ continue;
+
+ if ( its_map_vpe(hw_its, vpe) )
+ goto fail_entry;
+ }
+
+ return 0;
+
+ fail_entry:
+ free_xenheap_pages(page_to_virt(vpendtable),
+ get_order_from_bytes(max(lpi_data.max_host_lpi_ids / 8,
+ (unsigned long)SZ_64K)));
+ fail_vpt:
+ its_free_vpeid(vpe_id);
+
+ return rc;
+}
+
+static void its_vpe_teardown(struct its_vpe *vpe)
+{
+ unsigned int order;
+
+ order = get_order_from_bytes(max(lpi_data.max_host_lpi_ids / 8, (unsigned long)SZ_64K));
+ its_free_vpeid(vpe->vpe_id);
+ free_xenheap_pages(vpe->vpendtable, order);
+ xfree(vpe);
+}
+
+int vgic_v4_its_vm_init(struct domain *d)
+{
+ unsigned int nr_vcpus = d->max_vcpus;
+ int ret = -ENOMEM;
+
+ if ( !gicv3_its_host_has_its() )
+ return 0;
+
+ d->arch.vgic.its_vm = xzalloc(struct its_vm);
+ if ( !d->arch.vgic.its_vm )
+ return ret;
+
+ d->arch.vgic.its_vm->vpes = xzalloc_array(struct its_vpe *, nr_vcpus);
+ if ( !d->arch.vgic.its_vm->vpes )
+ goto fail_vpes;
+ d->arch.vgic.its_vm->nr_vpes = nr_vcpus;
+
+ d->arch.vgic.its_vm->vproptable = lpi_allocate_proptable();
+ if ( !d->arch.vgic.its_vm->vproptable )
+ goto fail_vprop;
+
+ return 0;
+
+ fail_vprop:
+ xfree(d->arch.vgic.its_vm->vpes);
+ fail_vpes:
+ xfree(d->arch.vgic.its_vm);
+
+ return ret;
+}
+
+void vgic_v4_free_its_vm(struct domain *d)
+{
+ struct its_vm *its_vm = d->arch.vgic.its_vm;
+
+ if ( !its_vm )
+ return;
+
+ xfree(its_vm->vpes);
+ if ( its_vm->vproptable )
+ lpi_free_proptable(its_vm->vproptable);
+ xfree(its_vm);
+}
+
+int vgic_v4_its_vpe_init(struct vcpu *vcpu)
+{
+ int ret;
+ struct its_vm *its_vm = vcpu->domain->arch.vgic.its_vm;
+ unsigned int vcpuid = vcpu->vcpu_id;
+
+ vcpu->arch.vgic.its_vpe = xzalloc(struct its_vpe);
+ if ( !vcpu->arch.vgic.its_vpe )
+ return -ENOMEM;
+
+ its_vm->vpes[vcpuid] = vcpu->arch.vgic.its_vpe;
+ vcpu->arch.vgic.its_vpe->its_vm = its_vm;
+
+ ret = its_vpe_init(vcpu->arch.vgic.its_vpe);
+ if ( ret )
+ {
+ /* its_vpe_init() cleans up after itself; just drop our references. */
+ its_vm->vpes[vcpuid] = NULL;
+ xfree(vcpu->arch.vgic.its_vpe);
+ vcpu->arch.vgic.its_vpe = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
static int its_send_cmd_vmapti(struct host_its *its, struct its_device *dev,
uint32_t eventid)
{
diff --git a/xen/arch/arm/include/asm/gic_v3_its.h b/xen/arch/arm/include/asm/gic_v3_its.h
index bd2696f354..411beb81c8 100644
--- a/xen/arch/arm/include/asm/gic_v3_its.h
+++ b/xen/arch/arm/include/asm/gic_v3_its.h
@@ -77,6 +77,7 @@
#define GITS_BASER_ENTRY_SIZE_SHIFT 48
#define GITS_BASER_ENTRY_SIZE(reg) \
((((reg) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
+#define GITS_LVL1_ENTRY_SIZE 8UL
#define GITS_BASER_SHAREABILITY_SHIFT 10
#define GITS_BASER_PAGE_SIZE_SHIFT 8
#define GITS_BASER_SIZE_MASK 0xff
@@ -117,9 +118,19 @@
/* We allocate LPIs on the hosts in chunks of 32 to reduce handling overhead. */
#define LPI_BLOCK 32U
+extern unsigned int nvpeid;
+/* The maximum number of VPEID bits supported by VLPI commands */
+#define ITS_MAX_VPEID_BITS nvpeid
+#define MAX_VPEID (1UL << ITS_MAX_VPEID_BITS)
+
#ifdef CONFIG_GICV4
#include <asm/gic_v4_its.h>
#endif
+
+extern uint32_t lpi_id_bits;
+#define HOST_LPIS_NRBITS lpi_id_bits
+#define MAX_HOST_LPIS BIT(lpi_id_bits, UL)
+
/*
* Describes a device which is using the ITS and is used by a guest.
* Since device IDs are per ITS (in contrast to vLPIs, which are per
@@ -169,6 +180,7 @@ struct host_its {
void *cmd_buf;
unsigned int flags;
struct its_baser tables[GITS_BASER_NR_REGS];
+ bool is_v4;
};
/* Map a collection for this host CPU to each host ITS. */
@@ -273,8 +285,13 @@ struct pending_irq *gicv3_assign_guest_event(struct domain *d,
void gicv3_lpi_update_host_entry(uint32_t host_lpi, int domain_id,
uint32_t virt_lpi);
struct its_baser *its_get_baser(struct host_its *hw_its, uint32_t type);
+bool its_alloc_table_entry(struct its_baser *baser, uint32_t id);
+struct page_info *lpi_allocate_pendtable(void);
+void *lpi_allocate_proptable(void);
+void lpi_free_proptable(void *vproptable);
void lpi_write_config(uint8_t *prop_table, uint32_t lpi, uint8_t clr,
uint8_t set);
+uint64_t encode_rdbase(struct host_its *hw_its, unsigned int cpu, uint64_t reg);
int its_send_command(struct host_its *hw_its, const void *its_cmd);
struct its_device *get_its_device(struct domain *d, paddr_t vdoorbell,
diff --git a/xen/arch/arm/include/asm/gic_v4_its.h b/xen/arch/arm/include/asm/gic_v4_its.h
index 722247ec60..fb0ef37bbe 100644
--- a/xen/arch/arm/include/asm/gic_v4_its.h
+++ b/xen/arch/arm/include/asm/gic_v4_its.h
@@ -49,6 +49,7 @@ struct event_vlpi_map {
unsigned int nr_vlpis;
};
+void gicv4_its_vpeid_allocator_init(void);
#endif
/*
diff --git a/xen/arch/arm/include/asm/vgic.h b/xen/arch/arm/include/asm/vgic.h
index f12d736808..580310fec4 100644
--- a/xen/arch/arm/include/asm/vgic.h
+++ b/xen/arch/arm/include/asm/vgic.h
@@ -414,6 +414,9 @@ bool gic_is_gicv4(void);
#define gic_is_gicv4() (false)
#endif
+int vgic_v4_its_vm_init(struct domain *d);
+void vgic_v4_free_its_vm(struct domain *d);
+int vgic_v4_its_vpe_init(struct vcpu *vcpu);
#endif /* !CONFIG_NEW_VGIC */
/*** Common VGIC functions used by Xen arch code ****/
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 0da8c1a425..6baf870ad5 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -22,6 +22,7 @@
#include <asm/mmio.h>
#include <asm/gic.h>
+#include <asm/gic_v3_its.h>
#include <asm/vgic.h>
@@ -329,6 +330,15 @@ int domain_vgic_init(struct domain *d, unsigned int nr_spis)
for ( i = 0; i < NR_GIC_SGI; i++ )
set_bit(i, d->arch.vgic.allocated_irqs);
+ if ( gic_is_gicv4() )
+ {
+ ret = vgic_v4_its_vm_init(d);
+ if ( ret )
+ {
+ printk(XENLOG_ERR "GICv4 its vm allocation failed\n");
+ return ret;
+ }
+ }
return 0;
}
@@ -366,11 +376,14 @@ void domain_vgic_free(struct domain *d)
#endif
xfree(d->arch.vgic.pending_irqs);
xfree(d->arch.vgic.allocated_irqs);
+
+ if ( gic_is_gicv4() )
+ vgic_v4_free_its_vm(d);
}
int vcpu_vgic_init(struct vcpu *v)
{
- int i;
+ int i, ret;
v->arch.vgic.private_irqs = xzalloc(struct vgic_irq_rank);
if ( v->arch.vgic.private_irqs == NULL )
@@ -389,6 +402,16 @@ int vcpu_vgic_init(struct vcpu *v)
INIT_LIST_HEAD(&v->arch.vgic.lr_pending);
spin_lock_init(&v->arch.vgic.lock);
+ if ( gic_is_gicv4() && gicv3_its_host_has_its() )
+ {
+ ret = vgic_v4_its_vpe_init(v);
+ if ( ret )
+ {
+ printk(XENLOG_ERR "GICv4 its vpe allocation failed\n");
+ return ret;
+ }
+ }
+
return 0;
}
--
2.51.2