
[XEN PATCH v2 17/25] arm: new VGIC: its: Read initial LPI pending table


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Mykyta Poturai <Mykyta_Poturai@xxxxxxxx>
  • Date: Fri, 10 Nov 2023 12:56:21 +0000
  • Accept-language: en-US
  • Cc: Mykyta Poturai <Mykyta_Poturai@xxxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, Julien Grall <julien@xxxxxxx>, Bertrand Marquis <bertrand.marquis@xxxxxxx>, Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>, Michal Orzel <michal.orzel@xxxxxxx>
  • Delivery-date: Fri, 10 Nov 2023 12:56:43 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Thread-topic: [XEN PATCH v2 17/25] arm: new VGIC: its: Read initial LPI pending table

The LPI pending status for a GICv3 redistributor is held in a table
in (guest) memory. To achieve reasonable performance, we cache the
pending bit in our struct vgic_irq. The initial pending state must be
read from guest memory upon enabling LPIs for this redistributor.
As we can't access the guest memory while we hold the lpi_list spinlock,
we create a snapshot of the LPI list and iterate over that.
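
For reference, the pending table holds one bit per LPI: INTID n lives in
the byte at offset n / 8, bit n % 8, which is exactly the
byte_offset/bit_nr arithmetic used below. A minimal sketch of that
addressing (hypothetical helper, for illustration only):

    /* One pending bit per LPI, e.g. INTID 8195 -> byte 1024, bit 3. */
    static bool lpi_pending_bit(const u8 *table, u32 intid)
    {
        return table[intid / 8] & (1U << (intid % 8));
    }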

Based on Linux commit 33d3bc9556a7d by Andre Przywara
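
From the caller's side, the snapshot-then-iterate pattern that
vgic_copy_lpi_list() enables looks like this (hypothetical consumer, for
illustration only; handle_lpi() is a placeholder):

    u32 *intids;
    int i, nr = vgic_copy_lpi_list(d, vcpu, &intids);

    if ( nr < 0 )
        return nr;
    /* lpi_list_lock is no longer held, so guest memory can be accessed. */
    for ( i = 0; i < nr; i++ )
        handle_lpi(d, intids[i]);
    xfree(intids);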

Signed-off-by: Mykyta Poturai <mykyta_poturai@xxxxxxxx>
---
 xen/arch/arm/include/asm/new_vgic.h |   5 ++
 xen/arch/arm/vgic/vgic-its.c        | 107 +++++++++++++++++++++++++++++
 2 files changed, 112 insertions(+)
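
Nothing in this patch wires vgic_enable_lpis() up yet; the redistributor
CTLR emulation is expected to call it when the guest sets EnableLPIs. A
sketch of that caller, mirroring the Linux counterpart (field and
register names such as lpis_enabled and GICR_CTLR_ENABLE_LPIS are
assumptions here, not part of this patch):

    static void vgic_mmio_write_v3r_ctlr(struct vcpu *vcpu, unsigned long val)
    {
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic;
        bool was_enabled = vgic_cpu->lpis_enabled;

        vgic_cpu->lpis_enabled = val & GICR_CTLR_ENABLE_LPIS;

        /* Only sync the pending table on the 0 -> 1 transition. */
        if ( !was_enabled && vgic_cpu->lpis_enabled )
            vgic_enable_lpis(vcpu);
    }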

diff --git a/xen/arch/arm/include/asm/new_vgic.h b/xen/arch/arm/include/asm/new_vgic.h
index 3048f39844..d0fd15e154 100644
--- a/xen/arch/arm/include/asm/new_vgic.h
+++ b/xen/arch/arm/include/asm/new_vgic.h
@@ -264,12 +264,17 @@ static inline paddr_t vgic_dist_base(const struct vgic_dist *vgic)
 }
 
 #ifdef CONFIG_HAS_ITS
+void vgic_enable_lpis(struct vcpu *vcpu);
 struct vgic_its_device *vgic_its_alloc_device(int nr_events);
 void vgic_its_free_device(struct vgic_its_device *its_dev);
 int vgic_its_add_device(struct domain *d, struct vgic_its_device *its_dev);
 void vgic_its_delete_device(struct domain *d, struct vgic_its_device *its_dev);
 struct vgic_its_device* vgic_its_get_device(struct domain *d, paddr_t vdoorbell,
                                          uint32_t vdevid);
+#else
+static inline void vgic_enable_lpis(struct vcpu *vcpu)
+{
+}
 #endif
 
 #endif /* __ASM_ARM_NEW_VGIC_H */
diff --git a/xen/arch/arm/vgic/vgic-its.c b/xen/arch/arm/vgic/vgic-its.c
index 5e94f0144d..af19cf4414 100644
--- a/xen/arch/arm/vgic/vgic-its.c
+++ b/xen/arch/arm/vgic/vgic-its.c
@@ -63,6 +63,47 @@ static struct vgic_its_device *find_its_device(struct vgic_its *its, u32 device_
 #define VGIC_ITS_TYPER_DEVBITS          16
 #define VGIC_ITS_TYPER_ITE_SIZE         8
 
+/*
+ * Create a snapshot of the current LPIs targeting @vcpu, so that we can
+ * enumerate those LPIs without holding any lock.
+ * Returns their number and puts the xmalloc'ed array into @intid_ptr.
+ */
+int vgic_copy_lpi_list(struct domain *d, struct vcpu *vcpu, u32 **intid_ptr)
+{
+    struct vgic_dist *dist = &d->arch.vgic;
+    struct vgic_irq *irq;
+    unsigned long flags;
+    u32 *intids;
+    int irq_count, i = 0;
+
+    /*
+     * There is an obvious race between allocating the array and LPIs
+     * being mapped/unmapped. If we ended up here as a result of a
+     * command, we're safe (locks are held, preventing another
+     * command). If coming from another path (such as enabling LPIs),
+     * we must be careful not to overrun the array.
+     */
+    irq_count = ACCESS_ONCE(dist->lpi_list_count);
+    intids    = xmalloc_array(u32, irq_count);
+    if ( !intids )
+        return -ENOMEM;
+
+    spin_lock_irqsave(&dist->lpi_list_lock, flags);
+    list_for_each_entry(irq, &dist->lpi_list_head, lpi_list)
+    {
+        if ( i == irq_count )
+            break;
+        /* We don't need to "get" the IRQ, as we hold the list lock. */
+        if ( vcpu && irq->target_vcpu != vcpu )
+            continue;
+        intids[i++] = irq->intid;
+    }
+    spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+
+    *intid_ptr = intids;
+    return i;
+}
+
 /* Requires the its_lock to be held. */
 static void its_free_ite(struct domain *d, struct its_ite *ite)
 {
@@ -284,6 +325,65 @@ static unsigned long vgic_mmio_read_its_iidr(struct domain *d,
     return val;
 }
 
+/*
+ * Sync the pending bits from the guest's LPI pending table into our
+ * own data structures, for the LPIs targeting @vcpu. This relies on
+ * the LPIs having been mapped beforehand.
+ */
+static int its_sync_lpi_pending_table(struct vcpu *vcpu)
+{
+    paddr_t pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic.pendbaser);
+    struct vgic_irq *irq;
+    int last_byte_offset = -1;
+    int ret              = 0;
+    u32 *intids;
+    int nr_irqs, i;
+    unsigned long flags;
+    u8 pendmask;
+
+    nr_irqs = vgic_copy_lpi_list(vcpu->domain, vcpu, &intids);
+    if ( nr_irqs < 0 )
+        return nr_irqs;
+
+    for ( i = 0; i < nr_irqs; i++ )
+    {
+        int byte_offset, bit_nr;
+
+        byte_offset = intids[i] / BITS_PER_BYTE;
+        bit_nr      = intids[i] % BITS_PER_BYTE;
+
+        /*
+         * For contiguously allocated LPIs chances are we just read
+         * this very same byte in the last iteration. Reuse that.
+         */
+        if ( byte_offset != last_byte_offset )
+        {
+            ret = access_guest_memory_by_gpa(vcpu->domain,
+                                             pendbase + byte_offset, &pendmask,
+                                             1, false);
+            if ( ret )
+            {
+                xfree(intids);
+                return ret;
+            }
+            last_byte_offset = byte_offset;
+        }
+
+        irq = vgic_get_irq(vcpu->domain, NULL, intids[i]);
+        /* The LPI may have been unmapped since the snapshot was taken. */
+        if ( !irq )
+            continue;
+        spin_lock_irqsave(&irq->irq_lock, flags);
+        irq->pending_latch = pendmask & (1U << bit_nr);
+        vgic_queue_irq_unlock(vcpu->domain, irq, flags);
+        vgic_put_irq(vcpu->domain, irq);
+    }
+
+    xfree(intids);
+
+    return ret;
+}
+
 static unsigned long vgic_mmio_read_its_typer(struct domain *d,
                                               struct vgic_its *its,
                                               paddr_t addr, unsigned int len)
@@ -564,6 +664,13 @@ static struct vgic_register_region its_registers[] = {
                         VGIC_ACCESS_32bit),
 };
 
+/* This is called on setting the LPI enable bit in the redistributor. */
+void vgic_enable_lpis(struct vcpu *vcpu)
+{
+    if ( !(vcpu->arch.vgic.pendbaser & GICR_PENDBASER_PTZ) )
+        its_sync_lpi_pending_table(vcpu);
+}
+
 static int vgic_register_its_iodev(struct domain *d, struct vgic_its *its,
                                    u64 addr)
 {
-- 
2.34.1



 

