
[XEN PATCH v2 02/25] arm: new VGIC: Add GICv3 world switch backend


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Mykyta Poturai <Mykyta_Poturai@xxxxxxxx>
  • Date: Fri, 10 Nov 2023 12:56:16 +0000
  • Accept-language: en-US
  • Cc: Mykyta Poturai <Mykyta_Poturai@xxxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, Julien Grall <julien@xxxxxxx>, Bertrand Marquis <bertrand.marquis@xxxxxxx>, Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>, Michal Orzel <michal.orzel@xxxxxxx>
  • Delivery-date: Fri, 10 Nov 2023 12:56:43 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Thread-topic: [XEN PATCH v2 02/25] arm: new VGIC: Add GICv3 world switch backend

As the GICv3 virtual interface registers differ from their GICv2
siblings, we need different handlers for processing maintenance
interrupts and for reading from and writing to the LRs.
Implement the respective handler functions and wire them into the
existing code, so that they are called when the host is using a GICv3.

This is based on Linux commit 59529f69f5048e0 by Marc Zyngier.

Signed-off-by: Mykyta Poturai <mykyta_poturai@xxxxxxxx>
---
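As a quick reviewer aid, the per-LR fold rules implemented below can
be summarised by this minimal standalone sketch. The type and field
names are simplified stand-ins for illustration only, not the actual
Xen structures:

    #include <stdbool.h>

    enum irq_config { CFG_EDGE, CFG_LEVEL };    /* stand-in types */

    struct lr_snapshot { bool active, pending; };
    struct virq { enum irq_config config; bool active, pending_latch; };

    /* Mirrors the decisions vgic_v3_fold_lr_state() takes per LR. */
    static void fold_one(struct virq *irq, const struct lr_snapshot *lr)
    {
        /* The active bit is always transferred as-is. */
        irq->active = lr->active;

        /* Edge is the only case where a pending LR stays latched. */
        if ( irq->config == CFG_EDGE && lr->pending )
            irq->pending_latch = true;

        /* A level IRQ the guest has acked drops its soft pending state. */
        if ( irq->config == CFG_LEVEL && !lr->pending )
            irq->pending_latch = false;
    }

Hardware-mapped level IRQs additionally resample the physical line
level, as the comments in vgic_v3_fold_lr_state() below explain.
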
 xen/arch/arm/vgic/vgic-v3.c | 206 ++++++++++++++++++++++++++++++++++++
 xen/arch/arm/vgic/vgic.c    |  18 +++-
 xen/arch/arm/vgic/vgic.h    |  13 ++-
 3 files changed, 234 insertions(+), 3 deletions(-)
 create mode 100644 xen/arch/arm/vgic/vgic-v3.c

diff --git a/xen/arch/arm/vgic/vgic-v3.c b/xen/arch/arm/vgic/vgic-v3.c
new file mode 100644
index 0000000000..b018ee6040
--- /dev/null
+++ b/xen/arch/arm/vgic/vgic-v3.c
@@ -0,0 +1,206 @@
+/*
+ * Imported from Linux ("new" KVM VGIC) and heavily adapted to Xen.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <asm/new_vgic.h>
+#include <asm/gic.h>
+#include <xen/sched.h>
+
+#include "vgic.h"
+
+/*
+ * Transfer the content of the LRs back into the corresponding ap_list:
+ * - the active bit is transferred as-is;
+ * - the pending bit is
+ *   - transferred as-is for edge-sensitive IRQs,
+ *   - set to the line level (resample time) for level-sensitive IRQs.
+ */
+void vgic_v3_fold_lr_state(struct vcpu *vcpu)
+{
+    struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic;
+    unsigned int used_lrs     = vcpu->arch.vgic.used_lrs;
+    unsigned long flags;
+    unsigned int lr;
+
+    if ( !used_lrs ) /* No LRs used, so nothing to sync back here. */
+        return;
+
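+    /*
+     * Underflow maintenance interrupts are only wanted while more vIRQs
+     * are queued than fit into the LRs; stop requesting them, as we sync
+     * all LR state back here anyway.
+     */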
+    gic_hw_ops->update_hcr_status(GICH_HCR_UIE, false);
+
+    for ( lr = 0; lr < used_lrs; lr++ )
+    {
+        struct gic_lr lr_val;
+        uint32_t intid;
+        struct vgic_irq *irq;
+        struct irq_desc *desc = NULL;
+
+        gic_hw_ops->read_lr(lr, &lr_val);
+
+        /*
+         * TODO: Possible optimization to avoid reading LRs:
+         * Read the ELRSR to find out which of our LRs have been cleared
+         * by the guest. We just need to know the IRQ number for those, which
+         * we could save in an array when populating the LRs.
+         * This trades one MMIO access (ELRSR) for possibly more than one (LRs),
+         * but requires some more code to save the IRQ number and to handle
+         * those finished IRQs according to the algorithm below.
+         * We need some numbers to justify this: chances are that we don't
+         * have many LRs in use most of the time, so we might not save much.
+         */
+        gic_hw_ops->clear_lr(lr);
+
+        intid = lr_val.virq;
+        irq   = vgic_get_irq(vcpu->domain, vcpu, intid);
+
+        local_irq_save(flags);
+
+        /*
+         * We check this here without taking the lock, because the locking
+         * order forces us to do so. irq->hw is a "write-once" member, so
+         * whenever we read true, the associated hardware IRQ will not go
+         * away anymore.
+         * TODO: rework this if possible, either by using the desc pointer
+         * directly in struct vgic_irq or by changing the locking order.
+         * Especially if we ever drop the assumption above.
+         */
+        if ( irq->hw )
+        {
+            desc = irq_to_desc(irq->hwintid);
+            spin_lock(&desc->lock);
+        }
+
+        spin_lock(&irq->irq_lock);
+
+        /*
+         * If a hardware mapped IRQ has been handled for good, we need to
+         * clear the _IRQ_INPROGRESS bit to allow handling of new IRQs.
+         *
+         * TODO: This is probably racy, but is so already in the existing
+         * VGIC. A fix does not seem to be trivial.
+         */
+        if ( irq->hw && !lr_val.active && !lr_val.pending )
+            clear_bit(_IRQ_INPROGRESS, &desc->status);
+
+        /* Always preserve the active bit */
+        irq->active = lr_val.active;
+
+        /* Edge is the only case where we preserve the pending bit */
+        if ( irq->config == VGIC_CONFIG_EDGE && lr_val.pending )
+        {
+            irq->pending_latch = true;
+
+            if ( vgic_irq_is_sgi(intid) )
+                irq->source |= (1U << lr_val.virt.source);
+        }
+
+        /* Clear the soft pending state when level IRQs have been acked. */
+        if ( irq->config == VGIC_CONFIG_LEVEL && !lr_val.pending )
+            irq->pending_latch = false;
+
+        /*
+         * Level-triggered mapped IRQs are special because we only
+         * observe rising edges as input to the VGIC.
+         *
+         * If the guest never acked the interrupt we have to sample
+         * the physical line and set the line level, because the
+         * device state could have changed or we simply need to
+         * process the still pending interrupt later.
+         *
+         * If this causes us to lower the level, we have to also clear
+         * the physical active state, since we will otherwise never be
+         * told when the interrupt becomes asserted again.
+         */
+        if ( vgic_irq_is_mapped_level(irq) && lr_val.pending )
+        {
+            ASSERT(irq->hwintid >= VGIC_NR_PRIVATE_IRQS);
+
+            irq->line_level = gic_read_pending_state(desc);
+
+            if ( !irq->line_level )
+                gic_set_active_state(desc, false);
+        }
+
+        spin_unlock(&irq->irq_lock);
+        if ( desc )
+            spin_unlock(&desc->lock);
+        local_irq_restore(flags);
+
+        vgic_put_irq(vcpu->domain, irq);
+    }
+
+    gic_hw_ops->update_hcr_status(GICH_HCR_EN, false);
+    vgic_cpu->used_lrs = 0;
+}
+
+/* Requires the irq to be locked already */
+void vgic_v3_populate_lr(struct vcpu *vcpu, struct vgic_irq *irq, int lr)
+{
+    struct gic_lr lr_val = { 0 };
+
+    lr_val.virq          = irq->intid;
+
+    if ( irq_is_pending(irq) )
+    {
+        lr_val.pending = true;
+
+        if ( irq->config == VGIC_CONFIG_EDGE )
+            irq->pending_latch = false;
+
+        if ( vgic_irq_is_sgi(irq->intid) &&
+             vcpu->domain->arch.vgic.version == GIC_V2 )
+        {
+            uint32_t src = ffs(irq->source);
+
+            BUG_ON(!src);
+            lr_val.virt.source = (src - 1);
+            irq->source &= ~(1 << (src - 1));
+            if ( irq->source )
+                irq->pending_latch = true;
+        }
+    }
+
+    lr_val.active = irq->active;
+
+    if ( irq->hw )
+    {
+        lr_val.hw_status = true;
+        lr_val.hw.pirq   = irq->hwintid;
+        /*
+         * Never set pending+active on a HW interrupt, as the
+         * pending state is kept at the physical distributor
+         * level.
+         */
+        if ( irq->active && irq_is_pending(irq) )
+            lr_val.pending = false;
+    }
+    else
+    {
+        if ( irq->config == VGIC_CONFIG_LEVEL )
+            lr_val.virt.eoi = true;
+    }
+
+    /*
+     * Level-triggered mapped IRQs are special because we only observe
+     * rising edges as input to the VGIC.  We therefore lower the line
+     * level here, so that we can take new virtual IRQs.  See
+     * vgic_v2_fold_lr_state for more info.
+     */
+    if ( vgic_irq_is_mapped_level(irq) && lr_val.pending )
+        irq->line_level = false;
+
+    /*
+     * Xen's LR abstraction carries the full eight-bit priority; the
+     * GIC-specific write_lr op narrows it where the hardware holds
+     * fewer bits, so no pre-shift is needed here.
+     */
+    lr_val.priority = irq->priority;
+
+    gic_hw_ops->write_lr(lr, &lr_val);
+}
diff --git a/xen/arch/arm/vgic/vgic.c b/xen/arch/arm/vgic/vgic.c
index b9463a5f27..97ca827fc4 100644
--- a/xen/arch/arm/vgic/vgic.c
+++ b/xen/arch/arm/vgic/vgic.c
@@ -520,7 +520,14 @@ retry:
 
 static void vgic_fold_lr_state(struct vcpu *vcpu)
 {
-    vgic_v2_fold_lr_state(vcpu);
+    if ( vcpu->domain->arch.vgic.version == GIC_V2 )
+    {
+        vgic_v2_fold_lr_state(vcpu);
+    }
+    else
+    {
+        vgic_v3_fold_lr_state(vcpu);
+    }
 }
 
 /* Requires the irq_lock to be held. */
@@ -529,7 +536,14 @@ static void vgic_populate_lr(struct vcpu *vcpu,
 {
     ASSERT(spin_is_locked(&irq->irq_lock));
 
-    vgic_v2_populate_lr(vcpu, irq, lr);
+    if ( vcpu->domain->arch.vgic.version == GIC_V2 )
+    {
+        vgic_v2_populate_lr(vcpu, irq, lr);
+    }
+    else
+    {
+        vgic_v3_populate_lr(vcpu, irq, lr);
+    }
 }
 
 static void vgic_set_underflow(struct vcpu *vcpu)
diff --git a/xen/arch/arm/vgic/vgic.h b/xen/arch/arm/vgic/vgic.h
index 534b24bcd3..2819569f30 100644
--- a/xen/arch/arm/vgic/vgic.h
+++ b/xen/arch/arm/vgic/vgic.h
@@ -70,8 +70,19 @@ void vgic_v2_enable(struct vcpu *vcpu);
 int vgic_v2_map_resources(struct domain *d);
 int vgic_register_dist_iodev(struct domain *d, gfn_t dist_base_fn,
                              enum vgic_type);
+#ifdef CONFIG_GICV3
+void vgic_v3_fold_lr_state(struct vcpu *vcpu);
+void vgic_v3_populate_lr(struct vcpu *vcpu, struct vgic_irq *irq, int lr);
+#else
+static inline void vgic_v3_fold_lr_state(struct vcpu *vcpu)
+{
+}
+static inline void vgic_v3_populate_lr(struct vcpu *vcpu, struct vgic_irq *irq, int lr)
+{
+}
+#endif /* CONFIG_GICV3 */
 
-#endif
+#endif /* __XEN_ARM_VGIC_VGIC_H__ */
 
 /*
  * Local variables:
-- 
2.34.1