
[Xen-devel] [PATCH 16/28] ARM: vITS: introduce translation table walks

The ITS stores the target (v)CPU and the (virtual) LPI number in tables.
Introduce functions to walk those tables and translate a device ID /
event ID pair into a pair of virtual LPI and vCPU.
Since the final interrupt translation tables (ITTs) can be smaller than
a page, we map them on demand (which is cheap on arm64). We also take
care of the locking along the way, since we can't easily protect those
ITTs from being altered by the guest.

To allow compiling without warnings, the two new functions are left
non-static for the moment.

Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>
---
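
A quick usage sketch (illustration only, not part of the patch): how a
caller might store a translation with write_itte() and resolve it again
with read_itte(). The concrete device ID, event ID, vLPI and collection
numbers are made up, and the sketch assumes the device and collection
have already been mapped; only read_itte() and write_itte() come from
this patch.

    void example_usage(struct virt_its *its)
    {
        struct vcpu *vcpu;
        uint32_t vlpi;

        /* MAPTI-style: map (devid 4, event 0) to vLPI 8192 on collection 0. */
        if ( !write_itte(its, 4, 0, 0, 8192, NULL) )
            return;     /* device not mapped or event/collection out of range */

        /* Translation path: resolve (devid 4, event 0) back again. */
        if ( read_itte(its, 4, 0, &vcpu, &vlpi) )
        {
            /* vcpu is the vCPU mapped to collection 0, vlpi == 8192. */
        }
    }
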
 xen/arch/arm/vgic-v3-its.c | 135 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 135 insertions(+)
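
For reference, a worked example (again not part of the patch, numbers
made up) of the device table entry encoding used in the hunk below: the
low byte stores the number of event ID bits minus one, bits [51:8] hold
the ITT base address, and get_itte_address() indexes into that ITT. The
EX_* macros simply mirror the patch's DEV_TABLE_* helpers and Xen's
GENMASK()/BIT() so the snippet compiles stand-alone.

    #include <assert.h>
    #include <stdint.h>

    #define EX_GENMASK(h, l) (((~0ULL) >> (63 - (h))) & ~((1ULL << (l)) - 1))
    #define EX_BIT(n)        (1ULL << (n))

    #define EX_ITT_ADDR(x)   ((x) & EX_GENMASK(51, 8))
    #define EX_ITT_SIZE(x)   (EX_BIT(((x) & EX_GENMASK(7, 0)) + 1))
    #define EX_ENTRY(addr, bits) \
            (((addr) & EX_GENMASK(51, 8)) | (((bits) - 1) & EX_GENMASK(7, 0)))

    int main(void)
    {
        /* A device with a 5-bit event ID space and its ITT at 0x40001200. */
        uint64_t entry = EX_ENTRY(0x40001200ULL, 5);

        assert(EX_ITT_ADDR(entry) == 0x40001200ULL);  /* 256-byte aligned base */
        assert(EX_ITT_SIZE(entry) == 32);             /* 2^5 events, so evid < 32 */

        /*
         * get_itte_address() would then return, for event ID 3:
         *     0x40001200 + 3 * sizeof(struct vits_itte)
         */
        return 0;
    }
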

diff --git a/xen/arch/arm/vgic-v3-its.c b/xen/arch/arm/vgic-v3-its.c
index fc28376..982c51d 100644
--- a/xen/arch/arm/vgic-v3-its.c
+++ b/xen/arch/arm/vgic-v3-its.c
@@ -60,6 +60,141 @@ struct vits_itte
     uint16_t collection;
 };
 
+#define UNMAPPED_COLLECTION      ((uint16_t)~0)
+
+/* Must be called with the ITS lock held. */
+static struct vcpu *get_vcpu_from_collection(struct virt_its *its, int collid)
+{
+    uint16_t vcpu_id;
+
+    if ( collid >= its->max_collections )
+        return NULL;
+
+    vcpu_id = its->coll_table[collid];
+    if ( vcpu_id == UNMAPPED_COLLECTION || vcpu_id >= its->d->max_vcpus )
+        return NULL;
+
+    return its->d->vcpu[vcpu_id];
+}
+
+#define DEV_TABLE_ITT_ADDR(x) ((x) & GENMASK(51, 8))
+#define DEV_TABLE_ITT_SIZE(x) (BIT(((x) & GENMASK(7, 0)) + 1))
+#define DEV_TABLE_ENTRY(addr, bits)                     \
+        (((addr) & GENMASK(51, 8)) | (((bits) - 1) & GENMASK(7, 0)))
+
+static paddr_t get_itte_address(struct virt_its *its,
+                                uint32_t devid, uint32_t evid)
+{
+    paddr_t addr;
+
+    if ( devid >= its->max_devices )
+        return ~0;
+
+    if ( evid >= DEV_TABLE_ITT_SIZE(its->dev_table[devid]) )
+        return ~0;
+
+    addr = DEV_TABLE_ITT_ADDR(its->dev_table[devid]);
+
+    return addr + evid * sizeof(struct vits_itte);
+}
+
+/*
+ * Looks up a given deviceID/eventID pair on an ITS and returns a pointer to
+ * the corresponding ITTE. This maps the respective guest page into Xen.
+ * Once finished with handling the ITTE, call put_devid_evid() to unmap
+ * the page again.
+ * Must be called with the ITS lock held.
+ */
+static struct vits_itte *get_devid_evid(struct virt_its *its,
+                                        uint32_t devid, uint32_t evid)
+{
+    paddr_t addr = get_itte_address(its, devid, evid);
+
+    if ( addr == ~0 )
+        return NULL;
+
+    return map_guest_pages(its->d, addr, 1);
+}
+
+/* Must be called with the ITS lock held. */
+static void put_devid_evid(struct virt_its *its, struct vits_itte *itte)
+{
+    unmap_guest_pages(itte, 1);
+}
+
+/*
+ * Queries the collection and device tables to get the vCPU and virtual
+ * LPI number for a given guest event. This takes care of mapping the
+ * respective tables and validating the values, since we can't efficiently
+ * protect the ITTs with their less-than-page-size granularity.
+ * Takes and drops the its_lock.
+ */
+bool read_itte(struct virt_its *its, uint32_t devid, uint32_t evid,
+               struct vcpu **vcpu, uint32_t *vlpi)
+{
+    struct vits_itte *itte;
+    int collid;
+    uint32_t _vlpi;
+    struct vcpu *_vcpu;
+
+    spin_lock(&its->its_lock);
+    itte = get_devid_evid(its, devid, evid);
+    if ( !itte )
+    {
+        spin_unlock(&its->its_lock);
+        return false;
+    }
+    collid = itte->collection;
+    _vlpi = itte->vlpi;
+    put_devid_evid(its, itte);
+
+    _vcpu = get_vcpu_from_collection(its, collid);
+    spin_unlock(&its->its_lock);
+
+    if ( !_vcpu )
+        return false;
+
+    if ( collid >= its->max_collections )
+        return false;
+
+    *vcpu = _vcpu;
+    *vlpi = _vlpi;
+
+    return true;
+}
+
+#define SKIP_LPI_UPDATE 1
+bool write_itte(struct virt_its *its, uint32_t devid, uint32_t evid,
+                uint32_t collid, uint32_t vlpi, struct vcpu **vcpu)
+{
+    struct vits_itte *itte;
+
+    if ( collid >= its->max_collections )
+        return false;
+
+    /* TODO: validate vlpi */
+
+    spin_lock(&its->its_lock);
+    itte = get_devid_evid(its, devid, evid);
+    if ( !itte )
+    {
+        spin_unlock(&its->its_lock);
+        return false;
+    }
+
+    itte->collection = collid;
+    if ( vlpi != SKIP_LPI_UPDATE )
+        itte->vlpi = vlpi;
+
+    if ( vcpu )
+        *vcpu = get_vcpu_from_collection(its, collid);
+
+    put_devid_evid(its, itte);
+    spin_unlock(&its->its_lock);
+
+    return true;
+}
+
 /**************************************
  * Functions that handle ITS commands *
  **************************************/
-- 
2.9.0