[RFC PATCH 5/6] xen/arm: Intercept vPCI config accesses and forward them to emulator



From: Oleksandr Tyshchenko <oleksandr_tyshchenko@xxxxxxxx>

This is needed to support virtio-pci.

When the PCI host bridge is emulated outside of Xen (by an IOREQ
server), we need a mechanism on Xen on Arm to intercept configuration
space accesses and forward them to the emulator (for example, a virtio
backend) via an IOREQ request.

Unlike x86, these accesses are MMIO on Arm; there is no CF8/CFC port
I/O mechanism to detect which PCI device is targeted.
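
Instead, the targeted device is encoded in the offset into the ECAM
region. For reference, a minimal sketch of the standard PCIe ECAM
offset encoding (illustrative only; the patch itself reuses the
existing VPCI_ECAM_BDF()/ECAM_REG_OFFSET() helpers):

  /* Standard ECAM layout: each PCI function owns a 4 KiB register
   * window, so bus/device/function/register all live in the offset. */
  #define EXAMPLE_ECAM_OFFSET(bus, dev, fn, reg)           \
      (((uint64_t)(bus) << 20) | ((uint64_t)(dev) << 15) | \
       ((uint64_t)(fn) << 12) | ((reg) & 0xfff))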

To avoid mixing PCI passthrough with virtio-pci features, we add one
more region to cover the total configuration space for all possible
host bridges which can serve virtio-pci devices for that guest.
We expose one PCI host bridge per virtio backend domain.
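
For illustration, the reserved window is carved into equal per-bridge
slots; a sketch assuming the GUEST_VIRTIO_PCI_* layout constants
introduced elsewhere in this series (the helper name is made up):

  /* ECAM slot of the host bridge serving the backend with the given
   * host_id; virtio_pci_ioreq_server_get_addr() performs the inverse
   * lookup, from guest physical address back to segment and SBDF. */
  static inline paddr_t virtio_pci_host_ecam_base(unsigned int host_id)
  {
      return GUEST_VIRTIO_PCI_ECAM_BASE +
             (paddr_t)host_id * GUEST_VIRTIO_PCI_HOST_ECAM_SIZE;
  }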

To distinguish between virtio-pci devices belonging to PCI host
bridges emulated by device models running in different backend domains,
we also need to calculate a segment in virtio_pci_ioreq_server_get_addr().
For this to work, the toolstack is responsible for allocating and
assigning a unique configuration space range and segment (host_id),
within the total reserved range, to each of these device models. The
device models are responsible for applying that segment when forming
the DM op to register PCI devices with the IOREQ server.
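
On the backend side this could look as follows; a sketch assuming a
device model built on libxendevicemodel, where everything apart from
the xendevicemodel_map_pcidev_to_ioreq_server() call is illustrative:

  #include <xendevicemodel.h>

  /* Ask Xen to forward config cycles for device 00:00.0 on the
   * toolstack-assigned segment to our IOREQ server, which will then
   * receive them as XEN_DMOP_IO_RANGE_PCI requests. */
  static int register_virtio_pci_dev(xendevicemodel_handle *xdm,
                                     domid_t domid, ioservid_t ioservid,
                                     uint16_t segment)
  {
      return xendevicemodel_map_pcidev_to_ioreq_server(xdm, domid,
                                                       ioservid, segment,
                                                       0 /* bus */,
                                                       0 /* device */,
                                                       0 /* function */);
  }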

Introduce a new CONFIG_VIRTIO_PCI option to guard the whole handling.

Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@xxxxxxxx>
Signed-off-by: Sergiy Kibrik <Sergiy_Kibrik@xxxxxxxx>
---
 xen/arch/arm/Kconfig                  |  10 +++
 xen/arch/arm/domain.c                 |   2 +-
 xen/arch/arm/{ => include/asm}/vpci.h |  11 +++
 xen/arch/arm/io.c                     |   8 +-
 xen/arch/arm/ioreq.c                  |  19 ++++-
 xen/arch/arm/vpci.c                   | 106 +++++++++++++++++++++++++-
 6 files changed, 146 insertions(+), 10 deletions(-)
 rename xen/arch/arm/{ => include/asm}/vpci.h (75%)

diff --git a/xen/arch/arm/Kconfig b/xen/arch/arm/Kconfig
index 2939db429b..9e8d6c4ce2 100644
--- a/xen/arch/arm/Kconfig
+++ b/xen/arch/arm/Kconfig
@@ -190,6 +190,16 @@ config STATIC_SHM
        help
          This option enables statically shared memory on a dom0less system.
 
+config VIRTIO_PCI
+       bool "Support of virtio-pci transport" if EXPERT
+       depends on ARM_64
+       select HAS_PCI
+       select HAS_VPCI
+       select IOREQ_SERVER
+       default n
+       help
+         This option enables support for the virtio-pci transport.
+
 endmenu
 
 menu "ARM errata workaround via the alternative framework"
diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 28e3aaa5e4..140f9bbd58 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -28,9 +28,9 @@
 #include <asm/tee/tee.h>
 #include <asm/vfp.h>
 #include <asm/vgic.h>
+#include <asm/vpci.h>
 #include <asm/vtimer.h>
 
-#include "vpci.h"
 #include "vuart.h"
 
 DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
diff --git a/xen/arch/arm/vpci.h b/xen/arch/arm/include/asm/vpci.h
similarity index 75%
rename from xen/arch/arm/vpci.h
rename to xen/arch/arm/include/asm/vpci.h
index 3c713f3fcd..54d083c67b 100644
--- a/xen/arch/arm/vpci.h
+++ b/xen/arch/arm/include/asm/vpci.h
@@ -30,6 +30,17 @@ static inline unsigned int domain_vpci_get_num_mmio_handlers(struct domain *d)
 }
 #endif
 
+#ifdef CONFIG_VIRTIO_PCI
+bool virtio_pci_ioreq_server_get_addr(const struct domain *d,
+                                      paddr_t gpa, uint64_t *addr);
+#else
+static inline bool virtio_pci_ioreq_server_get_addr(const struct domain *d,
+                                                    paddr_t gpa, uint64_t *addr)
+{
+    return false;
+}
+#endif
+
 #endif /* __ARCH_ARM_VPCI_H__ */
 
 /*
diff --git a/xen/arch/arm/io.c b/xen/arch/arm/io.c
index 96c740d563..5c3e03e30d 100644
--- a/xen/arch/arm/io.c
+++ b/xen/arch/arm/io.c
@@ -26,6 +26,7 @@ static enum io_state handle_read(const struct mmio_handler *handler,
 {
     const struct hsr_dabt dabt = info->dabt;
     struct cpu_user_regs *regs = guest_cpu_user_regs();
+    int ret;
     /*
      * Initialize to zero to avoid leaking data if there is an
      * implementation error in the emulation (such as not correctly
@@ -33,8 +34,9 @@ static enum io_state handle_read(const struct mmio_handler *handler,
      */
     register_t r = 0;
 
-    if ( !handler->ops->read(v, info, &r, handler->priv) )
-        return IO_ABORT;
+    ret = handler->ops->read(v, info, &r, handler->priv);
+    if ( ret != IO_HANDLED )
+        return ret != IO_RETRY ? IO_ABORT : ret;
 
     r = sign_extend(dabt, r);
 
@@ -53,7 +55,7 @@ static enum io_state handle_write(const struct mmio_handler *handler,
 
     ret = handler->ops->write(v, info, get_user_reg(regs, dabt.reg),
                               handler->priv);
-    return ret ? IO_HANDLED : IO_ABORT;
+    return ret != IO_HANDLED && ret != IO_RETRY ? IO_ABORT : ret;
 }
 
 /* This function assumes that mmio regions are not overlapped */
diff --git a/xen/arch/arm/ioreq.c b/xen/arch/arm/ioreq.c
index 5df755b48b..fd4cc755b6 100644
--- a/xen/arch/arm/ioreq.c
+++ b/xen/arch/arm/ioreq.c
@@ -10,6 +10,7 @@
 
 #include <asm/traps.h>
 #include <asm/ioreq.h>
+#include <asm/vpci.h>
 
 #include <public/hvm/ioreq.h>
 
@@ -193,12 +194,24 @@ bool arch_ioreq_server_get_type_addr(const struct domain *d,
                                      uint8_t *type,
                                      uint64_t *addr)
 {
+    uint64_t pci_addr;
+
     if ( p->type != IOREQ_TYPE_COPY && p->type != IOREQ_TYPE_PIO )
         return false;
 
-    *type = (p->type == IOREQ_TYPE_PIO) ?
-             XEN_DMOP_IO_RANGE_PORT : XEN_DMOP_IO_RANGE_MEMORY;
-    *addr = p->addr;
+    if ( p->type == IOREQ_TYPE_COPY &&
+         virtio_pci_ioreq_server_get_addr(d, p->addr, &pci_addr) )
+    {
+        /* PCI config data cycle */
+        *type = XEN_DMOP_IO_RANGE_PCI;
+        *addr = pci_addr;
+    }
+    else
+    {
+        *type = (p->type == IOREQ_TYPE_PIO) ?
+                 XEN_DMOP_IO_RANGE_PORT : XEN_DMOP_IO_RANGE_MEMORY;
+        *addr = p->addr;
+    }
 
     return true;
 }
diff --git a/xen/arch/arm/vpci.c b/xen/arch/arm/vpci.c
index 3bc4bb5508..1de4c3e71b 100644
--- a/xen/arch/arm/vpci.c
+++ b/xen/arch/arm/vpci.c
@@ -2,9 +2,12 @@
 /*
  * xen/arch/arm/vpci.c
  */
+#include <xen/ioreq.h>
 #include <xen/sched.h>
+#include <xen/sizes.h>
 #include <xen/vpci.h>
 
+#include <asm/ioreq.h>
 #include <asm/mmio.h>
 
 static pci_sbdf_t vpci_sbdf_from_gpa(const struct pci_host_bridge *bridge,
@@ -24,6 +27,27 @@ static pci_sbdf_t vpci_sbdf_from_gpa(const struct pci_host_bridge *bridge,
     return sbdf;
 }
 
+bool virtio_pci_ioreq_server_get_addr(const struct domain *d,
+                                      paddr_t gpa, uint64_t *addr)
+{
+    pci_sbdf_t sbdf;
+
+    if ( !has_vpci(d) )
+        return false;
+
+    if ( gpa < GUEST_VIRTIO_PCI_ECAM_BASE ||
+         gpa >= GUEST_VIRTIO_PCI_ECAM_BASE + GUEST_VIRTIO_PCI_TOTAL_ECAM_SIZE )
+        return false;
+
+    sbdf.sbdf = VPCI_ECAM_BDF((gpa - GUEST_VIRTIO_PCI_ECAM_BASE) %
+        GUEST_VIRTIO_PCI_HOST_ECAM_SIZE);
+    sbdf.seg = (gpa - GUEST_VIRTIO_PCI_ECAM_BASE) /
+        GUEST_VIRTIO_PCI_HOST_ECAM_SIZE;
+    *addr = ((uint64_t)sbdf.sbdf << 32) | ECAM_REG_OFFSET(gpa);
+
+    return true;
+}
+
 static int vpci_mmio_read(struct vcpu *v, mmio_info_t *info,
                           register_t *r, void *p)
 {
@@ -36,12 +60,12 @@ static int vpci_mmio_read(struct vcpu *v, mmio_info_t *info,
                         1U << info->dabt.size, &data) )
     {
         *r = data;
-        return 1;
+        return IO_HANDLED;
     }
 
     *r = ~0ul;
 
-    return 0;
+    return IO_ABORT;
 }
 
 static int vpci_mmio_write(struct vcpu *v, mmio_info_t *info,
@@ -59,6 +83,61 @@ static const struct mmio_handler_ops vpci_mmio_handler = {
     .write = vpci_mmio_write,
 };
 
+#ifdef CONFIG_VIRTIO_PCI
+static int virtio_pci_mmio_read(struct vcpu *v, mmio_info_t *info,
+                                register_t *r, void *p)
+{
+    const uint8_t access_size = (1 << info->dabt.size) * 8;
+    const uint64_t access_mask = GENMASK_ULL(access_size - 1, 0);
+    int rc = IO_HANDLED;
+
+    ASSERT(!is_hardware_domain(v->domain));
+
+    if ( domain_has_ioreq_server(v->domain) )
+    {
+        rc = try_fwd_ioserv(guest_cpu_user_regs(), v, info);
+        if ( rc == IO_HANDLED )
+        {
+            *r = v->io.req.data;
+            v->io.req.state = STATE_IOREQ_NONE;
+            return IO_HANDLED;
+        }
+        else if ( rc == IO_UNHANDLED )
+            rc = IO_HANDLED;
+    }
+
+    *r = access_mask;
+    return rc;
+}
+
+static int virtio_pci_mmio_write(struct vcpu *v, mmio_info_t *info,
+                                 register_t r, void *p)
+{
+    int rc = IO_HANDLED;
+
+    ASSERT(!is_hardware_domain(v->domain));
+
+    if ( domain_has_ioreq_server(v->domain) )
+    {
+        rc = try_fwd_ioserv(guest_cpu_user_regs(), v, info);
+        if ( rc == IO_HANDLED )
+        {
+            v->io.req.state = STATE_IOREQ_NONE;
+            return IO_HANDLED;
+        }
+        else if ( rc == IO_UNHANDLED )
+            rc = IO_HANDLED;
+    }
+
+    return rc;
+}
+
+static const struct mmio_handler_ops virtio_pci_mmio_handler = {
+    .read  = virtio_pci_mmio_read,
+    .write = virtio_pci_mmio_write,
+};
+#endif
+
 static int vpci_setup_mmio_handler_cb(struct domain *d,
                                       struct pci_host_bridge *bridge)
 {
@@ -90,9 +169,17 @@ int domain_vpci_init(struct domain *d)
             return ret;
     }
     else
+    {
         register_mmio_handler(d, &vpci_mmio_handler,
                               GUEST_VPCI_ECAM_BASE, GUEST_VPCI_ECAM_SIZE, NULL);
 
+#ifdef CONFIG_VIRTIO_PCI
+        register_mmio_handler(d, &virtio_pci_mmio_handler,
+                              GUEST_VIRTIO_PCI_ECAM_BASE,
+                              GUEST_VIRTIO_PCI_TOTAL_ECAM_SIZE, NULL);
+#endif
+    }
+
     return 0;
 }
 
@@ -105,6 +192,8 @@ static int vpci_get_num_handlers_cb(struct domain *d,
 
 unsigned int domain_vpci_get_num_mmio_handlers(struct domain *d)
 {
+    unsigned int count;
+
     if ( !has_vpci(d) )
         return 0;
 
@@ -125,7 +214,18 @@ unsigned int domain_vpci_get_num_mmio_handlers(struct domain *d)
      * For guests each host bridge requires one region to cover the
      * configuration space. At the moment, we only expose a single host bridge.
      */
-    return 1;
+    count = 1;
+
+    /*
+     * To avoid mixing PCI passthrough with virtio-pci features, we add
+     * one more region to cover the total configuration space for all possible
+     * host bridges which can serve virtio devices for that guest.
+     * We expose one host bridge per virtio backend domain.
+     */
+    if ( IS_ENABLED(CONFIG_VIRTIO_PCI) )
+        count++;
+
+    return count;
 }
 
 /*
-- 
2.25.1