[Xen-devel] [RFC PATCH V2 22/26] x86/vmsi: Hook delivering remapping format msi to guest
From: Chao Gao <chao.gao@xxxxxxxxx>
The hypervisor delivers an MSI to an HVM guest in two situations. One is
when qemu sends a request to the hypervisor through XEN_DMOP_inject_msi.
The other is when a physical interrupt arrives that has been bound to a
guest MSI.

For the former, the MSI is routed to the common vIOMMU layer if it is in
remapping format. For the latter, if the pt irq is bound to a guest
remapping MSI, a new remapping MSI is constructed from the binding
information and routed to the common vIOMMU layer.
Signed-off-by: Chao Gao <chao.gao@xxxxxxxxx>
Signed-off-by: Lan Tianyu <tianyu.lan@xxxxxxxxx>
---
xen/arch/x86/hvm/irq.c | 11 ++++++++++
xen/arch/x86/hvm/vmsi.c | 14 ++++++++++--
xen/drivers/passthrough/io.c | 52 +++++++++++++++++++++++++++++++++-----------
xen/include/asm-x86/msi.h | 3 +++
4 files changed, 65 insertions(+), 15 deletions(-)
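
As context for the routing decision described above: on VT-d, bit 4 of the
MSI address (the "Interrupt Format" bit) is what distinguishes a remappable
MSI from a compatibility-format one, and the new MSI_ADDR_INTEFORMAT_MASK
below tests exactly that bit. A minimal standalone sketch of the check
(illustrative only; the helper name is not part of the patch):

#include <stdbool.h>
#include <stdint.h>

/* Same bit the patch adds to asm-x86/msi.h: MSI address bit 4 is the
 * VT-d interrupt-format bit; 1 means the MSI is in remapping format. */
#define MSI_ADDR_INTEFORMAT_SHIFT 4
#define MSI_ADDR_INTEFORMAT_MASK  (1u << MSI_ADDR_INTEFORMAT_SHIFT)

/* Illustrative helper: should this guest MSI go to the vIOMMU layer? */
static bool msi_is_remapping_format(uint64_t addr)
{
    return addr & MSI_ADDR_INTEFORMAT_MASK;
}
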
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index 8625584..abe2f77 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -26,6 +26,7 @@
#include <asm/hvm/domain.h>
#include <asm/hvm/support.h>
#include <asm/msi.h>
+#include <asm/viommu.h>
/* Must be called with hvm_domain->irq_lock hold */
static void assert_gsi(struct domain *d, unsigned ioapic_gsi)
@@ -298,6 +299,16 @@ int hvm_inject_msi(struct domain *d, uint64_t addr, uint32_t data)
>> MSI_DATA_TRIGGER_SHIFT;
uint8_t vector = data & MSI_DATA_VECTOR_MASK;
+ if ( addr & MSI_ADDR_INTEFORMAT_MASK )
+ {
+ struct irq_remapping_request request;
+
+ irq_request_msi_fill(&request, 0, addr, data);
+ /* Currently, only viommu 0 is supported */
+ viommu_handle_irq_request(d, 0, &request);
+ return 0;
+ }
+
if ( !vector )
{
int pirq = ((addr >> 32) & 0xffffff00) | dest;
diff --git a/xen/arch/x86/hvm/vmsi.c b/xen/arch/x86/hvm/vmsi.c
index c4ec0ad..75ceb19 100644
--- a/xen/arch/x86/hvm/vmsi.c
+++ b/xen/arch/x86/hvm/vmsi.c
@@ -114,9 +114,19 @@ void vmsi_deliver_pirq(struct domain *d, const struct hvm_pirq_dpci *pirq_dpci)
"vector=%x trig_mode=%x\n",
dest, dest_mode, delivery_mode, vector, trig_mode);
- ASSERT(pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI);
+ ASSERT(pirq_dpci->flags & (HVM_IRQ_DPCI_GUEST_MSI | HVM_IRQ_DPCI_GUEST_MSI_IR));
+ if ( pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI_IR )
+ {
+ struct irq_remapping_request request;
- vmsi_deliver(d, vector, dest, dest_mode, delivery_mode, trig_mode);
+ irq_request_msi_fill(&request, pirq_dpci->gmsi.intremap.source_id,
+ pirq_dpci->gmsi.intremap.addr,
+ pirq_dpci->gmsi.intremap.data);
+ /* Currently, only viommu 0 is supported */
+ viommu_handle_irq_request(d, 0, &request);
+ }
+ else
+ vmsi_deliver(d, vector, dest, dest_mode, delivery_mode, trig_mode);
}
/* Return value, -1 : multi-dests, non-negative value: dest_vcpu_id */
diff --git a/xen/drivers/passthrough/io.c b/xen/drivers/passthrough/io.c
index b4b6e9c..572e60d 100644
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -139,7 +139,9 @@ static void pt_pirq_softirq_reset(struct hvm_pirq_dpci *pirq_dpci)
bool_t pt_irq_need_timer(uint32_t flags)
{
- return !(flags & (HVM_IRQ_DPCI_GUEST_MSI | HVM_IRQ_DPCI_TRANSLATE));
+ return !(flags & (HVM_IRQ_DPCI_GUEST_MSI_IR |
+ HVM_IRQ_DPCI_GUEST_MSI |
+ HVM_IRQ_DPCI_TRANSLATE));
}
static int pt_irq_guest_eoi(struct domain *d, struct hvm_pirq_dpci *pirq_dpci,
@@ -659,7 +661,8 @@ int pt_irq_destroy_bind(
pirq = pirq_info(d, machine_gsi);
pirq_dpci = pirq_dpci(pirq);
- if ( pt_irq_bind->irq_type != PT_IRQ_TYPE_MSI )
+ if ( (pt_irq_bind->irq_type != PT_IRQ_TYPE_MSI_IR) &&
+ (pt_irq_bind->irq_type != PT_IRQ_TYPE_MSI) )
{
unsigned int bus = pt_irq_bind->u.pci.bus;
unsigned int device = pt_irq_bind->u.pci.device;
@@ -824,20 +827,41 @@ static int _hvm_dpci_msi_eoi(struct domain *d,
{
int vector = (long)arg;
- if ( (pirq_dpci->flags & HVM_IRQ_DPCI_MACH_MSI) &&
- (pirq_dpci->gmsi.legacy.gvec == vector) )
+ if ( pirq_dpci->flags & HVM_IRQ_DPCI_MACH_MSI )
{
- int dest = pirq_dpci->gmsi.legacy.gflags & VMSI_DEST_ID_MASK;
- int dest_mode = !!(pirq_dpci->gmsi.legacy.gflags & VMSI_DM_MASK);
+ if ( (pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI) &&
+ (pirq_dpci->gmsi.legacy.gvec == vector) )
+ {
+ int dest = pirq_dpci->gmsi.legacy.gflags & VMSI_DEST_ID_MASK;
+ int dest_mode = !!(pirq_dpci->gmsi.legacy.gflags & VMSI_DM_MASK);
- if ( vlapic_match_dest(vcpu_vlapic(current), NULL, 0, dest,
- dest_mode) )
+ if ( vlapic_match_dest(vcpu_vlapic(current), NULL, 0, dest,
+ dest_mode) )
+ {
+ __msi_pirq_eoi(pirq_dpci);
+ return 1;
+ }
+ }
+ else if ( pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI_IR )
{
- __msi_pirq_eoi(pirq_dpci);
- return 1;
+ int ret;
+ struct irq_remapping_request request;
+ struct irq_remapping_info irq_info;
+
+ irq_request_msi_fill(&request, pirq_dpci->gmsi.intremap.source_id,
+ pirq_dpci->gmsi.intremap.addr,
+ pirq_dpci->gmsi.intremap.data);
+ /* Currently, only viommu 0 is supported */
+ ret = viommu_get_irq_info(d, 0, &request, &irq_info);
+ if ( (!ret) && (irq_info.vector == vector) &&
+ vlapic_match_dest(vcpu_vlapic(current), NULL, 0,
+ irq_info.dest, irq_info.dest_mode) )
+ {
+ __msi_pirq_eoi(pirq_dpci);
+ return 1;
+ }
}
}
-
return 0;
}
@@ -869,14 +893,16 @@ static void hvm_dirq_assist(struct domain *d, struct hvm_pirq_dpci *pirq_dpci)
{
send_guest_pirq(d, pirq);
- if ( pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI )
+ if ( pirq_dpci->flags
+ & (HVM_IRQ_DPCI_GUEST_MSI | HVM_IRQ_DPCI_GUEST_MSI_IR) )
{
spin_unlock(&d->event_lock);
return;
}
}
- if ( pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI )
+ if ( pirq_dpci->flags
+ & (HVM_IRQ_DPCI_GUEST_MSI | HVM_IRQ_DPCI_GUEST_MSI_IR) )
{
vmsi_deliver_pirq(d, pirq_dpci);
spin_unlock(&d->event_lock);
diff --git a/xen/include/asm-x86/msi.h b/xen/include/asm-x86/msi.h
index a5de6a1..c41e2a7 100644
--- a/xen/include/asm-x86/msi.h
+++ b/xen/include/asm-x86/msi.h
@@ -49,6 +49,9 @@
#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT)
#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT)
+#define MSI_ADDR_INTEFORMAT_SHIFT 4
+#define MSI_ADDR_INTEFORMAT_MASK (1 << MSI_ADDR_INTEFORMAT_SHIFT)
+
#define MSI_ADDR_DEST_ID_SHIFT 12
#define MSI_ADDR_DEST_ID_MASK 0x00ff000
#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & MSI_ADDR_DEST_ID_MASK)
--
1.8.3.1
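
For completeness, a caller-side view of the first delivery path from the
commit message, i.e. an emulator injecting a guest MSI through
XEN_DMOP_inject_msi (shown here via the libxendevicemodel wrapper, which is
an assumption of this sketch and not part of the patch). After this patch,
an address with the interrupt-format bit set is handed to vIOMMU 0 rather
than parsed as a compatibility-format MSI:

#include <stdint.h>
#include <xendevicemodel.h>

/* Sketch only: inject a guest MSI; msi_addr may be in remapping format
 * (bit 4 set), in which case the hypervisor now routes the request to
 * viommu_handle_irq_request() instead of vmsi_deliver(). */
static int inject_guest_msi(xendevicemodel_handle *dmod, uint16_t domid,
                            uint64_t msi_addr, uint32_t msi_data)
{
    return xendevicemodel_inject_msi(dmod, domid, msi_addr, msi_data);
}
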