
[Xen-devel] [PATCH v4] x86/hvm: Add per-vcpu evtchn upcalls



HVM guests have always been confined to using the domain callback via
(see HVM_PARAM_CALLBACK_IRQ) to receive event channel notifications.
This is usually an IOAPIC vector and is only used when an event channel
is bound to vcpu 0.

PVHVM Linux uses a pre-defined interrupt vector for the event channel
upcall, set using HVM_PARAM_CALLBACK_IRQ by ORing a special bit (bit 57)
into the value (see params.h). However, this mechanism does not assert
the interrupt via the emulated local APIC.
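
Concretely, "bit 57" refers to the callback type field in val[63:56] of
the HVM_PARAM_CALLBACK_IRQ value: type 2 selects vector delivery, so the
type is set by ORing in (2 << 56). A rough guest-side sketch follows
(the HYPERVISOR_hvm_op wrapper and struct xen_hvm_param usage shown here
are illustrative of a Linux-style guest, not part of this patch):

    /* Illustrative: select vector delivery (type 2 in val[63:56]) and
     * place the chosen upcall vector in the low bits. */
    struct xen_hvm_param xhp = {
        .domid = DOMID_SELF,
        .index = HVM_PARAM_CALLBACK_IRQ,
        .value = (2ULL << 56) | vector,
    };

    rc = HYPERVISOR_hvm_op(HVMOP_set_param, &xhp);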

This mechanism is not suitable in the general case, since Windows (and
potentially other OSes):

- cannot guarantee the same vector for all VCPUs
- do require the upcall to be asserted via the local APIC

This patch adds a new HVM op allowing a guest to specify a local APIC
vector to use as an upcall notification for a specific vcpu, thereby
coping with the case of differing vector numbers.
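
For example, a guest that has installed a per-vcpu interrupt handler on
<vector> could register it along the following lines (HYPERVISOR_hvm_op
is the usual guest-side hypercall wrapper; the name is shown for
illustration only):

    struct xen_hvm_set_evtchn_upcall_vector op = {
        .vcpu = cpu,      /* Xen vcpu id, not an APIC id */
        .vector = vector, /* must be >= 0x10 */
    };

    rc = HYPERVISOR_hvm_op(HVMOP_set_evtchn_upcall_vector, &op);
    if ( rc )
        /* fall back to HVM_PARAM_CALLBACK_IRQ delivery */;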

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: David Vrabel <david.vrabel@xxxxxxxxxx>
---
v2:
 - Addressed comments from Andrew Cooper
   - Check vector >=16
   - Put hypercall in x86-specific section

v3:
 - Addressed comments from Jan Beulich
   - More verbose check-in comment

v4:
 - Amended check-in comment as suggested by David Vrabel

 xen/arch/x86/hvm/hvm.c          |   39 +++++++++++++++++++++++++++++++++++++++
 xen/arch/x86/hvm/irq.c          |    9 +++++++++
 xen/include/asm-x86/hvm/vcpu.h  |    1 +
 xen/include/public/hvm/hvm_op.h |   20 ++++++++++++++++++++
 4 files changed, 69 insertions(+)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index e33cd0a..a436f8e 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -5462,6 +5462,40 @@ static int hvmop_destroy_ioreq_server(
     return rc;
 }
 
+static int hvmop_set_evtchn_upcall_vector(
+    XEN_GUEST_HANDLE_PARAM(xen_hvm_set_evtchn_upcall_vector_t) uop)
+{
+    xen_hvm_set_evtchn_upcall_vector_t op;
+    struct domain *d;
+    struct vcpu *v;
+    int rc;
+
+    if ( copy_from_guest(&op, uop, 1) )
+        return -EFAULT;
+
+    d = rcu_lock_current_domain();
+
+    rc = -EINVAL;
+    if ( !is_hvm_domain(d) )
+        goto out;
+
+    if ( op.vector < 0x10 )
+        goto out;
+
+    rc = -ENOENT;
+    if ( op.vcpu >= d->max_vcpus || (v = d->vcpu[op.vcpu]) == NULL )
+        goto out;
+
+    printk(XENLOG_G_INFO "%pv: %s %u\n", v, __func__, op.vector);
+
+    v->arch.hvm_vcpu.evtchn_upcall_vector = op.vector;
+    rc = 0;
+
+ out:
+    rcu_unlock_domain(d);
+    return rc;
+}
+
 /*
  * Note that this value is effectively part of the ABI, even if we don't need
  * to make it a formal part of it: A guest suspended for migration in the
@@ -5521,6 +5555,11 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
             guest_handle_cast(arg, xen_hvm_destroy_ioreq_server_t));
         break;
     
+    case HVMOP_set_evtchn_upcall_vector:
+        rc = hvmop_set_evtchn_upcall_vector(
+            guest_handle_cast(arg, xen_hvm_set_evtchn_upcall_vector_t));
+        break;
+    
     case HVMOP_set_param:
     case HVMOP_get_param:
     {
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index 35f4f94..3e4c0b4 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -152,6 +152,13 @@ void hvm_isa_irq_deassert(
     spin_unlock(&d->arch.hvm_domain.irq_lock);
 }
 
+static void hvm_set_upcall_irq(struct vcpu *v)
+{
+    uint8_t vector = v->arch.hvm_vcpu.evtchn_upcall_vector;
+
+    vlapic_set_irq(vcpu_vlapic(v), vector, 0);
+}
+
 static void hvm_set_callback_irq_level(struct vcpu *v)
 {
     struct domain *d = v->domain;
@@ -220,6 +227,8 @@ void hvm_assert_evtchn_irq(struct vcpu *v)
 
     if ( is_hvm_pv_evtchn_vcpu(v) )
         vcpu_kick(v);
+    else if ( v->arch.hvm_vcpu.evtchn_upcall_vector != 0 )
+        hvm_set_upcall_irq(v);
     else if ( v->vcpu_id == 0 )
         hvm_set_callback_irq_level(v);
 }
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 01e0665..edd4523 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -160,6 +160,7 @@ struct hvm_vcpu {
     } u;
 
     struct tasklet      assert_evtchn_irq_tasklet;
+    u8                  evtchn_upcall_vector;
 
     struct nestedvcpu   nvcpu;
 
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index eeb0a60..cfbf85d 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -369,6 +369,26 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_ioreq_server_state_t);
 
 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
 
+#if defined(__i386__) || defined(__x86_64__)
+
+/*
+ * HVMOP_set_evtchn_upcall_vector: Set a <vector> that should be used for event
+ *                                 channel upcalls on the specified <vcpu>. If set,
+ *                                 this vector will be used in preference to the
+ *                                 domain callback via (see HVM_PARAM_CALLBACK_IRQ)
+ *                                 and hence allows HVM guests to bind event
+ *                                 channels to a vcpu other than 0.
+ */
+#define HVMOP_set_evtchn_upcall_vector 23
+struct xen_hvm_set_evtchn_upcall_vector {
+    uint32_t vcpu;
+    uint8_t vector;
+};
+typedef struct xen_hvm_set_evtchn_upcall_vector xen_hvm_set_evtchn_upcall_vector_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_evtchn_upcall_vector_t);
+
+#endif /* defined(__i386__) || defined(__x86_64__) */
+
 #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
 
 /*
-- 
1.7.10.4

