[Xen-devel] [PATCH v6 09/11] viridian: add implementation of synthetic interrupt MSRs



This patch introduces an implementation of the SCONTROL, SVERSION, SIEFP,
SIMP, EOM and SINT0-15 SynIC MSRs. No message source is added and, as such,
nothing will yet generate a synthetic interrupt. A subsequent patch will
add an implementation of synthetic timers which will need the infrastructure
added by this patch to deliver expiry messages to the guest.
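
For context, the rough shape of message delivery that this infrastructure
enables is sketched below. This is purely illustrative and is not part of
the patch: deliver_sint_message() is a hypothetical helper, and the
'message pending' handling, memory barriers and error paths that a real
message source would need are elided.

    /*
     * Illustrative sketch only: copy a message into the SIM page slot for
     * the given SINTx and assert the corresponding vector, assuming the
     * slot is free and the SINT is neither masked nor in polling mode.
     */
    static void deliver_sint_message(struct vcpu *v, unsigned int sintx,
                                     const HV_MESSAGE *msg)
    {
        struct viridian_vcpu *vv = v->arch.hvm.viridian;
        HV_MESSAGE *slot = (HV_MESSAGE *)vv->simp.ptr + sintx;
        const union viridian_sint_msr *vs = &vv->sint[sintx];

        if ( !vv->simp.msr.enabled || vs->mask )
            return;

        *slot = *msg;
        if ( !vs->polling )
            vlapic_set_irq(vcpu_vlapic(v), vs->vector, 0); /* edge triggered */
    }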

NOTE: A 'synic' option is added to the toolstack viridian enlightenments
      enumeration, but it is deliberately left undocumented: enabling these
      SynIC registers without a message source is only useful for
      debugging.
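
For anyone who does want to turn it on for debugging, the enlightenment
should be selectable in the same way as the existing ones via the guest
config's viridian option, e.g. (illustrative; this assumes the xl.cfg
value matches the new IDL enumeration name):

    viridian = [ "defaults", "synic" ]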

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Acked-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Julien Grall <julien.grall@xxxxxxx>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>
Cc: "Roger Pau Monné" <roger.pau@xxxxxxxxxx>

v6:
 - Address further comments from Jan

v4:
 - Address comments from Jan

v3:
 - Add the 'SintPollingModeAvailable' bit in CPUID leaf 3
---
 tools/libxl/libxl.h                    |   6 +
 tools/libxl/libxl_dom.c                |   3 +
 tools/libxl/libxl_types.idl            |   1 +
 xen/arch/x86/hvm/viridian/synic.c      | 253 ++++++++++++++++++++++++-
 xen/arch/x86/hvm/viridian/viridian.c   |  19 ++
 xen/arch/x86/hvm/vlapic.c              |  31 ++-
 xen/include/asm-x86/hvm/hvm.h          |   3 +
 xen/include/asm-x86/hvm/viridian.h     |  27 +++
 xen/include/public/arch-x86/hvm/save.h |   2 +
 xen/include/public/hvm/params.h        |   7 +-
 10 files changed, 344 insertions(+), 8 deletions(-)

diff --git a/tools/libxl/libxl.h b/tools/libxl/libxl.h
index a38e5cdba2..a923a380d3 100644
--- a/tools/libxl/libxl.h
+++ b/tools/libxl/libxl.h
@@ -318,6 +318,12 @@
  */
 #define LIBXL_HAVE_VIRIDIAN_CRASH_CTL 1
 
+/*
+ * LIBXL_HAVE_VIRIDIAN_SYNIC indicates that the 'synic' value
+ * is present in the viridian enlightenment enumeration.
+ */
+#define LIBXL_HAVE_VIRIDIAN_SYNIC 1
+
 /*
  * LIBXL_HAVE_BUILDINFO_HVM_ACPI_LAPTOP_SLATE indicates that
  * libxl_domain_build_info has the u.hvm.acpi_laptop_slate field.
diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
index 6160991af3..fb758d2ac3 100644
--- a/tools/libxl/libxl_dom.c
+++ b/tools/libxl/libxl_dom.c
@@ -317,6 +317,9 @@ static int hvm_set_viridian_features(libxl__gc *gc, uint32_t domid,
     if (libxl_bitmap_test(&enlightenments, LIBXL_VIRIDIAN_ENLIGHTENMENT_CRASH_CTL))
         mask |= HVMPV_crash_ctl;
 
+    if (libxl_bitmap_test(&enlightenments, LIBXL_VIRIDIAN_ENLIGHTENMENT_SYNIC))
+        mask |= HVMPV_synic;
+
     if (mask != 0 &&
         xc_hvm_param_set(CTX->xch,
                          domid,
diff --git a/tools/libxl/libxl_types.idl b/tools/libxl/libxl_types.idl
index b685ac47ac..9860bcaf5f 100644
--- a/tools/libxl/libxl_types.idl
+++ b/tools/libxl/libxl_types.idl
@@ -235,6 +235,7 @@ libxl_viridian_enlightenment = Enumeration("viridian_enlightenment", [
     (4, "hcall_remote_tlb_flush"),
     (5, "apic_assist"),
     (6, "crash_ctl"),
+    (7, "synic"),
     ])
 
 libxl_hdtype = Enumeration("hdtype", [
diff --git a/xen/arch/x86/hvm/viridian/synic.c b/xen/arch/x86/hvm/viridian/synic.c
index fb560bc162..d00a7bfe2c 100644
--- a/xen/arch/x86/hvm/viridian/synic.c
+++ b/xen/arch/x86/hvm/viridian/synic.c
@@ -13,6 +13,7 @@
 
 #include <asm/apic.h>
 #include <asm/hvm/support.h>
+#include <asm/hvm/vlapic.h>
 
 #include "private.h"
 
@@ -28,6 +29,37 @@ typedef union _HV_VP_ASSIST_PAGE
     uint8_t ReservedZBytePadding[PAGE_SIZE];
 } HV_VP_ASSIST_PAGE;
 
+typedef enum HV_MESSAGE_TYPE {
+    HvMessageTypeNone,
+    HvMessageTimerExpired = 0x80000010,
+} HV_MESSAGE_TYPE;
+
+typedef struct HV_MESSAGE_FLAGS {
+    uint8_t MessagePending:1;
+    uint8_t Reserved:7;
+} HV_MESSAGE_FLAGS;
+
+typedef struct HV_MESSAGE_HEADER {
+    HV_MESSAGE_TYPE MessageType;
+    uint16_t Reserved1;
+    HV_MESSAGE_FLAGS MessageFlags;
+    uint8_t PayloadSize;
+    uint64_t Reserved2;
+} HV_MESSAGE_HEADER;
+
+#define HV_MESSAGE_SIZE 256
+#define HV_MESSAGE_MAX_PAYLOAD_QWORD_COUNT 30
+
+typedef struct HV_MESSAGE {
+    HV_MESSAGE_HEADER Header;
+    uint64_t Payload[HV_MESSAGE_MAX_PAYLOAD_QWORD_COUNT];
+} HV_MESSAGE;
+
+void __init __maybe_unused build_assertions(void)
+{
+    BUILD_BUG_ON(sizeof(HV_MESSAGE) != HV_MESSAGE_SIZE);
+}
+
 void viridian_apic_assist_set(const struct vcpu *v)
 {
     struct viridian_vcpu *vv = v->arch.hvm.viridian;
@@ -83,6 +115,8 @@ int viridian_synic_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
     struct viridian_vcpu *vv = v->arch.hvm.viridian;
     struct domain *d = v->domain;
 
+    ASSERT(v == current || !v->is_running);
+
     switch ( idx )
     {
     case HV_X64_MSR_EOI:
@@ -107,6 +141,76 @@ int viridian_synic_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
             viridian_map_guest_page(d, &vv->vp_assist);
         break;
 
+    case HV_X64_MSR_SCONTROL:
+        if ( !(viridian_feature_mask(d) & HVMPV_synic) )
+            return X86EMUL_EXCEPTION;
+
+        vv->scontrol = val;
+        break;
+
+    case HV_X64_MSR_SVERSION:
+        return X86EMUL_EXCEPTION;
+
+    case HV_X64_MSR_SIEFP:
+        if ( !(viridian_feature_mask(d) & HVMPV_synic) )
+            return X86EMUL_EXCEPTION;
+
+        vv->siefp = val;
+        break;
+
+    case HV_X64_MSR_SIMP:
+        if ( !(viridian_feature_mask(d) & HVMPV_synic) )
+            return X86EMUL_EXCEPTION;
+
+        viridian_unmap_guest_page(&vv->simp);
+        vv->simp.msr.raw = val;
+        viridian_dump_guest_page(v, "SIMP", &vv->simp);
+        if ( vv->simp.msr.enabled )
+            viridian_map_guest_page(d, &vv->simp);
+        break;
+
+    case HV_X64_MSR_EOM:
+        if ( !(viridian_feature_mask(d) & HVMPV_synic) )
+            return X86EMUL_EXCEPTION;
+
+        vv->msg_pending = 0;
+        break;
+
+    case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
+    {
+        unsigned int sintx = idx - HV_X64_MSR_SINT0;
+        union viridian_sint_msr new, *vs =
+            &array_access_nospec(vv->sint, sintx);
+        uint8_t vector;
+
+        if ( !(viridian_feature_mask(d) & HVMPV_synic) )
+            return X86EMUL_EXCEPTION;
+
+        /* Vectors must be in the range 0x10-0xff inclusive */
+        new.raw = val;
+        if ( new.vector < 0x10 )
+            return X86EMUL_EXCEPTION;
+
+        /*
+         * Invalidate any previous mapping by setting an out-of-range
+         * index before setting the new mapping.
+         */
+        vector = vs->vector;
+        vv->vector_to_sintx[vector] = ARRAY_SIZE(vv->sint);
+
+        vector = new.vector;
+        vv->vector_to_sintx[vector] = sintx;
+
+        printk(XENLOG_G_INFO "%pv: VIRIDIAN SINT%u: vector: %x\n", v, sintx,
+               vector);
+
+        if ( new.polling )
+            __clear_bit(sintx, &vv->msg_pending);
+
+        *vs = new;
+        break;
+    }
+
     default:
         gdprintk(XENLOG_INFO, "%s: unimplemented MSR %#x (%016"PRIx64")\n",
                  __func__, idx, val);
@@ -118,6 +222,9 @@ int viridian_synic_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
 
 int viridian_synic_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
 {
+    const struct viridian_vcpu *vv = v->arch.hvm.viridian;
+    const struct domain *d = v->domain;
+
     switch ( idx )
     {
     case HV_X64_MSR_EOI:
@@ -131,13 +238,69 @@ int viridian_synic_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
         *val = ((uint64_t)icr2 << 32) | icr;
         break;
     }
+
     case HV_X64_MSR_TPR:
         *val = vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI);
         break;
 
     case HV_X64_MSR_VP_ASSIST_PAGE:
-        *val = v->arch.hvm.viridian->vp_assist.msr.raw;
+        *val = vv->vp_assist.msr.raw;
+        break;
+
+    case HV_X64_MSR_SCONTROL:
+        if ( !(viridian_feature_mask(d) & HVMPV_synic) )
+            return X86EMUL_EXCEPTION;
+
+        *val = vv->scontrol;
+        break;
+
+    case HV_X64_MSR_SVERSION:
+        if ( !(viridian_feature_mask(d) & HVMPV_synic) )
+            return X86EMUL_EXCEPTION;
+
+        /*
+         * The specification says that the version number is 0x00000001
+         * and should be in the lower 32-bits of the MSR, while the
+         * upper 32-bits are reserved... but it doesn't say what they
+         * should be set to. Assume everything but the bottom bit
+         * should be zero.
+         */
+        *val = 1ul;
+        break;
+
+    case HV_X64_MSR_SIEFP:
+        if ( !(viridian_feature_mask(d) & HVMPV_synic) )
+            return X86EMUL_EXCEPTION;
+
+        *val = vv->siefp;
+        break;
+
+    case HV_X64_MSR_SIMP:
+        if ( !(viridian_feature_mask(d) & HVMPV_synic) )
+            return X86EMUL_EXCEPTION;
+
+        *val = vv->simp.msr.raw;
+        break;
+
+    case HV_X64_MSR_EOM:
+        if ( !(viridian_feature_mask(d) & HVMPV_synic) )
+            return X86EMUL_EXCEPTION;
+
+        *val = 0;
+        break;
+
+    case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
+    {
+        unsigned int sintx = idx - HV_X64_MSR_SINT0;
+        const union viridian_sint_msr *vs =
+            &array_access_nospec(vv->sint, sintx);
+
+        if ( !(viridian_feature_mask(d) & HVMPV_synic) )
+            return X86EMUL_EXCEPTION;
+
+        *val = vs->raw;
         break;
+    }
 
     default:
         gdprintk(XENLOG_INFO, "%s: unimplemented MSR %#x\n", __func__, idx);
@@ -149,6 +312,20 @@ int viridian_synic_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
 
 int viridian_synic_vcpu_init(const struct vcpu *v)
 {
+    struct viridian_vcpu *vv = v->arch.hvm.viridian;
+    unsigned int i;
+
+    /*
+     * The specification says that all synthetic interrupts must be
+     * initially masked.
+     */
+    for ( i = 0; i < ARRAY_SIZE(vv->sint); i++ )
+        vv->sint[i].mask = 1;
+
+    /* Initialize the mapping array with invalid values */
+    for ( i = 0; i < ARRAY_SIZE(vv->vector_to_sintx); i++ )
+        vv->vector_to_sintx[i] = ARRAY_SIZE(vv->sint);
+
     return 0;
 }
 
@@ -159,17 +336,71 @@ int viridian_synic_domain_init(const struct domain *d)
 
 void viridian_synic_vcpu_deinit(const struct vcpu *v)
 {
-    viridian_unmap_guest_page(&v->arch.hvm.viridian->vp_assist);
+    struct viridian_vcpu *vv = v->arch.hvm.viridian;
+
+    viridian_unmap_guest_page(&vv->vp_assist);
+    viridian_unmap_guest_page(&vv->simp);
 }
 
 void viridian_synic_domain_deinit(const struct domain *d)
 {
 }
 
+void viridian_synic_poll_once(const struct vcpu *v)
+{
+    struct viridian_vcpu *vv = v->arch.hvm.viridian;
+
+    if ( vv->polled )
+        return;
+
+    /* There are currently no message sources */
+
+    vv->polled = true;
+}
+
+void viridian_synic_poll_unblock(const struct vcpu *v)
+{
+    v->arch.hvm.viridian->polled = false;
+}
+
+bool viridian_synic_is_auto_eoi_sint(const struct vcpu *v,
+                                     unsigned int vector)
+{
+    const struct viridian_vcpu *vv = v->arch.hvm.viridian;
+    unsigned int sintx = vv->vector_to_sintx[vector];
+    const union viridian_sint_msr *vs =
+        &array_access_nospec(vv->sint, sintx);
+
+    if ( sintx >= ARRAY_SIZE(vv->sint) )
+        return false;
+
+    return vs->auto_eoi;
+}
+
+void viridian_synic_ack_sint(const struct vcpu *v, unsigned int vector)
+{
+    struct viridian_vcpu *vv = v->arch.hvm.viridian;
+    unsigned int sintx = vv->vector_to_sintx[vector];
+
+    ASSERT(v == current);
+
+    if ( sintx < ARRAY_SIZE(vv->sint) )
+        __clear_bit(array_index_nospec(sintx, ARRAY_SIZE(vv->sint)),
+                    &vv->msg_pending);
+}
+
 void viridian_synic_save_vcpu_ctxt(const struct vcpu *v,
                                    struct hvm_viridian_vcpu_context *ctxt)
 {
     const struct viridian_vcpu *vv = v->arch.hvm.viridian;
+    unsigned int i;
+
+    BUILD_BUG_ON(ARRAY_SIZE(vv->sint) != ARRAY_SIZE(ctxt->sint_msr));
+
+    for ( i = 0; i < ARRAY_SIZE(vv->sint); i++ )
+        ctxt->sint_msr[i] = vv->sint[i].raw;
+
+    ctxt->simp_msr = vv->simp.msr.raw;
 
     ctxt->apic_assist_pending = vv->apic_assist_pending;
     ctxt->vp_assist_msr = vv->vp_assist.msr.raw;
@@ -180,12 +411,30 @@ void viridian_synic_load_vcpu_ctxt(
 {
     struct viridian_vcpu *vv = v->arch.hvm.viridian;
     struct domain *d = v->domain;
+    unsigned int i;
 
     vv->vp_assist.msr.raw = ctxt->vp_assist_msr;
     if ( vv->vp_assist.msr.enabled )
         viridian_map_guest_page(d, &vv->vp_assist);
 
     vv->apic_assist_pending = ctxt->apic_assist_pending;
+
+    vv->simp.msr.raw = ctxt->simp_msr;
+    if ( vv->simp.msr.enabled )
+        viridian_map_guest_page(d, &vv->simp);
+
+    for ( i = 0; i < ARRAY_SIZE(vv->sint); i++ )
+    {
+        uint8_t vector;
+
+        vv->sint[i].raw = ctxt->sint_msr[i];
+
+        vector = vv->sint[i].vector;
+        if ( vector < 0x10 )
+            continue;
+
+        vv->vector_to_sintx[vector] = i;
+    }
 }
 
 void viridian_synic_save_domain_ctxt(
diff --git a/xen/arch/x86/hvm/viridian/viridian.c b/xen/arch/x86/hvm/viridian/viridian.c
index 2b045ed88f..f3166fbcd0 100644
--- a/xen/arch/x86/hvm/viridian/viridian.c
+++ b/xen/arch/x86/hvm/viridian/viridian.c
@@ -89,6 +89,7 @@ typedef union _HV_CRASH_CTL_REG_CONTENTS
 
 /* Viridian CPUID leaf 3, Hypervisor Feature Indication */
 #define CPUID3D_CRASH_MSRS (1 << 10)
+#define CPUID3D_SINT_POLLING (1 << 17)
 
 /* Viridian CPUID leaf 4: Implementation Recommendations. */
 #define CPUID4A_HCALL_REMOTE_TLB_FLUSH (1 << 2)
@@ -178,6 +179,8 @@ void cpuid_viridian_leaves(const struct vcpu *v, uint32_t leaf,
             mask.AccessPartitionReferenceCounter = 1;
         if ( viridian_feature_mask(d) & HVMPV_reference_tsc )
             mask.AccessPartitionReferenceTsc = 1;
+        if ( viridian_feature_mask(d) & HVMPV_synic )
+            mask.AccessSynicRegs = 1;
 
         u.mask = mask;
 
@@ -186,6 +189,8 @@ void cpuid_viridian_leaves(const struct vcpu *v, uint32_t leaf,
 
         if ( viridian_feature_mask(d) & HVMPV_crash_ctl )
             res->d = CPUID3D_CRASH_MSRS;
+        if ( viridian_feature_mask(d) & HVMPV_synic )
+            res->d |= CPUID3D_SINT_POLLING;
 
         break;
     }
@@ -306,8 +311,16 @@ int guest_wrmsr_viridian(struct vcpu *v, uint32_t idx, uint64_t val)
     case HV_X64_MSR_ICR:
     case HV_X64_MSR_TPR:
     case HV_X64_MSR_VP_ASSIST_PAGE:
+    case HV_X64_MSR_SCONTROL:
+    case HV_X64_MSR_SVERSION:
+    case HV_X64_MSR_SIEFP:
+    case HV_X64_MSR_SIMP:
+    case HV_X64_MSR_EOM:
+    case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
         return viridian_synic_wrmsr(v, idx, val);
 
+    case HV_X64_MSR_TSC_FREQUENCY:
+    case HV_X64_MSR_APIC_FREQUENCY:
     case HV_X64_MSR_REFERENCE_TSC:
         return viridian_time_wrmsr(v, idx, val);
 
@@ -378,6 +391,12 @@ int guest_rdmsr_viridian(const struct vcpu *v, uint32_t idx, uint64_t *val)
     case HV_X64_MSR_ICR:
     case HV_X64_MSR_TPR:
     case HV_X64_MSR_VP_ASSIST_PAGE:
+    case HV_X64_MSR_SCONTROL:
+    case HV_X64_MSR_SVERSION:
+    case HV_X64_MSR_SIEFP:
+    case HV_X64_MSR_SIMP:
+    case HV_X64_MSR_EOM:
+    case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
         return viridian_synic_rdmsr(v, idx, val);
 
     case HV_X64_MSR_TSC_FREQUENCY:
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index a1a43cd792..e59dc895d7 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -461,10 +461,15 @@ void vlapic_EOI_set(struct vlapic *vlapic)
 
 void vlapic_handle_EOI(struct vlapic *vlapic, u8 vector)
 {
-    struct domain *d = vlapic_domain(vlapic);
+    struct vcpu *v = vlapic_vcpu(vlapic);
+    struct domain *d = v->domain;
+
+    /* All synic SINTx vectors are edge triggered */
 
     if ( vlapic_test_vector(vector, &vlapic->regs->data[APIC_TMR]) )
         vioapic_update_EOI(d, vector);
+    else if ( has_viridian_synic(d) )
+        viridian_synic_ack_sint(v, vector);
 
     hvm_dpci_msi_eoi(d, vector);
 }
@@ -1301,13 +1306,20 @@ int vlapic_has_pending_irq(struct vcpu *v)
     if ( !vlapic_enabled(vlapic) )
         return -1;
 
+    /*
+     * Poll the viridian message queues before checking the IRR since
+     * a synthetic interrupt may be asserted during the poll.
+     */
+    if ( has_viridian_synic(v->domain) )
+        viridian_synic_poll_once(v);
+
     irr = vlapic_find_highest_irr(vlapic);
     if ( irr == -1 )
-        return -1;
+        goto out;
 
     if ( hvm_funcs.virtual_intr_delivery_enabled &&
          !nestedhvm_vcpu_in_guestmode(v) )
-        return irr;
+        goto out;
 
     /*
      * If APIC assist was set then an EOI may have been avoided.
@@ -1328,9 +1340,13 @@ int vlapic_has_pending_irq(struct vcpu *v)
          (irr & 0xf0) <= (isr & 0xf0) )
     {
         viridian_apic_assist_clear(v);
-        return -1;
+        irr = -1;
     }
 
+out:
+    if ( irr == -1 )
+        viridian_synic_poll_unblock(v);
+
     return irr;
 }
 
@@ -1360,8 +1376,13 @@ int vlapic_ack_pending_irq(struct vcpu *v, int vector, bool_t force_ack)
     }
 
  done:
-    vlapic_set_vector(vector, &vlapic->regs->data[APIC_ISR]);
+    if ( !has_viridian_synic(v->domain) ||
+         !viridian_synic_is_auto_eoi_sint(v, vector) )
+        vlapic_set_vector(vector, &vlapic->regs->data[APIC_ISR]);
+
     vlapic_clear_irr(vector, vlapic);
+    viridian_synic_poll_unblock(v);
+
     return 1;
 }
 
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 37c3567a57..f67e9dbd12 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -472,6 +472,9 @@ static inline bool hvm_get_guest_bndcfgs(struct vcpu *v, u64 *val)
 #define has_viridian_apic_assist(d) \
     (is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_apic_assist))
 
+#define has_viridian_synic(d) \
+    (is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_synic))
+
 static inline void hvm_inject_exception(
     unsigned int vector, unsigned int type,
     unsigned int insn_len, int error_code)
diff --git a/xen/include/asm-x86/hvm/viridian.h b/xen/include/asm-x86/hvm/viridian.h
index 8146e2fc46..572904c091 100644
--- a/xen/include/asm-x86/hvm/viridian.h
+++ b/xen/include/asm-x86/hvm/viridian.h
@@ -26,10 +26,31 @@ struct viridian_page
     void *ptr;
 };
 
+union viridian_sint_msr
+{
+    uint64_t raw;
+    struct
+    {
+        uint64_t vector:8;
+        uint64_t reserved_preserved1:8;
+        uint64_t mask:1;
+        uint64_t auto_eoi:1;
+        uint64_t polling:1;
+        uint64_t reserved_preserved2:45;
+    };
+};
+
 struct viridian_vcpu
 {
     struct viridian_page vp_assist;
     bool apic_assist_pending;
+    bool polled;
+    unsigned int msg_pending;
+    uint64_t scontrol;
+    uint64_t siefp;
+    struct viridian_page simp;
+    union viridian_sint_msr sint[16];
+    uint8_t vector_to_sintx[256];
     uint64_t crash_param[5];
 };
 
@@ -90,6 +111,12 @@ void viridian_apic_assist_set(const struct vcpu *v);
 bool viridian_apic_assist_completed(const struct vcpu *v);
 void viridian_apic_assist_clear(const struct vcpu *v);
 
+bool viridian_synic_is_auto_eoi_sint(const struct vcpu *v,
+                                     unsigned int vector);
+void viridian_synic_poll_once(const struct vcpu *v);
+void viridian_synic_poll_unblock(const struct vcpu *v);
+void viridian_synic_ack_sint(const struct vcpu *v, unsigned int vector);
+
 #endif /* __ASM_X86_HVM_VIRIDIAN_H__ */
 
 /*
diff --git a/xen/include/public/arch-x86/hvm/save.h b/xen/include/public/arch-x86/hvm/save.h
index 40be84ecda..ec3e4df12c 100644
--- a/xen/include/public/arch-x86/hvm/save.h
+++ b/xen/include/public/arch-x86/hvm/save.h
@@ -602,6 +602,8 @@ struct hvm_viridian_vcpu_context {
     uint64_t vp_assist_msr;
     uint8_t  apic_assist_pending;
     uint8_t  _pad[7];
+    uint64_t simp_msr;
+    uint64_t sint_msr[16];
 };
 
 DECLARE_HVM_SAVE_TYPE(VIRIDIAN_VCPU, 17, struct hvm_viridian_vcpu_context);
diff --git a/xen/include/public/hvm/params.h b/xen/include/public/hvm/params.h
index 72f633ef2d..e7e3c7c892 100644
--- a/xen/include/public/hvm/params.h
+++ b/xen/include/public/hvm/params.h
@@ -146,6 +146,10 @@
 #define _HVMPV_crash_ctl 6
 #define HVMPV_crash_ctl (1 << _HVMPV_crash_ctl)
 
+/* Enable SYNIC MSRs */
+#define _HVMPV_synic 7
+#define HVMPV_synic (1 << _HVMPV_synic)
+
 #define HVMPV_feature_mask \
         (HVMPV_base_freq | \
          HVMPV_no_freq | \
@@ -153,7 +157,8 @@
          HVMPV_reference_tsc | \
          HVMPV_hcall_remote_tlb_flush | \
          HVMPV_apic_assist | \
-         HVMPV_crash_ctl)
+         HVMPV_crash_ctl | \
+         HVMPV_synic)
 
 #endif
 
-- 
2.20.1

