[Xen-devel] [PATCH 8/8] viridian: introduce struct viridian_page

The 'vp_assist' page is currently an example of a guest page which needs to
be kept mapped throughout the lifetime of a guest, but there are other
such examples in the specification [1]. This patch therefore introduces a
generic 'viridian_page' type and converts the current vp_assist/apic_assist
related code to use it. Subsequent patches implementing other enlightenments
can then also make use of it.
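
To illustrate, the wrmsr handling for a future enlightenment page would
then follow the same pattern as the vp_assist page below, needing only its
own 'struct viridian_page' instance (the MSR and field names here are
hypothetical, for illustration only):

    case HV_X64_MSR_EXAMPLE_PAGE: /* hypothetical MSR */
        /* release any previous mapping */
        teardown_guest_page(&v->arch.hvm.viridian.example);
        v->arch.hvm.viridian.example.msr.raw = val;
        if ( v->arch.hvm.viridian.example.msr.fields.enabled )
            initialize_guest_page(v, &v->arch.hvm.viridian.example);
        break;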

This patch also renames the 'vp_assist_pending' field in struct
hvm_viridian_vcpu_context to 'apic_assist_pending' to more accurately
reflect its meaning. The term 'vp_assist' applies to the whole page rather
than just the EOI-avoidance enlightenment. New versions of the specification
have defined data structures for other enlightenments within the same page.
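
For clarity, the page layout implied by the specification is roughly as
below; this is only a sketch (not the definition Xen uses), showing that
the APIC assist format occupies just the start of the page:

    typedef struct
    {
        struct
        {
            uint32_t no_eoi:1;      /* EOI-avoidance enlightenment */
            uint32_t reserved:31;
        } ApicAssist;
        uint8_t reserved2[PAGE_SIZE - 4]; /* newer spec versions define
                                             further structures here */
    } HV_VP_ASSIST_PAGE;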

No functional change.

[1] https://github.com/MicrosoftDocs/Virtualization-Documentation/raw/live/tlfs/Hypervisor%20Top%20Level%20Functional%20Specification%20v5.0C.pdf

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 tools/misc/xen-hvmctx.c                |  4 +--
 xen/arch/x86/hvm/viridian/synic.c      | 47 ++++++++++++++++------------------
 xen/include/asm-x86/hvm/viridian.h     | 13 ++++++----
 xen/include/public/arch-x86/hvm/save.h |  2 +-
 4 files changed, 33 insertions(+), 33 deletions(-)

diff --git a/tools/misc/xen-hvmctx.c b/tools/misc/xen-hvmctx.c
index 40e77851be..a4efadf307 100644
--- a/tools/misc/xen-hvmctx.c
+++ b/tools/misc/xen-hvmctx.c
@@ -370,9 +370,9 @@ static void dump_viridian_vcpu(void)
 {
     HVM_SAVE_TYPE(VIRIDIAN_VCPU) p;
     READ(p);
-    printf("    VIRIDIAN_VCPU: vp_assist_msr 0x%llx, vp_assist_pending %s\n",
+    printf("    VIRIDIAN_VCPU: vp_assist_msr 0x%llx, apic_assist_pending %s\n",
           (unsigned long long) p.vp_assist_msr,
-          p.vp_assist_pending ? "true" : "false");
+          p.apic_assist_pending ? "true" : "false");
 }
 
 static void dump_vmce_vcpu(void)
diff --git a/xen/arch/x86/hvm/viridian/synic.c b/xen/arch/x86/hvm/viridian/synic.c
index 735c4c897c..f9aa8fd898 100644
--- a/xen/arch/x86/hvm/viridian/synic.c
+++ b/xen/arch/x86/hvm/viridian/synic.c
@@ -36,14 +36,13 @@ static void dump_vp_assist(const struct vcpu *v)
            v, (unsigned long)va->fields.pfn);
 }
 
-static void initialize_vp_assist(struct vcpu *v)
+static void initialize_guest_page(struct vcpu *v, struct viridian_page *vp)
 {
     struct domain *d = v->domain;
-    unsigned long gmfn = v->arch.hvm.viridian.vp_assist.msr.fields.pfn;
+    unsigned long gmfn = vp->msr.fields.pfn;
     struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
-    HV_VP_ASSIST_PAGE *ptr;
 
-    ASSERT(!v->arch.hvm.viridian.vp_assist.ptr);
+    ASSERT(!vp->ptr);
 
     if ( !page )
         goto fail;
@@ -54,16 +53,14 @@ static void initialize_vp_assist(struct vcpu *v)
         goto fail;
     }
 
-    ptr = __map_domain_page_global(page);
-    if ( !ptr )
+    vp->ptr = __map_domain_page_global(page);
+    if ( !vp->ptr )
     {
         put_page_and_type(page);
         goto fail;
     }
 
-    clear_page(ptr);
-
-    v->arch.hvm.viridian.vp_assist.ptr = ptr;
+    clear_page(vp->ptr);
     return;
 
  fail:
@@ -71,19 +68,18 @@ static void initialize_vp_assist(struct vcpu *v)
              gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
 }
 
-static void teardown_vp_assist(struct vcpu *v)
+static void teardown_guest_page(struct viridian_page *vp)
 {
-    HV_VP_ASSIST_PAGE *ptr = v->arch.hvm.viridian.vp_assist.ptr;
     struct page_info *page;
 
-    if ( !ptr )
+    if ( !vp->ptr )
         return;
 
-    v->arch.hvm.viridian.vp_assist.ptr = NULL;
+    page = mfn_to_page(domain_page_map_to_mfn(vp->ptr));
 
-    page = mfn_to_page(domain_page_map_to_mfn(ptr));
+    unmap_domain_page_global(vp->ptr);
+    vp->ptr = NULL;
 
-    unmap_domain_page_global(ptr);
     put_page_and_type(page);
 }
 
@@ -99,10 +95,10 @@ void viridian_apic_assist_set(struct vcpu *v)
      * wrong and the VM will most likely hang so force a crash now
      * to make the problem clear.
      */
-    if ( v->arch.hvm.viridian.vp_assist.pending )
+    if ( v->arch.hvm.viridian.apic_assist_pending )
         domain_crash(v->domain);
 
-    v->arch.hvm.viridian.vp_assist.pending = true;
+    v->arch.hvm.viridian.apic_assist_pending = true;
     ptr->ApicAssist.no_eoi = 1;
 }
 
@@ -113,11 +109,11 @@ bool viridian_apic_assist_completed(struct vcpu *v)
     if ( !ptr )
         return false;
 
-    if ( v->arch.hvm.viridian.vp_assist.pending &&
+    if ( v->arch.hvm.viridian.apic_assist_pending &&
          !ptr->ApicAssist.no_eoi )
     {
         /* An EOI has been avoided */
-        v->arch.hvm.viridian.vp_assist.pending = false;
+        v->arch.hvm.viridian.apic_assist_pending = false;
         return true;
     }
 
@@ -132,7 +128,7 @@ void viridian_apic_assist_clear(struct vcpu *v)
         return;
 
     ptr->ApicAssist.no_eoi = 0;
-    v->arch.hvm.viridian.vp_assist.pending = false;
+    v->arch.hvm.viridian.apic_assist_pending = false;
 }
 
 int viridian_synic_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
@@ -158,11 +154,12 @@ int viridian_synic_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
         break;
 
     case HV_X64_MSR_VP_ASSIST_PAGE:
-        teardown_vp_assist(v); /* release any previous mapping */
+        /* release any previous mapping */
+        teardown_guest_page(&v->arch.hvm.viridian.vp_assist);
         v->arch.hvm.viridian.vp_assist.msr.raw = val;
         dump_vp_assist(v);
         if ( v->arch.hvm.viridian.vp_assist.msr.fields.enabled )
-            initialize_vp_assist(v);
+            initialize_guest_page(v, &v->arch.hvm.viridian.vp_assist);
         break;
 
     default:
@@ -211,7 +208,7 @@ int viridian_synic_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
 void viridian_synic_save_vcpu_ctxt(const struct vcpu *v,
                                    struct hvm_viridian_vcpu_context *ctxt)
 {
-    ctxt->vp_assist_pending = v->arch.hvm.viridian.vp_assist.pending;
+    ctxt->apic_assist_pending = v->arch.hvm.viridian.apic_assist_pending;
     ctxt->vp_assist_msr = v->arch.hvm.viridian.vp_assist.msr.raw;
 }
 
@@ -220,9 +217,9 @@ void viridian_synic_load_vcpu_ctxt(struct vcpu *v,
 {
     v->arch.hvm.viridian.vp_assist.msr.raw = ctxt->vp_assist_msr;
     if ( v->arch.hvm.viridian.vp_assist.msr.fields.enabled )
-        initialize_vp_assist(v);
+        initialize_guest_page(v, &v->arch.hvm.viridian.vp_assist);
 
-    v->arch.hvm.viridian.vp_assist.pending = !!ctxt->vp_assist_pending;
+    v->arch.hvm.viridian.apic_assist_pending = !!ctxt->apic_assist_pending;
 }
 
 /*
diff --git a/xen/include/asm-x86/hvm/viridian.h b/xen/include/asm-x86/hvm/viridian.h
index 66aa24c43e..8d5690582e 100644
--- a/xen/include/asm-x86/hvm/viridian.h
+++ b/xen/include/asm-x86/hvm/viridian.h
@@ -88,13 +88,16 @@ union viridian_page_msr
     } fields;
 };
 
+struct viridian_page
+{
+    union viridian_page_msr msr;
+    void *ptr;
+};
+
 struct viridian_vcpu
 {
-    struct {
-        union viridian_page_msr msr;
-        void *ptr;
-        bool pending;
-    } vp_assist;
+    struct viridian_page vp_assist;
+    bool apic_assist_pending;
     uint64_t crash_param[5];
 };
 
diff --git a/xen/include/public/arch-x86/hvm/save.h b/xen/include/public/arch-x86/hvm/save.h
index 4691d4d4aa..80e762c335 100644
--- a/xen/include/public/arch-x86/hvm/save.h
+++ b/xen/include/public/arch-x86/hvm/save.h
@@ -600,7 +600,7 @@ DECLARE_HVM_SAVE_TYPE(VIRIDIAN_DOMAIN, 15, struct hvm_viridian_domain_context);
 
 struct hvm_viridian_vcpu_context {
     uint64_t vp_assist_msr;
-    uint8_t  vp_assist_pending;
+    uint8_t  apic_assist_pending;
     uint8_t  _pad[7];
 };
 
-- 
2.11.0

