[PATCH 2/2] x86/vMCE: change address space for incident reporting
PFNs are purely a software construct. In particular, their association
with MFNs can change at any time. Switch to reporting back GFNs through
the hypercall interface (but stick to PFNs / paddr for the MSR one).
(Note that unmmap_broken_page() validly expects a GFN anyway.)
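
For reference, a minimal standalone sketch (plain C, not Xen code; PAGE_SHIFT,
the toy m2p[] table, and the helper below are local stand-ins for the real
maddr_to_mfn() / get_gpfn_from_mfn() / pfn_to_paddr() machinery) of the address
spaces involved and of the PV-only GADDR -> PADDR conversion the MSR path now
performs:

/*
 * Toy model (NOT Xen code) of the frame-number spaces involved:
 *  - MFN: machine frame number
 *  - PFN: pseudo-physical frame number (what the M2P maps an MFN to)
 *  - GFN: frame number the guest pages with; equal to the PFN for
 *    translated (HVM) guests and to the MFN for PV ones
 * The hypercall interface now reports GFN-based addresses ("GADDR"),
 * while the MSR interface keeps reporting PADDRs; for PV that means
 * translating the machine frame back to its pseudo-physical one.
 */
#include <inttypes.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static const uint64_t m2p[] = { 7, 3, 5, 1 };   /* toy machine-to-phys table */

/* Models what the vMCE MSR path hands a PV guest for a given GADDR (== MADDR). */
static uint64_t pv_gaddr_to_paddr(uint64_t gaddr)
{
    uint64_t mfn = gaddr >> PAGE_SHIFT;             /* maddr_to_mfn()      */
    uint64_t pfn = m2p[mfn];                        /* get_gpfn_from_mfn() */

    /* pfn_to_paddr(pfn) | PAGE_OFFSET(gaddr) */
    return (pfn << PAGE_SHIFT) | (gaddr & (PAGE_SIZE - 1));
}

int main(void)
{
    uint64_t maddr = (2 << PAGE_SHIFT) | 0x123;     /* error hit MFN 2, offset 0x123 */

    printf("GADDR %#" PRIx64 " -> PADDR %#" PRIx64 "\n",
           maddr, pv_gaddr_to_paddr(maddr));
    return 0;
}
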
While doing the adjustments, replace an open-coded instance of
PAGE_OFFSET().
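
That replacement is purely cosmetic; a quick standalone check of the
equivalence (PAGE_OFFSET() re-defined locally for the sketch, matching the
open-coded expression being replaced):

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT       12
#define PAGE_SIZE        (1UL << PAGE_SHIFT)
/* Local re-definition for the sketch; yields the within-page byte offset. */
#define PAGE_OFFSET(x)   ((uint64_t)(x) & (PAGE_SIZE - 1))

int main(void)
{
    uint64_t addr = 0xabcdef123ULL;
    uint64_t gfn  = 0x42;

    /* Old open-coded form vs. the form used after the patch. */
    assert((gfn << PAGE_SHIFT | (addr & (PAGE_SIZE - 1))) ==
           ((gfn << PAGE_SHIFT) | PAGE_OFFSET(addr)));
    return 0;
}
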
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
RFC because of the change to the hypercall interface, for which the
address space is not documented anywhere anyway. As I understand it, the
main purpose is to get things logged, and in a system-wide log (even if
maintained by Dom0) system-wide meaningful addresses are surely of more
use.
--- a/xen/arch/x86/cpu/mcheck/mcaction.c
+++ b/xen/arch/x86/cpu/mcheck/mcaction.c
@@ -1,5 +1,6 @@
 #include <xen/types.h>
 #include <xen/sched.h>
+#include <asm/p2m.h>
 #include "mcaction.h"
 #include "vmce.h"
 #include "mce.h"
@@ -43,7 +44,6 @@ mc_memerr_dhandler(struct mca_binfo *bin
     struct mcinfo_global *global = binfo->mig;
     struct domain *d;
     mfn_t mfn;
-    unsigned long gfn;
     uint32_t status;
     int vmce_vcpuid;
     unsigned int mc_vcpuid;
@@ -87,11 +87,13 @@ mc_memerr_dhandler(struct mca_binfo *bin
             BUG_ON( bank->mc_domid == DOMID_COW );
             if ( bank->mc_domid != DOMID_XEN )
             {
+                gfn_t gfn;
+
                 d = rcu_lock_domain_by_id(bank->mc_domid);
                 ASSERT(d);
-                gfn = get_gpfn_from_mfn((bank->mc_addr) >> PAGE_SHIFT);
 
-                if ( unmmap_broken_page(d, mfn, _gfn(gfn)) )
+                gfn = mfn_to_gfn(d, mfn);
+                if ( unmmap_broken_page(d, mfn, gfn) )
                 {
                     printk("Unmap broken memory %"PRI_mfn" for DOM%d failed\n",
                            mfn_x(mfn), d->domain_id);
@@ -115,8 +117,7 @@ mc_memerr_dhandler(struct mca_binfo *bin
                 else
                     vmce_vcpuid = mc_vcpuid;
 
-                bank->mc_addr = gfn << PAGE_SHIFT |
-                                (bank->mc_addr & (PAGE_SIZE - 1));
+                bank->mc_addr = gfn_to_gaddr(gfn) | PAGE_OFFSET(bank->mc_addr);
                 if ( fill_vmsr_data(bank, d, global->mc_gstatus, vmce_vcpuid) )
                 {
                     mce_printk(MCE_QUIET, "Fill vMCE# data for DOM%d "
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -465,6 +465,7 @@ int fill_vmsr_data(struct mcinfo_bank *m
 {
     struct vcpu *v = d->vcpu[0];
     bool broadcast = (vmce_vcpuid == VMCE_INJECT_BROADCAST);
+    paddr_t addr = mc_bank->mc_addr;
     int ret, err;
 
     if ( mc_bank->mc_domid == DOMID_INVALID )
@@ -479,6 +480,14 @@ int fill_vmsr_data(struct mcinfo_bank *m
     }
 
     /*
+     * Provide a PADDR through the MSR interface, for historical reasons. What
+     * we are being passed is a GADDR (i.e. MADDR for PV and PADDR for HVM).
+     */
+    if ( !paging_mode_translate(d) )
+        addr = pfn_to_paddr(get_gpfn_from_mfn(mfn_x(maddr_to_mfn(addr)))) |
+               PAGE_OFFSET(addr);
+
+    /*
      * vMCE with the actual error information is injected to vCPU0,
      * and, if broadcast is required, we choose to inject less severe
      * vMCEs to other vCPUs. Thus guest can always get the severest
@@ -487,7 +496,7 @@ int fill_vmsr_data(struct mcinfo_bank *m
      * vCPUs will not prevent guest from recovering on those vCPUs.
      */
     ret = vcpu_fill_mc_msrs(v, gstatus, mc_bank->mc_status,
-                            mc_bank->mc_addr, mc_bank->mc_misc);
+                            addr, mc_bank->mc_misc);
     if ( broadcast )
         for_each_vcpu ( d, v )
         {
 