
[Xen-changelog] [xen-unstable] xen: add virtual x2apic support for apicv


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-unstable <patchbot@xxxxxxx>
  • Date: Tue, 18 Sep 2012 08:22:10 +0000
  • Delivery-date: Tue, 18 Sep 2012 08:22:17 +0000
  • List-id: "Change log for Mercurial (receive only)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Jiongxi Li <jiongxi.li@xxxxxxxxx>
# Date 1347912362 -3600
# Node ID c2578dd96b8318e108fff0f340411135dedaa47d
# Parent  713b8849b11afa05f1dde157a3f5086fa3aaad08
xen: add virtual x2apic support for apicv

Basically, to benefit from APICv we need to clear the MSR bitmap for the
corresponding x2APIC MSRs:
  0x800 - 0x8ff: no read intercept, for APICv register virtualization
  TPR, EOI, SELF-IPI: no write intercept, for virtual interrupt delivery
A standalone sketch of the MSR bitmap layout this relies on is given
below, just ahead of the diff.

Signed-off-by: Jiongxi Li <jiongxi.li@xxxxxxxxx>
Committed-by: Keir Fraser <keir@xxxxxxx>
---
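
The new "type" argument keys off the layout of the 4KiB VMX MSR bitmap:
bytes 0x000-0x3ff hold the read bitmap for MSRs 0x00000000-0x00001fff,
0x400-0x7ff the read bitmap for 0xc0000000-0xc0001fff, 0x800-0xbff the
write bitmap for the low range, and 0xc00-0xfff the write bitmap for the
high range. The standalone sketch below mirrors the bit-clearing the
patched vmx_disable_intercept_for_msr() performs; the helper names
(clear_bit_le, disable_intercept) and the plain byte-array bitmap are
illustrative assumptions, not the committed Xen code.

/*
 * Standalone sketch (not Xen code as-is): clear read/write intercept
 * bits in a 4KiB VMX MSR bitmap laid out as described above.
 */
#include <stdint.h>

#define MSR_TYPE_R 1
#define MSR_TYPE_W 2

/* Clear one bit in a little-endian bitmap held in a byte array. */
static void clear_bit_le(uint8_t *bitmap, uint32_t bit)
{
    bitmap[bit / 8] &= ~(1u << (bit % 8));
}

static void disable_intercept(uint8_t msr_bitmap[4096], uint32_t msr, int type)
{
    if ( msr <= 0x1fff )
    {
        if ( type & MSR_TYPE_R )
            clear_bit_le(msr_bitmap + 0x000, msr);   /* read-low  */
        if ( type & MSR_TYPE_W )
            clear_bit_le(msr_bitmap + 0x800, msr);   /* write-low */
    }
    else if ( msr >= 0xc0000000 && msr <= 0xc0001fff )
    {
        msr &= 0x1fff;
        if ( type & MSR_TYPE_R )
            clear_bit_le(msr_bitmap + 0x400, msr);   /* read-high  */
        if ( type & MSR_TYPE_W )
            clear_bit_le(msr_bitmap + 0xc00, msr);   /* write-high */
    }
}

With this layout, clearing only the read bits for MSRs 0x800-0x8ff (APICv
register virtualization) and only the write bits for TPR/EOI/SELF-IPI
(virtual interrupt delivery) is exactly what the construct_vmcs() hunk
below does.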


diff -r 713b8849b11a -r c2578dd96b83 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Mon Sep 17 21:05:11 2012 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Mon Sep 17 21:06:02 2012 +0100
@@ -643,7 +643,7 @@ static void vmx_set_host_env(struct vcpu
               (unsigned long)&get_cpu_info()->guest_cpu_user_regs.error_code);
 }
 
-void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr)
+void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr, int type)
 {
     unsigned long *msr_bitmap = v->arch.hvm_vmx.msr_bitmap;
 
@@ -658,14 +658,18 @@ void vmx_disable_intercept_for_msr(struc
      */
     if ( msr <= 0x1fff )
     {
-        __clear_bit(msr, msr_bitmap + 0x000/BYTES_PER_LONG); /* read-low */
-        __clear_bit(msr, msr_bitmap + 0x800/BYTES_PER_LONG); /* write-low */
+        if (type & MSR_TYPE_R)
+            __clear_bit(msr, msr_bitmap + 0x000/BYTES_PER_LONG); /* read-low */
+        if (type & MSR_TYPE_W)
+            __clear_bit(msr, msr_bitmap + 0x800/BYTES_PER_LONG); /* write-low */
     }
     else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
     {
         msr &= 0x1fff;
-        __clear_bit(msr, msr_bitmap + 0x400/BYTES_PER_LONG); /* read-high */
-        __clear_bit(msr, msr_bitmap + 0xc00/BYTES_PER_LONG); /* write-high */
+        if (type & MSR_TYPE_R)
+            __clear_bit(msr, msr_bitmap + 0x400/BYTES_PER_LONG); /* read-high */
+        if (type & MSR_TYPE_W)
+            __clear_bit(msr, msr_bitmap + 0xc00/BYTES_PER_LONG); /* write-high */
     }
 }
 
@@ -761,13 +765,25 @@ static int construct_vmcs(struct vcpu *v
         v->arch.hvm_vmx.msr_bitmap = msr_bitmap;
         __vmwrite(MSR_BITMAP, virt_to_maddr(msr_bitmap));
 
-        vmx_disable_intercept_for_msr(v, MSR_FS_BASE);
-        vmx_disable_intercept_for_msr(v, MSR_GS_BASE);
-        vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_CS);
-        vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_ESP);
-        vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_EIP);
+        vmx_disable_intercept_for_msr(v, MSR_FS_BASE, MSR_TYPE_R | MSR_TYPE_W);
+        vmx_disable_intercept_for_msr(v, MSR_GS_BASE, MSR_TYPE_R | MSR_TYPE_W);
+        vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_CS, MSR_TYPE_R | MSR_TYPE_W);
+        vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_ESP, MSR_TYPE_R | MSR_TYPE_W);
+        vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_EIP, MSR_TYPE_R | MSR_TYPE_W);
         if ( cpu_has_vmx_pat && paging_mode_hap(d) )
-            vmx_disable_intercept_for_msr(v, MSR_IA32_CR_PAT);
+            vmx_disable_intercept_for_msr(v, MSR_IA32_CR_PAT, MSR_TYPE_R | MSR_TYPE_W);
+        if ( cpu_has_vmx_apic_reg_virt )
+        {
+            int msr;
+            for (msr = MSR_IA32_APICBASE_MSR; msr <= MSR_IA32_APICBASE_MSR + 0xff; msr++)
+                vmx_disable_intercept_for_msr(v, msr, MSR_TYPE_R);
+        }
+        if ( cpu_has_vmx_virtual_intr_delivery )
+        {
+            vmx_disable_intercept_for_msr(v, MSR_IA32_APICTPR_MSR, MSR_TYPE_W);
+            vmx_disable_intercept_for_msr(v, MSR_IA32_APICEOI_MSR, MSR_TYPE_W);
+            vmx_disable_intercept_for_msr(v, MSR_IA32_APICSELF_MSR, MSR_TYPE_W);
+        }
     }
 
     /* I/O access bitmap. */
diff -r 713b8849b11a -r c2578dd96b83 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Mon Sep 17 21:05:11 2012 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Mon Sep 17 21:06:02 2012 +0100
@@ -1962,7 +1962,7 @@ static int vmx_msr_write_intercept(unsig
             for ( ; (rc == 0) && lbr->count; lbr++ )
                 for ( i = 0; (rc == 0) && (i < lbr->count); i++ )
                     if ( (rc = vmx_add_guest_msr(lbr->base + i)) == 0 )
-                        vmx_disable_intercept_for_msr(v, lbr->base + i);
+                        vmx_disable_intercept_for_msr(v, lbr->base + i, MSR_TYPE_R | MSR_TYPE_W);
         }
 
         if ( (rc < 0) ||
diff -r 713b8849b11a -r c2578dd96b83 xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h        Mon Sep 17 21:05:11 2012 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h        Mon Sep 17 21:06:02 2012 +0100
@@ -405,7 +405,9 @@ enum vmcs_field {
 
 #define VMCS_VPID_WIDTH 16
 
-void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr);
+#define MSR_TYPE_R 1
+#define MSR_TYPE_W 2
+void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr, int type);
 int vmx_read_guest_msr(u32 msr, u64 *val);
 int vmx_write_guest_msr(u32 msr, u64 val);
 int vmx_add_guest_msr(u32 msr);
diff -r 713b8849b11a -r c2578dd96b83 xen/include/asm-x86/msr-index.h
--- a/xen/include/asm-x86/msr-index.h   Mon Sep 17 21:05:11 2012 +0100
+++ b/xen/include/asm-x86/msr-index.h   Mon Sep 17 21:06:02 2012 +0100
@@ -291,6 +291,9 @@
 #define MSR_IA32_APICBASE_ENABLE       (1<<11)
 #define MSR_IA32_APICBASE_BASE         (0xfffff<<12)
 #define MSR_IA32_APICBASE_MSR           0x800
+#define MSR_IA32_APICTPR_MSR            0x808
+#define MSR_IA32_APICEOI_MSR            0x80b
+#define MSR_IA32_APICSELF_MSR           0x83f
 
 #define MSR_IA32_UCODE_WRITE           0x00000079
 #define MSR_IA32_UCODE_REV             0x0000008b
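
For reference, and not part of the patch: the three new msr-index.h
constants follow the architectural x2APIC addressing rule that a
register's MSR index is 0x800 plus its xAPIC MMIO offset shifted right by
four. The small standalone program below (hypothetical helper
x2apic_msr()) just reproduces that arithmetic for TPR (offset 0x80), EOI
(0xb0) and SELF-IPI (0x3f0).

/*
 * Sketch, not part of the patch: derive x2APIC MSR indices from xAPIC
 * MMIO register offsets.
 */
#include <stdio.h>

static unsigned int x2apic_msr(unsigned int mmio_offset)
{
    return 0x800 + (mmio_offset >> 4);
}

int main(void)
{
    printf("TPR      0x%03x\n", x2apic_msr(0x080)); /* 0x808 == MSR_IA32_APICTPR_MSR  */
    printf("EOI      0x%03x\n", x2apic_msr(0x0b0)); /* 0x80b == MSR_IA32_APICEOI_MSR  */
    printf("SELF-IPI 0x%03x\n", x2apic_msr(0x3f0)); /* 0x83f == MSR_IA32_APICSELF_MSR */
    return 0;
}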

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog