
Re: [Xen-devel] [PATCH 5/5] xen: Write CR0, CR3 and CR4 in arch_set_info_guest()



On 05/06/2015 01:12 PM, Razvan Cojocaru wrote:
arch_set_info_guest() doesn't set CR0, CR3 or CR4; add code that does.

Signed-off-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
---
  xen/arch/x86/domain.c             |    3 +++
  xen/arch/x86/hvm/emulate.c        |    6 +++---
  xen/arch/x86/hvm/hvm.c            |   30 ++++++++++++++++--------------
  xen/arch/x86/hvm/svm/nestedsvm.c  |   14 +++++++-------
  xen/arch/x86/hvm/vmx/vmx.c        |    2 +-
  xen/arch/x86/hvm/vmx/vvmx.c       |   12 ++++++------
  xen/include/asm-x86/hvm/support.h |    6 +++---
  7 files changed, 39 insertions(+), 34 deletions(-)

For SVM bits:

Reviewed-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
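
For readers skimming the interface change: each setter gains an explicit
vcpu argument and a with_vm_event flag, so it can act on a vcpu other than
'current' and optionally skip the CR-write vm_event notification. A minimal
sketch of the two calling styles, assuming the prototypes introduced below
(hvm_set_cr0/3/4 are the real functions; the helper is hypothetical):

    /* Hypothetical helper, for illustration only -- not part of the patch. */
    static void restore_vcpu_control_regs(struct vcpu *v,
                                          const vcpu_guest_context_t *ctxt)
    {
        /* with_vm_event == 0: this is a state restore, not a
         * guest-initiated write, so no CR vm_event is raised.  CR0 and
         * CR4 go before CR3, matching the arch_set_info_guest() hunk --
         * presumably so the paging mode is settled before hvm_set_cr3()
         * calls paging_update_cr3(). */
        hvm_set_cr0(v, ctxt->ctrlreg[0], 0);
        hvm_set_cr4(v, ctxt->ctrlreg[4], 0);
        hvm_set_cr3(v, ctxt->ctrlreg[3], 0);
    }

    /* Emulation and nested-virt paths keep today's behaviour: act on
     * 'current' and raise the vm_event, hence with_vm_event == 1. */
    rc = hvm_set_cr0(current, val, 1);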


diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 1f1550e..fc2a64d 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -814,6 +814,9 @@ int arch_set_info_guest(
          for ( i = 0; i < ARRAY_SIZE(v->arch.debugreg); ++i )
              v->arch.debugreg[i] = c(debugreg[i]);

+        hvm_set_cr0(v, c(ctrlreg[0]), 0);
+        hvm_set_cr4(v, c(ctrlreg[4]), 0);
+        hvm_set_cr3(v, c(ctrlreg[3]), 0);
          hvm_set_info_guest(v);

          if ( is_hvm_vcpu(v) || v->is_initialised )
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 566eee7..e48c34d 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1265,14 +1265,14 @@ static int hvmemul_write_cr(
      switch ( reg )
      {
      case 0:
-        return hvm_set_cr0(val);
+        return hvm_set_cr0(current, val, 1);
      case 2:
          current->arch.hvm_vcpu.guest_cr[2] = val;
          return X86EMUL_OKAY;
      case 3:
-        return hvm_set_cr3(val);
+        return hvm_set_cr3(current, val, 1);
      case 4:
-        return hvm_set_cr4(val);
+        return hvm_set_cr4(current, val, 1);
      default:
          break;
      }
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index ed0ec9a..d236771 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3074,13 +3074,13 @@ int hvm_mov_to_cr(unsigned int cr, unsigned int gpr)
      switch ( cr )
      {
      case 0:
-        return hvm_set_cr0(val);
+        return hvm_set_cr0(curr, val, 1);

      case 3:
-        return hvm_set_cr3(val);
+        return hvm_set_cr3(curr, val, 1);

      case 4:
-        return hvm_set_cr4(val);
+        return hvm_set_cr4(curr, val, 1);

      case 8:
          vlapic_set_reg(vcpu_vlapic(curr), APIC_TASKPRI, ((val & 0x0f) << 4));
@@ -3177,9 +3177,8 @@ static void hvm_update_cr(struct vcpu *v, unsigned int cr, unsigned long value)
      hvm_update_guest_cr(v, cr);
  }

-int hvm_set_cr0(unsigned long value)
+int hvm_set_cr0(struct vcpu *v, unsigned long value, bool_t with_vm_event)
  {
-    struct vcpu *v = current;
      struct domain *d = v->domain;
      unsigned long gfn, old_value = v->arch.hvm_vcpu.guest_cr[0];
      struct page_info *page;
@@ -3278,7 +3277,9 @@ int hvm_set_cr0(unsigned long value)
          hvm_funcs.handle_cd(v, value);

      hvm_update_cr(v, 0, value);
-    hvm_event_cr0(value, old_value);
+
+    if ( with_vm_event )
+        hvm_event_cr0(value, old_value);

      if ( (value ^ old_value) & X86_CR0_PG ) {
          if ( !nestedhvm_vmswitch_in_progress(v) &&
               nestedhvm_vcpu_in_guestmode(v) )
@@ -3294,9 +3295,8 @@ int hvm_set_cr0(unsigned long value)
      return X86EMUL_EXCEPTION;
  }

-int hvm_set_cr3(unsigned long value)
+int hvm_set_cr3(struct vcpu *v, unsigned long value, bool_t with_vm_event)
  {
-    struct vcpu *v = current;
      struct page_info *page;
      unsigned long old;
@@ -3319,7 +3319,9 @@ int hvm_set_cr3(unsigned long value)
      old=v->arch.hvm_vcpu.guest_cr[3];
      v->arch.hvm_vcpu.guest_cr[3] = value;
      paging_update_cr3(v);
-    hvm_event_cr3(value, old);
+
+    if ( with_vm_event )
+        hvm_event_cr3(value, old);
      return X86EMUL_OKAY;

  bad_cr3:
@@ -3328,12 +3330,11 @@ int hvm_set_cr3(unsigned long value)
      return X86EMUL_UNHANDLEABLE;
  }

-int hvm_set_cr4(unsigned long value)
+int hvm_set_cr4(struct vcpu *v, unsigned long value, bool_t with_vm_event)
  {
-    struct vcpu *v = current;
      unsigned long old_cr;

-    if ( value & hvm_cr4_guest_reserved_bits(v, 0) )
+    if ( value & hvm_cr4_guest_reserved_bits(v, with_vm_event ? 0 : 1) )
      {
          HVM_DBG_LOG(DBG_LEVEL_1,
                      "Guest attempts to set reserved bit in CR4: %lx",
@@ -3359,7 +3360,8 @@ int hvm_set_cr4(unsigned long value)
          goto gpf;
      }

-    hvm_update_cr(v, 4, value);
+    if ( with_vm_event )
+        hvm_update_cr(v, 4, value);
      hvm_event_cr4(value, old_cr);

      /*
@@ -3824,7 +3826,7 @@ void hvm_task_switch(
          goto out;

-    if ( hvm_set_cr3(tss.cr3) )
+    if ( hvm_set_cr3(current, tss.cr3, 1) )
          goto out;

      regs->eip = tss.eip;
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index be5797a..6f322f4 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -274,7 +274,7 @@ int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)

      /* CR4 */
      v->arch.hvm_vcpu.guest_cr[4] = n1vmcb->_cr4;
-    rc = hvm_set_cr4(n1vmcb->_cr4);
+    rc = hvm_set_cr4(current, n1vmcb->_cr4, 1);
      if (rc != X86EMUL_OKAY)
          gdprintk(XENLOG_ERR, "hvm_set_cr4 failed, rc: %u\n", rc);
@@ -283,7 +283,7 @@ int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
          svm->ns_cr0, v->arch.hvm_vcpu.guest_cr[0]);
      v->arch.hvm_vcpu.guest_cr[0] = n1vmcb->_cr0 | X86_CR0_PE;
      n1vmcb->rflags &= ~X86_EFLAGS_VM;
-    rc = hvm_set_cr0(n1vmcb->_cr0 | X86_CR0_PE);
+    rc = hvm_set_cr0(current, n1vmcb->_cr0 | X86_CR0_PE, 1);
      if (rc != X86EMUL_OKAY)
          gdprintk(XENLOG_ERR, "hvm_set_cr0 failed, rc: %u\n", rc);
      svm->ns_cr0 = v->arch.hvm_vcpu.guest_cr[0];
@@ -309,7 +309,7 @@ int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
          v->arch.guest_table = pagetable_null();
          /* hvm_set_cr3() below sets v->arch.hvm_vcpu.guest_cr[3] for us. */
      }
-    rc = hvm_set_cr3(n1vmcb->_cr3);
+    rc = hvm_set_cr3(current, n1vmcb->_cr3, 1);
      if (rc != X86EMUL_OKAY)
          gdprintk(XENLOG_ERR, "hvm_set_cr3 failed, rc: %u\n", rc);
@@ -534,7 +534,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)

      /* CR4 */
      v->arch.hvm_vcpu.guest_cr[4] = ns_vmcb->_cr4;
-    rc = hvm_set_cr4(ns_vmcb->_cr4);
+    rc = hvm_set_cr4(current, ns_vmcb->_cr4, 1);
      if (rc != X86EMUL_OKAY)
          gdprintk(XENLOG_ERR, "hvm_set_cr4 failed, rc: %u\n", rc);
@@ -542,7 +542,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
      svm->ns_cr0 = v->arch.hvm_vcpu.guest_cr[0];
      cr0 = nestedsvm_fpu_vmentry(svm->ns_cr0, ns_vmcb, n1vmcb, n2vmcb);
      v->arch.hvm_vcpu.guest_cr[0] = ns_vmcb->_cr0;
-    rc = hvm_set_cr0(cr0);
+    rc = hvm_set_cr0(current, cr0, 1);
      if (rc != X86EMUL_OKAY)
          gdprintk(XENLOG_ERR, "hvm_set_cr0 failed, rc: %u\n", rc);
@@ -558,7 +558,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
          nestedsvm_vmcb_set_nestedp2m(v, ns_vmcb, n2vmcb);

          /* hvm_set_cr3() below sets v->arch.hvm_vcpu.guest_cr[3] for us. */
-        rc = hvm_set_cr3(ns_vmcb->_cr3);
+        rc = hvm_set_cr3(current, ns_vmcb->_cr3, 1);
          if (rc != X86EMUL_OKAY)
              gdprintk(XENLOG_ERR, "hvm_set_cr3 failed, rc: %u\n", rc);
      } else if (paging_mode_hap(v->domain)) {
@@ -570,7 +570,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
           * we assume it intercepts page faults.
           */
          /* hvm_set_cr3() below sets v->arch.hvm_vcpu.guest_cr[3] for us. */
-        rc = hvm_set_cr3(ns_vmcb->_cr3);
+        rc = hvm_set_cr3(current, ns_vmcb->_cr3, 1);
          if (rc != X86EMUL_OKAY)
              gdprintk(XENLOG_ERR, "hvm_set_cr3 failed, rc: %u\n", rc);
      } else {
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 562507a..7309c71 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2022,7 +2022,7 @@ static int vmx_cr_access(unsigned long exit_qualification)
                  (VMX_CONTROL_REG_ACCESS_DATA(exit_qualification) &
                   (X86_CR0_PE|X86_CR0_MP|X86_CR0_EM|X86_CR0_TS));
          HVMTRACE_LONG_1D(LMSW, value);
-        return hvm_set_cr0(value);
+        return hvm_set_cr0(current, value, 1);
      }
      default:
          BUG();
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 01726d5..fb16fc6 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1048,9 +1048,9 @@ static void load_shadow_guest_state(struct vcpu *v)

      nvcpu->guest_cr[0] = __get_vvmcs(vvmcs, CR0_READ_SHADOW);
      nvcpu->guest_cr[4] = __get_vvmcs(vvmcs, CR4_READ_SHADOW);
-    hvm_set_cr0(__get_vvmcs(vvmcs, GUEST_CR0));
-    hvm_set_cr4(__get_vvmcs(vvmcs, GUEST_CR4));
-    hvm_set_cr3(__get_vvmcs(vvmcs, GUEST_CR3));
+    hvm_set_cr0(current, __get_vvmcs(vvmcs, GUEST_CR0), 1);
+    hvm_set_cr4(current, __get_vvmcs(vvmcs, GUEST_CR4), 1);
+    hvm_set_cr3(current, __get_vvmcs(vvmcs, GUEST_CR3), 1);

      control = __get_vvmcs(vvmcs, VM_ENTRY_CONTROLS);
      if ( control & VM_ENTRY_LOAD_GUEST_PAT )
@@ -1250,9 +1250,9 @@ static void load_vvmcs_host_state(struct vcpu *v)
          __vmwrite(vmcs_h2g_field[i].guest_field, r);
      }

-    hvm_set_cr0(__get_vvmcs(vvmcs, HOST_CR0));
-    hvm_set_cr4(__get_vvmcs(vvmcs, HOST_CR4));
-    hvm_set_cr3(__get_vvmcs(vvmcs, HOST_CR3));
+    hvm_set_cr0(current, __get_vvmcs(vvmcs, HOST_CR0), 1);
+    hvm_set_cr4(current, __get_vvmcs(vvmcs, HOST_CR4), 1);
+    hvm_set_cr3(current, __get_vvmcs(vvmcs, HOST_CR3), 1);

      control = __get_vvmcs(vvmcs, VM_EXIT_CONTROLS);
      if ( control & VM_EXIT_LOAD_HOST_PAT )
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index f55373e..625ad66 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -124,9 +124,9 @@ void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value);

  /* These functions all return X86EMUL return codes. */
  int hvm_set_efer(uint64_t value);
-int hvm_set_cr0(unsigned long value);
-int hvm_set_cr3(unsigned long value);
-int hvm_set_cr4(unsigned long value);
+int hvm_set_cr0(struct vcpu *v, unsigned long value, bool_t with_vm_event);
+int hvm_set_cr3(struct vcpu *v, unsigned long value, bool_t with_vm_event);
+int hvm_set_cr4(struct vcpu *v, unsigned long value, bool_t with_vm_event);
  int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
  int hvm_msr_write_intercept(
      unsigned int msr, uint64_t msr_content, bool_t event_only);
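
As the support.h comment says, these functions return X86EMUL codes, so
callers outside the emulator should check the result the way the nestedsvm
code above does. A minimal caller sketch, assuming the new prototypes (the
wrapper and its name are hypothetical):

    /* Hypothetical wrapper, for illustration only. */
    static int load_guest_cr3_checked(struct vcpu *v, unsigned long cr3)
    {
        /* '1' keeps the CR3-write vm_event, matching every call site
         * converted by this patch. */
        int rc = hvm_set_cr3(v, cr3, 1);

        if ( rc != X86EMUL_OKAY )
            gdprintk(XENLOG_ERR, "hvm_set_cr3 failed, rc: %u\n", rc);

        return rc;
    }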


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

