
[XEN PATCH v1 3/4] x86/hvm: guard calls to nestedhvm routines



Check whether nested HVM is enabled for the domain before calling nestedhvm_vcpu_*()
and other nestedhvm routines that are not already guarded.
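
The guard is the same in each case; as an illustrative sketch only (it mirrors
the hunks below, it is not a separate change):

    /* Only touch nested-HVM state when the domain actually has it enabled. */
    if ( nestedhvm_enabled(v->domain) )
        nestedhvm_vcpu_destroy(v);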

Signed-off-by: Sergiy Kibrik <Sergiy_Kibrik@xxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c      | 6 ++++--
 xen/arch/x86/hvm/svm/asid.c | 2 +-
 xen/arch/x86/hvm/svm/svm.c  | 6 +++---
 3 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 2f31180b6f..5abbf7029d 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1658,7 +1658,8 @@ int hvm_vcpu_initialise(struct vcpu *v)
     return 0;
 
  fail6:
-    nestedhvm_vcpu_destroy(v);
+    if ( nestedhvm_enabled(d) )
+        nestedhvm_vcpu_destroy(v);
  fail5:
     free_compat_arg_xlat(v);
  fail4:
@@ -1682,7 +1683,8 @@ void hvm_vcpu_destroy(struct vcpu *v)
     if ( hvm_altp2m_supported() )
         altp2m_vcpu_destroy(v);
 
-    nestedhvm_vcpu_destroy(v);
+    if ( nestedhvm_enabled(v->domain) )
+        nestedhvm_vcpu_destroy(v);
 
     free_compat_arg_xlat(v);
 
diff --git a/xen/arch/x86/hvm/svm/asid.c b/xen/arch/x86/hvm/svm/asid.c
index 7977a8e86b..05c8971714 100644
--- a/xen/arch/x86/hvm/svm/asid.c
+++ b/xen/arch/x86/hvm/svm/asid.c
@@ -30,7 +30,7 @@ void svm_asid_handle_vmrun(void)
     struct vcpu *curr = current;
     struct vmcb_struct *vmcb = curr->arch.hvm.svm.vmcb;
     struct hvm_vcpu_asid *p_asid =
-        nestedhvm_vcpu_in_guestmode(curr)
+        ( nestedhvm_enabled(curr->domain) && nestedhvm_vcpu_in_guestmode(curr) )
         ? &vcpu_nestedhvm(curr).nv_n2asid : &curr->arch.hvm.n1asid;
     bool need_flush = hvm_asid_handle_vmenter(p_asid);
 
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 09ac138691..d7d91427fd 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -795,7 +795,7 @@ static void cf_check svm_set_tsc_offset(struct vcpu *v, u64 offset, u64 at_tsc)
     n1vmcb = vcpu_nestedhvm(v).nv_n1vmcx;
     n2vmcb = vcpu_nestedhvm(v).nv_n2vmcx;
 
-    if ( nestedhvm_vcpu_in_guestmode(v) )
+    if ( nestedhvm_enabled(d) && nestedhvm_vcpu_in_guestmode(v) )
     {
         n2_tsc_offset = vmcb_get_tsc_offset(n2vmcb) -
                         vmcb_get_tsc_offset(n1vmcb);
@@ -2172,7 +2172,7 @@ svm_vmexit_do_vmrun(struct cpu_user_regs *regs,
         return;
     }
 
-    if ( !nestedsvm_vmcb_map(v, vmcbaddr) )
+    if ( nestedhvm_enabled(v->domain) && !nestedsvm_vmcb_map(v, vmcbaddr) )
     {
         gdprintk(XENLOG_ERR, "VMRUN: mapping vmcb failed, injecting #GP\n");
         hvm_inject_hw_exception(X86_EXC_GP, 0);
@@ -2190,7 +2190,7 @@ nsvm_get_nvmcb_page(struct vcpu *v, uint64_t vmcbaddr)
     struct page_info *page;
     struct nestedvcpu *nv = &vcpu_nestedhvm(v);
 
-    if ( !nestedsvm_vmcb_map(v, vmcbaddr) )
+    if ( nestedhvm_enabled(v->domain) && !nestedsvm_vmcb_map(v, vmcbaddr) )
         return NULL;
 
     /* Need to translate L1-GPA to MPA */
-- 
2.25.1
