
[Xen-devel] [V11 PATCH 16/21] PVH xen: add hypercall support for PVH



This patch expands HVM hypercall support to include PVH guests.
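
For context, not part of the patch itself: once VCPUOP_initialise and
VCPUOP_up are whitelisted below, a 64-bit PVH guest can boot secondary
vCPUs through the normal vcpu_op path.  A minimal guest-side sketch,
assuming HYPERVISOR_vcpu_op() stands in for whatever hypercall wrapper
the guest kernel provides (the header path is illustrative):

    #include <xen/interface/vcpu.h>   /* VCPUOP_*, vcpu_guest_context */

    static int bring_up_vcpu(unsigned int vcpuid,
                             struct vcpu_guest_context *ctxt)
    {
        int rc;

        /* Hand the new vCPU its initial register state. */
        rc = HYPERVISOR_vcpu_op(VCPUOP_initialise, vcpuid, ctxt);
        if ( rc != 0 && rc != -EEXIST )
            return rc;

        /* Make it runnable; -ENOSYS means the hypervisor does not
         * whitelist these ops for PVH. */
        return HYPERVISOR_vcpu_op(VCPUOP_up, vcpuid, NULL);
    }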

Changes in v8:
  - Carve out the PVH support in hvm_op into a small function (see the
    sketch below).
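
For completeness, a guest-side sketch of the one operation pvh_hvm_op()
accepts: registering the event-channel callback via HVMOP_set_param /
HVM_PARAM_CALLBACK_IRQ.  HYPERVISOR_hvm_op() is again assumed to be the
guest's hypercall wrapper; the value encoding (val[63:56] == 2 for a
per-vCPU vector callback) follows xen/include/public/hvm/params.h:

    #include <xen/interface/hvm/hvm_op.h>  /* HVMOP_set_param, xen_hvm_param */
    #include <xen/interface/hvm/params.h>  /* HVM_PARAM_CALLBACK_IRQ */

    static int register_callback_vector(uint8_t vector)
    {
        struct xen_hvm_param a = {
            .domid = DOMID_SELF,
            .index = HVM_PARAM_CALLBACK_IRQ,
            /* val[63:56] == 2: deliver events directly on this IDT vector. */
            .value = (2ULL << 56) | vector,
        };

        /* For a PVH guest this lands in pvh_hvm_op() on the hypervisor side. */
        return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
    }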

Signed-off-by: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>
Acked-by: Keir Fraser <keir@xxxxxxx>
Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Reviewed-by: Tim Deegan <tim@xxxxxxx>
PV-HVM-Regression-Tested-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c      |   80 +++++++++++++++++++++++++++++++++++++------
 xen/arch/x86/x86_64/traps.c |    2 +-
 2 files changed, 70 insertions(+), 12 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 93aa42c..2407396 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3192,6 +3192,17 @@ static long hvm_vcpu_op(
     case VCPUOP_register_vcpu_time_memory_area:
         rc = do_vcpu_op(cmd, vcpuid, arg);
         break;
+
+    case VCPUOP_is_up:
+    case VCPUOP_up:
+    case VCPUOP_initialise:
+        /* PVH fixme: this white list should be removed eventually */
+        if ( is_pvh_vcpu(current) )
+            rc = do_vcpu_op(cmd, vcpuid, arg);
+        else
+            rc = -ENOSYS;
+        break;
+
     default:
         rc = -ENOSYS;
         break;
@@ -3312,6 +3323,24 @@ static hvm_hypercall_t *const hvm_hypercall32_table[NR_hypercalls] = {
     HYPERCALL(tmem_op)
 };
 
+/* PVH 32bitfixme. */
+static hvm_hypercall_t *const pvh_hypercall64_table[NR_hypercalls] = {
+    HYPERCALL(platform_op),
+    HYPERCALL(memory_op),
+    HYPERCALL(xen_version),
+    HYPERCALL(console_io),
+    [ __HYPERVISOR_grant_table_op ]  = (hvm_hypercall_t *)hvm_grant_table_op,
+    [ __HYPERVISOR_vcpu_op ]         = (hvm_hypercall_t *)hvm_vcpu_op,
+    HYPERCALL(mmuext_op),
+    HYPERCALL(xsm_op),
+    HYPERCALL(sched_op),
+    HYPERCALL(event_channel_op),
+    [ __HYPERVISOR_physdev_op ]      = (hvm_hypercall_t *)hvm_physdev_op,
+    HYPERCALL(hvm_op),
+    HYPERCALL(sysctl),
+    HYPERCALL(domctl)
+};
+
 int hvm_do_hypercall(struct cpu_user_regs *regs)
 {
     struct vcpu *curr = current;
@@ -3338,7 +3367,9 @@ int hvm_do_hypercall(struct cpu_user_regs *regs)
     if ( (eax & 0x80000000) && is_viridian_domain(curr->domain) )
         return viridian_hypercall(regs);
 
-    if ( (eax >= NR_hypercalls) || !hvm_hypercall32_table[eax] )
+    if ( (eax >= NR_hypercalls) ||
+         (is_pvh_vcpu(curr) && !pvh_hypercall64_table[eax]) ||
+         (is_hvm_vcpu(curr) && !hvm_hypercall32_table[eax]) )
     {
         regs->eax = -ENOSYS;
         return HVM_HCALL_completed;
@@ -3353,16 +3384,20 @@ int hvm_do_hypercall(struct cpu_user_regs *regs)
                     regs->r10, regs->r8, regs->r9);
 
         curr->arch.hvm_vcpu.hcall_64bit = 1;
-        regs->rax = hvm_hypercall64_table[eax](regs->rdi,
-                                               regs->rsi,
-                                               regs->rdx,
-                                               regs->r10,
-                                               regs->r8,
-                                               regs->r9); 
+        if ( is_pvh_vcpu(curr) )
+            regs->rax = pvh_hypercall64_table[eax](regs->rdi, regs->rsi,
+                                                   regs->rdx, regs->r10,
+                                                   regs->r8, regs->r9);
+        else
+            regs->rax = hvm_hypercall64_table[eax](regs->rdi, regs->rsi,
+                                                   regs->rdx, regs->r10,
+                                                   regs->r8, regs->r9);
         curr->arch.hvm_vcpu.hcall_64bit = 0;
     }
     else
     {
+        ASSERT(!is_pvh_vcpu(curr));   /* PVH 32bitfixme. */
+
         HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%u(%x, %x, %x, %x, %x, %x)", eax,
                     (uint32_t)regs->ebx, (uint32_t)regs->ecx,
                     (uint32_t)regs->edx, (uint32_t)regs->esi,
@@ -3760,6 +3795,23 @@ static int hvm_replace_event_channel(struct vcpu *v, domid_t remote_domid,
     return 0;
 }
 
+static long pvh_hvm_op(unsigned long op, struct domain *d,
+                       struct xen_hvm_param *harg)
+{
+    long rc = -ENOSYS;
+
+    if ( op == HVMOP_set_param )
+    {
+        if ( harg->index == HVM_PARAM_CALLBACK_IRQ )
+        {
+            hvm_set_callback_via(d, harg->value);
+            hvm_latch_shinfo_size(d);
+            rc = 0;
+        }
+    }
+    return rc;
+}
+
 long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
 
 {
@@ -3787,12 +3839,18 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
             return -ESRCH;
 
         rc = -EINVAL;
-        if ( !is_hvm_domain(d) )
-            goto param_fail;
+        if ( is_pv_domain(d) )
+            goto param_done;
 
         rc = xsm_hvm_param(XSM_TARGET, d, op);
         if ( rc )
-            goto param_fail;
+            goto param_done;
+
+        if ( is_pvh_domain(d) )
+        {
+            rc = pvh_hvm_op(op, d, &a);
+            goto param_done;
+        }
 
         if ( op == HVMOP_set_param )
         {
@@ -4001,7 +4059,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
                     op == HVMOP_set_param ? "set" : "get",
                     a.index, a.value);
 
-    param_fail:
+    param_done:
         rcu_unlock_domain(d);
         break;
     }
diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
index bc65359..b27d79b 100644
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -622,7 +622,7 @@ static void hypercall_page_initialise_ring3_kernel(void *hypercall_page)
 void hypercall_page_initialise(struct domain *d, void *hypercall_page)
 {
     memset(hypercall_page, 0xCC, PAGE_SIZE);
-    if ( is_hvm_domain(d) )
+    if ( !is_pv_domain(d) )
         hvm_hypercall_page_initialise(d, hypercall_page);
     else if ( !is_pv_32bit_domain(d) )
         hypercall_page_initialise_ring3_kernel(hypercall_page);
-- 
1.7.2.3

