
Re: [Xen-ia64-devel] PV-on-HVM driver for IPF



Hi,

  I'll post a new xen-hyper.patch2, which has been modified to tighten
the parameter checking.
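
  For reference, the guest-side driver is expected to use the new
hypercalls roughly as in the sketch below.  This is only illustrative:
setup_pv_on_hvm() and the header paths are my own names, and
HYPERVISOR_hvm_op is assumed to be the usual guest hvm_op wrapper; none
of it is part of the patch itself.

    #include <xen/interface/arch-ia64.h>  /* HVMOP_*, struct xen_hvm_setup */
    #include <asm/hypercall.h>            /* HYPERVISOR_hvm_op (assumed)   */

    /* Hand guest pages to Xen to back the grant table and shared_info.
     * gnttab_base_pa and shinfo_pa are page-aligned guest-physical
     * addresses of pages the guest has already allocated. */
    static int setup_pv_on_hvm(unsigned long gnttab_base_pa,
                               unsigned long shinfo_pa)
    {
        struct xen_hvm_setup setup;
        int rc;

        setup.arg1 = gnttab_base_pa;
        setup.arg2 = NR_GRANT_FRAMES;      /* must match the hypervisor's value */
        rc = HYPERVISOR_hvm_op(HVMOP_setup_gnttab_table, &setup);
        if (rc)
            return rc;

        setup.arg1 = shinfo_pa;
        setup.arg2 = 0;                    /* unused for this op */
        return HYPERVISOR_hvm_op(HVMOP_setup_shared_info_page, &setup);
    }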

Doi.Tsunehisa@xxxxxxxxxxxxxx wrote:
> Hi Tristan,
> 
>   Thank you for your comment.
> 
> You (Tristan.Gingold) said:
>>>   I will post patches of PV-on-HVM for IPF.
>>>
>>>   We wrote the patch with the following design in mind:
>>>
>>>    * Expand hvm_op hypercall
>>>      + Introduce HVMOP_setup_shared_info_page
>>>        - A page allocated by the HVM guest OS is swapped with the
>>>          original shared_info page by this hypercall.
>>>        - In the x86 code, the original shared_info page is still used
>>>          after PV-on-HVM setup, via the remapping feature in the
>>>          arch-dependent HYPERVISOR_memory_op. We could not implement
>>>          the same feature for IPF, so we chose this method instead.
>> Can you explain why you can't reuse the HYPERVISOR_memory_op hcall?
>> It isn't clear to me.
> 
>   In the x86 code (xen/arch/x86/mm.c), only the virtual address space of
> the page frame allocated by the guest OS is used, and that virtual space
> is remapped to the original shared_info page frame. We could not find an
> equivalent method for IPF.
> 
>   Could you suggest a remapping method for us?
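
(For reference, the x86 PV-on-HVM drivers remap the existing shared_info
machine frame onto a guest pfn roughly as in the sketch below; it is the
hypervisor-side XENMEM_add_to_physmap handler behind this call, in the x86
mm code, that we could not find an equivalent of for IPF.  The function
name, header path and shared_info_gpfn are only illustrative.)

    #include <xen/interface/memory.h>  /* XENMEM_add_to_physmap, XENMAPSPACE_* */

    static void remap_shared_info(unsigned long shared_info_gpfn)
    {
        struct xen_add_to_physmap xatp = {
            .domid = DOMID_SELF,
            .space = XENMAPSPACE_shared_info,
            .idx   = 0,
            .gpfn  = shared_info_gpfn,   /* guest pfn chosen by the guest */
        };

        /* Ask Xen to install the existing shared_info machine frame
         * at that guest pfn. */
        if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
            BUG();
    }
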
> 
>> About the patch:
>> +static int
>> +vmx_gnttab_setup_table(unsigned long frame_pa, unsigned long nr_frames)
>> +{
>> +    struct domain *d = current->domain;
>> +    int rc = 0, i;
>> +    unsigned long o_grant_shared, pgaddr;
>> +
>> +    if (nr_frames != NR_GRANT_FRAMES) {
>> +        return -1;
>> You'd better return -EINVAL.
> 
>   I agree. I'll correct it.
> 
>> +    }
>> +    o_grant_shared = (unsigned long)d->grant_table->shared;
>> +    d->grant_table->shared = (struct grant_entry *)domain_mpa_to_imva(d,
>> frame_pa);
>> +
>> +    /* Copy existing grant table shared into new page */
>> +    if (o_grant_shared) {
>> +        memcpy((void*)d->grant_table->shared,
>> +                (void*)o_grant_shared, PAGE_SIZE * nr_frames);
>> You should check the result of domain_mpa_to_imva, as it could fail.
> 
>   I agree. I'll try to correct it. It returns NULL if it fails, I think.
> Is that correct?
> 
>> +            if (likely(IS_XEN_HEAP_FRAME(virt_to_page(pgaddr)))) {
>> +                free_domheap_page(virt_to_page(pgaddr));
>> +                free_xenheap_page((void *)pgaddr);
>> +            }
>> +            else {
>> +                put_page(virt_to_page(pgaddr));
>> +            }
>> Maybe create a function to be called by both gnttab_setup_table and
>> setup_shared_info_page.
> 
>   I think that these functions are only for the VT-i domain, which is why
> I used the vmx_ prefix. What do you think about it?
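
(If we do factor it out, the common part could be a small helper like the
sketch below, mirroring the free logic already in the patch; the name
vmx_release_old_page is only illustrative.)

    /* Release one page that previously backed the grant table or
     * shared_info.  If it came from the xen heap, give it back to the
     * xen heap; otherwise drop the domain's reference on it. */
    static void
    vmx_release_old_page(unsigned long imva)
    {
        struct page_info *page = virt_to_page(imva);

        if (likely(IS_XEN_HEAP_FRAME(page))) {
            free_domheap_page(page);
            free_xenheap_page((void *)imva);
        }
        else {
            put_page(page);
        }
    }
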
> 
> Thanks,
> -- Tsunehisa Doi
Signed-off-by: Tsunehisa Doi <Doi.Tsunehisa@xxxxxxxxxxxxxx>
Signed-off-by: Tomonari Horikoshi <t.horikoshi@xxxxxxxxxxxxxx>

diff -r 3e54734e55f3 xen/arch/ia64/vmx/vmx_hypercall.c
--- a/xen/arch/ia64/vmx/vmx_hypercall.c Wed Aug 23 13:26:46 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_hypercall.c Fri Aug 25 11:08:18 2006 +0900
@@ -2,6 +2,7 @@
 /*
  * vmx_hyparcall.c: handling hypercall from domain
  * Copyright (c) 2005, Intel Corporation.
+ * Copyright (c) 2006, Fujitsu Limited.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -17,6 +18,8 @@
  * Place - Suite 330, Boston, MA 02111-1307 USA.
  *
  *  Xuefei Xu (Anthony Xu) (Anthony.xu@xxxxxxxxx)
+ *  Tsunehisa Doi (Doi.Tsunehisa@xxxxxxxxxxxxxx)
+ *  Tomonari Horikoshi (t.horikoshi@xxxxxxxxxxxxxx)
  */
 
 #include <xen/config.h>
@@ -34,6 +37,94 @@
 #include <public/version.h>
 #include <asm/dom_fw.h>
 #include <xen/domain.h>
+#include <xen/compile.h>
+#include <xen/event.h>
+
+static int
+vmx_gnttab_setup_table(unsigned long frame_pa, unsigned long nr_frames)
+{
+    struct domain *d = current->domain;
+    int rc = 0, i;
+    unsigned long o_grant_shared, pgaddr;
+
+    if ((nr_frames != NR_GRANT_FRAMES) || (frame_pa & (PAGE_SIZE - 1))) {
+        return -EINVAL;
+    }
+
+    pgaddr = domain_mpa_to_imva(d, frame_pa);
+    if (pgaddr == 0) {
+        return -EFAULT;
+    }
+
+    o_grant_shared = (unsigned long)d->grant_table->shared;
+    d->grant_table->shared = (struct grant_entry *)pgaddr;
+
+    /* Copy existing grant table shared into new page */
+    if (o_grant_shared) {
+        memcpy((void*)d->grant_table->shared,
+                (void*)o_grant_shared, PAGE_SIZE * nr_frames);
+        /* If the original page belongs to the xen heap, relinquish it back
+         * to the xen heap.  Otherwise, leave it to the domain to decide.
+         */
+        for (i = 0; i < NR_GRANT_FRAMES; i++) {
+            pgaddr = o_grant_shared + PAGE_SIZE * i;
+            if (likely(IS_XEN_HEAP_FRAME(virt_to_page(pgaddr)))) {
+                free_domheap_page(virt_to_page(pgaddr));
+                free_xenheap_page((void *)pgaddr);
+            }
+            else {
+                put_page(virt_to_page(pgaddr));
+            }
+        }
+    }
+    else {
+        memset(d->grant_table->shared, 0, PAGE_SIZE * nr_frames);
+    }
+    return rc;
+}
+
+static int
+vmx_setup_shared_info_page(unsigned long gpa)
+{
+    VCPU *vcpu = current;
+    struct domain *d = vcpu->domain;
+    unsigned long o_info, pgaddr;
+    struct vcpu *v;
+
+    if (gpa & (PAGE_SIZE - 1)) {
+        return -EINVAL;
+    }
+
+    pgaddr = domain_mpa_to_imva(d, gpa);
+    if (pgaddr == 0) {
+        return -EFAULT;
+    }
+
+    o_info = (u64)d->shared_info;
+    d->shared_info = (shared_info_t *)pgaddr;
+
+    /* Copy existing shared info into new page */
+    if (o_info) {
+        memcpy((void*)d->shared_info, (void*)o_info, PAGE_SIZE);
+        for_each_vcpu(d, v) {
+            v->vcpu_info = &d->shared_info->vcpu_info[v->vcpu_id];
+        }
+        /* If the original page belongs to the xen heap, relinquish it back
+         * to the xen heap.  Otherwise, leave it to the domain to decide.
+         */
+        if (likely(IS_XEN_HEAP_FRAME(virt_to_page(o_info)))) {
+            free_domheap_page(virt_to_page(o_info));
+            free_xenheap_page((void *)o_info);
+        }
+        else {
+            put_page(virt_to_page(o_info));
+        }
+    }
+    else {
+        memset(d->shared_info, 0, PAGE_SIZE);
+    }
+    return 0;
+}
 
 long
 do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
@@ -78,6 +169,25 @@ do_hvm_op(unsigned long op, XEN_GUEST_HA
         break;
     }
 
+    case HVMOP_setup_gnttab_table:
+    case HVMOP_setup_shared_info_page:
+    {
+        struct xen_hvm_setup a;
+
+        if (copy_from_guest(&a, arg, 1))
+            return -EFAULT;
+
+        switch (op) {
+        case HVMOP_setup_gnttab_table:
+            printk("vmx_gnttab_setup_table: frame_pa=%#lx,"
+                            "nr_frame=%ld\n", a.arg1, a.arg2);
+            return vmx_gnttab_setup_table(a.arg1, a.arg2);
+        case HVMOP_setup_shared_info_page:
+            printk("vmx_setup_shared_info_page: gpa=0x%lx\n", a.arg1);
+            return vmx_setup_shared_info_page(a.arg1);
+        }
+    }
+
     default:
         DPRINTK("Bad HVM op %ld.\n", op);
         rc = -ENOSYS;
diff -r 3e54734e55f3 xen/include/public/arch-ia64.h
--- a/xen/include/public/arch-ia64.h    Wed Aug 23 13:26:46 2006 -0600
+++ b/xen/include/public/arch-ia64.h    Fri Aug 25 11:08:18 2006 +0900
@@ -335,6 +335,17 @@ struct vcpu_guest_context {
 };
 typedef struct vcpu_guest_context vcpu_guest_context_t;
 DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
+
+// hvm_op expansion
+#define HVMOP_setup_gnttab_table        2
+#define HVMOP_setup_shared_info_page    3
+
+struct xen_hvm_setup {
+    unsigned long arg1;
+    unsigned long arg2;
+};
+typedef struct xen_hvm_setup xen_hvm_setup_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_setup_t);
 
 // dom0 vp op
 #define __HYPERVISOR_ia64_dom0vp_op     __HYPERVISOR_arch_0
_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ia64-devel

 

