
Re: [Xen-ia64-devel] [Patch] Fix for re-enabling PV-on-HVM on IPF




Hi all,

  I've modified the patches according to Yamahata-san's suggestion.

>   * pvfix.patch
>     - fix for compiling PV-on-HVM driver on IPF.
>     - this is the same as the one I sent last week.
>   * hcall-rval.patch
>     - fix about a return value of hypercall from VT-i domain.

  These two patches are unchanged from my previous posting.

>   * dynmic-gnttab.patch
>     - follow dynamic grant_table for PV-on-HVM on IPF

  This patch has been modified to avoid a hypervisor crash.

>   * avoid-crash.patch
>     - modify guest_physmap_add_page()

  This patch is now obsolete.

  Therefore, I'll re-submit the patches as follows:

  * pvfix.patch
    - fix for compiling the PV-on-HVM driver on IPF.
  * hcall-rval.patch
    - fix the return value of hypercalls made from a VT-i domain.
  * dynmic-gnttab.patch2
    - follow the dynamic grant_table changes for PV-on-HVM on IPF
    - it now pre-increments the page counter before
      guest_physmap_remove_page() and re-sets the PGC_allocated flag
      afterwards (see the sketch below).
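
  A rough sketch of that sequence in plain C follows. The helper name and
arguments are hypothetical and only for illustration (the actual change is
the mm.c hunk in dynmic-gnttab.patch2 below); it assumes the usual Xen/ia64
helpers such as get_page(), guest_physmap_remove_page() and cmpxchg_acq():

    /* Illustrative helper only (not part of the patch): moves a page
     * from old_gpfn to new_gpfn without letting it be freed in between.
     * On IPF, guest_physmap_remove_page() decrements the page counter
     * and clears PGC_allocated, so take an extra reference first and
     * re-set the flag afterwards. */
    static int remap_page_sketch(struct domain *d, unsigned long old_gpfn,
                                 unsigned long new_gpfn, unsigned long mfn)
    {
        unsigned long x, nx;

        /* pre-increment the page counter */
        if (!get_page(mfn_to_page(mfn), d))
            return -EINVAL;

        guest_physmap_remove_page(d, old_gpfn, mfn);

        /* post-set the PGC_allocated flag */
        do {
            x = mfn_to_page(mfn)->count_info;
            if ((x & PGC_count_mask) == 0)
                return -EINVAL;    /* the page was freed under us */
            nx = x | PGC_allocated;
        } while (cmpxchg_acq(&mfn_to_page(mfn)->count_info, x, nx) != x);

        guest_physmap_add_page(d, new_gpfn, mfn);
        return 0;
    }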

  These patches also require changeset 14089 of xen-unstable.hg to
enable PV-on-HVM.

  In our simple test, PV-on-HVM on IPF works.

Thanks,
- Tsunehisa Doi
# HG changeset patch
# User Doi.Tsunehisa@xxxxxxxxxxxxxx
# Date 1173182115 -32400
# Node ID 59655cf89ac90a1f566bf8a59dbb65cf76d980e5
# Parent  8a58ea36e4207e6d47f8870632ab8fe14e3622cb
Fix for compiling PV-on-HVM driver on IPF

Signed-off-by: Tsunehisa Doi <Doi.Tsunehisa@xxxxxxxxxxxxxx>

diff -r 8a58ea36e420 -r 59655cf89ac9 unmodified_drivers/linux-2.6/platform-pci/xen_support.c
--- a/unmodified_drivers/linux-2.6/platform-pci/xen_support.c   Thu Mar 01 15:02:09 2007 -0700
+++ b/unmodified_drivers/linux-2.6/platform-pci/xen_support.c   Tue Mar 06 20:55:15 2007 +0900
@@ -45,7 +45,13 @@ unsigned long __hypercall(unsigned long 
        return __res;
 }
 EXPORT_SYMBOL(__hypercall);
-#endif
+
+int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
+{
+       return xencomm_mini_hypercall_grant_table_op(cmd, uop, count);
+}
+EXPORT_SYMBOL(HYPERVISOR_grant_table_op);
+#endif /* __ia64__ */
 
 void xen_machphys_update(unsigned long mfn, unsigned long pfn)
 {
# HG changeset patch
# User Doi.Tsunehisa@xxxxxxxxxxxxxx
# Date 1173183097 -32400
# Node ID c3fdaff60c05896fbbf13fa096eb1e4803c6ae8a
# Parent  4a02f5baf293c5f0dcfe425a42957b61aef619df
Fix about a return value of hypercall from VT-i domain

Signed-off-by: Tsunehisa Doi <Doi.Tsunehisa@xxxxxxxxxxxxxx>

diff -r 4a02f5baf293 -r c3fdaff60c05 xen/arch/ia64/vmx/vmx_entry.S
--- a/xen/arch/ia64/vmx/vmx_entry.S     Tue Mar 06 21:08:31 2007 +0900
+++ b/xen/arch/ia64/vmx/vmx_entry.S     Tue Mar 06 21:11:37 2007 +0900
@@ -477,6 +477,11 @@ GLOBAL_ENTRY(ia64_leave_hypercall)
      * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
      * needs to be redone.
      */
+    ;;
+    adds r16=PT(R8)+16,r12    // r16 = &pt_regs->r8 on the stack
+    ;;
+    st8 [r16]=r8              // save hypercall return value before leave_hypervisor_tail
+    ;;
 (pUStk) rsm psr.i
     cmp.eq pLvSys,p0=r0,r0             // pLvSys=1: leave from syscall
 (pUStk) cmp.eq.unc p6,p0=r0,r0         // p6 <- pUStk
@@ -484,6 +489,11 @@ GLOBAL_ENTRY(ia64_leave_hypercall)
     br.call.sptk.many b0=leave_hypervisor_tail
 .work_processed_syscall:
     //clean up bank 1 registers
+    ;;
+    adds r16=PT(R8)+16,r12    // r16 = &pt_regs->r8 on the stack
+    ;;
+    ld8 r8=[r16]              // restore hypercall return value
+    ;;
     mov r16=r0
     mov r17=r0
     mov r18=r0
# HG changeset patch
# User Doi.Tsunehisa@xxxxxxxxxxxxxx
# Date 1173322666 -32400
# Node ID b602dd142385577c143b99e47c56b2a4f9dca90a
# Parent  61eb6589e720d95f0b26e435e9d3cd2b12718700
Follow dynamic grant_table for PV-on-HVM on IPF

Signed-off-by: Tsunehisa Doi <Doi.Tsunehisa@xxxxxxxxxxxxxx>

diff -r 61eb6589e720 -r b602dd142385 xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c    Tue Mar 06 21:11:37 2007 +0900
+++ b/xen/arch/ia64/xen/mm.c    Thu Mar 08 11:57:46 2007 +0900
@@ -2087,18 +2087,33 @@ arch_memory_op(int op, XEN_GUEST_HANDLE(
             break;
         case XENMAPSPACE_grant_table:
             spin_lock(&d->grant_table->lock);
+
+            if ((xatp.idx >= nr_grant_frames(d->grant_table)) &&
+                (xatp.idx < max_nr_grant_frames))
+                gnttab_grow_table(d, xatp.idx + 1);
+
             if (xatp.idx < nr_grant_frames(d->grant_table))
-                mfn = virt_to_mfn(d->grant_table->shared) + xatp.idx;
+                mfn = virt_to_mfn(d->grant_table->shared[xatp.idx]);
+
             spin_unlock(&d->grant_table->lock);
             break;
         default:
             break;
         }
 
+        if (mfn == 0) {
+            put_domain(d);
+            return -EINVAL;
+        }
+
         LOCK_BIGLOCK(d);
 
+        /* Check remapping necessity */
+        prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
+        if (mfn == prev_mfn)
+            goto out;
+
         /* Remove previously mapped page if it was present. */
-        prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
         if (prev_mfn && mfn_valid(prev_mfn)) {
             if (IS_XEN_HEAP_FRAME(mfn_to_page(prev_mfn)))
                 /* Xen heap frames are simply unhooked from this phys slot. */
@@ -2110,12 +2125,31 @@ arch_memory_op(int op, XEN_GUEST_HANDLE(
 
         /* Unmap from old location, if any. */
         gpfn = get_gpfn_from_mfn(mfn);
-        if (gpfn != INVALID_M2P_ENTRY)
+        if (gpfn != INVALID_M2P_ENTRY) {
+            unsigned long x, nx;
+            /*
+             * guest_physmap_remove_page() (for IPF) decrements the page
+             * counter and clears the PGC_allocated flag, so pre-increment
+             * the page counter here and re-set the flag afterwards.
+             */
+            /* pre-increment page counter */
+            get_page(mfn_to_page(mfn), d);
+
             guest_physmap_remove_page(d, gpfn, mfn);
+
+            /* post-set PGC_allocated flag */
+            do {
+                x = mfn_to_page(mfn)->count_info;
+                if ((x & PGC_count_mask) == 0)
+                    goto out;
+                nx = x | PGC_allocated;
+            } while (cmpxchg_acq(&mfn_to_page(mfn)->count_info, x, nx) != x);
+        }
 
         /* Map at new location. */
         guest_physmap_add_page(d, xatp.gpfn, mfn);
 
+    out:
         UNLOCK_BIGLOCK(d);
         
         put_domain(d);

 

