
[Xen-ia64-devel] [PATCH 1/2] remove xencomm page size limit(xen side)



# HG changeset patch
# User yamahata@xxxxxxxxxxxxx
# Date 1185762640 -32400
# Node ID fb5043d15cae94f0686f5ad009d67ff97d0a1c3c
# Parent  4492a0285bae734ee18f6acbb6b3f9c80f153be7
remove xencomm page size limit.
Currently xencomm has a page size limit, so a domain with a large amount
of memory (e.g. 100GB+) can't be created.
This patch allows the address array of struct xencomm_desc to cross a page
boundary, so that the size of struct xencomm_desc can exceed the page size.
Note that struct xencomm_desc itself still can't cross a page boundary.
PATCHNAME: remove_xencomm_page_size_limit_xen_side

Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
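
For reference, the descriptor this change is about looks like the sketch
below.  This is a minimal, self-contained user-space example, not Xen code:
the struct layout mirrors xen/include/public/xencomm.h, while PAGE_SIZE,
the helper name desc_header_crosses_page() and the arithmetic in main()
are assumptions for illustration only.  It shows why address[] outgrows a
single page for a guest with a large buffer, while the fixed header, which
the new xencomm_desc_cross_page_boundary() check guards, must still fit
within one page.

/*
 * Minimal sketch (not the Xen sources).  The struct layout mirrors
 * xen/include/public/xencomm.h; PAGE_SIZE and the example arithmetic
 * are illustrative assumptions.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  16384UL              /* example: 16KB pages */
#define PAGE_MASK  (~(PAGE_SIZE - 1))

struct xencomm_desc {
    uint32_t magic;                     /* XENCOMM_MAGIC */
    uint32_t nr_addrs;                  /* entries in address[] */
    uint64_t address[];                 /* one guest paddr per page */
};

/*
 * Returns 1 if a descriptor placed at paddr would have its fixed header
 * (magic + nr_addrs) straddle a page boundary -- the one case the patch
 * still rejects with -EINVAL.
 */
static int desc_header_crosses_page(unsigned long paddr)
{
    unsigned long offset = paddr & ~PAGE_MASK;
    return offset > PAGE_SIZE - sizeof(struct xencomm_desc);
}

int main(void)
{
    /*
     * A 100GB buffer needs ~6.5 million address[] entries with 16KB
     * pages, so the descriptor spans thousands of pages; the old
     * "whole descriptor fits in one page" assumption cannot hold.
     */
    unsigned long buf_bytes  = 100UL << 30;
    unsigned long nr_addrs   = (buf_bytes + PAGE_SIZE - 1) / PAGE_SIZE;
    unsigned long desc_bytes = sizeof(struct xencomm_desc)
                               + nr_addrs * sizeof(uint64_t);

    printf("entries=%lu descriptor=%lu bytes (~%lu pages)\n",
           nr_addrs, desc_bytes,
           (desc_bytes + PAGE_SIZE - 1) / PAGE_SIZE);
    printf("header at offset PAGE_SIZE-4 crosses a page? %d\n",
           desc_header_crosses_page(PAGE_SIZE - 4));
    return 0;
}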

diff -r 4492a0285bae -r fb5043d15cae xen/arch/ia64/xen/xencomm.c
--- a/xen/arch/ia64/xen/xencomm.c       Fri Jul 27 08:15:16 2007 -0600
+++ b/xen/arch/ia64/xen/xencomm.c       Mon Jul 30 11:30:40 2007 +0900
@@ -34,6 +34,15 @@ static int xencomm_debug = 1; /* extreme
 #else
 #define xencomm_debug 0
 #endif
+
+static int
+xencomm_desc_cross_page_boundary(unsigned long paddr)
+{
+    unsigned long offset = paddr & ~PAGE_MASK;
+    if (offset > PAGE_SIZE - sizeof(struct xencomm_desc))
+        return 1;
+    return 0;
+}
 
 static int
 xencomm_copy_chunk_from(
@@ -85,15 +94,18 @@ xencomm_copy_from_guest(
     unsigned int n,
     unsigned int skip)
 {
+    unsigned long from_ulong = (unsigned long)from;
     struct xencomm_desc *desc;
     unsigned long desc_addr;
+    struct xencomm_desc *desc_paddr;
+    unsigned long *address;
     unsigned int from_pos = 0;
     unsigned int to_pos = 0;
     unsigned int i = 0;
 
     if (xencomm_debug)
         printk("xencomm_copy_from_guest: from=%lx+%u n=%u\n",
-               (unsigned long)from, skip, n);
+               from_ulong, skip, n);
 
     if (XENCOMM_IS_INLINE(from)) {
         unsigned long src_paddr = XENCOMM_INLINE_ADDR(from);
@@ -121,8 +133,11 @@ xencomm_copy_from_guest(
         return 0;
     }
 
+    /* check that struct desc doesn't cross a page boundary */
+    if (xencomm_desc_cross_page_boundary(from_ulong))
+        return -EINVAL;
     /* first we need to access the descriptor */
-    desc_addr = xencomm_paddr_to_maddr((unsigned long)from);
+    desc_addr = xencomm_paddr_to_maddr(from_ulong);
     if (desc_addr == 0)
         return -EFAULT;
 
@@ -132,18 +147,26 @@ xencomm_copy_from_guest(
                __func__, desc, desc->magic);
         return -EFAULT;
     }
+    desc_paddr = (struct xencomm_desc *)from;
+    address = &desc->address[i];
 
     /* iterate through the descriptor, copying up to a page at a time */
     while ((to_pos < n) && (i < desc->nr_addrs)) {
-        unsigned long src_paddr = desc->address[i];
+        unsigned long src_paddr;
         unsigned int pgoffset;
         unsigned int chunksz;
         unsigned int chunk_skip;
 
-        if (src_paddr == XENCOMM_INVALID) {
-            i++;
-            continue;
-        }
+        /* When crossing a page boundary, recalculate the machine address. */
+        if (((unsigned long)address & ~PAGE_MASK) == 0) {
+            address = (unsigned long*)xencomm_paddr_to_maddr(
+                (unsigned long)&desc_paddr->address[i]);
+            if (address == NULL)
+                return -EFAULT;
+        }
+        src_paddr = *address;
+        if (src_paddr == XENCOMM_INVALID)
+            goto skip_to_next;
 
         pgoffset = src_paddr % PAGE_SIZE;
         chunksz = PAGE_SIZE - pgoffset;
@@ -170,7 +193,9 @@ xencomm_copy_from_guest(
             to_pos += bytes;
         }
 
+    skip_to_next:
         i++;
+        address++;
     }
 
     return n - to_pos;
@@ -226,15 +251,18 @@ xencomm_copy_to_guest(
     unsigned int n,
     unsigned int skip)
 {
+    unsigned long to_ulong = (unsigned long)to;
     struct xencomm_desc *desc;
     unsigned long desc_addr;
+    struct xencomm_desc *desc_paddr;
+    unsigned long *address;
     unsigned int from_pos = 0;
     unsigned int to_pos = 0;
     unsigned int i = 0;
 
     if (xencomm_debug)
         printk ("xencomm_copy_to_guest: to=%lx+%u n=%u\n",
-                (unsigned long)to, skip, n);
+                to_ulong, skip, n);
 
     if (XENCOMM_IS_INLINE(to)) {
         unsigned long dest_paddr = XENCOMM_INLINE_ADDR(to);
@@ -263,8 +291,11 @@ xencomm_copy_to_guest(
         return 0;
     }
 
+    /* check that struct desc doesn't cross a page boundary */
+    if (xencomm_desc_cross_page_boundary(to_ulong))
+        return -EINVAL;
     /* first we need to access the descriptor */
-    desc_addr = xencomm_paddr_to_maddr((unsigned long)to);
+    desc_addr = xencomm_paddr_to_maddr(to_ulong);
     if (desc_addr == 0)
         return -EFAULT;
 
@@ -273,18 +304,26 @@ xencomm_copy_to_guest(
         printk("%s error: %p magic was 0x%x\n", __func__, desc, desc->magic);
         return -EFAULT;
     }
+    desc_paddr = (struct xencomm_desc*)to;
+    address = &desc->address[i];
 
     /* iterate through the descriptor, copying up to a page at a time */
     while ((from_pos < n) && (i < desc->nr_addrs)) {
-        unsigned long dest_paddr = desc->address[i];
+        unsigned long dest_paddr;
         unsigned int pgoffset;
         unsigned int chunksz;
         unsigned int chunk_skip;
 
-        if (dest_paddr == XENCOMM_INVALID) {
-            i++;
-            continue;
-        }
+        /* When crossing a page boundary, recalculate the machine address. */
+        if (((unsigned long)address & ~PAGE_MASK) == 0) {
+            address = (unsigned long*)xencomm_paddr_to_maddr(
+                (unsigned long)&desc_paddr->address[i]);
+            if (address == NULL)
+                return -EFAULT;
+        }
+        dest_paddr = *address;
+        if (dest_paddr == XENCOMM_INVALID)
+            goto skip_to_next;
 
         pgoffset = dest_paddr % PAGE_SIZE;
         chunksz = PAGE_SIZE - pgoffset;
@@ -308,7 +347,9 @@ xencomm_copy_to_guest(
             to_pos += bytes;
         }
 
+    skip_to_next:
         i++;
+        address++;
     }
     return n - from_pos;
 }
@@ -320,15 +361,21 @@ xencomm_add_offset(
     void         *handle,
     unsigned int bytes)
 {
+    unsigned long handle_ulong = (unsigned long)handle;
     struct xencomm_desc *desc;
     unsigned long desc_addr;
+    struct xencomm_desc *desc_paddr;
+    unsigned long *address;
     int i = 0;
 
     if (XENCOMM_IS_INLINE(handle))
         return (void *)((unsigned long)handle + bytes);
 
+    /* check that struct desc doesn't cross a page boundary */
+    if (xencomm_desc_cross_page_boundary(handle_ulong))
+        return NULL;
     /* first we need to access the descriptor */
-    desc_addr = xencomm_paddr_to_maddr((unsigned long)handle);
+    desc_addr = xencomm_paddr_to_maddr(handle_ulong);
     if (desc_addr == 0)
         return NULL;
 
@@ -337,18 +384,26 @@ xencomm_add_offset(
         printk("%s error: %p magic was 0x%x\n", __func__, desc, desc->magic);
         return NULL;
     }
+    desc_paddr = (struct xencomm_desc*)handle;
+    address = &desc->address[i];
 
     /* iterate through the descriptor incrementing addresses */
     while ((bytes > 0) && (i < desc->nr_addrs)) {
-        unsigned long dest_paddr = desc->address[i];
+        unsigned long dest_paddr;
         unsigned int pgoffset;
         unsigned int chunksz;
         unsigned int chunk_skip;
 
-        if (dest_paddr == XENCOMM_INVALID) {
-            i++;
-            continue;
-        }
+        /* When crossing a page boundary, recalculate the machine address. */
+        if (((unsigned long)address & ~PAGE_MASK) == 0) {
+            address = (unsigned long*)xencomm_paddr_to_maddr(
+                (unsigned long)&desc_paddr->address[i]);
+            if (address == NULL)
+                return NULL;
+        }
+        dest_paddr = *address;
+        if (dest_paddr == XENCOMM_INVALID)
+            goto skip_to_next;
 
         pgoffset = dest_paddr % PAGE_SIZE;
         chunksz = PAGE_SIZE - pgoffset;
@@ -356,13 +411,15 @@ xencomm_add_offset(
         chunk_skip = min(chunksz, bytes);
         if (chunk_skip == chunksz) {
             /* exhausted this page */
-            desc->address[i] = XENCOMM_INVALID;
+            *address = XENCOMM_INVALID;
         } else {
-            desc->address[i] += chunk_skip;
+            *address += chunk_skip;
         }
         bytes -= chunk_skip;
-       
-       i++;
+
+    skip_to_next:
+        i++;
+        address++;
     }
     return handle;
 }
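
All three functions above share the same walking pattern: 'address' points
into the mapped (machine-address) view of desc->address[], and each time it
advances onto a new page the guest physical address of the current slot,
&desc_paddr->address[i], is translated again with xencomm_paddr_to_maddr().
Below is a hedged user-space sketch of that loop; translate(),
walk_addresses() and the identity mapping are assumptions made only so the
example compiles and runs outside Xen.

/*
 * Hedged sketch of the cross-page walk this patch introduces.  translate()
 * stands in for Xen's xencomm_paddr_to_maddr() and is just the identity
 * mapping so the example runs in user space; walk_addresses() and all
 * constants are illustrative, not Xen code.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE        4096UL
#define PAGE_MASK        (~(PAGE_SIZE - 1))
#define XENCOMM_INVALID  (~0UL)   /* sentinel for an exhausted slot */

/* In Xen this would be xencomm_paddr_to_maddr(); identity for the sketch. */
static unsigned long translate(unsigned long paddr)
{
    return paddr;
}

/*
 * Walk nr_addrs slots of an address array starting at array_paddr (a guest
 * physical address), re-translating the working pointer each time it lands
 * on a new page, because the pages backing the array need not be
 * machine-contiguous.
 */
static int walk_addresses(unsigned long array_paddr, unsigned long nr_addrs)
{
    unsigned long *address = (unsigned long *)translate(array_paddr);
    unsigned long i;

    if (address == NULL)
        return -1;

    for (i = 0; i < nr_addrs; i++, address++) {
        /* When crossing a page boundary, recalculate the machine address. */
        if (((unsigned long)address & ~PAGE_MASK) == 0) {
            address = (unsigned long *)translate(
                array_paddr + i * sizeof(unsigned long));
            if (address == NULL)
                return -1;
        }
        if (*address == XENCOMM_INVALID)
            continue;   /* the real code reaches the same point via goto */
        /* ... copy up to one page described by *address ... */
    }
    return 0;
}

int main(void)
{
    /* Two pages worth of slots, so the walk crosses one page boundary. */
    unsigned long nr = 2 * PAGE_SIZE / sizeof(unsigned long);
    unsigned long *slots = aligned_alloc(PAGE_SIZE, 2 * PAGE_SIZE);
    unsigned long i;

    if (slots == NULL)
        return 1;
    for (i = 0; i < nr; i++)
        slots[i] = (i % 2) ? XENCOMM_INVALID : i * PAGE_SIZE;

    printf("walk result: %d\n", walk_addresses((unsigned long)slots, nr));
    free(slots);
    return 0;
}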


-- 
yamahata

