
[Xen-ia64-devel] [PATCH 2/2] remove xencomm page size limit (linux side)



# HG changeset patch
# User yamahata@xxxxxxxxxxxxx
# Date 1185763112 -32400
# Node ID 9536c4366949dcd4a163d2129e18e319bf6d1ac2
# Parent  b0bf9ba32bfe341af07da97d57572659c920fd30
remove xencomm page size limit.
Currently xencomm has a page size limit, so a domain with a large amount of
memory (e.g. 100GB or more) can't be created.

The Xen side of xencomm now accepts a struct xencomm_desc whose address
array crosses page boundaries, so it is no longer necessary to allocate a
whole page just to keep the array within a single page. We can allocate
exactly the amount of memory that is needed.
Note that struct xencomm_desc itself still must not cross a page boundary.
To guarantee this, the implementation relies on the slab allocator returning
memory aligned to at least sizeof(void *), together with the fact that
sizeof(struct xencomm_desc) == 8 == sizeof(void *); an 8-byte object at an
8-byte-aligned address always stays within one page.
This is true on ia64, but may not be true on 32-bit environments.
PATCHNAME: remove_xencomm_page_size_limit

Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
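
(Illustration, not part of the patch: a minimal userspace sketch of the
size calculation described above.  The PAGE_SHIFT/PAGE_MASK values and the
nr_page_addrs() helper are hypothetical stand-ins that mirror the arithmetic
of the new xencomm_alloc(); the real code uses the kernel definitions.)

#include <stdio.h>

#define PAGE_SHIFT      14UL                    /* e.g. 16KB pages on ia64 */
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE - 1))

/* Number of page addresses needed to cover [buffer, buffer + bytes). */
static unsigned long nr_page_addrs(unsigned long buffer, unsigned long bytes)
{
        unsigned long start = buffer & PAGE_MASK;
        unsigned long end = (buffer + bytes) | ~PAGE_MASK;

        return (end - start + 1) >> PAGE_SHIFT;
}

int main(void)
{
        /* A 100KB buffer starting 1KB into a page spans 7 pages of 16KB. */
        unsigned long buffer = (123UL << PAGE_SHIFT) + 1024;
        unsigned long bytes = 100 * 1024;
        unsigned long nr = nr_page_addrs(buffer, bytes);

        /* 8 stands in for sizeof(struct xencomm_desc) and sizeof(void *). */
        printf("address slots: %lu, allocation: %lu bytes (was PAGE_SIZE=%lu)\n",
               nr, 8 + 8 * nr, PAGE_SIZE);
        return 0;
}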

diff -r b0bf9ba32bfe -r 9536c4366949 arch/ia64/xen/xencomm.c
--- a/arch/ia64/xen/xencomm.c   Fri Jul 27 08:15:50 2007 -0600
+++ b/arch/ia64/xen/xencomm.c   Mon Jul 30 11:38:32 2007 +0900
@@ -158,16 +158,25 @@ xencomm_init_desc(struct xencomm_desc *d
 }
 
 static struct xencomm_desc *
-xencomm_alloc(gfp_t gfp_mask)
-{
-       struct xencomm_desc *desc;
-
-       desc = (struct xencomm_desc *)__get_free_page(gfp_mask);
+xencomm_alloc(gfp_t gfp_mask, void *buffer, unsigned long bytes)
+{
+       struct xencomm_desc *desc;
+       unsigned long buffer_ulong = (unsigned long)buffer;
+       unsigned long start = buffer_ulong & PAGE_MASK;
+       unsigned long end = (buffer_ulong + bytes) | ~PAGE_MASK;
+       unsigned long nr_addrs = (end - start + 1) >> PAGE_SHIFT;
+       unsigned long size = sizeof(*desc) +
+               sizeof(desc->address[0]) * nr_addrs;
+
+       /*
+        * NOTE: kmalloc returns at least 64bit aligned value so that
+        *       struct xencomm_desc doesn't cross page boundary.
+        */
+       BUILD_BUG_ON(sizeof(*desc) > sizeof(void*));
+       desc = kmalloc(size, gfp_mask);
        if (desc == NULL)
                panic("%s: page allocation failed\n", __func__);
-
-       desc->nr_addrs = (PAGE_SIZE - sizeof(struct xencomm_desc)) /
-                        sizeof(*desc->address);
+       desc->nr_addrs = nr_addrs;
 
        return desc;
 }
@@ -176,7 +185,7 @@ xencomm_free(struct xencomm_handle *desc
 xencomm_free(struct xencomm_handle *desc)
 {
        if (desc)
-               free_page((unsigned long)__va(desc));
+               kfree(__va(desc));
 }
 
 int
@@ -195,7 +204,7 @@ xencomm_create(void *buffer, unsigned lo
                return 0;
        }
 
-       desc = xencomm_alloc(gfp_mask);
+       desc = xencomm_alloc(gfp_mask, buffer, bytes);
        if (!desc) {
                printk("%s failure\n", "xencomm_alloc");
                return -ENOMEM;
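
(Also for illustration only: a quick standalone check of the alignment note
in the hunk above.  Because the page size is a multiple of 8, an 8-byte
object at an 8-byte-aligned address, which the comment above says kmalloc
provides at minimum, can never straddle a page boundary.  PAGE_SIZE below is
an illustrative value.)

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE       16384UL         /* illustrative, multiple of 8 */
#define PAGE_MASK       (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long p;

        /* Walk every 8-byte-aligned offset across a couple of pages. */
        for (p = 0; p < 2 * PAGE_SIZE; p += 8)
                /* First and last byte of the 8-byte object share a page. */
                assert((p & PAGE_MASK) == ((p + 7) & PAGE_MASK));

        printf("an 8-byte-aligned 8-byte object never crosses a page\n");
        return 0;
}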


-- 
yamahata

