[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[XenPPC] [pushed][ppc] xencomm needs to control how it uses Linux page allocations



changeset:   22038:05a141988d9e
user:        jimix@xxxxxxxxxxxxxxxxxxxxx
date:        Thu Mar 23 16:41:49 2006 -0500
summary:     [ppc] xencomm needs to control how it uses Linux page allocations

diff -r 95fc918cb989 -r 05a141988d9e arch/powerpc/platforms/xen/hcall.c
--- a/arch/powerpc/platforms/xen/hcall.c        Tue Mar 14 21:06:23 2006 -0500
+++ b/arch/powerpc/platforms/xen/hcall.c        Thu Mar 23 16:41:49 2006 -0500
@@ -1,6 +1,7 @@
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
+#include <linux/gfp.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/dom0_ops.h>
 #include <xen/interface/memory.h>
@@ -27,7 +28,8 @@ static int xenppc_privcmd_dom0_op(privcm
        if (kern_op.interface_version != DOM0_INTERFACE_VERSION)
                return -EACCES;
 
-       ret = xencomm_create(&kern_op, sizeof(dom0_op_t), &op_desc);
+       ret = xencomm_create(&kern_op, sizeof(dom0_op_t),
+                            &op_desc, GFP_KERNEL);
        if (ret)
                return ret;
 
@@ -35,50 +37,54 @@ static int xenppc_privcmd_dom0_op(privcm
                case DOM0_GETMEMLIST:
                        ret = xencomm_create(kern_op.u.getmemlist.buffer,
                                                kern_op.u.getmemlist.max_pfns * 
sizeof(unsigned long),
-                                               &desc);
+                                               &desc, GFP_KERNEL);
                        kern_op.u.getmemlist.buffer = (void *)__pa(desc);
                        break;
 #ifdef THIS_WAS_SUCH_A_GOOD_IDEA
                case DOM0_SETDOMAININFO:
                        ret = xencomm_create(kern_op.u.setdomaininfo.ctxt,
-                                               sizeof(vcpu_guest_context_t), 
&desc);
+                                            sizeof(vcpu_guest_context_t),
+                                            &desc, GFP_KERNEL);
                        kern_op.u.setdomaininfo.ctxt = (void *)__pa(desc);
                        break;
 #endif
                case DOM0_READCONSOLE:
                        ret = xencomm_create(kern_op.u.readconsole.buffer,
-                                               kern_op.u.readconsole.count, 
&desc);
+                                            kern_op.u.readconsole.count,
+                                            &desc, GFP_KERNEL);
                        kern_op.u.readconsole.buffer = (void *)__pa(desc);
                        break;
                case DOM0_GETPAGEFRAMEINFO2:
                        ret = xencomm_create(kern_op.u.getpageframeinfo2.array,
-                                               
kern_op.u.getpageframeinfo2.num, &desc);
+                                            kern_op.u.getpageframeinfo2.num,
+                                            &desc, GFP_KERNEL);
                        kern_op.u.getpageframeinfo2.array = (void *)__pa(desc);
                        break;
                case DOM0_PERFCCONTROL:
                        ret = xencomm_create(kern_op.u.perfccontrol.desc,
-                                               
kern_op.u.perfccontrol.nr_counters *
-                                                       
sizeof(dom0_perfc_desc_t),
-                                               &desc);
+                                            kern_op.u.perfccontrol.nr_counters 
*
+                                            sizeof(dom0_perfc_desc_t),
+                                            &desc, GFP_KERNEL);
                        kern_op.u.perfccontrol.desc = (void *)__pa(desc);
                        break;
                case DOM0_GETVCPUCONTEXT:
                        ret = xencomm_create(kern_op.u.getvcpucontext.ctxt,
-                                               sizeof(vcpu_guest_context_t), 
&desc);
+                                            sizeof(vcpu_guest_context_t),
+                                            &desc, GFP_KERNEL);
                        kern_op.u.getvcpucontext.ctxt = (void *)__pa(desc);
                        break;
                case DOM0_GETDOMAININFOLIST:
                        ret = xencomm_create(kern_op.u.getdomaininfolist.buffer,
-                                               
kern_op.u.getdomaininfolist.num_domains *
-                                                       
sizeof(dom0_getdomaininfo_t),
-                                               &desc);
+                                            
kern_op.u.getdomaininfolist.num_domains *
+                                            sizeof(dom0_getdomaininfo_t),
+                                            &desc, GFP_KERNEL);
                        kern_op.u.getdomaininfolist.buffer = (void *)__pa(desc);
                        break;
                case DOM0_PHYSICAL_MEMORY_MAP:
                        ret = 
xencomm_create(kern_op.u.physical_memory_map.memory_map,
-                                               
kern_op.u.physical_memory_map.nr_map_entries *
-                                                       sizeof(struct 
dom0_memory_map_entry),
-                                               &desc);
+                                            
kern_op.u.physical_memory_map.nr_map_entries *
+                                            sizeof(struct 
dom0_memory_map_entry),
+                                            &desc, GFP_KERNEL);
                        kern_op.u.physical_memory_map.memory_map = (void 
*)__pa(desc);
                        break;
 
@@ -147,7 +153,8 @@ static int xenppc_privcmd_memory_op(priv
        if (copy_from_user(&kern_op, user_op, sizeof(xen_memory_reservation_t)))
                return -EFAULT;
 
-       ret = xencomm_create(&kern_op, sizeof(xen_memory_reservation_t), 
&op_desc);
+       ret = xencomm_create(&kern_op, sizeof(xen_memory_reservation_t),
+                            &op_desc, GFP_KERNEL);
        if (ret)
                return ret;
 
@@ -157,8 +164,8 @@ static int xenppc_privcmd_memory_op(priv
                        struct xencomm_desc *desc = NULL;
                        if (kern_op.extent_start) {
                                ret = xencomm_create(kern_op.extent_start, 
-                                                       kern_op.nr_extents * 
sizeof(*kern_op.extent_start),
-                                                       &desc);
+                                                    kern_op.nr_extents * 
sizeof(*kern_op.extent_start),
+                                                    &desc, GFP_KERNEL);
                                if (ret)
                                        goto out;
 
diff -r 95fc918cb989 -r 05a141988d9e drivers/xen/core/xencomm.c
--- a/drivers/xen/core/xencomm.c        Tue Mar 14 21:06:23 2006 -0500
+++ b/drivers/xen/core/xencomm.c        Thu Mar 23 16:41:49 2006 -0500
@@ -91,12 +91,12 @@ static int __xencomm_init(struct xencomm
 }
 
 /* XXX use slab allocator */
-struct xencomm_desc *xencomm_alloc(void)
+static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask)
 {
        struct xencomm_desc *desc;
 
        /* XXX could we call this from irq context? */
-       desc = (struct xencomm_desc *)__get_free_page(GFP_KERNEL);
+       desc = (struct xencomm_desc *)__get_free_page(gfp_mask);
        desc->nr_addrs = (PAGE_SIZE - sizeof(struct xencomm_desc)) /
                        sizeof(*desc->address);
 
@@ -109,7 +109,7 @@ void xencomm_free(struct xencomm_desc *d
                free_page((unsigned long)desc);
 }
 
-int xencomm_create(void *buffer, unsigned long bytes, struct xencomm_desc 
**ret)
+int xencomm_create(void *buffer, unsigned long bytes, struct xencomm_desc 
**ret, gfp_t gfp_mask)
 {
        struct xencomm_desc *desc;
        int rc;
@@ -122,7 +122,7 @@ int xencomm_create(void *buffer, unsigne
                printk("%s: %p[%ld]\n", __func__, buffer, bytes);
        }
 
-       desc = xencomm_alloc();
+       desc = xencomm_alloc(gfp_mask);
        if (!desc) {
                printk("%s failure\n", "xencomm_alloc");
                return -ENOMEM;
diff -r 95fc918cb989 -r 05a141988d9e include/asm-powerpc/xen/asm/hypercall.h
--- a/include/asm-powerpc/xen/asm/hypercall.h   Tue Mar 14 21:06:23 2006 -0500
+++ b/include/asm-powerpc/xen/asm/hypercall.h   Thu Mar 23 16:41:49 2006 -0500
@@ -66,7 +66,7 @@ static inline int HYPERVISOR_event_chann
        struct xencomm_desc *desc;
        int rc;
 
-       rc = xencomm_create(op, sizeof(evtchn_op_t), &desc);
+       rc = xencomm_create(op, sizeof(evtchn_op_t), &desc, GFP_ATOMIC);
        if (rc)
                return rc;
 
@@ -106,7 +106,7 @@ static inline int HYPERVISOR_xen_version
                        return -ENOSYS;
        }
 
-       rc = xencomm_create(arg, size, &desc);
+       rc = xencomm_create(arg, size, &desc, GFP_KERNEL);
        if (rc)
                return rc;
 
@@ -122,7 +122,7 @@ static inline int HYPERVISOR_physdev_op(
        struct xencomm_desc *desc;
        int rc;
 
-       rc = xencomm_create(op, sizeof(physdev_op_t), &desc);
+       rc = xencomm_create(op, sizeof(physdev_op_t), &desc, GFP_ATOMIC);
        if (rc)
                return rc;
 
diff -r 95fc918cb989 -r 05a141988d9e include/xen/xencomm.h
--- a/include/xen/xencomm.h     Tue Mar 14 21:06:23 2006 -0500
+++ b/include/xen/xencomm.h     Thu Mar 23 16:41:49 2006 -0500
@@ -27,9 +27,8 @@ struct xencomm_mini {
     uint64_t address[XENCOMM_MINI_ADDRS];
 };
 
-extern struct xencomm_desc *xencomm_alloc(void);
 extern int xencomm_create(void *buffer, unsigned long bytes,
-                       struct xencomm_desc **desc);
+                         struct xencomm_desc **desc, gfp_t type);
 extern void xencomm_free(struct xencomm_desc *desc);
 extern int xencomm_create_mini(void *area, int arealen, void *buffer,
             unsigned long bytes, struct xencomm_desc **ret);



_______________________________________________
Xen-ppc-devel mailing list
Xen-ppc-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ppc-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.