
Re: [XenPPC] [RFC] 64mb Chunk Allocator



Initial code that walks the device tree to set up the chunk bit vector and
allocates RMA memory for Dom0 from the first available chunk.

Note: the 'diff' parameters used here are probably not the standard ones.
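
For context on the fixed-size vector in the first hunk, the coverage
arithmetic works out as follows (a sketch only; CHUNKS_PER_LONG and
NR_CHUNK_LONGS are illustrative names, and 64-bit longs are assumed, as on
PPC64):

#define CHUNK_SIZE      0x4000000UL                 /* 64 MB per chunk          */
#define CHUNKS_PER_LONG (sizeof(unsigned long) * 8) /* 64 bits -> 4 GB per long */
#define NR_CHUNK_LONGS  4                           /* 4 longs  -> 16 GB total  */
/* total tracked: NR_CHUNK_LONGS * CHUNKS_PER_LONG * CHUNK_SIZE = 16 GB,
 * which is what the ***FIXME comment in the hunk refers to */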

--- setup.c.cow 2006-07-07 17:34:16.000000000 -0400
+++ setup.c     2006-07-24 16:03:34.000000000 -0400
@@ -44,6 +44,12 @@
 #include "of-devtree.h"
 
 #define DEBUG
+
+#define CHUNK_SIZE     0x4000000
+/* track available 64mb physical memory chunks */
+/* 64 chunks cover 4gb; this vector tracks 16gb ***FIXME */
+unsigned long chunk_vector[4] = {0,0,0,0};
+
 unsigned long xenheap_phys_end;
 
 /* opt_noht: If true, Hyperthreading is ignored. */
@@ -170,6 +176,11 @@
     ulong bytes = 0;
     ulong freemem = (ulong)_end;
     ulong oftree_end;
+    ulong nr;
+
+    ofdn_t n;
+    int i, p_len;
+    ulong membuf[16], memstart, memlen;
 
     memcpy(0, exception_vectors, exception_vectors_end - exception_vectors);
     synchronize_caches(0, exception_vectors_end - exception_vectors);
@@ -265,6 +276,33 @@
     ofd_walk((void *)oftree, OFD_ROOT, ofd_dump_props, OFD_DUMP_ALL);
 #endif
 
+    n = ofd_node_find((void *)oftree, "/memory");
+    if (n <= 0){
+        printk("ofd_node_find(oftree, '/memory')  failed\n");
+    } else {
+        p_len = ofd_getprop((void *)oftree, n, "reg", membuf, sizeof (membuf));
+        if (p_len <= 0){
+            printk("ofd_getprop(oftree, n, 'reg', ... failed\n");
+        } else {
+            printk("/memory/reg  0x%x\n", p_len);
+            printk("0x%016lx 0x%016lx\n", membuf[0], membuf[1]);
+            printk("0x%016lx 0x%016lx\n", membuf[2], membuf[3]);
+        }
+    }
+
+    /* 1st chunk goes to hypervisor; remaining chunks to guests */
+    /* (need to verify that eomem can be contained within chunk_vector) */
+    for (i = 0; i < (p_len / sizeof(ulong)); i += 2){
+        memstart = membuf[i]; memlen = membuf[i+1];
+        for (nr = memstart/CHUNK_SIZE; nr < ((memstart + memlen)/CHUNK_SIZE); nr++) {
+            __set_bit(nr, chunk_vector);
+        }
+    }
+    __clear_bit(0, chunk_vector);
+
+    printk("chunk_vector: 0x%016lx 0x%016lx 0x%016lx 0x%016lx\n",
+           chunk_vector[0], chunk_vector[1], chunk_vector[2], chunk_vector[3]);
+
     /* mark all memory from modules onward as unused */
     init_boot_pages(freemem, eomem);
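
The hunk above converts each (base, size) pair from the /memory node's "reg"
property into chunk bits inline.  Factored out, the same arithmetic would look
roughly like this (a sketch only; mark_chunks_available() is a hypothetical
helper, not part of the patch):

static void mark_chunks_available(ulong start, ulong len)
{
    ulong nr;

    /* set bits start/CHUNK_SIZE .. (start + len)/CHUNK_SIZE - 1 */
    for (nr = start / CHUNK_SIZE; nr < (start + len) / CHUNK_SIZE; nr++)
        __set_bit(nr, chunk_vector);
}

It would be called once per (base, size) pair decoded from membuf[], with
chunk 0 cleared afterwards since that chunk stays with the hypervisor.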
 

--- domain.c.cow        2006-07-07 17:34:16.000000000 -0400
+++ domain.c    2006-07-18 15:35:13.000000000 -0400
@@ -31,6 +31,9 @@
 #include <asm/current.h>
 #include <asm/hcalls.h>
 
+#define CHUNK_SIZE     0x4000000
+extern unsigned long chunk_vector[4];
+
 extern void idle_loop(void);
 
 #define next_arg(fmt, args) ({                                              \
@@ -74,6 +77,8 @@
 int arch_domain_create(struct domain *d)
 {
 
+    ulong avail_chunk;
+
     if (d->domain_id == IDLE_DOMAIN_ID) {
         d->shared_info = (void *)alloc_xenheap_page();
         clear_page(d->shared_info);
@@ -81,9 +86,28 @@
         return 0;
     }
 
+#if 0
     /* XXX the hackage... hardcode 64M domains */
     d->arch.rma_base = (64<<20) * (d->domain_id + 1);
-    d->arch.rma_size = (64<<20);
+    d->arch.rma_size = (128<<20);
+#endif
+
+    do {
+       avail_chunk = find_first_bit(chunk_vector, sizeof(chunk_vector) * 8);
+       if ( avail_chunk >= sizeof(chunk_vector) * 8 ) {
+           printf("No memory available for domain\n");
+           machine_halt();
+       }
+       
+       if ( test_and_clear_bit(avail_chunk, chunk_vector) ) {
+           d->arch.rma_base = CHUNK_SIZE * avail_chunk;
+           d->arch.rma_size = CHUNK_SIZE;
+           break;
+       }
+    } while ( 1 );
+
+    printk("chunk_vector: 0x%016lx 0x%016lx 0x%016lx 0x%016lx\n",
+           chunk_vector[0], chunk_vector[1], chunk_vector[2], chunk_vector[3]);
 
     printk("clearing RMO: 0x%lx[0x%lx]\n", d->arch.rma_base, d->arch.rma_size);
     memset((void*)d->arch.rma_base, 0, d->arch.rma_size);
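
For comparison, the allocation loop added to arch_domain_create() could be
pulled out into a small helper; a minimal sketch under the same assumptions
(alloc_chunk() is hypothetical, not part of the patch):

/* return the physical base of a free 64 MB chunk, or (ulong)-1 if none left */
static ulong alloc_chunk(void)
{
    ulong nr;

    do {
        nr = find_first_bit(chunk_vector, sizeof(chunk_vector) * 8);
        if (nr >= sizeof(chunk_vector) * 8)
            return (ulong)-1;
        /* retry if another CPU claimed this chunk between the two calls */
    } while (!test_and_clear_bit(nr, chunk_vector));

    return nr * CHUNK_SIZE;
}

The caller would set d->arch.rma_base to the returned base and
d->arch.rma_size to CHUNK_SIZE, or halt if nothing is available, exactly as
the hunk above does.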
 
-- 


_______________________________________________
Xen-ppc-devel mailing list
Xen-ppc-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ppc-devel


 

