[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] balloon patch
Finally had time this week to look at memory sharing between domains. Here is a balloon driver patch for xenolinux. Inflation and deflation no longer crash the guest os. I'm not a Linux vm system expert so some of this is guesswork. David *** ../../xeno-unstable.bk/linux-2.4.26-xen-sparse//arch/xen/drivers/balloon/balloon.c Tue Jun 22 09:58:19 2004 --- linux-xen-sparse/arch/xen/drivers/balloon/balloon.c Wed Jun 30 09:32:40 2004 *************** *** 17,22 **** --- 17,23 ---- #include <linux/mman.h> #include <linux/smp_lock.h> #include <linux/pagemap.h> + #include <linux/vmalloc.h> #include <asm/hypervisor.h> #include <asm/pgalloc.h> *************** *** 64,77 **** unsigned long vaddr; unsigned long i, j; ! parray = (unsigned long *)kmalloc(num_pages * sizeof(unsigned long), ! GFP_KERNEL); currp = parray; for ( i = 0; i < num_pages; i++ ) { /* Try to obtain a free page (has to be done with GFP_ATOMIC). */ ! vaddr = __get_free_page(GFP_ATOMIC); /* If allocation fails then free all reserved pages. */ if ( vaddr == 0 ) --- 65,82 ---- unsigned long vaddr; unsigned long i, j; ! parray = (unsigned long *)vmalloc(num_pages * sizeof(unsigned long)); ! if (!parray) ! { ! printk("inflate_balloon: Unable to vmalloc parray\n"); ! return 0; ! } currp = parray; for ( i = 0; i < num_pages; i++ ) { /* Try to obtain a free page (has to be done with GFP_ATOMIC). */ ! vaddr = __get_free_page(GFP_KERNEL); /* If allocation fails then free all reserved pages. */ if ( vaddr == 0 ) *************** *** 113,119 **** ret = num_pages; cleanup: ! kfree(parray); return ret; } --- 119,125 ---- ret = num_pages; cleanup: ! vfree(parray); return ret; } *************** *** 143,150 **** if ( phys_to_machine_mapping[i] == DEAD ) { phys_to_machine_mapping[i] = *curr; ! queue_l1_entry_update( ! 
(pte_t *)((i << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE), i); queue_l1_entry_update( get_ptep((unsigned long)__va(i << PAGE_SHIFT)), ((*curr) << PAGE_SHIFT) | pgprot_val(PAGE_KERNEL)); --- 149,155 ---- if ( phys_to_machine_mapping[i] == DEAD ) { phys_to_machine_mapping[i] = *curr; ! queue_machphys_update(*curr, i); queue_l1_entry_update( get_ptep((unsigned long)__va(i << PAGE_SHIFT)), ((*curr) << PAGE_SHIFT) | pgprot_val(PAGE_KERNEL)); *************** *** 181,188 **** return -EAGAIN; } ! parray = (unsigned long *)kmalloc(num_pages * sizeof(unsigned long), ! GFP_KERNEL); ret = HYPERVISOR_dom_mem_op(MEMOP_increase_reservation, parray, num_pages); --- 186,198 ---- return -EAGAIN; } ! parray = (unsigned long *)vmalloc(num_pages * sizeof(unsigned long)); ! if (!parray) ! { ! printk("deflate_balloon: Unable to vmalloc parray\n"); ! return 0; ! } ! XEN_flush_page_update_queue(); ret = HYPERVISOR_dom_mem_op(MEMOP_increase_reservation, parray, num_pages); *************** *** 203,209 **** credit -= num_pages; cleanup: ! kfree(parray); return ret; } --- 213,219 ---- credit -= num_pages; cleanup: ! vfree(parray); return ret; } ------------------------------------------------------- This SF.Net email sponsored by Black Hat Briefings & Training. Attend Black Hat Briefings & Training, Las Vegas July 24-29 - digital self defense, top technical experts, no vendor pitches, unmatched networking opportunities. Visit www.blackhat.com _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxxxxxxxxxx https://lists.sourceforge.net/lists/listinfo/xen-devel
|
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |