[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 2 of 4] x86: make the pv-only e820 array be dynamic
# HG changeset patch # Parent 6c05be60fbc74f5d3c751a379ce5c4a147e34132 x86: make the pv-only e820 array be dynamic. During creation of the PV domain we allocate the E820 structure to have the number of E820 entries on the machine, plus the number three. This will allow the tool stack to fill the E820 with more than three entries. Specifically, in the use case where the toolstack retrieves the E820, sanitizes it, and then sets it for the PV guest (for PCI passthrough), this dynamic number of E820 entries is just right. Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx> Signed-off-by: Keir Fraser <keir@xxxxxxx> diff -r 6c05be60fbc7 xen/arch/x86/domain.c --- a/xen/arch/x86/domain.c Tue Apr 12 14:46:05 2011 -0400 +++ b/xen/arch/x86/domain.c Wed Apr 13 09:11:22 2011 -0400 @@ -657,6 +657,8 @@ int arch_domain_create(struct domain *d, /* 32-bit PV guest by default only if Xen is not 64-bit. */ d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = (CONFIG_PAGING_LEVELS != 4); + + spin_lock_init(&d->arch.pv_domain.e820_lock); } /* initialize default tsc behavior in case tools don't */ @@ -696,6 +698,8 @@ void arch_domain_destroy(struct domain * if ( is_hvm_domain(d) ) hvm_domain_destroy(d); + else + xfree(d->arch.pv_domain.e820); vmce_destroy_msr(d); pci_release_devices(d); diff -r 6c05be60fbc7 xen/arch/x86/mm.c --- a/xen/arch/x86/mm.c Tue Apr 12 14:46:05 2011 -0400 +++ b/xen/arch/x86/mm.c Wed Apr 13 09:11:22 2011 -0400 @@ -100,6 +100,7 @@ #include <xen/iocap.h> #include <xen/guest_access.h> #include <xen/pfn.h> +#include <xen/xmalloc.h> #include <asm/paging.h> #include <asm/shadow.h> #include <asm/page.h> @@ -4706,11 +4707,12 @@ long arch_memory_op(int op, XEN_GUEST_HA { struct xen_foreign_memory_map fmap; struct domain *d; + struct e820entry *e820; if ( copy_from_guest(&fmap, arg, 1) ) return -EFAULT; - if ( fmap.map.nr_entries > ARRAY_SIZE(d->arch.pv_domain.e820) ) + if ( fmap.map.nr_entries > E820MAX ) return -EINVAL; rc = rcu_lock_target_domain_by_id(fmap.domid, &d); @@ 
-4730,9 +4732,25 @@ long arch_memory_op(int op, XEN_GUEST_HA return -EPERM; } - rc = copy_from_guest(d->arch.pv_domain.e820, fmap.map.buffer, - fmap.map.nr_entries) ? -EFAULT : 0; + e820 = xmalloc_array(e820entry_t, fmap.map.nr_entries); + if ( e820 == NULL ) + { + rcu_unlock_domain(d); + return -ENOMEM; + } + + if ( copy_from_guest(e820, fmap.map.buffer, fmap.map.nr_entries) ) + { + xfree(e820); + rcu_unlock_domain(d); + return -EFAULT; + } + + spin_lock(&d->arch.pv_domain.e820_lock); + xfree(d->arch.pv_domain.e820); + d->arch.pv_domain.e820 = e820; d->arch.pv_domain.nr_e820 = fmap.map.nr_entries; + spin_unlock(&d->arch.pv_domain.e820_lock); rcu_unlock_domain(d); return rc; @@ -4743,19 +4761,29 @@ long arch_memory_op(int op, XEN_GUEST_HA struct xen_memory_map map; struct domain *d = current->domain; - /* Backwards compatibility. */ - if ( d->arch.pv_domain.nr_e820 == 0 ) - return -ENOSYS; - if ( copy_from_guest(&map, arg, 1) ) return -EFAULT; + spin_lock(&d->arch.pv_domain.e820_lock); + + /* Backwards compatibility. */ + if ( (d->arch.pv_domain.nr_e820 == 0) || + (d->arch.pv_domain.e820 == NULL) ) + { + spin_unlock(&d->arch.pv_domain.e820_lock); + return -ENOSYS; + } + map.nr_entries = min(map.nr_entries, d->arch.pv_domain.nr_e820); if ( copy_to_guest(map.buffer, d->arch.pv_domain.e820, map.nr_entries) || copy_to_guest(arg, &map, 1) ) + { + spin_unlock(&d->arch.pv_domain.e820_lock); return -EFAULT; - + } + + spin_unlock(&d->arch.pv_domain.e820_lock); return 0; } diff -r 6c05be60fbc7 xen/include/asm-x86/domain.h --- a/xen/include/asm-x86/domain.h Tue Apr 12 14:46:05 2011 -0400 +++ b/xen/include/asm-x86/domain.h Wed Apr 13 09:11:22 2011 -0400 @@ -241,7 +241,8 @@ struct pv_domain unsigned long pirq_eoi_map_mfn; /* Pseudophysical e820 map (XENMEM_memory_map). 
*/ - struct e820entry e820[3]; + spinlock_t e820_lock; + struct e820entry *e820; unsigned int nr_e820; }; _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.