Re: [Xen-devel] [PATCH] linux-2.6.18/ACPI: eliminate restriction on ACPI processor ID range
On 06/12/2011 13:17, "Jan Beulich" <JBeulich@xxxxxxxx> wrote:

> Rather than storing the reference acpi_device pointers in an ACPI-ID-
> indexed array (and having a bogus BUG_ON() check on a platform-provided
> value), use a radix tree instead.
>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

Acked-by: Keir Fraser <keir@xxxxxxx>

> --- a/drivers/acpi/processor_core.c
> +++ b/drivers/acpi/processor_core.c
> @@ -513,7 +513,14 @@ static int acpi_processor_get_info(struc
>  	return 0;
>  }
>
> -static void *processor_device_array[NR_ACPI_CPUS];
> +#ifndef CONFIG_XEN
> +static void *processor_device_array[NR_CPUS];
> +#else
> +#include <linux/mutex.h>
> +#include <linux/radix-tree.h>
> +static DEFINE_MUTEX(processor_device_mutex);
> +static RADIX_TREE(processor_device_tree, GFP_KERNEL);
> +#endif
>
>  static int acpi_processor_start(struct acpi_device *device)
>  {
> @@ -541,9 +548,20 @@ static int __cpuinit acpi_processor_add(
>  	 * Don't trust it blindly
>  	 */
>  #ifdef CONFIG_XEN
> -	BUG_ON(pr->acpi_id >= NR_ACPI_CPUS);
> -	if (processor_device_array[pr->acpi_id] != NULL &&
> -	    processor_device_array[pr->acpi_id] != (void *)device) {
> +	mutex_lock(&processor_device_mutex);
> +	result = radix_tree_insert(&processor_device_tree,
> +				   pr->acpi_id, device);
> +	switch (result) {
> +	default:
> +		goto end_unlock;
> +	case -EEXIST:
> +		if (radix_tree_lookup(&processor_device_tree,
> +				      pr->acpi_id) == device) {
> +	case 0:
> +			mutex_unlock(&processor_device_mutex);
> +			break;
> +		}
> +		mutex_unlock(&processor_device_mutex);
>  #else
>  	if (processor_device_array[pr->id] != NULL &&
>  	    processor_device_array[pr->id] != (void *)device) {
> @@ -553,14 +571,11 @@ static int __cpuinit acpi_processor_add(
>  		return -ENODEV;
>  	}
>  #ifdef CONFIG_XEN
> -	processor_device_array[pr->acpi_id] = (void *)device;
>  	if (pr->id != -1)
> -		processors[pr->id] = pr;
>  #else
>  	processor_device_array[pr->id] = (void *)device;
> -
> -	processors[pr->id] = pr;
>  #endif /* CONFIG_XEN */
> +	processors[pr->id] = pr;
>
>  	result = acpi_processor_add_fs(device);
>  	if (result)
> @@ -602,6 +617,14 @@ err_thermal_unregister:
>  }
>
>  end:
> +#ifdef CONFIG_XEN
> +	mutex_lock(&processor_device_mutex);
> +	if (radix_tree_lookup(&processor_device_tree,
> +			      pr->acpi_id) == device)
> +		radix_tree_delete(&processor_device_tree, pr->acpi_id);
> + end_unlock:
> +	mutex_unlock(&processor_device_mutex);
> +#endif
>
>  	return result;
>  }
> @@ -692,10 +715,8 @@ static int acpi_processor_remove(struct
>
>  #ifdef CONFIG_XEN
>  	if (pr->id != -1)
> -		processors[pr->id] = NULL;
> -#else
> +#endif
>  	processors[pr->id] = NULL;
> -#endif /* CONFIG_XEN */
>
>
>  	kfree(pr);
> @@ -992,6 +1013,30 @@ static void __exit acpi_processor_exit(v
>
>  	remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
>
> +#ifdef CONFIG_XEN
> +	{
> +		struct acpi_device *dev;
> +		unsigned int idx = 0;
> +
> +		while (radix_tree_gang_lookup(&processor_device_tree,
> +					      (void **)&dev, idx, 1)) {
> +			struct acpi_processor *pr = acpi_driver_data(dev);
> +
> +			/* prevent live lock */
> +			if (pr->acpi_id < idx) {
> +				printk(KERN_WARNING PREFIX "ID %u unexpected"
> +				       " (less than %u); leaking memory\n",
> +				       pr->acpi_id, idx);
> +				break;
> +			}
> +			idx = pr->acpi_id;
> +			radix_tree_delete(&processor_device_tree, idx);
> +			if (!++idx)
> +				break;
> +		}
> +	}
> +#endif
> +
>  	return;
>  }
>
> --- a/include/acpi/processor.h
> +++ b/include/acpi/processor.h
> @@ -24,12 +24,6 @@
>  #define ACPI_TSD_REV0_REVISION 0	/* Support for _PSD as in ACPI 3.0 */
>  #define ACPI_TSD_REV0_ENTRIES 5
>
> -#ifdef CONFIG_XEN
> -#define NR_ACPI_CPUS (NR_CPUS < 256 ? 256 : NR_CPUS)
> -#else
> -#define NR_ACPI_CPUS NR_CPUS
> -#endif /* CONFIG_XEN */
> -
>  /*
>   * Types of coordination defined in ACPI 3.0. Same macros can be used across
>   * P, C and T states
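For reference, the two radix-tree idioms the patch relies on can be seen in isolation in the sketch below: a hypothetical standalone module (all demo_* names are invented; nothing here is part of the patch) that registers an entry under a sparse ID, tolerating re-insertion of the same pointer just as acpi_processor_add() does on -EEXIST, and then drains the tree the way acpi_processor_exit() does, recovering each entry's key from the entry itself because radix_tree_gang_lookup() does not report the index it matched.

/*
 * Hypothetical standalone sketch of the patch's radix-tree usage; all
 * demo_* names are invented and nothing here is part of the patch.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/slab.h>

struct demo_node {
	unsigned int id;	/* the key, analogous to pr->acpi_id */
};

static DEFINE_MUTEX(demo_mutex);
static RADIX_TREE(demo_tree, GFP_KERNEL);	/* GFP flags for node allocations */

/* Insert @node at @node->id; re-inserting the same pointer is treated as
 * success, mirroring acpi_processor_add()'s handling of -EEXIST. */
static int demo_register(struct demo_node *node)
{
	int err;

	mutex_lock(&demo_mutex);
	err = radix_tree_insert(&demo_tree, node->id, node);
	if (err == -EEXIST && radix_tree_lookup(&demo_tree, node->id) == node)
		err = 0;
	mutex_unlock(&demo_mutex);
	return err;
}

static int __init demo_init(void)
{
	struct demo_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
	int err;

	if (!node)
		return -ENOMEM;
	node->id = 1000;	/* keys may be sparse; no NR_CPUS-style bound */
	err = demo_register(node);
	if (err)
		kfree(node);
	return err;
}

/* Drain the tree one entry at a time, as acpi_processor_exit() does.
 * radix_tree_gang_lookup() does not return the index it matched, so the
 * key is read back from the entry; an entry claiming a key below the
 * search start would make the loop rescan the same slot forever, hence
 * the same "prevent live lock" bail-out as in the patch. */
static void __exit demo_exit(void)
{
	struct demo_node *node;
	unsigned int idx = 0;

	while (radix_tree_gang_lookup(&demo_tree, (void **)&node, idx, 1)) {
		if (node->id < idx)
			break;		/* inconsistent key: give up */
		idx = node->id;
		radix_tree_delete(&demo_tree, idx);
		kfree(node);
		if (!++idx)
			break;		/* index wrapped: tree exhausted */
	}
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Note that the patch itself folds the -EEXIST tolerance into a switch whose case labels sit inside an if block; the sketch spells the same logic out sequentially for clarity.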