Re: [Xen-devel] [RFC PATCH v1 00/21] ARM: Add Xen NUMA support
Hi Julien,

On Thu, Feb 9, 2017 at 10:01 PM, Julien Grall <julien.grall@xxxxxxx> wrote:
> Hi Vijay,
>
> On 02/09/2017 03:56 PM, vijay.kilari@xxxxxxxxx wrote:
>>
>> Note: Please use this patch series only for review.
>> For testing, patch to boot allocator is required. Which will
>> be sent outside this series.
>
> Can you expand here? Is this patch a NUMA specific?

Yes, it is NUMA specific; I reported it here:
https://www.mail-archive.com/xen-devel@xxxxxxxxxxxxx/msg92093.html

I have a workaround and still need to prepare a proper patch. (As far as I
know, no one else has posted a patch for this issue so far.)

> Also in a previous thread you mentioned issue to boot Xen with NUMA on Xen
> unstable. So how did you test it?

The issue I reported (a panic in page_alloc.c) is seen when booting plain
unstable Xen on the NUMA board without any NUMA or ITS patches applied,
and only when booting with DT.

I have tested this series with ACPI on the unstable version, and with DT on
the 4.7 version.

Also, I have prepared a small ad-hoc patch (below) that calls cpu_to_node()
for every CPU and prints phys_to_nid() for a list of memory ranges, to check
whether the reported node IDs are correct.

-----------------------------------------------------------------------------------------
diff --git a/xen/arch/arm/numa.c b/xen/arch/arm/numa.c
index d296523..d28e6bf 100644
--- a/xen/arch/arm/numa.c
+++ b/xen/arch/arm/numa.c
@@ -43,9 +43,11 @@ void __init numa_set_cpu_node(int cpu, unsigned long hwid)
     unsigned node;
 
     node = hwid >> 16 & 0xf;
+    printk("In %s cpu %d node %d\n",__func__, cpu, node);
     if ( !node_isset(node, numa_nodes_parsed) || node == MAX_NUMNODES )
         node = 0;
 
+    printk("In %s cpu %d node %d\n",__func__, cpu, node);
     numa_set_node(cpu, node);
     numa_add_cpu(cpu);
 }
@@ -245,3 +247,52 @@ int __init arch_numa_setup(char *opt)
 {
     return 1;
 }
+
+struct mem_list {
+    u64 start;
+    u64 end;
+};
+
+void numa_test(void)
+{
+    int i;
+
+    struct mem_list ml[] =
+    {
+        { 0x0000000001400000, 0x00000000fffecfff },
+        { 0x0000000100000000, 0x0000000ff7ffffff },
+        { 0x0000000ff8000000, 0x0000000ff801ffff },
+        { 0x0000000ff8020000, 0x0000000fffa9cfff },
+        { 0x0000000fffa9d000, 0x0000000fffffffff },
+        { 0x0000010000400000, 0x0000010ff57b2fff },
+        { 0x0000010ff6618000, 0x0000010ff6ff0fff },
+        { 0x0000010ff6ff1000, 0x0000010ff724ffff },
+        { 0x0000010ff734b000, 0x0000010ff73defff },
+        { 0x0000010ff73f0000, 0x0000010ff73fbfff },
+        { 0x0000010ff73fc000, 0x0000010ff74defff },
+        { 0x0000010ff74df000, 0x0000010ff9718fff },
+        { 0x0000010ff97a2000, 0x0000010ff97acfff },
+        { 0x0000010ff97ad000, 0x0000010ff97b3fff },
+        { 0x0000010ff97b5000, 0x0000010ff9813fff },
+        { 0x0000010ff9814000, 0x0000010ff9819fff },
+        { 0x0000010ff981a000, 0x0000010ff984afff },
+        { 0x0000010ff984c000, 0x0000010ff9851fff },
+        { 0x0000010ff9935000, 0x0000010ffaeb5fff },
+        { 0x0000010ffaff5000, 0x0000010ffb008fff },
+        { 0x0000010ffb009000, 0x0000010fffe28fff },
+        { 0x0000010fffe29000, 0x0000010fffe70fff },
+        { 0x0000010fffe71000, 0x0000010ffffb8fff },
+        { 0x0000010ffffff000, 0x0000010fffffffff },
+    };
+
+    for ( i = 0; i < 23; i++ )
+    {
+        printk("NUMA MEM TEST: start 0x%lx in node %d end 0x%lx in node %d\n",
+               ml[i].start, phys_to_nid(ml[i].start), ml[i].end, phys_to_nid(ml[i].end));
+    }
+
+    for ( i = 0; i < NR_CPUS; i++ )
+    {
+        printk("NUMA CPU TEST: cpu %d in node %d\n", i, cpu_to_node(i));
+    }
+}
diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index 5612ba6..0598672 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -698,6 +698,7 @@ void __init setup_cache(void)
     cacheline_bytes = 1U << (4 + (ccsid & 0x7));
 }
 
+extern void numa_test(void);
 /* C entry point for boot CPU */
 void __init start_xen(unsigned long boot_phys_offset,
                       unsigned long fdt_paddr,
@@ -825,6 +826,7 @@ void __init start_xen(unsigned long boot_phys_offset,
         }
     }
 
+    numa_test();
     printk("Brought up %ld CPUs\n", (long)num_online_cpus());
     /* TODO: smp_cpus_done(); */
 
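For context, the first numa.c hunk above derives the node ID purely from
bits [19:16] of the hardware CPU ID before validating it. Below is a minimal
stand-alone sketch of that derivation, only for illustration: the
MAX_NUMNODES value and example hwid values are made up, and the
numa_nodes_parsed check is deliberately not modelled.

/*
 * Stand-alone illustration (not part of the series) of the node derivation
 * used by numa_set_cpu_node() in the hunk above: the node ID is read from
 * bits [19:16] of the hardware CPU ID.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define MAX_NUMNODES 4   /* illustrative value only, not Xen's */

static unsigned int hwid_to_node(uint64_t hwid)
{
    unsigned int node = (hwid >> 16) & 0xf;   /* same shift/mask as the patch */

    /* Fall back to node 0 for out-of-range values; the real code also
     * requires the node to be set in numa_nodes_parsed. */
    if ( node >= MAX_NUMNODES )
        node = 0;

    return node;
}

int main(void)
{
    /* Hypothetical hardware IDs for CPUs on the two sockets of a two-node board. */
    uint64_t hwids[] = { 0x00000000, 0x00000001, 0x00010000, 0x00010001 };
    unsigned int i;

    for ( i = 0; i < sizeof(hwids) / sizeof(hwids[0]); i++ )
        printf("hwid 0x%08" PRIx64 " -> node %u\n",
               hwids[i], hwid_to_node(hwids[i]));

    return 0;
}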
> Cheers,
>
> --
> Julien Grall