Re: [PATCH v6 5/6] xen/x86: move NUMA process nodes nodes code from x86 to common
On 11.10.2022 13:17, Wei Chen wrote:
> --- a/xen/arch/x86/numa.c
> +++ b/xen/arch/x86/numa.c
> @@ -46,6 +46,11 @@ bool arch_numa_disabled(void)
>      return acpi_numa < 0;
>  }
> 
> +bool arch_numa_unavailable(void)

__init ?

> @@ -31,11 +46,334 @@ nodemask_t __read_mostly node_online_map = { { [0] = 1UL } };
> 
>  bool __ro_after_init numa_off;
> 
> +const char *__ro_after_init numa_fw_nid_name = "NONAME";

Didn't you mean to leave this at NULL for the DT case? (But yes, this
way you avoid a conditional at every printk() using it.) I'm also
uncertain of "NONAME" - personally I think e.g. "???" would be better,
just in case a message actually is logged with this still un-overridden.
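
(Purely for illustration, not taken from the patch: the NULL-default
alternative hinted at above would presumably require a fallback at every
use site, along the lines of

    const char *__ro_after_init numa_fw_nid_name; /* NULL until ACPI/DT code overrides it */

    printk(KERN_INFO "NUMA: Node %u %s %u [%"PRIpaddr", %"PRIpaddr"]\n",
           node, numa_fw_nid_name ?: "???", arch_nid, start, end - 1);

which is the conditional the sentinel string avoids.)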

> +bool __init numa_update_node_memblks(nodeid_t node, unsigned int arch_nid,
> +                                     paddr_t start, paddr_t size, bool hotplug)
> +{
> +    unsigned int i;
> +    bool next = false;
> +    paddr_t end = start + size;
> +    paddr_t nd_start = start;
> +    paddr_t nd_end = end;
> +    struct node *nd = &nodes[node];
> +
> +    /*
> +     * For the node that already has some memory blocks, we will
> +     * expand the node memory range temporarily to check memory
> +     * interleaves with other nodes. We will not use this node
> +     * temp memory range to check overlaps, because it will mask
> +     * the overlaps in same node.
> +     *
> +     * Node with 0 bytes memory doesn't need this expansion.
> +     */
> +    if ( nd->start != nd->end )
> +    {
> +        if ( nd_start > nd->start )
> +            nd_start = nd->start;
> +
> +        if ( nd_end < nd->end )
> +            nd_end = nd->end;
> +    }
> +
> +    /* It is fine to add this area to the nodes data it will be used later */
> +    switch ( conflicting_memblks(node, start, end, nd_start, nd_end, &i) )
> +    {
> +    case OVERLAP:
> +        if ( memblk_nodeid[i] == node )
> +        {
> +            bool mismatch = !hotplug != !test_bit(i, memblk_hotplug);
> +
> +            printk("%sNUMA: %s %u [%"PRIpaddr", %"PRIpaddr"] overlaps with itself [%"PRIpaddr", %"PRIpaddr"]\n",
> +                   mismatch ? KERN_ERR : KERN_WARNING, numa_fw_nid_name,
> +                   arch_nid, start, end - 1,
> +                   node_memblk_range[i].start, node_memblk_range[i].end - 1);
> +            if ( mismatch )
> +                return false;
> +            break;
> +        }
> +
> +        printk(KERN_ERR
> +               "NUMA: %s %u [%"PRIpaddr", %"PRIpaddr"] overlaps with %s %u [%"PRIpaddr", %"PRIpaddr"]\n",
> +               numa_fw_nid_name, arch_nid, start, end - 1, numa_fw_nid_name,
> +               numa_node_to_arch_nid(memblk_nodeid[i]),
> +               node_memblk_range[i].start, node_memblk_range[i].end - 1);
> +        return false;
> +
> +    case INTERLEAVE:
> +        printk(KERN_ERR
> +               "NUMA: %s %u: [%"PRIpaddr", %"PRIpaddr"] interleaves with %s %u memblk [%"PRIpaddr", %"PRIpaddr"]\n",
> +               numa_fw_nid_name, arch_nid, nd_start, nd_end - 1,
> +               numa_fw_nid_name, numa_node_to_arch_nid(memblk_nodeid[i]),
> +               node_memblk_range[i].start, node_memblk_range[i].end - 1);
> +        return false;
> +
> +    case NO_CONFLICT:
> +        break;
> +    }
> +
> +    if ( !hotplug )
> +    {
> +        node_set(node, memory_nodes_parsed);
> +        nd->start = nd_start;
> +        nd->end = nd_end;
> +    }
> +
> +    printk(KERN_INFO "NUMA: Node %u %s %u [%"PRIpaddr", %"PRIpaddr"]%s\n",
> +           node, numa_fw_nid_name, arch_nid, start, end - 1,
> +           hotplug ? " (hotplug)" : "");
> +
> +    /* Keep node_memblk_range[] sorted by address. */
> +    for ( i = 0; i < num_node_memblks; ++i )
> +        if ( node_memblk_range[i].start > start ||
> +             (node_memblk_range[i].start == start &&
> +              node_memblk_range[i].end > end) )
> +            break;
> +
> +    memmove(&node_memblk_range[i + 1], &node_memblk_range[i],
> +            (num_node_memblks - i) * sizeof(*node_memblk_range));
> +    node_memblk_range[i].start = start;
> +    node_memblk_range[i].end = end;
> +
> +    memmove(&memblk_nodeid[i + 1], &memblk_nodeid[i],
> +            (num_node_memblks - i) * sizeof(*memblk_nodeid));
> +    memblk_nodeid[i] = node;
> +
> +    if ( hotplug ) {

Nit: Placement of brace.
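
(To spell the nit out, assuming the usual ./CODING_STYLE rules apply
here: the opening brace would want to go on its own line, e.g.

    if ( hotplug )
    {
        /* ... body as in the patch ... */
    }

rather than sharing the line with the condition.)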

> --- a/xen/common/page_alloc.c
> +++ b/xen/common/page_alloc.c
> @@ -159,6 +159,8 @@
>  #define PGT_TYPE_INFO_INITIALIZER 0
>  #endif
> 
> +paddr_t __read_mostly mem_hotplug;

Not __ro_after_init?
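
(Just to illustrate the question, under the assumption that the variable
is only ever written from boot-time code:

    paddr_t __ro_after_init mem_hotplug;

If it can still change after boot, e.g. for memory hot-add, then
__read_mostly would need to stay.)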

Jan