[PATCH v2 7/9] xen/x86: rename bad_srat to numa_bad
When the NUMA initialization code fails while scanning the SRAT, it
calls bad_srat to disable NUMA and clear the related data. But this
name is ACPI specific. As we have moved the generically usable NUMA
code to common code, bad_srat has come along with it. It is not
reasonable to expect other NUMA implementations to provide a fallback
function named bad_srat. So in this patch, we rename bad_srat to
numa_bad.
Signed-off-by: Wei Chen <wei.chen@xxxxxxx>
---
v1 -> v2:
1. New in v2.
---
xen/arch/x86/srat.c | 18 +++++++++---------
xen/common/numa.c | 4 ++--
xen/include/xen/numa.h | 2 +-
3 files changed, 12 insertions(+), 12 deletions(-)
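For illustration only (not part of the patch): a minimal, standalone
user-space sketch of the fallback pattern the rename is meant to make
reusable by non-ACPI NUMA implementations. All types and helpers below
are simplified stand-ins, not Xen code.

/*
 * Standalone mock (not Xen code): models the generic fallback the renamed
 * numa_bad() provides -- disable NUMA and clear the related data -- and how
 * a hypothetical non-ACPI scanner (e.g. device tree) could reuse it without
 * referencing the ACPI-specific "SRAT" name.
 */
#include <stdio.h>
#include <string.h>

#define NR_CPUS      16
#define NUMA_NO_NODE 0xffu

typedef unsigned char nodeid_t;

static int numa_off;                      /* mock "NUMA disabled" flag */
static nodeid_t cpu_to_node[NR_CPUS];     /* mock per-CPU node map */

/* Mock of the generic fallback: disable NUMA and clear related data. */
static void numa_bad(void)
{
    printf("NUMA: firmware table rejected, falling back to flat mode\n");
    numa_off = 1;
    memset(cpu_to_node, NUMA_NO_NODE, sizeof(cpu_to_node));
}

/* Hypothetical non-ACPI scanner reusing the same fallback on failure. */
static int dt_numa_scan(int table_ok)
{
    if (!table_ok) {
        numa_bad();
        return -1;
    }
    return 0;
}

int main(void)
{
    dt_numa_scan(0);                      /* simulate a malformed table */
    printf("numa_off = %d\n", numa_off);
    return 0;
}

In Xen itself the function keeps its existing behaviour; this patch only
changes the name.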
diff --git a/xen/arch/x86/srat.c b/xen/arch/x86/srat.c
index 9ae81afdff..4afb37bf9f 100644
--- a/xen/arch/x86/srat.c
+++ b/xen/arch/x86/srat.c
@@ -93,7 +93,7 @@ nodeid_t setup_node(unsigned pxm)
return node;
}
-void __init bad_srat(void)
+void __init numa_bad(void)
{
int i;
printk(KERN_ERR "SRAT: SRAT not used.\n");
@@ -153,7 +153,7 @@ acpi_numa_x2apic_affinity_init(const struct acpi_srat_x2apic_cpu_affinity *pa)
if (srat_disabled())
return;
if (pa->header.length < sizeof(struct acpi_srat_x2apic_cpu_affinity)) {
- bad_srat();
+ numa_bad();
return;
}
if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
@@ -166,7 +166,7 @@ acpi_numa_x2apic_affinity_init(const struct acpi_srat_x2apic_cpu_affinity *pa)
pxm = pa->proximity_domain;
node = setup_node(pxm);
if (node == NUMA_NO_NODE) {
- bad_srat();
+ numa_bad();
return;
}
@@ -189,7 +189,7 @@ acpi_numa_processor_affinity_init(const struct acpi_srat_cpu_affinity *pa)
if (srat_disabled())
return;
if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
- bad_srat();
+ numa_bad();
return;
}
if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
@@ -202,7 +202,7 @@ acpi_numa_processor_affinity_init(const struct acpi_srat_cpu_affinity *pa)
}
node = setup_node(pxm);
if (node == NUMA_NO_NODE) {
- bad_srat();
+ numa_bad();
return;
}
apicid_to_node[pa->apic_id] = node;
@@ -225,7 +225,7 @@ acpi_numa_memory_affinity_init(const struct acpi_srat_mem_affinity *ma)
if (srat_disabled())
return;
if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
- bad_srat();
+ numa_bad();
return;
}
if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
@@ -239,7 +239,7 @@ acpi_numa_memory_affinity_init(const struct acpi_srat_mem_affinity *ma)
if (!numa_memblks_available()) {
dprintk(XENLOG_WARNING,
"Too many numa entries, try bigger NR_NODE_MEMBLKS!\n");
- bad_srat();
+ numa_bad();
return;
}
@@ -248,14 +248,14 @@ acpi_numa_memory_affinity_init(const struct acpi_srat_mem_affinity *ma)
pxm &= 0xff;
node = setup_node(pxm);
if (node == NUMA_NO_NODE) {
- bad_srat();
+ numa_bad();
return;
}
ret = numa_update_node_memblks(node, pxm, ma->base_address, ma->length,
ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE);
if (ret)
- bad_srat();
+ numa_bad();
}
void __init acpi_numa_arch_fixup(void) {}
diff --git a/xen/common/numa.c b/xen/common/numa.c
index e3b66c54b5..5ab061e991 100644
--- a/xen/common/numa.c
+++ b/xen/common/numa.c
@@ -300,7 +300,7 @@ int __init acpi_scan_nodes(paddr_t start, paddr_t end)
if ( !nodes_cover_memory() )
{
- bad_srat();
+ numa_bad();
return -1;
}
@@ -311,7 +311,7 @@ int __init acpi_scan_nodes(paddr_t start, paddr_t end)
{
printk(KERN_ERR
"SRAT: No NUMA node hash function found. Contact maintainer\n");
- bad_srat();
+ numa_bad();
return -1;
}
diff --git a/xen/include/xen/numa.h b/xen/include/xen/numa.h
index 564add430c..4c4632ec27 100644
--- a/xen/include/xen/numa.h
+++ b/xen/include/xen/numa.h
@@ -52,7 +52,7 @@ extern void numa_initmem_init(unsigned long start_pfn,
unsigned long end_pfn);
extern bool numa_enabled_with_firmware(void);
extern enum numa_mode numa_status;
-extern void bad_srat(void);
+extern void numa_bad(void);
extern void numa_set_node(int cpu, nodeid_t node);
extern void setup_node_bootmem(nodeid_t nodeid, paddr_t start, paddr_t end);
--
2.25.1