Re: [PATCH v2 3/9] xen/x86: move generically usable NUMA code from x86 to common
On 08.07.2022 16:54, Wei Chen wrote:
> --- /dev/null
> +++ b/xen/common/numa.c
> @@ -0,0 +1,439 @@
> +/*
> + * Generic VM initialization for NUMA setups.
> + * Copyright 2002,2003 Andi Kleen, SuSE Labs.
> + * Adapted for Xen: Ryan Harper <ryanh@xxxxxxxxxx>
> + */
> +
> +#include <xen/init.h>
> +#include <xen/keyhandler.h>
> +#include <xen/mm.h>
> +#include <xen/nodemask.h>
> +#include <xen/numa.h>
> +#include <xen/param.h>
> +#include <xen/sched.h>
> +#include <xen/softirq.h>
> +#include <asm/acpi.h>
Isn't the goal for the now-common code to not depend on ACPI?
> +struct node_data node_data[MAX_NUMNODES];
> +
> +/* Mapping from pdx to node id */
> +int memnode_shift;
> +static typeof(*memnodemap) _memnodemap[64];
> +unsigned long memnodemapsize;
> +u8 *memnodemap;
For code being moved, please switch to (in this case) uint8_t. I'm on the
edge of asking to go further and
- also use __read_mostly for some / all of these,
- make memnode_shift unsigned int (sadly this looks to require more
adjustments, even if negative shift counts are meaningless),
- convert from plain int to unsigned int elsewhere as appropriate,
- add const where appropriate / possible.
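E.g. (just a sketch of the direction, assuming all of these qualify for
__read_mostly and memnode_shift can indeed become unsigned):

/* Sketch: uint8_t instead of u8, __read_mostly right after the type,
 * and an unsigned shift (the latter needing follow-up adjustments at
 * its users). */
struct node_data __read_mostly node_data[MAX_NUMNODES];

/* Mapping from pdx to node id */
unsigned int __read_mostly memnode_shift;
static typeof(*memnodemap) _memnodemap[64];
unsigned long __read_mostly memnodemapsize;
uint8_t *__read_mostly memnodemap;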
> +nodeid_t cpu_to_node[NR_CPUS] __read_mostly = {
> + [0 ... NR_CPUS-1] = NUMA_NO_NODE
> +};
> +
> +cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly;
For these two please put __read_mostly into its more conventional
place (right after the type).
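Just to illustrate the intended placement:

nodeid_t __read_mostly cpu_to_node[NR_CPUS] = {
    [0 ... NR_CPUS-1] = NUMA_NO_NODE
};

cpumask_t __read_mostly node_to_cpumask[MAX_NUMNODES];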
> +nodemask_t __read_mostly node_online_map = { { [0] = 1UL } };
> +
> +enum numa_mode numa_status;
This should probably have been __read_mostly already in the earlier
patch introducing it.
> +#ifdef CONFIG_NUMA_EMU
> +static int numa_fake __initdata = 0;
The __initdata again wants to move, and the initializer can be omitted.
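I.e. (sketch):

static int __initdata numa_fake; /* __initdata after the type, no initializer */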
> +/* [numa=off] */
> +static int __init cf_check numa_setup(const char *opt)
> +{
> + if ( !strncmp(opt,"off",3) )
As you're correcting style violations elsewhere, please also insert the
missing spaces here and below.
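I.e. for instance

    if ( !strncmp(opt, "off", 3) )

(a space after each comma).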
> --- a/xen/include/xen/numa.h
> +++ b/xen/include/xen/numa.h
> @@ -18,4 +18,78 @@
> (((d)->vcpu != NULL && (d)->vcpu[0] != NULL) \
> ? vcpu_to_node((d)->vcpu[0]) : NUMA_NO_NODE)
>
> +/* The following content can be used when NUMA feature is enabled */
> +#ifdef CONFIG_NUMA
> +
> +extern nodeid_t cpu_to_node[NR_CPUS];
> +extern cpumask_t node_to_cpumask[];
> +
> +#define cpu_to_node(cpu) (cpu_to_node[cpu])
> +#define parent_node(node) (node)
> +#define node_to_first_cpu(node) (__ffs(node_to_cpumask[node]))
> +#define node_to_cpumask(node) (node_to_cpumask[node])
> +
> +struct node {
> + paddr_t start, end;
> +};
> +
> +extern int compute_hash_shift(struct node *nodes, int numnodes,
> + nodeid_t *nodeids);
> +
> +#define VIRTUAL_BUG_ON(x)
> +
> +/* Enumerations for NUMA status. */
> +enum numa_mode {
> + numa_on = 0,
> + numa_off,
> + /* NUMA turns on, but ACPI table is bad or disabled. */
> + numa_no_acpi,
> + /* NUMA turns on, and ACPI table works well. */
> + numa_acpi,
> +};
> +
> +extern void numa_add_cpu(int cpu);
> +extern void numa_init_array(void);
> +extern void numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn);
> +extern bool numa_enabled_with_firmware(void);
> +extern enum numa_mode numa_status;
> +
> +extern void numa_set_node(int cpu, nodeid_t node);
> +extern void setup_node_bootmem(nodeid_t nodeid, paddr_t start, paddr_t end);
> +
> +static inline void clear_node_cpumask(int cpu)
> +{
> + cpumask_clear_cpu(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
> +}
> +
> +/* Simple perfect hash to map pdx to node numbers */
> +extern int memnode_shift;
> +extern unsigned long memnodemapsize;
> +extern u8 *memnodemap;
> +
> +struct node_data {
> + unsigned long node_start_pfn;
> + unsigned long node_spanned_pages;
> +};
> +
> +extern struct node_data node_data[];
> +
> +static inline __attribute__((pure)) nodeid_t phys_to_nid(paddr_t addr)
Please use __attribute_pure__.
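I.e. presumably

static inline nodeid_t __attribute_pure__ phys_to_nid(paddr_t addr)

(using the shorthand from compiler.h rather than the open-coded attribute).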
Jan