[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH v2 45/70] x86/cpu: CFI hardening
Control Flow Integrity schemes use toolchain and optionally hardware support to help protect against call/jump/return oriented programming attacks. Use cf_check to annotate function pointer targets for the toolchain. Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> Acked-by: Jan Beulich <jbeulich@xxxxxxxx> --- xen/arch/x86/cpu/amd.c | 6 +++--- xen/arch/x86/cpu/centaur.c | 2 +- xen/arch/x86/cpu/common.c | 2 +- xen/arch/x86/cpu/cpu.h | 2 +- xen/arch/x86/cpu/hygon.c | 2 +- xen/arch/x86/cpu/intel.c | 6 +++--- xen/arch/x86/cpu/shanghai.c | 2 +- 7 files changed, 11 insertions(+), 11 deletions(-) diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c index 2d18223f20ef..4999f8be2b11 100644 --- a/xen/arch/x86/cpu/amd.c +++ b/xen/arch/x86/cpu/amd.c @@ -208,7 +208,7 @@ static void __init noinline probe_masking_msrs(void) * parameter of NULL is used to context switch to the default host state (by * the cpu bringup-code, crash path, etc). */ -static void amd_ctxt_switch_masking(const struct vcpu *next) +static void cf_check amd_ctxt_switch_masking(const struct vcpu *next) { struct cpuidmasks *these_masks = &this_cpu(cpuidmasks); const struct domain *nextd = next ? 
next->domain : NULL; @@ -634,7 +634,7 @@ void amd_log_freq(const struct cpuinfo_x86 *c) #undef FREQ } -void early_init_amd(struct cpuinfo_x86 *c) +void cf_check early_init_amd(struct cpuinfo_x86 *c) { if (c == &boot_cpu_data) amd_init_levelling(); @@ -744,7 +744,7 @@ void __init detect_zen2_null_seg_behaviour(void) } -static void init_amd(struct cpuinfo_x86 *c) +static void cf_check init_amd(struct cpuinfo_x86 *c) { u32 l, h; diff --git a/xen/arch/x86/cpu/centaur.c b/xen/arch/x86/cpu/centaur.c index 34a5bfcaeef2..eac49d78db62 100644 --- a/xen/arch/x86/cpu/centaur.c +++ b/xen/arch/x86/cpu/centaur.c @@ -48,7 +48,7 @@ static void init_c3(struct cpuinfo_x86 *c) display_cacheinfo(c); } -static void init_centaur(struct cpuinfo_x86 *c) +static void cf_check init_centaur(struct cpuinfo_x86 *c) { if (c->x86 == 6) init_c3(c); diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c index c4f07f2d1da4..6b674bf15e8b 100644 --- a/xen/arch/x86/cpu/common.c +++ b/xen/arch/x86/cpu/common.c @@ -104,7 +104,7 @@ bool __init is_forced_cpu_cap(unsigned int cap) return test_bit(cap, forced_caps); } -static void default_init(struct cpuinfo_x86 * c) +static void cf_check default_init(struct cpuinfo_x86 * c) { /* Not much we can do here... 
*/ /* Check if at least it has cpuid */ diff --git a/xen/arch/x86/cpu/cpu.h b/xen/arch/x86/cpu/cpu.h index b593bd85f04f..a228087f9157 100644 --- a/xen/arch/x86/cpu/cpu.h +++ b/xen/arch/x86/cpu/cpu.h @@ -18,7 +18,7 @@ extern void display_cacheinfo(struct cpuinfo_x86 *c); extern void detect_ht(struct cpuinfo_x86 *c); extern bool detect_extended_topology(struct cpuinfo_x86 *c); -void early_init_amd(struct cpuinfo_x86 *c); +void cf_check early_init_amd(struct cpuinfo_x86 *c); void amd_log_freq(const struct cpuinfo_x86 *c); void amd_init_lfence(struct cpuinfo_x86 *c); void amd_init_ssbd(const struct cpuinfo_x86 *c); diff --git a/xen/arch/x86/cpu/hygon.c b/xen/arch/x86/cpu/hygon.c index cdc94130dd2e..3c8516e014c3 100644 --- a/xen/arch/x86/cpu/hygon.c +++ b/xen/arch/x86/cpu/hygon.c @@ -28,7 +28,7 @@ static void hygon_get_topology(struct cpuinfo_x86 *c) c->phys_proc_id, c->cpu_core_id); } -static void init_hygon(struct cpuinfo_x86 *c) +static void cf_check init_hygon(struct cpuinfo_x86 *c) { unsigned long long value; diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c index 06b0e552cc8f..ff7c02223687 100644 --- a/xen/arch/x86/cpu/intel.c +++ b/xen/arch/x86/cpu/intel.c @@ -176,7 +176,7 @@ static void __init probe_masking_msrs(void) * parameter of NULL is used to context switch to the default host state (by * the cpu bringup-code, crash path, etc). */ -static void intel_ctxt_switch_masking(const struct vcpu *next) +static void cf_check intel_ctxt_switch_masking(const struct vcpu *next) { struct cpuidmasks *these_masks = &this_cpu(cpuidmasks); const struct domain *nextd = next ? 
next->domain : NULL; @@ -286,7 +286,7 @@ static void __init noinline intel_init_levelling(void) ctxt_switch_masking = intel_ctxt_switch_masking; } -static void early_init_intel(struct cpuinfo_x86 *c) +static void cf_check early_init_intel(struct cpuinfo_x86 *c) { u64 misc_enable, disable; @@ -500,7 +500,7 @@ static void intel_log_freq(const struct cpuinfo_x86 *c) } } -static void init_intel(struct cpuinfo_x86 *c) +static void cf_check init_intel(struct cpuinfo_x86 *c) { /* Detect the extended topology information if available */ detect_extended_topology(c); diff --git a/xen/arch/x86/cpu/shanghai.c b/xen/arch/x86/cpu/shanghai.c index 08a81f0f0c8e..95ae544f8c54 100644 --- a/xen/arch/x86/cpu/shanghai.c +++ b/xen/arch/x86/cpu/shanghai.c @@ -3,7 +3,7 @@ #include <asm/processor.h> #include "cpu.h" -static void init_shanghai(struct cpuinfo_x86 *c) +static void cf_check init_shanghai(struct cpuinfo_x86 *c) { if ( cpu_has(c, X86_FEATURE_ITSC) ) { -- 2.11.0
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.