[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH v2 15/70] xen: CFI hardening for call_rcu()
Control Flow Integrity schemes use toolchain and optionally hardware support to help protect against call/jump/return oriented programming attacks. Use cf_check to annotate function pointer targets for the toolchain. Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> Acked-by: Jan Beulich <jbeulich@xxxxxxxx> --- xen/arch/x86/hvm/mtrr.c | 2 +- xen/arch/x86/hvm/vmsi.c | 2 +- xen/arch/x86/mm/mem_sharing.c | 2 +- xen/arch/x86/percpu.c | 2 +- xen/common/domain.c | 4 ++-- xen/common/radix-tree.c | 2 +- xen/common/rcupdate.c | 2 +- xen/common/sched/core.c | 2 +- xen/xsm/flask/avc.c | 2 +- 9 files changed, 10 insertions(+), 10 deletions(-) diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c index b3ef1bf54133..42f3d8319296 100644 --- a/xen/arch/x86/hvm/mtrr.c +++ b/xen/arch/x86/hvm/mtrr.c @@ -586,7 +586,7 @@ int hvm_get_mem_pinned_cacheattr(struct domain *d, gfn_t gfn, return rc; } -static void free_pinned_cacheattr_entry(struct rcu_head *rcu) +static void cf_check free_pinned_cacheattr_entry(struct rcu_head *rcu) { xfree(container_of(rcu, struct hvm_mem_pinned_cacheattr_range, rcu)); } diff --git a/xen/arch/x86/hvm/vmsi.c b/xen/arch/x86/hvm/vmsi.c index 13e2a190b439..2889575a2035 100644 --- a/xen/arch/x86/hvm/vmsi.c +++ b/xen/arch/x86/hvm/vmsi.c @@ -441,7 +441,7 @@ static void add_msixtbl_entry(struct domain *d, list_add_rcu(&entry->list, &d->arch.hvm.msixtbl_list); } -static void free_msixtbl_entry(struct rcu_head *rcu) +static void cf_check free_msixtbl_entry(struct rcu_head *rcu) { struct msixtbl_entry *entry; diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c index 74d2869c0e6f..15e6a7ed814b 100644 --- a/xen/arch/x86/mm/mem_sharing.c +++ b/xen/arch/x86/mm/mem_sharing.c @@ -75,7 +75,7 @@ static DEFINE_SPINLOCK(shr_audit_lock); static DEFINE_RCU_READ_LOCK(shr_audit_read_lock); /* RCU delayed free of audit list entry */ -static void _free_pg_shared_info(struct rcu_head *head) +static void cf_check _free_pg_shared_info(struct 
rcu_head *head) { xfree(container_of(head, struct page_sharing_info, rcu_head)); } diff --git a/xen/arch/x86/percpu.c b/xen/arch/x86/percpu.c index eb3ba7bc8874..46460689b73d 100644 --- a/xen/arch/x86/percpu.c +++ b/xen/arch/x86/percpu.c @@ -45,7 +45,7 @@ struct free_info { }; static DEFINE_PER_CPU(struct free_info, free_info); -static void _free_percpu_area(struct rcu_head *head) +static void cf_check _free_percpu_area(struct rcu_head *head) { struct free_info *info = container_of(head, struct free_info, rcu); unsigned int cpu = info->cpu; diff --git a/xen/common/domain.c b/xen/common/domain.c index 5df0d167537b..32ec156e6f6a 100644 --- a/xen/common/domain.c +++ b/xen/common/domain.c @@ -365,7 +365,7 @@ static int __init cf_check parse_extra_guest_irqs(const char *s) } custom_param("extra_guest_irqs", parse_extra_guest_irqs); -static void _free_pirq_struct(struct rcu_head *head) +static void cf_check _free_pirq_struct(struct rcu_head *head) { xfree(container_of(head, struct pirq, rcu_head)); } @@ -1108,7 +1108,7 @@ void vcpu_end_shutdown_deferral(struct vcpu *v) } /* Complete domain destroy after RCU readers are not holding old references. */ -static void complete_domain_destroy(struct rcu_head *head) +static void cf_check complete_domain_destroy(struct rcu_head *head) { struct domain *d = container_of(head, struct domain, rcu); struct vcpu *v; diff --git a/xen/common/radix-tree.c b/xen/common/radix-tree.c index 628a7e06988f..33b47748ae49 100644 --- a/xen/common/radix-tree.c +++ b/xen/common/radix-tree.c @@ -58,7 +58,7 @@ static struct radix_tree_node *rcu_node_alloc(void *arg) return rcu_node ? 
&rcu_node->node : NULL; } -static void _rcu_node_free(struct rcu_head *head) +static void cf_check _rcu_node_free(struct rcu_head *head) { struct rcu_node *rcu_node = container_of(head, struct rcu_node, rcu_head); diff --git a/xen/common/rcupdate.c b/xen/common/rcupdate.c index f9dd2584a8b7..423d6b1d6d02 100644 --- a/xen/common/rcupdate.c +++ b/xen/common/rcupdate.c @@ -167,7 +167,7 @@ static int rsinterval = 1000; static atomic_t cpu_count = ATOMIC_INIT(0); static atomic_t pending_count = ATOMIC_INIT(0); -static void rcu_barrier_callback(struct rcu_head *head) +static void cf_check rcu_barrier_callback(struct rcu_head *head) { /* * We need a barrier making all previous writes visible to other cpus diff --git a/xen/common/sched/core.c b/xen/common/sched/core.c index cf1ba01b4d87..285de9ee2a19 100644 --- a/xen/common/sched/core.c +++ b/xen/common/sched/core.c @@ -2798,7 +2798,7 @@ static int cpu_schedule_up(unsigned int cpu) return 0; } -static void sched_res_free(struct rcu_head *head) +static void cf_check sched_res_free(struct rcu_head *head) { struct sched_resource *sr = container_of(head, struct sched_resource, rcu); diff --git a/xen/xsm/flask/avc.c b/xen/xsm/flask/avc.c index 87ea38b7a0d0..e20c16504213 100644 --- a/xen/xsm/flask/avc.c +++ b/xen/xsm/flask/avc.c @@ -276,7 +276,7 @@ int avc_get_hash_stats(struct xen_flask_hash_stats *arg) return 0; } -static void avc_node_free(struct rcu_head *rhead) +static void cf_check avc_node_free(struct rcu_head *rhead) { struct avc_node *node = container_of(rhead, struct avc_node, rhead); xfree(node); -- 2.11.0
Lists.xenproject.org is hosted with RackSpace, monitoring our technical infrastructure.