[xen staging] xen: CFI hardening for call_rcu()
commit d910f5ce7744f992a4a69bbb94d1e85a741962a9
Author: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Thu Oct 28 10:28:35 2021 +0100
Commit: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Wed Feb 23 15:33:43 2022 +0000
xen: CFI hardening for call_rcu()
Control Flow Integrity schemes use toolchain and optionally hardware support
to help protect against call/jump/return oriented programming attacks.
Use cf_check to annotate function pointer targets for the toolchain.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
xen/arch/x86/hvm/mtrr.c | 2 +-
xen/arch/x86/hvm/vmsi.c | 2 +-
xen/arch/x86/mm/mem_sharing.c | 2 +-
xen/arch/x86/percpu.c | 2 +-
xen/common/domain.c | 4 ++--
xen/common/radix-tree.c | 2 +-
xen/common/rcupdate.c | 2 +-
xen/common/sched/core.c | 2 +-
xen/xsm/flask/avc.c | 2 +-
9 files changed, 10 insertions(+), 10 deletions(-)
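Every hunk below follows the same pattern: each function whose address is passed to call_rcu() gains the cf_check annotation, so the toolchain treats it as a legitimate indirect-call target (for example, emitting an ENDBR landing pad when built with CET-IBT support). As a rough, hypothetical sketch of that pattern, consider the following; the struct example_entry type and the example_* helpers are invented for illustration and do not appear in the patch:

/*
 * Illustrative only -- not part of the patch.  A typical call_rcu() user:
 * the callback runs via a function pointer from the RCU core, so it must
 * carry the cf_check annotation to remain a valid indirect-call target.
 */
#include <xen/kernel.h>
#include <xen/rcupdate.h>
#include <xen/xmalloc.h>

struct example_entry {
    struct rcu_head rcu;
    /* ... payload ... */
};

/* Invoked indirectly by the RCU machinery once readers have drained. */
static void cf_check example_entry_free(struct rcu_head *head)
{
    xfree(container_of(head, struct example_entry, rcu));
}

static void example_entry_release(struct example_entry *e)
{
    /* Defer the free until current RCU read-side critical sections end. */
    call_rcu(&e->rcu, example_entry_free);
}

Note that only the callback definitions change: the call_rcu() call sites are untouched, since the annotation lives on the target function rather than on the call site.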
diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
index b3ef1bf541..42f3d83192 100644
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -586,7 +586,7 @@ int hvm_get_mem_pinned_cacheattr(struct domain *d, gfn_t gfn,
return rc;
}
-static void free_pinned_cacheattr_entry(struct rcu_head *rcu)
+static void cf_check free_pinned_cacheattr_entry(struct rcu_head *rcu)
{
xfree(container_of(rcu, struct hvm_mem_pinned_cacheattr_range, rcu));
}
diff --git a/xen/arch/x86/hvm/vmsi.c b/xen/arch/x86/hvm/vmsi.c
index 13e2a190b4..2889575a20 100644
--- a/xen/arch/x86/hvm/vmsi.c
+++ b/xen/arch/x86/hvm/vmsi.c
@@ -441,7 +441,7 @@ static void add_msixtbl_entry(struct domain *d,
list_add_rcu(&entry->list, &d->arch.hvm.msixtbl_list);
}
-static void free_msixtbl_entry(struct rcu_head *rcu)
+static void cf_check free_msixtbl_entry(struct rcu_head *rcu)
{
struct msixtbl_entry *entry;
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index 74d2869c0e..15e6a7ed81 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -75,7 +75,7 @@ static DEFINE_SPINLOCK(shr_audit_lock);
static DEFINE_RCU_READ_LOCK(shr_audit_read_lock);
/* RCU delayed free of audit list entry */
-static void _free_pg_shared_info(struct rcu_head *head)
+static void cf_check _free_pg_shared_info(struct rcu_head *head)
{
xfree(container_of(head, struct page_sharing_info, rcu_head));
}
diff --git a/xen/arch/x86/percpu.c b/xen/arch/x86/percpu.c
index eb3ba7bc88..46460689b7 100644
--- a/xen/arch/x86/percpu.c
+++ b/xen/arch/x86/percpu.c
@@ -45,7 +45,7 @@ struct free_info {
};
static DEFINE_PER_CPU(struct free_info, free_info);
-static void _free_percpu_area(struct rcu_head *head)
+static void cf_check _free_percpu_area(struct rcu_head *head)
{
struct free_info *info = container_of(head, struct free_info, rcu);
unsigned int cpu = info->cpu;
diff --git a/xen/common/domain.c b/xen/common/domain.c
index dacd03254c..c5716cd72f 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -1075,7 +1075,7 @@ void vcpu_end_shutdown_deferral(struct vcpu *v)
}
/* Complete domain destroy after RCU readers are not holding old references. */
-static void complete_domain_destroy(struct rcu_head *head)
+static void cf_check complete_domain_destroy(struct rcu_head *head)
{
struct domain *d = container_of(head, struct domain, rcu);
struct vcpu *v;
@@ -1798,7 +1798,7 @@ struct pirq *pirq_get_info(struct domain *d, int pirq)
return info;
}
-static void _free_pirq_struct(struct rcu_head *head)
+static void cf_check _free_pirq_struct(struct rcu_head *head)
{
xfree(container_of(head, struct pirq, rcu_head));
}
diff --git a/xen/common/radix-tree.c b/xen/common/radix-tree.c
index 628a7e0698..33b47748ae 100644
--- a/xen/common/radix-tree.c
+++ b/xen/common/radix-tree.c
@@ -58,7 +58,7 @@ static struct radix_tree_node *rcu_node_alloc(void *arg)
return rcu_node ? &rcu_node->node : NULL;
}
-static void _rcu_node_free(struct rcu_head *head)
+static void cf_check _rcu_node_free(struct rcu_head *head)
{
struct rcu_node *rcu_node =
container_of(head, struct rcu_node, rcu_head);
diff --git a/xen/common/rcupdate.c b/xen/common/rcupdate.c
index f9dd2584a8..423d6b1d6d 100644
--- a/xen/common/rcupdate.c
+++ b/xen/common/rcupdate.c
@@ -167,7 +167,7 @@ static int rsinterval = 1000;
static atomic_t cpu_count = ATOMIC_INIT(0);
static atomic_t pending_count = ATOMIC_INIT(0);
-static void rcu_barrier_callback(struct rcu_head *head)
+static void cf_check rcu_barrier_callback(struct rcu_head *head)
{
/*
* We need a barrier making all previous writes visible to other cpus
diff --git a/xen/common/sched/core.c b/xen/common/sched/core.c
index 6a1f17e94e..aaa7ef2a6f 100644
--- a/xen/common/sched/core.c
+++ b/xen/common/sched/core.c
@@ -2798,7 +2798,7 @@ static int cpu_schedule_up(unsigned int cpu)
return 0;
}
-static void sched_res_free(struct rcu_head *head)
+static void cf_check sched_res_free(struct rcu_head *head)
{
struct sched_resource *sr = container_of(head, struct sched_resource, rcu);
diff --git a/xen/xsm/flask/avc.c b/xen/xsm/flask/avc.c
index 87ea38b7a0..e20c165042 100644
--- a/xen/xsm/flask/avc.c
+++ b/xen/xsm/flask/avc.c
@@ -276,7 +276,7 @@ int avc_get_hash_stats(struct xen_flask_hash_stats *arg)
return 0;
}
-static void avc_node_free(struct rcu_head *rhead)
+static void cf_check avc_node_free(struct rcu_head *rhead)
{
struct avc_node *node = container_of(rhead, struct avc_node, rhead);
xfree(node);
--
generated by git-patchbot for /home/xen/git/xen.git#staging