[Xen-devel] [PATCH SpectreV1+L1TF v6 1/9] xen/evtchn: block speculative out-of-bound accesses
Guests can issue event channel interactions with guest-specified data.
To avoid speculative out-of-bound accesses, we use the nospec macros or
the domain_vcpu() function.
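For illustration only (not part of the patch): the pattern applied
throughout this series keeps the existing architectural bounds check and
then clamps the guest-controlled index with array_index_nospec(), so the
index stays in range even while the branch is still being speculated.
The structure member and function names in the sketch below are made up
for the example.

    #include <xen/nospec.h>           /* array_index_nospec() */

    /* Hypothetical lookup of a guest-supplied index into d->handlers. */
    static long lookup_handler(const struct domain *d, unsigned int idx)
    {
        /* Architectural check: rejects out-of-range indices on the real path. */
        if ( idx >= ARRAY_SIZE(d->handlers) )
            return -EINVAL;

        /*
         * Speculative clamp: forces idx into [0, ARRAY_SIZE(d->handlers))
         * even if the CPU speculates past the check above.
         */
        idx = array_index_nospec(idx, ARRAY_SIZE(d->handlers));

        return d->handlers[idx];
    }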
This commit is part of the SpectreV1+L1TF mitigation patch series.
Signed-off-by: Norbert Manthey <nmanthey@xxxxxxxxx>
---
Notes:
v6: drop vcpu < 0 check
use struct vcpu in evtchn_bind_vcpu
do not call domain_vcpu twice in evtchn_fifo_word_from_port
xen/common/event_channel.c | 34 +++++++++++++++++++++++-----------
xen/common/event_fifo.c | 13 ++++++++++---
xen/include/xen/event.h | 5 +++--
3 files changed, 36 insertions(+), 16 deletions(-)
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -365,11 +365,16 @@ int evtchn_bind_virq(evtchn_bind_virq_t *bind, evtchn_port_t port)
if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) )
return -EINVAL;
+ /*
+ * Make sure the guest controlled value virq is bounded even during
+ * speculative execution.
+ */
+ virq = array_index_nospec(virq, ARRAY_SIZE(v->virq_to_evtchn));
+
if ( virq_is_global(virq) && (vcpu != 0) )
return -EINVAL;
- if ( (vcpu < 0) || (vcpu >= d->max_vcpus) ||
- ((v = d->vcpu[vcpu]) == NULL) )
+ if ( (v = domain_vcpu(d, vcpu)) == NULL )
return -ENOENT;
spin_lock(&d->event_lock);
@@ -418,8 +423,7 @@ static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
int port, vcpu = bind->vcpu;
long rc = 0;
- if ( (vcpu < 0) || (vcpu >= d->max_vcpus) ||
- (d->vcpu[vcpu] == NULL) )
+ if ( domain_vcpu(d, vcpu) == NULL )
return -ENOENT;
spin_lock(&d->event_lock);
@@ -813,6 +817,13 @@ int set_global_virq_handler(struct domain *d, uint32_t virq)
if (virq >= NR_VIRQS)
return -EINVAL;
+
+ /*
+ * Make sure the guest controlled value virq is bounded even during
+ * speculative execution.
+ */
+ virq = array_index_nospec(virq, ARRAY_SIZE(global_virq_handlers));
+
if (!virq_is_global(virq))
return -EINVAL;
@@ -930,8 +941,9 @@ long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
struct domain *d = current->domain;
struct evtchn *chn;
long rc = 0;
+ struct vcpu *v;
- if ( (vcpu_id >= d->max_vcpus) || (d->vcpu[vcpu_id] == NULL) )
+ if ( (v = domain_vcpu(d, vcpu_id)) == NULL )
return -ENOENT;
spin_lock(&d->event_lock);
@@ -955,22 +967,22 @@ long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
{
case ECS_VIRQ:
if ( virq_is_global(chn->u.virq) )
- chn->notify_vcpu_id = vcpu_id;
+ chn->notify_vcpu_id = v->vcpu_id;
else
rc = -EINVAL;
break;
case ECS_UNBOUND:
case ECS_INTERDOMAIN:
- chn->notify_vcpu_id = vcpu_id;
+ chn->notify_vcpu_id = v->vcpu_id;
break;
case ECS_PIRQ:
- if ( chn->notify_vcpu_id == vcpu_id )
+ if ( chn->notify_vcpu_id == v->vcpu_id )
break;
unlink_pirq_port(chn, d->vcpu[chn->notify_vcpu_id]);
- chn->notify_vcpu_id = vcpu_id;
+ chn->notify_vcpu_id = v->vcpu_id;
pirq_set_affinity(d, chn->u.pirq.irq,
- cpumask_of(d->vcpu[vcpu_id]->processor));
- link_pirq_port(port, chn, d->vcpu[vcpu_id]);
+ cpumask_of(v->processor));
+ link_pirq_port(port, chn, v);
break;
default:
rc = -EINVAL;
diff --git a/xen/common/event_fifo.c b/xen/common/event_fifo.c
--- a/xen/common/event_fifo.c
+++ b/xen/common/event_fifo.c
@@ -33,7 +33,8 @@ static inline event_word_t *evtchn_fifo_word_from_port(const struct domain *d,
*/
smp_rmb();
- p = port / EVTCHN_FIFO_EVENT_WORDS_PER_PAGE;
+ p = array_index_nospec(port / EVTCHN_FIFO_EVENT_WORDS_PER_PAGE,
+ d->evtchn_fifo->num_evtchns);
w = port % EVTCHN_FIFO_EVENT_WORDS_PER_PAGE;
return d->evtchn_fifo->event_array[p] + w;
@@ -516,14 +517,20 @@ int evtchn_fifo_init_control(struct evtchn_init_control *init_control)
gfn = init_control->control_gfn;
offset = init_control->offset;
- if ( vcpu_id >= d->max_vcpus || !d->vcpu[vcpu_id] )
+ if ( (v = domain_vcpu(d, vcpu_id)) == NULL )
return -ENOENT;
- v = d->vcpu[vcpu_id];
/* Must not cross page boundary. */
if ( offset > (PAGE_SIZE - sizeof(evtchn_fifo_control_block_t)) )
return -EINVAL;
+ /*
+ * Make sure the guest controlled value offset is bounded even during
+ * speculative execution.
+ */
+ offset = array_index_nospec(offset,
+ PAGE_SIZE - sizeof(evtchn_fifo_control_block_t) + 1);
+
/* Must be 8-bytes aligned. */
if ( offset & (8 - 1) )
return -EINVAL;
diff --git a/xen/include/xen/event.h b/xen/include/xen/event.h
--- a/xen/include/xen/event.h
+++ b/xen/include/xen/event.h
@@ -13,6 +13,7 @@
#include <xen/smp.h>
#include <xen/softirq.h>
#include <xen/bitops.h>
+#include <xen/nospec.h>
#include <asm/event.h>
/*
@@ -103,7 +104,7 @@ void arch_evtchn_inject(struct vcpu *v);
* The first bucket is directly accessed via d->evtchn.
*/
#define group_from_port(d, p) \
- ((d)->evtchn_group[(p) / EVTCHNS_PER_GROUP])
+ array_access_nospec((d)->evtchn_group, (p) / EVTCHNS_PER_GROUP)
#define bucket_from_port(d, p) \
((group_from_port(d, p))[((p) % EVTCHNS_PER_GROUP) / EVTCHNS_PER_BUCKET])
@@ -117,7 +118,7 @@ static inline bool_t port_is_valid(struct domain *d, unsigned int p)
static inline struct evtchn *evtchn_from_port(struct domain *d, unsigned int p)
{
if ( p < EVTCHNS_PER_BUCKET )
- return &d->evtchn[p];
+ return &d->evtchn[array_index_nospec(p, EVTCHNS_PER_BUCKET)];
return bucket_from_port(d, p) + (p % EVTCHNS_PER_BUCKET);
}
--
2.7.4
Amazon Development Center Germany GmbH
Krausenstr. 38
10117 Berlin
Geschaeftsfuehrer: Christian Schlaeger, Ralf Herbrich
Ust-ID: DE 289 237 879
Eingetragen am Amtsgericht Charlottenburg HRB 149173 B
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel