|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [xen master] evtchn: convert vIRQ lock to an r/w one
commit a69583cb50ec07c114cff97f8f902258d6f175d1
Author: Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Fri Dec 11 11:52:35 2020 +0100
Commit: Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Fri Dec 11 11:52:35 2020 +0100
evtchn: convert vIRQ lock to an r/w one
There's no need to serialize all sending of vIRQ-s; all that's needed
is serialization against the closing of the respective event channels
(so far by means of a barrier). To facilitate the conversion, switch to
an ordinary write locked region in evtchn_close().
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Julien Grall <jgrall@xxxxxxxxxx>
---
xen/common/domain.c | 2 +-
xen/common/event_channel.c | 25 +++++++++++++++++--------
xen/include/xen/sched.h | 2 +-
3 files changed, 19 insertions(+), 10 deletions(-)
diff --git a/xen/common/domain.c b/xen/common/domain.c
index f748806a45..5ec48c3e19 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -160,7 +160,7 @@ struct vcpu *vcpu_create(struct domain *d, unsigned int
vcpu_id)
v->vcpu_id = vcpu_id;
v->dirty_cpu = VCPU_CPU_CLEAN;
- spin_lock_init(&v->virq_lock);
+ rwlock_init(&v->virq_lock);
tasklet_init(&v->continue_hypercall_tasklet, NULL, NULL);
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 59f95f2eb2..4a48094356 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -470,6 +470,13 @@ int evtchn_bind_virq(evtchn_bind_virq_t *bind,
evtchn_port_t port)
evtchn_write_unlock(chn);
bind->port = port;
+ /*
+ * If by any, the update of virq_to_evtchn[] would need guarding by
+ * virq_lock, but since this is the last action here, there's no strict
+ * need to acquire the lock. Hence holding event_lock isn't helpful
+ * anymore at this point, but utilize that its unlocking acts as the
+ * otherwise necessary smp_wmb() here.
+ */
write_atomic(&v->virq_to_evtchn[virq], port);
out:
@@ -656,10 +663,12 @@ int evtchn_close(struct domain *d1, int port1, bool guest)
case ECS_VIRQ:
for_each_vcpu ( d1, v )
{
- if ( read_atomic(&v->virq_to_evtchn[chn1->u.virq]) != port1 )
- continue;
- write_atomic(&v->virq_to_evtchn[chn1->u.virq], 0);
- spin_barrier(&v->virq_lock);
+ unsigned long flags;
+
+ write_lock_irqsave(&v->virq_lock, flags);
+ if ( read_atomic(&v->virq_to_evtchn[chn1->u.virq]) == port1 )
+ write_atomic(&v->virq_to_evtchn[chn1->u.virq], 0);
+ write_unlock_irqrestore(&v->virq_lock, flags);
}
break;
@@ -809,7 +818,7 @@ void send_guest_vcpu_virq(struct vcpu *v, uint32_t virq)
ASSERT(!virq_is_global(virq));
- spin_lock_irqsave(&v->virq_lock, flags);
+ read_lock_irqsave(&v->virq_lock, flags);
port = read_atomic(&v->virq_to_evtchn[virq]);
if ( unlikely(port == 0) )
@@ -824,7 +833,7 @@ void send_guest_vcpu_virq(struct vcpu *v, uint32_t virq)
}
out:
- spin_unlock_irqrestore(&v->virq_lock, flags);
+ read_unlock_irqrestore(&v->virq_lock, flags);
}
void send_guest_global_virq(struct domain *d, uint32_t virq)
@@ -843,7 +852,7 @@ void send_guest_global_virq(struct domain *d, uint32_t virq)
if ( unlikely(v == NULL) )
return;
- spin_lock_irqsave(&v->virq_lock, flags);
+ read_lock_irqsave(&v->virq_lock, flags);
port = read_atomic(&v->virq_to_evtchn[virq]);
if ( unlikely(port == 0) )
@@ -857,7 +866,7 @@ void send_guest_global_virq(struct domain *d, uint32_t virq)
}
out:
- spin_unlock_irqrestore(&v->virq_lock, flags);
+ read_unlock_irqrestore(&v->virq_lock, flags);
}
void send_guest_pirq(struct domain *d, const struct pirq *pirq)
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 31abbe7a99..faf5fda36f 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -239,7 +239,7 @@ struct vcpu
/* IRQ-safe virq_lock protects against delivering VIRQ to stale evtchn. */
evtchn_port_t virq_to_evtchn[NR_VIRQS];
- spinlock_t virq_lock;
+ rwlock_t virq_lock;
/* Tasklet for continue_hypercall_on_cpu(). */
struct tasklet continue_hypercall_tasklet;
--
generated by git-patchbot for /home/xen/git/xen.git#master
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.