[qemu-xen staging-4.16] rcu: Introduce force_rcu notifier
commit fceaefb43f059428cbedd81810c57b460cc80a79
Author: Greg Kurz <groug@xxxxxxxx>
AuthorDate: Tue Nov 9 19:35:22 2021 +0100
Commit: Michael Roth <michael.roth@xxxxxxx>
CommitDate: Tue Dec 14 14:25:55 2021 -0600

    rcu: Introduce force_rcu notifier

    The drain_call_rcu() function can be blocked as long as an RCU reader
    stays in a read-side critical section. This is typically what happens
    when a TCG vCPU is executing a busy loop. It can deadlock the QEMU
    monitor as reported in
    https://gitlab.com/qemu-project/qemu/-/issues/650 .

    This can be avoided by allowing drain_call_rcu() to enforce an RCU
    grace period. Since each reader might need to do specific actions to
    end a read-side critical section, do it with notifiers.

    Prepare ground for this by adding a notifier list to the RCU reader
    struct and use it in wait_for_readers() if drain_call_rcu() is in
    progress. An API is added for readers to register their notifiers.

    This is largely based on a draft from Paolo Bonzini.

    Suggested-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
    Signed-off-by: Greg Kurz <groug@xxxxxxxx>
    Reviewed-by: Richard Henderson <richard.henderson@xxxxxxxxxx>
    Message-Id: <20211109183523.47726-2-groug@xxxxxxxx>
    Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
    (cherry picked from commit ef149763a8fcce70b85dfda27cc1222ecf765750)
    Signed-off-by: Michael Roth <michael.roth@xxxxxxx>
---
 include/qemu/rcu.h | 15 +++++++++++++++
 util/rcu.c         | 19 +++++++++++++++++++
 2 files changed, 34 insertions(+)

diff --git a/include/qemu/rcu.h b/include/qemu/rcu.h
index 515d327cf1..e69efbd47f 100644
--- a/include/qemu/rcu.h
+++ b/include/qemu/rcu.h
@@ -27,6 +27,7 @@
 #include "qemu/thread.h"
 #include "qemu/queue.h"
 #include "qemu/atomic.h"
+#include "qemu/notify.h"
 #include "qemu/sys_membarrier.h"
 
 #ifdef __cplusplus
@@ -66,6 +67,13 @@ struct rcu_reader_data {
 
     /* Data used for registry, protected by rcu_registry_lock */
     QLIST_ENTRY(rcu_reader_data) node;
+
+    /*
+     * NotifierList used to force an RCU grace period. Accessed under
+     * rcu_registry_lock. Note that the notifier is called _outside_
+     * the thread!
+     */
+    NotifierList force_rcu;
 };
 
 extern __thread struct rcu_reader_data rcu_reader;
@@ -180,6 +188,13 @@ G_DEFINE_AUTOPTR_CLEANUP_FUNC(RCUReadAuto, rcu_read_auto_unlock)
 #define RCU_READ_LOCK_GUARD() \
     g_autoptr(RCUReadAuto) _rcu_read_auto __attribute__((unused)) = rcu_read_auto_lock()
 
+/*
+ * Force-RCU notifiers tell readers that they should exit their
+ * read-side critical section.
+ */
+void rcu_add_force_rcu_notifier(Notifier *n);
+void rcu_remove_force_rcu_notifier(Notifier *n);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/util/rcu.c b/util/rcu.c
index 13ac0f75cb..c91da9f137 100644
--- a/util/rcu.c
+++ b/util/rcu.c
@@ -46,6 +46,7 @@
 unsigned long rcu_gp_ctr = RCU_GP_LOCKED;
 
 QemuEvent rcu_gp_event;
+static int in_drain_call_rcu;
 static QemuMutex rcu_registry_lock;
 static QemuMutex rcu_sync_lock;
 
@@ -107,6 +108,8 @@ static void wait_for_readers(void)
                  * get some extra futex wakeups.
                  */
                 qatomic_set(&index->waiting, false);
+            } else if (qatomic_read(&in_drain_call_rcu)) {
+                notifier_list_notify(&index->force_rcu, NULL);
             }
         }
 
@@ -339,8 +342,10 @@ void drain_call_rcu(void)
      * assumed.
      */
 
+    qatomic_inc(&in_drain_call_rcu);
     call_rcu1(&rcu_drain.rcu, drain_rcu_callback);
     qemu_event_wait(&rcu_drain.drain_complete_event);
+    qatomic_dec(&in_drain_call_rcu);
 
     if (locked) {
         qemu_mutex_lock_iothread();
@@ -363,6 +368,20 @@ void rcu_unregister_thread(void)
     qemu_mutex_unlock(&rcu_registry_lock);
 }
 
+void rcu_add_force_rcu_notifier(Notifier *n)
+{
+    qemu_mutex_lock(&rcu_registry_lock);
+    notifier_list_add(&rcu_reader.force_rcu, n);
+    qemu_mutex_unlock(&rcu_registry_lock);
+}
+
+void rcu_remove_force_rcu_notifier(Notifier *n)
+{
+    qemu_mutex_lock(&rcu_registry_lock);
+    notifier_remove(n);
+    qemu_mutex_unlock(&rcu_registry_lock);
+}
+
 static void rcu_init_complete(void)
 {
     QemuThread thread;
--
generated by git-patchbot for /home/xen/git/qemu-xen.git#staging-4.16
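For readers wanting to see the new API in use, here is a minimal sketch of a
long-running reader thread that registers a force_rcu notifier so that
drain_call_rcu() can interrupt it. The force_rcu_cb callback, reader_thread
function and exit_request flag are hypothetical and not part of this patch;
the actual in-tree consumer is the TCG accelerator, which uses the notifier
to kick a busy-looping vCPU out of its execution loop.

    #include "qemu/osdep.h"
    #include "qemu/rcu.h"
    #include "qemu/notify.h"
    #include "qemu/atomic.h"

    /* Hypothetical flag the reader polls to know when to back off. */
    static bool exit_request;

    /*
     * Invoked from outside the reader thread (by the RCU machinery while
     * drain_call_rcu() is waiting), as the comment added to struct
     * rcu_reader_data points out, so it only does a cheap atomic store.
     */
    static void force_rcu_cb(Notifier *notifier, void *data)
    {
        qatomic_set(&exit_request, true);
    }

    /* Hypothetical reader thread body. */
    static void *reader_thread(void *opaque)
    {
        Notifier force_rcu = { .notify = force_rcu_cb };

        rcu_register_thread();
        /* Attach to this thread's rcu_reader.force_rcu notifier list. */
        rcu_add_force_rcu_notifier(&force_rcu);

        while (!qatomic_read(&exit_request)) {
            WITH_RCU_READ_LOCK_GUARD() {
                /* ... access RCU-protected data ... */
            }
            /*
             * The read-side critical section ends when the guard goes out
             * of scope, so wait_for_readers() can observe a quiescent
             * state and drain_call_rcu() makes progress.
             */
        }

        rcu_remove_force_rcu_notifier(&force_rcu);
        rcu_unregister_thread();
        return NULL;
    }

Keeping the callback down to a single atomic store matters because it runs
while rcu_registry_lock is held; the reader does the actual unwinding on its
own the next time it checks the flag between critical sections.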