[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v4 3/6] xen: add process_pending_softirqs_norcu() for keyhandlers



Some keyhandlers are calling process_pending_softirqs() while holding
a rcu_read_lock(). This is wrong, as process_pending_softirqs() might
invoke RCU callbacks, which must not happen inside an rcu_read_lock()
section.

For that purpose add process_pending_softirqs_norcu() which will not
do any rcu activity and use this for keyhandlers.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
V3:
- add RCU_SOFTIRQ to ignore in process_pending_softirqs_norcu()
  (Roger Pau Monné)
---
 xen/arch/x86/mm/p2m-ept.c                   |  2 +-
 xen/arch/x86/numa.c                         |  4 ++--
 xen/common/keyhandler.c                     |  6 +++---
 xen/common/softirq.c                        | 17 +++++++++++++----
 xen/drivers/passthrough/amd/pci_amd_iommu.c |  2 +-
 xen/drivers/passthrough/vtd/iommu.c         |  2 +-
 xen/drivers/vpci/msi.c                      |  4 ++--
 xen/include/xen/softirq.h                   |  2 ++
 8 files changed, 25 insertions(+), 14 deletions(-)

diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index eb0f0edfef..f6e813e061 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -1344,7 +1344,7 @@ static void ept_dump_p2m_table(unsigned char key)
                            c ?: ept_entry->ipat ? '!' : ' ');
 
                 if ( !(record_counter++ % 100) )
-                    process_pending_softirqs();
+                    process_pending_softirqs_norcu();
             }
             unmap_domain_page(table);
         }
diff --git a/xen/arch/x86/numa.c b/xen/arch/x86/numa.c
index f1066c59c7..cf6fcc9966 100644
--- a/xen/arch/x86/numa.c
+++ b/xen/arch/x86/numa.c
@@ -418,7 +418,7 @@ static void dump_numa(unsigned char key)
     printk("Memory location of each domain:\n");
     for_each_domain ( d )
     {
-        process_pending_softirqs();
+        process_pending_softirqs_norcu();
 
         printk("Domain %u (total: %u):\n", d->domain_id, domain_tot_pages(d));
 
@@ -462,7 +462,7 @@ static void dump_numa(unsigned char key)
             for ( j = 0; j < d->max_vcpus; j++ )
             {
                 if ( !(j & 0x3f) )
-                    process_pending_softirqs();
+                    process_pending_softirqs_norcu();
 
                 if ( vnuma->vcpu_to_vnode[j] == i )
                 {
diff --git a/xen/common/keyhandler.c b/xen/common/keyhandler.c
index 87bd145374..0d32bc4e2a 100644
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -263,7 +263,7 @@ static void dump_domains(unsigned char key)
     {
         unsigned int i;
 
-        process_pending_softirqs();
+        process_pending_softirqs_norcu();
 
         printk("General information for domain %u:\n", d->domain_id);
         printk("    refcnt=%d dying=%d pause_count=%d\n",
@@ -307,7 +307,7 @@ static void dump_domains(unsigned char key)
             for_each_sched_unit_vcpu ( unit, v )
             {
                 if ( !(v->vcpu_id & 0x3f) )
-                    process_pending_softirqs();
+                    process_pending_softirqs_norcu();
 
                 printk("    VCPU%d: CPU%d [has=%c] poll=%d "
                        "upcall_pend=%02x upcall_mask=%02x ",
@@ -337,7 +337,7 @@ static void dump_domains(unsigned char key)
         for_each_vcpu ( d, v )
         {
             if ( !(v->vcpu_id & 0x3f) )
-                process_pending_softirqs();
+                process_pending_softirqs_norcu();
 
             printk("Notifying guest %d:%d (virq %d, port %d)\n",
                    d->domain_id, v->vcpu_id,
diff --git a/xen/common/softirq.c b/xen/common/softirq.c
index b83ad96d6c..30beb27ae9 100644
--- a/xen/common/softirq.c
+++ b/xen/common/softirq.c
@@ -25,7 +25,7 @@ static softirq_handler softirq_handlers[NR_SOFTIRQS];
 static DEFINE_PER_CPU(cpumask_t, batch_mask);
 static DEFINE_PER_CPU(unsigned int, batching);
 
-static void __do_softirq(unsigned long ignore_mask)
+static void __do_softirq(unsigned long ignore_mask, bool rcu_allowed)
 {
     unsigned int i, cpu;
     unsigned long pending;
@@ -38,7 +38,7 @@ static void __do_softirq(unsigned long ignore_mask)
          */
         cpu = smp_processor_id();
 
-        if ( rcu_pending(cpu) )
+        if ( rcu_allowed && rcu_pending(cpu) )
             rcu_check_callbacks(cpu);
 
         if ( ((pending = (softirq_pending(cpu) & ~ignore_mask)) == 0)
@@ -55,13 +55,22 @@ void process_pending_softirqs(void)
 {
     ASSERT(!in_irq() && local_irq_is_enabled());
     /* Do not enter scheduler as it can preempt the calling context. */
-    __do_softirq((1ul << SCHEDULE_SOFTIRQ) | (1ul << SCHED_SLAVE_SOFTIRQ));
+    __do_softirq((1ul << SCHEDULE_SOFTIRQ) | (1ul << SCHED_SLAVE_SOFTIRQ),
+                 true);
+}
+
+void process_pending_softirqs_norcu(void)
+{
+    ASSERT(!in_irq() && local_irq_is_enabled());
+    /* Do not enter scheduler as it can preempt the calling context. */
+    __do_softirq((1ul << SCHEDULE_SOFTIRQ) | (1ul << SCHED_SLAVE_SOFTIRQ) |
+                 (1ul << RCU_SOFTIRQ), false);
 }
 
 void do_softirq(void)
 {
     ASSERT_NOT_IN_ATOMIC();
-    __do_softirq(0);
+    __do_softirq(0, true);
 }
 
 void open_softirq(int nr, softirq_handler handler)
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c 
b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index 3112653960..880d64c748 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -587,7 +587,7 @@ static void amd_dump_p2m_table_level(struct page_info* pg, 
int level,
         struct amd_iommu_pte *pde = &table_vaddr[index];
 
         if ( !(index % 2) )
-            process_pending_softirqs();
+            process_pending_softirqs_norcu();
 
         if ( !pde->pr )
             continue;
diff --git a/xen/drivers/passthrough/vtd/iommu.c 
b/xen/drivers/passthrough/vtd/iommu.c
index 3d60976dd5..c7bd8d4ada 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -2646,7 +2646,7 @@ static void vtd_dump_p2m_table_level(paddr_t pt_maddr, 
int level, paddr_t gpa,
     for ( i = 0; i < PTE_NUM; i++ )
     {
         if ( !(i % 2) )
-            process_pending_softirqs();
+            process_pending_softirqs_norcu();
 
         pte = &pt_vaddr[i];
         if ( !dma_pte_present(*pte) )
diff --git a/xen/drivers/vpci/msi.c b/xen/drivers/vpci/msi.c
index 75010762ed..1d337604cc 100644
--- a/xen/drivers/vpci/msi.c
+++ b/xen/drivers/vpci/msi.c
@@ -321,13 +321,13 @@ void vpci_dump_msi(void)
                      * holding the lock.
                      */
                     printk("unable to print all MSI-X entries: %d\n", rc);
-                    process_pending_softirqs();
+                    process_pending_softirqs_norcu();
                     continue;
                 }
             }
 
             spin_unlock(&pdev->vpci->lock);
-            process_pending_softirqs();
+            process_pending_softirqs_norcu();
         }
     }
     rcu_read_unlock(&domlist_read_lock);
diff --git a/xen/include/xen/softirq.h b/xen/include/xen/softirq.h
index b4724f5c8b..b5bf3b83b1 100644
--- a/xen/include/xen/softirq.h
+++ b/xen/include/xen/softirq.h
@@ -37,7 +37,9 @@ void cpu_raise_softirq_batch_finish(void);
  * Process pending softirqs on this CPU. This should be called periodically
  * when performing work that prevents softirqs from running in a timely manner.
  * Use this instead of do_softirq() when you do not want to be preempted.
+ * The norcu variant is to be used while holding a rcu_read_lock().
  */
 void process_pending_softirqs(void);
+void process_pending_softirqs_norcu(void);
 
 #endif /* __XEN_SOFTIRQ_H__ */
-- 
2.16.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.