[PATCH 1/5] xen/livepatch: register livepatch regions when loaded



Currently livepatch regions are registered as virtual regions only after the
livepatch has been applied.

This can lead to issues when using the pre-apply or post-revert hooks, as at
that point the livepatch is not yet in the virtual regions list.  If a
livepatch pre-apply hook contains a WARN() it would trigger a hypervisor
crash, as the code that handles the bug frame won't be able to find the
instruction pointer that triggered the #UD in any of the registered virtual
regions.
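
For illustration, the failing lookup boils down to an address-range search
over the registered regions.  A standalone sketch of that search (the types,
names and addresses below are made up for illustration, not Xen's exact
ones):

    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative stand-in for Xen's struct virtual_region: a text range. */
    struct virtual_region {
        unsigned long start, end;
        const char *name;
    };

    /*
     * In Xen this is an RCU-protected list; an array is enough here.  A
     * livepatch payload only shows up once registered as a virtual region.
     */
    static const struct virtual_region regions[] = {
        { 0x1000, 0x2000, "hypervisor core text" },
    };

    /* Mimics the lookup: NULL means no region owns the faulting address. */
    static const struct virtual_region *lookup(unsigned long ip)
    {
        for ( size_t i = 0; i < sizeof(regions) / sizeof(regions[0]); i++ )
            if ( ip >= regions[i].start && ip < regions[i].end )
                return &regions[i];

        return NULL;
    }

    int main(void)
    {
        /*
         * A WARN() inside a not-yet-registered payload: the #UD handler
         * finds no owning region for the faulting IP -> fatal crash path.
         */
        printf("payload IP known: %s\n", lookup(0x5000) ? "yes" : "no");
        return 0;
    }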

Fix this by adding the livepatch payloads as virtual regions as soon as they
are loaded, and only removing them once the payload is unloaded.  This
requires some changes to the virtual regions code, as removal of a virtual
region is no longer done in stop machine context; an RCU barrier is therefore
added to make sure there are no remaining users of a virtual region after it
has been removed from the list.
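
The resulting lifecycle is the classic RCU list pattern: readers walk the
list locklessly, additions and removals are serialized by the spinlock, and
unregistration must wait out any in-flight readers before the memory backing
the region can be freed.  A minimal userspace approximation of that pattern,
using liburcu as a stand-in for Xen's primitives (synchronize_rcu() below
plays the role rcu_barrier() plays in the patch; all identifiers are
liburcu's, not Xen's).  Build with: cc rcu_demo.c -lurcu

    #include <stdio.h>
    #include <stdlib.h>
    #include <urcu.h>           /* rcu_read_lock(), synchronize_rcu(), ... */
    #include <urcu/rculist.h>   /* cds_list_*_rcu() helpers */

    struct region {
        unsigned long start, end;
        struct cds_list_head list;
    };

    static CDS_LIST_HEAD(region_list);

    /* Reader side: what the trap handler does when resolving an IP. */
    static int ip_is_known(unsigned long ip)
    {
        struct region *r;
        int found = 0;

        rcu_read_lock();
        cds_list_for_each_entry_rcu(r, &region_list, list)
            if ( ip >= r->start && ip < r->end )
                found = 1;
        rcu_read_unlock();

        return found;
    }

    int main(void)
    {
        struct region *r = malloc(sizeof(*r));

        if ( !r )
            return 1;

        rcu_register_thread();

        /* Register at load time: readers may see the region immediately. */
        r->start = 0x1000;
        r->end   = 0x2000;
        cds_list_add_tail_rcu(&r->list, &region_list);
        printf("known after load: %d\n", ip_is_known(0x1800));    /* 1 */

        /*
         * Unregister at unload: unlink, then wait out in-flight readers
         * before the backing memory may be freed.
         */
        cds_list_del_rcu(&r->list);
        synchronize_rcu();
        free(r);
        printf("known after unload: %d\n", ip_is_known(0x1800));  /* 0 */

        rcu_unregister_thread();
        return 0;
    }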

Fixes: 8313c864fa95 ('livepatch: Implement pre-|post- apply|revert hooks')
Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
 xen/common/livepatch.c      |  5 +++--
 xen/common/virtual_region.c | 32 ++++++++------------------------
 2 files changed, 11 insertions(+), 26 deletions(-)

diff --git a/xen/common/livepatch.c b/xen/common/livepatch.c
index 1209fea2566c..3199432f11f5 100644
--- a/xen/common/livepatch.c
+++ b/xen/common/livepatch.c
@@ -942,6 +942,8 @@ static int prepare_payload(struct payload *payload,
         }
     }
 
+    register_virtual_region(region);
+
     return 0;
 }
 
@@ -1071,6 +1073,7 @@ static int build_symbol_table(struct payload *payload,
 static void free_payload(struct payload *data)
 {
     ASSERT(spin_is_locked(&payload_lock));
+    unregister_virtual_region(&data->region);
     list_del(&data->list);
     payload_cnt--;
     payload_version++;
@@ -1386,7 +1389,6 @@ static inline void apply_payload_tail(struct payload *data)
      * The applied_list is iterated by the trap code.
      */
     list_add_tail_rcu(&data->applied_list, &applied_list);
-    register_virtual_region(&data->region);
 
     data->state = LIVEPATCH_STATE_APPLIED;
 }
@@ -1432,7 +1434,6 @@ static inline void revert_payload_tail(struct payload *data)
      * The applied_list is iterated by the trap code.
      */
     list_del_rcu(&data->applied_list);
-    unregister_virtual_region(&data->region);
 
     data->reverted = true;
     data->state = LIVEPATCH_STATE_CHECKED;
diff --git a/xen/common/virtual_region.c b/xen/common/virtual_region.c
index 5f89703f513b..b444253848cf 100644
--- a/xen/common/virtual_region.c
+++ b/xen/common/virtual_region.c
@@ -23,14 +23,8 @@ static struct virtual_region core_init __initdata = {
 };
 
 /*
- * RCU locking. Additions are done either at startup (when there is only
- * one CPU) or when all CPUs are running without IRQs.
- *
- * Deletions are bit tricky. We do it when Live Patch (all CPUs running
- * without IRQs) or during bootup (when clearing the init).
- *
- * Hence we use list_del_rcu (which sports an memory fence) and a spinlock
- * on deletion.
+ * RCU locking. Modifications to the list must be done in exclusive mode, and
+ * hence need to hold the spinlock.
  *
  * All readers of virtual_region_list MUST use list_for_each_entry_rcu.
  */
@@ -58,38 +52,28 @@ const struct virtual_region *find_text_region(unsigned long addr)
 
 void register_virtual_region(struct virtual_region *r)
 {
-    ASSERT(!local_irq_is_enabled());
+    unsigned long flags;
 
+    spin_lock_irqsave(&virtual_region_lock, flags);
     list_add_tail_rcu(&r->list, &virtual_region_list);
+    spin_unlock_irqrestore(&virtual_region_lock, flags);
 }
 
 static void remove_virtual_region(struct virtual_region *r)
 {
     unsigned long flags;
 
     spin_lock_irqsave(&virtual_region_lock, flags);
     list_del_rcu(&r->list);
     spin_unlock_irqrestore(&virtual_region_lock, flags);
-    /*
-     * We do not need to invoke call_rcu.
-     *
-     * This is due to the fact that on the deletion we have made sure
-     * to use spinlocks (to guard against somebody else calling
-     * unregister_virtual_region) and list_deletion spiced with
-     * memory barrier.
-     *
-     * That protects us from corrupting the list as the readers all
-     * use list_for_each_entry_rcu which is safe against concurrent
-     * deletions.
-     */
 }
 
 void unregister_virtual_region(struct virtual_region *r)
 {
-    /* Expected to be called from Live Patch - which has IRQs disabled. */
-    ASSERT(!local_irq_is_enabled());
-
     remove_virtual_region(r);
+
+    /* Wait until no CPU can still be using the removed region. */
+    rcu_barrier();
 }
 
 #if defined(CONFIG_LIVEPATCH) && defined(CONFIG_X86)
-- 
2.43.0