
[Xen-changelog] [linux-2.6.18-xen] netback: fix shutting down the ring if it contains garbage


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-linux-2.6.18-xen <patchbot@xxxxxxx>
  • Date: Mon, 18 Feb 2013 08:55:04 +0000
  • Delivery-date: Mon, 18 Feb 2013 08:55:19 +0000
  • List-id: "Change log for Mercurial \(receive only\)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1361176894 -3600
# Node ID ff0befcaac096534fba1c17410367740ed4b1d5f
# Parent  a56b12b91c25fc922ce7eee18fb4576b23e5fdb6
netback: fix shutting down the ring if it contains garbage

Using rtnl_lock() in tasklet context is not permitted: it is a sleeping
mutex, while tasklets run in atomic (softirq) context.

This undoes the part of 1219:5108c6901b30 that split off
xenvif_carrier_off() from netif_disconnect().

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
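For reference, a minimal user-space sketch (not part of the patch) of the
pattern the fix follows: the atomic-context error path only records the
failure, and the sleeping-lock work stays in process context. The names
fatal_tx_err() and disconnect() are hypothetical stand-ins for
netbk_fatal_tx_err() and netif_disconnect(), and a pthread mutex stands in
for rtnl_lock():

/*
 * Stand-alone illustration only; not kernel code.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER; /* plays the role of rtnl_lock */
static volatile bool busted;                                 /* mirrors netif->busted */

/* "Tasklet context": must not block, so only record the failure. */
static void fatal_tx_err(void)
{
        busted = true;
        /* in the patch: disable_irq() and netif_deschedule_work() */
}

/* "Process context": safe to take the sleeping lock and tear down. */
static void disconnect(void)
{
        pthread_mutex_lock(&cfg_lock);          /* rtnl_lock() */
        if (busted)
                printf("carrier off, device downed under the lock\n");
        pthread_mutex_unlock(&cfg_lock);        /* rtnl_unlock() */
}

int main(void)
{
        fatal_tx_err();         /* ring contained garbage */
        disconnect();           /* later, from xenbus/process context */
        return 0;
}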


diff -r a56b12b91c25 -r ff0befcaac09 drivers/xen/netback/common.h
--- a/drivers/xen/netback/common.h      Fri Feb 15 09:51:33 2013 +0100
+++ b/drivers/xen/netback/common.h      Mon Feb 18 09:41:34 2013 +0100
@@ -78,6 +78,8 @@ typedef struct netif_st {
        u8 can_queue:1; /* can queue packets for receiver? */
        u8 copying_receiver:1;  /* copy packets to receiver?       */
 
+       u8 busted:1;
+
        /* Allow netif_be_start_xmit() to peek ahead in the rx request ring. */
        RING_IDX rx_req_cons_peek;
 
@@ -195,7 +197,8 @@ int netif_map(struct backend_info *be, g
 void netif_xenbus_init(void);
 
 #define netif_schedulable(netif)                               \
-       (netif_running((netif)->dev) && netback_carrier_ok(netif))
+       (likely(!(netif)->busted) &&                            \
+        netif_running((netif)->dev) && netback_carrier_ok(netif))
 
 void netif_schedule_work(netif_t *netif);
 void netif_deschedule_work(netif_t *netif);
@@ -204,9 +207,6 @@ int netif_be_start_xmit(struct sk_buff *
 struct net_device_stats *netif_be_get_stats(struct net_device *dev);
 irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
 
-/* Prevent the device from generating any further traffic. */
-void xenvif_carrier_off(netif_t *netif);
-
 static inline int netbk_can_queue(struct net_device *dev)
 {
        netif_t *netif = netdev_priv(dev);
diff -r a56b12b91c25 -r ff0befcaac09 drivers/xen/netback/interface.c
--- a/drivers/xen/netback/interface.c   Fri Feb 15 09:51:33 2013 +0100
+++ b/drivers/xen/netback/interface.c   Mon Feb 18 09:41:34 2013 +0100
@@ -56,6 +56,10 @@ module_param_named(queue_length, netbk_q
 
 static void __netif_up(netif_t *netif)
 {
+       if (netif->busted) {
+               netif->busted = 0;
+               enable_irq(netif->irq);
+       }
        enable_irq(netif->irq);
        netif_schedule_work(netif);
 }
@@ -347,23 +351,19 @@ err_rx:
        return err;
 }
 
-void xenvif_carrier_off(netif_t *netif)
-{
-       rtnl_lock();
-       netback_carrier_off(netif);
-       netif_carrier_off(netif->dev); /* discard queued packets */
-       if (netif_running(netif->dev))
-               __netif_down(netif);
-       rtnl_unlock();
-       netif_put(netif);
-}
-
 void netif_disconnect(struct backend_info *be)
 {
        netif_t *netif = be->netif;
 
-       if (netback_carrier_ok(netif))
-               xenvif_carrier_off(netif);
+       if (netback_carrier_ok(netif)) {
+               rtnl_lock();
+               netback_carrier_off(netif);
+               netif_carrier_off(netif->dev); /* discard queued packets */
+               if (netif_running(netif->dev))
+                       __netif_down(netif);
+               rtnl_unlock();
+               netif_put(netif);
+       }
 
        atomic_dec(&netif->refcnt);
        wait_event(netif->waiting_to_free, atomic_read(&netif->refcnt) == 0);
diff -r a56b12b91c25 -r ff0befcaac09 drivers/xen/netback/netback.c
--- a/drivers/xen/netback/netback.c     Fri Feb 15 09:51:33 2013 +0100
+++ b/drivers/xen/netback/netback.c     Mon Feb 18 09:41:34 2013 +0100
@@ -845,7 +845,7 @@ void netif_schedule_work(netif_t *netif)
        RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
 #endif
 
-       if (more_to_do) {
+       if (more_to_do && likely(!netif->busted)) {
                add_to_net_schedule_list_tail(netif);
                maybe_schedule_tx_action();
        }
@@ -1024,7 +1024,9 @@ static void netbk_fatal_tx_err(netif_t *
 {
        printk(KERN_ERR "%s: fatal error; disabling device\n",
               netif->dev->name);
-       xenvif_carrier_off(netif);
+       netif->busted = 1;
+       disable_irq(netif->irq);
+       netif_deschedule_work(netif);
        netif_put(netif);
 }
 
@@ -1292,6 +1294,11 @@ static void net_tx_action(unsigned long 
                if (!netif)
                        continue;
 
+               if (unlikely(netif->busted)) {
+                       netif_put(netif);
+                       continue;
+               }
+
                if (netif->tx.sring->req_prod - netif->tx.req_cons >
                    NET_TX_RING_SIZE) {
                        printk(KERN_ERR "%s: Impossible number of requests. "

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
