
Re: [Xen-devel] [PATCH RFC 1/4] xen-netback: Factor queue-specific data into queue struct.



On 16/01/14 10:23, Paul Durrant wrote:
-----Original Message-----
From: Andrew J. Bennieston [mailto:andrew.bennieston@xxxxxxxxxx]
Sent: 15 January 2014 16:23
To: xen-devel@xxxxxxxxxxxxxxxxxxxx
Cc: Ian Campbell; Wei Liu; Paul Durrant; Andrew Bennieston
Subject: [PATCH RFC 1/4] xen-netback: Factor queue-specific data into
queue struct.

From: "Andrew J. Bennieston" <andrew.bennieston@xxxxxxxxxx>

In preparation for multi-queue support in xen-netback, move the
queue-specific data from struct xenvif into struct xenvif_queue, and
update the rest of the code to use this.

Also add loops over queues where appropriate, even though only one is
configured at this point, and use alloc_netdev_mq() and the corresponding
multi-queue netif wake/start/stop functions in preparation for multiple
active queues.

Finally, implement a trivial queue selection function suitable for
ndo_select_queue(), which simply returns 0 for a single queue and uses
skb_get_rxhash() to compute the queue index otherwise.

Signed-off-by: Andrew J. Bennieston <andrew.bennieston@xxxxxxxxxx>
---
  drivers/net/xen-netback/common.h    |   66 +++--
  drivers/net/xen-netback/interface.c |  308 +++++++++++++--------
  drivers/net/xen-netback/netback.c   |  516 +++++++++++++++++-------------
-----
  drivers/net/xen-netback/xenbus.c    |   89 ++++--
  4 files changed, 556 insertions(+), 423 deletions(-)

diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index c47794b..54d2eeb 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -108,17 +108,19 @@ struct xenvif_rx_meta {
   */
  #define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)

-struct xenvif {
-       /* Unique identifier for this interface. */
-       domid_t          domid;
-       unsigned int     handle;
+struct xenvif;
+
+struct xenvif_queue { /* Per-queue data for xenvif */
+       unsigned int number; /* Queue number, 0-based */
+       char name[IFNAMSIZ+4]; /* DEVNAME-qN */

I wonder whether it would be neater to #define the name size here...


Absolutely. I'll do this in V2.

+       struct xenvif *vif; /* Parent VIF */

        /* Use NAPI for guest TX */
        struct napi_struct napi;
        /* When feature-split-event-channels = 0, tx_irq = rx_irq. */
        unsigned int tx_irq;
        /* Only used when feature-split-event-channels = 1 */
-       char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
+       char tx_irq_name[IFNAMSIZ+7]; /* DEVNAME-qN-tx */

...and the IRQ name size here. It's kind of ugly to have + some_magic_value in 
array definitions.


As above.
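
(For reference, a minimal sketch of how such constants could look; the macro
names below are illustrative, not taken from the posted patch:)

#define XENVIF_QUEUE_NAME_SIZE (IFNAMSIZ + 4) /* DEVNAME-qN */
#define XENVIF_IRQ_NAME_SIZE   (IFNAMSIZ + 7) /* DEVNAME-qN-tx / DEVNAME-qN-rx */

	char name[XENVIF_QUEUE_NAME_SIZE];
	char tx_irq_name[XENVIF_IRQ_NAME_SIZE];
	char rx_irq_name[XENVIF_IRQ_NAME_SIZE];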

        struct xen_netif_tx_back_ring tx;
        struct sk_buff_head tx_queue;
        struct page *mmap_pages[MAX_PENDING_REQS];
@@ -140,7 +142,7 @@ struct xenvif {
        /* When feature-split-event-channels = 0, tx_irq = rx_irq. */
        unsigned int rx_irq;
        /* Only used when feature-split-event-channels = 1 */
-       char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
+       char rx_irq_name[IFNAMSIZ+7]; /* DEVNAME-qN-rx */
        struct xen_netif_rx_back_ring rx;
        struct sk_buff_head rx_queue;

@@ -150,14 +152,27 @@ struct xenvif {
         */
        RING_IDX rx_req_cons_peek;

-       /* This array is allocated seperately as it is large */
-       struct gnttab_copy *grant_copy_op;
+       struct gnttab_copy grant_copy_op[MAX_GRANT_COPY_OPS];

I see you brought this back in line, which is reasonable as the queue is now a 
separately allocated struct.


Indeed; trying to keep the number of separate allocs/frees to a minimum,
for everybody's sanity!
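
(For a rough sense of scale: MAX_GRANT_COPY_OPS is MAX_SKB_FRAGS *
XEN_NETIF_RX_RING_SIZE, which on a typical 4 KiB-page build -- MAX_SKB_FRAGS
of 17 and a ring of 256 entries -- comes to 4352 grant-copy slots per queue,
so folding the array into the per-queue struct saves a sizeable extra
allocation and free for every queue.)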


        /* We create one meta structure per ring request we consume, so
         * the maximum number is the same as the ring size.
         */
        struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];

+       /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
+       unsigned long   credit_bytes;
+       unsigned long   credit_usec;
+       unsigned long   remaining_credit;
+       struct timer_list credit_timeout;
+       u64 credit_window_start;
+
+};
+
+struct xenvif {
+       /* Unique identifier for this interface. */
+       domid_t          domid;
+       unsigned int     handle;
+
        u8               fe_dev_addr[6];

        /* Frontend feature information. */
@@ -171,12 +186,9 @@ struct xenvif {
        /* Internal feature information. */
        u8 can_queue:1;     /* can queue packets for receiver? */

-       /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
-       unsigned long   credit_bytes;
-       unsigned long   credit_usec;
-       unsigned long   remaining_credit;
-       struct timer_list credit_timeout;
-       u64 credit_window_start;
+       /* Queues */
+       unsigned int num_queues;
+       struct xenvif_queue *queues;

        /* Statistics */

Do stats need to be per-queue (and then possibly aggregated at query time)?


Aside from the potential to see the stats for each queue, which may be
useful in some limited circumstances for performance testing or
debugging, I don't see what this buys us...
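
(If per-queue counters were wanted, a rough sketch of aggregating them at
query time might look like this; the xenvif_stats struct and the per-queue
"stats" field are hypothetical, not part of this patch:)

struct xenvif_stats {
	unsigned long rx_bytes, rx_packets;
	unsigned long tx_bytes, tx_packets;
};

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned long rx_b = 0, rx_p = 0, tx_b = 0, tx_p = 0;
	unsigned int i;

	/* Sum the per-queue counters into the aggregate netdev stats. */
	for (i = 0; i < vif->num_queues; ++i) {
		rx_b += vif->queues[i].stats.rx_bytes;
		rx_p += vif->queues[i].stats.rx_packets;
		tx_b += vif->queues[i].stats.tx_bytes;
		tx_p += vif->queues[i].stats.tx_packets;
	}

	dev->stats.rx_bytes   = rx_b;
	dev->stats.rx_packets = rx_p;
	dev->stats.tx_bytes   = tx_b;
	dev->stats.tx_packets = tx_p;

	return &dev->stats;
}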

        unsigned long rx_gso_checksum_fixup;
@@ -194,7 +206,9 @@ struct xenvif *xenvif_alloc(struct device *parent,
                            domid_t domid,
                            unsigned int handle);

-int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
+void xenvif_init_queue(struct xenvif_queue *queue);
+
+int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
                   unsigned long rx_ring_ref, unsigned int tx_evtchn,
                   unsigned int rx_evtchn);
  void xenvif_disconnect(struct xenvif *vif);
@@ -205,23 +219,23 @@ void xenvif_xenbus_fini(void);

  int xenvif_schedulable(struct xenvif *vif);

-int xenvif_rx_ring_full(struct xenvif *vif);
+int xenvif_rx_ring_full(struct xenvif_queue *queue);

-int xenvif_must_stop_queue(struct xenvif *vif);
+int xenvif_must_stop_queue(struct xenvif_queue *queue);

  /* (Un)Map communication rings. */
-void xenvif_unmap_frontend_rings(struct xenvif *vif);
-int xenvif_map_frontend_rings(struct xenvif *vif,
+void xenvif_unmap_frontend_rings(struct xenvif_queue *queue);
+int xenvif_map_frontend_rings(struct xenvif_queue *queue,
                              grant_ref_t tx_ring_ref,
                              grant_ref_t rx_ring_ref);

  /* Check for SKBs from frontend and schedule backend processing */
-void xenvif_check_rx_xenvif(struct xenvif *vif);
+void xenvif_check_rx_xenvif(struct xenvif_queue *queue);

  /* Queue an SKB for transmission to the frontend */
-void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
-void xenvif_queue_tx_skb(struct xenvif_queue *queue, struct sk_buff *skb);
  /* Notify xenvif that ring now has space to send an skb to the frontend */
-void xenvif_notify_tx_completion(struct xenvif *vif);
+void xenvif_notify_tx_completion(struct xenvif_queue *queue);

  /* Prevent the device from generating any further traffic. */
  void xenvif_carrier_off(struct xenvif *vif);
@@ -229,11 +243,15 @@ void xenvif_carrier_off(struct xenvif *vif);
  /* Returns number of ring slots required to send an skb to the frontend */
  unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);

-int xenvif_tx_action(struct xenvif *vif, int budget);
-void xenvif_rx_action(struct xenvif *vif);
+int xenvif_tx_action(struct xenvif_queue *queue, int budget);
+void xenvif_rx_action(struct xenvif_queue *queue);

  int xenvif_kthread(void *data);

+int xenvif_poll(struct napi_struct *napi, int budget);
+
+void xenvif_carrier_on(struct xenvif *vif);
+
  extern bool separate_tx_rx_irq;

  #endif /* __XEN_NETBACK__COMMON_H__ */
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index fff8cdd..0113324 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -34,7 +34,6 @@
  #include <linux/ethtool.h>
  #include <linux/rtnetlink.h>
  #include <linux/if_vlan.h>
-#include <linux/vmalloc.h>

  #include <xen/events.h>
  #include <asm/xen/hypercall.h>
@@ -42,32 +41,50 @@
  #define XENVIF_QUEUE_LENGTH 32
  #define XENVIF_NAPI_WEIGHT  64

+static inline void xenvif_wake_queue(struct xenvif_queue *queue)
+{
+       netif_tx_wake_queue(
+                       netdev_get_tx_queue(queue->vif->dev, queue->number));

Might be neater to declare some stack variables for dev and number to avoid the 
long line.

+}
+
+static inline void xenvif_stop_queue(struct xenvif_queue *queue)
+{
+       netif_tx_stop_queue(
+                       netdev_get_tx_queue(queue->vif->dev, queue->number));

Ditto.

+}
+
+static inline int xenvif_queue_stopped(struct xenvif_queue *queue)
+{
+       return netif_tx_queue_stopped(
+                       netdev_get_tx_queue(queue->vif->dev, queue->number));

Ditto.

+}
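
(Purely to illustrate the suggestion above, one way a helper might read with
local variables for the device and queue number:)

static inline void xenvif_wake_queue(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;
	unsigned int id = queue->number;

	netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
}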
+
  int xenvif_schedulable(struct xenvif *vif)
  {
        return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
  }

-static int xenvif_rx_schedulable(struct xenvif *vif)
+static int xenvif_rx_schedulable(struct xenvif_queue *queue)
  {
-       return xenvif_schedulable(vif) && !xenvif_rx_ring_full(vif);
+       return xenvif_schedulable(queue->vif) && !xenvif_rx_ring_full(queue);

I guess your patches have not been re-based onto net-next? xenvif_ring_full() 
and xenvif_rx_schedulable() went away in c/s 
ca2f09f2b2c6c25047cfc545d057c4edfcfe561c (xen-netback: improve 
guest-receive-side flow control).

Can you rebase? Eventual patches will need to go into net-next.

They haven't yet; I wanted to get some comments on these, but I will
definitely rebase onto net-next in the near future.


   Paul

  }

  static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
  {
-       struct xenvif *vif = dev_id;
+       struct xenvif_queue *queue = dev_id;

-       if (RING_HAS_UNCONSUMED_REQUESTS(&vif->tx))
-               napi_schedule(&vif->napi);
+       if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
+               napi_schedule(&queue->napi);

        return IRQ_HANDLED;
  }

-static int xenvif_poll(struct napi_struct *napi, int budget)
+int xenvif_poll(struct napi_struct *napi, int budget)
  {
-       struct xenvif *vif = container_of(napi, struct xenvif, napi);
+       struct xenvif_queue *queue = container_of(napi, struct xenvif_queue, napi);
        int work_done;

-       work_done = xenvif_tx_action(vif, budget);
+       work_done = xenvif_tx_action(queue, budget);

        if (work_done < budget) {
                int more_to_do = 0;
@@ -91,7 +108,7 @@ static int xenvif_poll(struct napi_struct *napi, int budget)

                local_irq_save(flags);

-               RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
+               RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);
                if (!more_to_do)
                        __napi_complete(napi);

@@ -103,10 +120,10 @@ static int xenvif_poll(struct napi_struct *napi, int budget)

  static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
  {
-       struct xenvif *vif = dev_id;
+       struct xenvif_queue *queue = dev_id;

-       if (xenvif_rx_schedulable(vif))
-               netif_wake_queue(vif->dev);
+       if (xenvif_rx_schedulable(queue))
+               xenvif_wake_queue(queue);

        return IRQ_HANDLED;
  }
@@ -119,27 +136,56 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
  }

+static u16 select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+       struct xenvif *vif = netdev_priv(dev);
+       u32 hash;
+       u16 queue_index;
+
+       /* First, check if there is only one queue */
+       if (vif->num_queues == 1) {
+               queue_index = 0;
+       }

Style.

+       else {
+               /* Use skb_get_rxhash to obtain an L4 hash if available */
+               hash = skb_get_rxhash(skb);
+               queue_index = (u16) (((u64)hash * vif->num_queues) >> 32);
+       }
+
+       return queue_index;
+}
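
(On the style point above: kernel style keeps "} else {" on one line. A
sketch of the same selection logic in that form, with a comment on what the
multiply-and-shift is doing:)

	if (vif->num_queues == 1) {
		queue_index = 0;
	} else {
		/* skb_get_rxhash() returns a 32-bit hash; multiplying by
		 * num_queues and keeping the top 32 bits maps the hash
		 * uniformly onto the range [0, num_queues).
		 */
		hash = skb_get_rxhash(skb);
		queue_index = (u16)(((u64)hash * vif->num_queues) >> 32);
	}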
+
  static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
  {
        struct xenvif *vif = netdev_priv(dev);
+       u16 queue_index = 0;
+       struct xenvif_queue *queue = NULL;

        BUG_ON(skb->dev != dev);

-       /* Drop the packet if vif is not ready */
-       if (vif->task == NULL)
+       /* Drop the packet if the queues are not set up */
+       if (vif->num_queues < 1 || vif->queues == NULL)
+               goto drop;

Just do the former test and ASSERT the second.
+
+       /* Obtain the queue to be used to transmit this packet */
+       queue_index = skb_get_queue_mapping(skb);

Personally, I'd stick a range check here.


OK.

Andrew
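
(A possible shape for the suggested range check, for illustration only:)

	queue_index = skb_get_queue_mapping(skb);
	if (unlikely(queue_index >= vif->num_queues))
		goto drop;	/* stale or out-of-range mapping */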

+       queue = &vif->queues[queue_index];
+
+       /* Drop the packet if queue is not ready */
+       if (queue->task == NULL)
                goto drop;

        /* Drop the packet if the target domain has no receive buffers. */
-       if (!xenvif_rx_schedulable(vif))
+       if (!xenvif_rx_schedulable(queue))
                goto drop;

        /* Reserve ring slots for the worst-case number of fragments. */
-       vif->rx_req_cons_peek += xenvif_count_skb_slots(vif, skb);
+       queue->rx_req_cons_peek += xenvif_count_skb_slots(vif, skb);

-       if (vif->can_queue && xenvif_must_stop_queue(vif))
-               netif_stop_queue(dev);
+       if (vif->can_queue && xenvif_must_stop_queue(queue))
+               xenvif_stop_queue(queue);

-       xenvif_queue_tx_skb(vif, skb);
+       xenvif_queue_tx_skb(queue, skb);

        return NETDEV_TX_OK;

@@ -149,10 +195,10 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
  }

-void xenvif_notify_tx_completion(struct xenvif *vif)
+void xenvif_notify_tx_completion(struct xenvif_queue *queue)
  {
-       if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif))
-               netif_wake_queue(vif->dev);
+       if (xenvif_queue_stopped(queue) && xenvif_rx_schedulable(queue))
+               xenvif_wake_queue(queue);
  }

  static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
@@ -163,20 +209,30 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)

  static void xenvif_up(struct xenvif *vif)
  {
-       napi_enable(&vif->napi);
-       enable_irq(vif->tx_irq);
-       if (vif->tx_irq != vif->rx_irq)
-               enable_irq(vif->rx_irq);
-       xenvif_check_rx_xenvif(vif);
+       struct xenvif_queue *queue = NULL;
+       unsigned int queue_index;
+       for (queue_index = 0; queue_index < vif->num_queues; ++queue_index) {
+               queue = &vif->queues[queue_index];
+               napi_enable(&queue->napi);
+               enable_irq(queue->tx_irq);
+               if (queue->tx_irq != queue->rx_irq)
+                       enable_irq(queue->rx_irq);
+               xenvif_check_rx_xenvif(queue);
+       }
  }

  static void xenvif_down(struct xenvif *vif)
  {
-       napi_disable(&vif->napi);
-       disable_irq(vif->tx_irq);
-       if (vif->tx_irq != vif->rx_irq)
-               disable_irq(vif->rx_irq);
-       del_timer_sync(&vif->credit_timeout);
+       struct xenvif_queue *queue = NULL;
+       unsigned int queue_index;
+       for (queue_index = 0; queue_index < vif->num_queues; ++queue_index) {
+               queue = &vif->queues[queue_index];
+               napi_disable(&queue->napi);
+               disable_irq(queue->tx_irq);
+               if (queue->tx_irq != queue->rx_irq)
+                       disable_irq(queue->rx_irq);
+               del_timer_sync(&queue->credit_timeout);
+       }
  }

  static int xenvif_open(struct net_device *dev)
@@ -184,7 +240,7 @@ static int xenvif_open(struct net_device *dev)
        struct xenvif *vif = netdev_priv(dev);
        if (netif_carrier_ok(dev))
                xenvif_up(vif);
-       netif_start_queue(dev);
+       netif_tx_start_all_queues(dev);
        return 0;
  }

@@ -193,7 +249,7 @@ static int xenvif_close(struct net_device *dev)
        struct xenvif *vif = netdev_priv(dev);
        if (netif_carrier_ok(dev))
                xenvif_down(vif);
-       netif_stop_queue(dev);
+       netif_tx_stop_all_queues(dev);
        return 0;
  }

@@ -287,6 +343,7 @@ static const struct net_device_ops xenvif_netdev_ops = {
        .ndo_fix_features = xenvif_fix_features,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr   = eth_validate_addr,
+       .ndo_select_queue = select_queue,
  };

  struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
@@ -296,10 +353,9 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
        struct net_device *dev;
        struct xenvif *vif;
        char name[IFNAMSIZ] = {};
-       int i;

        snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
-       dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup);
+       dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup, 1);
        if (dev == NULL) {
                pr_warn("Could not allocate netdev for %s\n", name);
                return ERR_PTR(-ENOMEM);
@@ -309,24 +365,15 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,

        vif = netdev_priv(dev);

-       vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
-                                    MAX_GRANT_COPY_OPS);
-       if (vif->grant_copy_op == NULL) {
-               pr_warn("Could not allocate grant copy space for %s\n", name);
-               free_netdev(dev);
-               return ERR_PTR(-ENOMEM);
-       }
-
        vif->domid  = domid;
        vif->handle = handle;
        vif->can_sg = 1;
        vif->ip_csum = 1;
        vif->dev = dev;

-       vif->credit_bytes = vif->remaining_credit = ~0UL;
-       vif->credit_usec  = 0UL;
-       init_timer(&vif->credit_timeout);
-       vif->credit_window_start = get_jiffies_64();
+       /* Start out with no queues */
+       vif->num_queues = 0;
+       vif->queues = NULL;

        dev->netdev_ops      = &xenvif_netdev_ops;
        dev->hw_features = NETIF_F_SG |
@@ -337,16 +384,6 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,

        dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

-       skb_queue_head_init(&vif->rx_queue);
-       skb_queue_head_init(&vif->tx_queue);
-
-       vif->pending_cons = 0;
-       vif->pending_prod = MAX_PENDING_REQS;
-       for (i = 0; i < MAX_PENDING_REQS; i++)
-               vif->pending_ring[i] = i;
-       for (i = 0; i < MAX_PENDING_REQS; i++)
-               vif->mmap_pages[i] = NULL;
-
        /*
         * Initialise a dummy MAC address. We choose the numerically
         * largest non-broadcast address to prevent the address getting
@@ -356,8 +393,6 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
        memset(dev->dev_addr, 0xFF, ETH_ALEN);
        dev->dev_addr[0] &= ~0x01;

-       netif_napi_add(dev, &vif->napi, xenvif_poll, XENVIF_NAPI_WEIGHT);
-
        netif_carrier_off(dev);

        err = register_netdev(dev);
@@ -374,84 +409,110 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
        return vif;
  }

-int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
+void xenvif_init_queue(struct xenvif_queue *queue)
+{
+       int i;
+
+       queue->credit_bytes = queue->remaining_credit = ~0UL;
+       queue->credit_usec  = 0UL;
+       init_timer(&queue->credit_timeout);
+       queue->credit_window_start = get_jiffies_64();
+
+       skb_queue_head_init(&queue->rx_queue);
+       skb_queue_head_init(&queue->tx_queue);
+
+       queue->pending_cons = 0;
+       queue->pending_prod = MAX_PENDING_REQS;
+       for (i = 0; i < MAX_PENDING_REQS; ++i) {
+               queue->pending_ring[i] = i;
+               queue->mmap_pages[i] = NULL;
+       }
+
+       netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
+                       XENVIF_NAPI_WEIGHT);
+}
+
+void xenvif_carrier_on(struct xenvif *vif)
+{
+       rtnl_lock();
+       if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
+               dev_set_mtu(vif->dev, ETH_DATA_LEN);
+       netdev_update_features(vif->dev);
+       netif_carrier_on(vif->dev);
+       if (netif_running(vif->dev))
+               xenvif_up(vif);
+       rtnl_unlock();
+}
+
+int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
                   unsigned long rx_ring_ref, unsigned int tx_evtchn,
                   unsigned int rx_evtchn)
  {
        struct task_struct *task;
        int err = -ENOMEM;

-       BUG_ON(vif->tx_irq);
-       BUG_ON(vif->task);
+       BUG_ON(queue->tx_irq);
+       BUG_ON(queue->task);

-       err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
+       err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref);
        if (err < 0)
                goto err;

        if (tx_evtchn == rx_evtchn) {
                /* feature-split-event-channels == 0 */
                err = bind_interdomain_evtchn_to_irqhandler(
-                       vif->domid, tx_evtchn, xenvif_interrupt, 0,
-                       vif->dev->name, vif);
+                       queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
+                       queue->name, queue);
                if (err < 0)
                        goto err_unmap;
-               vif->tx_irq = vif->rx_irq = err;
-               disable_irq(vif->tx_irq);
+               queue->tx_irq = queue->rx_irq = err;
+               disable_irq(queue->tx_irq);
        } else {
                /* feature-split-event-channels == 1 */
-               snprintf(vif->tx_irq_name, sizeof(vif->tx_irq_name),
-                        "%s-tx", vif->dev->name);
+               snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
+                        "%s-tx", queue->name);
                err = bind_interdomain_evtchn_to_irqhandler(
-                       vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
-                       vif->tx_irq_name, vif);
+                       queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
+                       queue->tx_irq_name, queue);
                if (err < 0)
                        goto err_unmap;
-               vif->tx_irq = err;
-               disable_irq(vif->tx_irq);
+               queue->tx_irq = err;
+               disable_irq(queue->tx_irq);

-               snprintf(vif->rx_irq_name, sizeof(vif->rx_irq_name),
-                        "%s-rx", vif->dev->name);
+               snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
+                        "%s-rx", queue->name);
                err = bind_interdomain_evtchn_to_irqhandler(
-                       vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
-                       vif->rx_irq_name, vif);
+                       queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
+                       queue->rx_irq_name, queue);
                if (err < 0)
                        goto err_tx_unbind;
-               vif->rx_irq = err;
-               disable_irq(vif->rx_irq);
+               queue->rx_irq = err;
+               disable_irq(queue->rx_irq);
        }

-       init_waitqueue_head(&vif->wq);
+       init_waitqueue_head(&queue->wq);
        task = kthread_create(xenvif_kthread,
-                             (void *)vif, "%s", vif->dev->name);
+                             (void *)queue, "%s", queue->name);
        if (IS_ERR(task)) {
-               pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
+               pr_warn("Could not allocate kthread for %s\n", queue->name);
                err = PTR_ERR(task);
                goto err_rx_unbind;
        }

-       vif->task = task;
-
-       rtnl_lock();
-       if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
-               dev_set_mtu(vif->dev, ETH_DATA_LEN);
-       netdev_update_features(vif->dev);
-       netif_carrier_on(vif->dev);
-       if (netif_running(vif->dev))
-               xenvif_up(vif);
-       rtnl_unlock();
+       queue->task = task;

-       wake_up_process(vif->task);
+       wake_up_process(queue->task);

        return 0;

  err_rx_unbind:
-       unbind_from_irqhandler(vif->rx_irq, vif);
-       vif->rx_irq = 0;
+       unbind_from_irqhandler(queue->rx_irq, queue);
+       queue->rx_irq = 0;
  err_tx_unbind:
-       unbind_from_irqhandler(vif->tx_irq, vif);
-       vif->tx_irq = 0;
+       unbind_from_irqhandler(queue->tx_irq, queue);
+       queue->tx_irq = 0;
  err_unmap:
-       xenvif_unmap_frontend_rings(vif);
+       xenvif_unmap_frontend_rings(queue);
  err:
        module_put(THIS_MODULE);
        return err;
@@ -470,34 +531,51 @@ void xenvif_carrier_off(struct xenvif *vif)

  void xenvif_disconnect(struct xenvif *vif)
  {
+       struct xenvif_queue *queue = NULL;
+       unsigned int queue_index;
+
        if (netif_carrier_ok(vif->dev))
                xenvif_carrier_off(vif);

-       if (vif->task) {
-               kthread_stop(vif->task);
-               vif->task = NULL;
-       }
+       for (queue_index = 0; queue_index < vif->num_queues; ++queue_index) {
+               queue = &vif->queues[queue_index];

-       if (vif->tx_irq) {
-               if (vif->tx_irq == vif->rx_irq)
-                       unbind_from_irqhandler(vif->tx_irq, vif);
-               else {
-                       unbind_from_irqhandler(vif->tx_irq, vif);
-                       unbind_from_irqhandler(vif->rx_irq, vif);
+               if (queue->task) {
+                       kthread_stop(queue->task);
+                       queue->task = NULL;
                }
-               vif->tx_irq = 0;
+
+               if (queue->tx_irq) {
+                       if (queue->tx_irq == queue->rx_irq)
+                               unbind_from_irqhandler(queue->tx_irq, queue);
+                       else {
+                               unbind_from_irqhandler(queue->tx_irq, queue);
+                               unbind_from_irqhandler(queue->rx_irq, queue);
+                       }
+                       queue->tx_irq = 0;
+               }
+
+               xenvif_unmap_frontend_rings(queue);
        }

-       xenvif_unmap_frontend_rings(vif);
+
  }

  void xenvif_free(struct xenvif *vif)
  {
-       netif_napi_del(&vif->napi);
+       struct xenvif_queue *queue = NULL;
+       unsigned int queue_index;

-       unregister_netdev(vif->dev);
+       for(queue_index = 0; queue_index < vif->num_queues; ++queue_index) {
+               netif_napi_del(&queue->napi);
+       }

-       vfree(vif->grant_copy_op);
+       /* Free the array of queues */
+       vfree(vif->queues);
+       vif->num_queues = 0;
+       vif->queues = 0;
+
+       unregister_netdev(vif->dev);
        free_netdev(vif->dev);

        module_put(THIS_MODULE);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 7842555..586e741 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -76,38 +76,38 @@ module_param(fatal_skb_slots, uint, 0444);
   * one or more merged tx requests, otherwise it is the continuation of
   * previous tx request.
   */
-static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx)
+static inline int pending_tx_is_head(struct xenvif_queue *queue, RING_IDX idx)
  {
-       return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
+       return queue->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
  }

-static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
+static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
                               u8 status);

-static void make_tx_response(struct xenvif *vif,
+static void make_tx_response(struct xenvif_queue *queue,
                             struct xen_netif_tx_request *txp,
                             s8       st);

-static inline int tx_work_todo(struct xenvif *vif);
-static inline int rx_work_todo(struct xenvif *vif);
+static inline int tx_work_todo(struct xenvif_queue *queue);
+static inline int rx_work_todo(struct xenvif_queue *queue);

-static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
+static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
                                             u16      id,
                                             s8       st,
                                             u16      offset,
                                             u16      size,
                                             u16      flags);

-static inline unsigned long idx_to_pfn(struct xenvif *vif,
+static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
                                       u16 idx)
  {
-       return page_to_pfn(vif->mmap_pages[idx]);
+       return page_to_pfn(queue->mmap_pages[idx]);
  }

-static inline unsigned long idx_to_kaddr(struct xenvif *vif,
+static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
                                         u16 idx)
  {
-       return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
+       return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
  }

  /* This is a miniumum size for the linear area to avoid lots of
@@ -132,10 +132,10 @@ static inline pending_ring_idx_t pending_index(unsigned i)
        return i & (MAX_PENDING_REQS-1);
  }

-static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
+static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
  {
        return MAX_PENDING_REQS -
-               vif->pending_prod + vif->pending_cons;
+               queue->pending_prod + queue->pending_cons;
  }

  static int max_required_rx_slots(struct xenvif *vif)
@@ -149,25 +149,25 @@ static int max_required_rx_slots(struct xenvif *vif)
        return max;
  }

-int xenvif_rx_ring_full(struct xenvif *vif)
+int xenvif_rx_ring_full(struct xenvif_queue *queue)
  {
-       RING_IDX peek   = vif->rx_req_cons_peek;
-       RING_IDX needed = max_required_rx_slots(vif);
+       RING_IDX peek   = queue->rx_req_cons_peek;
+       RING_IDX needed = max_required_rx_slots(queue->vif);

-       return ((vif->rx.sring->req_prod - peek) < needed) ||
-              ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
+       return ((queue->rx.sring->req_prod - peek) < needed) ||
+              ((queue->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
  }

-int xenvif_must_stop_queue(struct xenvif *vif)
+int xenvif_must_stop_queue(struct xenvif_queue *queue)
  {
-       if (!xenvif_rx_ring_full(vif))
+       if (!xenvif_rx_ring_full(queue))
                return 0;

-       vif->rx.sring->req_event = vif->rx_req_cons_peek +
-               max_required_rx_slots(vif);
+       queue->rx.sring->req_event = queue->rx_req_cons_peek +
+               max_required_rx_slots(queue->vif);
        mb(); /* request notification /then/ check the queue */

-       return xenvif_rx_ring_full(vif);
+       return xenvif_rx_ring_full(queue);
  }

  /*
@@ -306,13 +306,13 @@ struct netrx_pending_operations {
        grant_ref_t copy_gref;
  };

-static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
+static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
                                                 struct netrx_pending_operations *npo)
  {
        struct xenvif_rx_meta *meta;
        struct xen_netif_rx_request *req;

-       req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
+       req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);

        meta = npo->meta + npo->meta_prod++;
        meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
@@ -330,7 +330,7 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
   * Set up the grant operations for this fragment. If it's a flipping
   * interface, we also set up the unmap request from here.
   */
-static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
+static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
                                 struct netrx_pending_operations *npo,
                                 struct page *page, unsigned long size,
                                 unsigned long offset, int *head)
@@ -365,7 +365,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
                         */
                        BUG_ON(*head);

-                       meta = get_next_rx_buffer(vif, npo);
+                       meta = get_next_rx_buffer(queue, npo);
                }

                if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
@@ -379,7 +379,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
                copy_gop->source.u.gmfn = virt_to_mfn(page_address(page));
                copy_gop->source.offset = offset;

-               copy_gop->dest.domid = vif->domid;
+               copy_gop->dest.domid = queue->vif->domid;
                copy_gop->dest.offset = npo->copy_off;
                copy_gop->dest.u.ref = npo->copy_gref;

@@ -404,8 +404,8 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
                else
                        gso_type = XEN_NETIF_GSO_TYPE_NONE;

-               if (*head && ((1 << gso_type) & vif->gso_mask))
-                       vif->rx.req_cons++;
+               if (*head && ((1 << gso_type) & queue->vif->gso_mask))
+                       queue->rx.req_cons++;

                *head = 0; /* There must be something in this buffer now. */

@@ -425,7 +425,8 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
   * frontend-side LRO).
   */
  static int xenvif_gop_skb(struct sk_buff *skb,
-                         struct netrx_pending_operations *npo)
+                         struct netrx_pending_operations *npo,
+                         struct xenvif_queue *queue)
  {
        struct xenvif *vif = netdev_priv(skb->dev);
        int nr_frags = skb_shinfo(skb)->nr_frags;
@@ -453,7 +454,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,

        /* Set up a GSO prefix descriptor, if necessary */
        if ((1 << gso_type) & vif->gso_prefix_mask) {
-               req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
+               req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
                meta = npo->meta + npo->meta_prod++;
                meta->gso_type = gso_type;
                meta->gso_size = gso_size;
@@ -461,7 +462,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
                meta->id = req->id;
        }

-       req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
+       req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
        meta = npo->meta + npo->meta_prod++;

        if ((1 << gso_type) & vif->gso_mask) {
@@ -485,13 +486,13 @@ static int xenvif_gop_skb(struct sk_buff *skb,
                if (data + len > skb_tail_pointer(skb))
                        len = skb_tail_pointer(skb) - data;

-               xenvif_gop_frag_copy(vif, skb, npo,
+               xenvif_gop_frag_copy(queue, skb, npo,
                                     virt_to_page(data), len, offset, &head);
                data += len;
        }

        for (i = 0; i < nr_frags; i++) {
-               xenvif_gop_frag_copy(vif, skb, npo,
+               xenvif_gop_frag_copy(queue, skb, npo,
                                     skb_frag_page(&skb_shinfo(skb)->frags[i]),
                                     skb_frag_size(&skb_shinfo(skb)->frags[i]),
                                     skb_shinfo(skb)->frags[i].page_offset,
@@ -527,7 +528,7 @@ static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
        return status;
  }

-static void xenvif_add_frag_responses(struct xenvif *vif, int status,
+static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
                                      struct xenvif_rx_meta *meta,
                                      int nr_meta_slots)
  {
@@ -548,7 +549,7 @@ static void xenvif_add_frag_responses(struct xenvif *vif, int status,
                        flags = XEN_NETRXF_more_data;

                offset = 0;
-               make_rx_response(vif, meta[i].id, status, offset,
+               make_rx_response(queue, meta[i].id, status, offset,
                                 meta[i].size, flags);
        }
  }
@@ -557,12 +558,12 @@ struct skb_cb_overlay {
        int meta_slots_used;
  };

-static void xenvif_kick_thread(struct xenvif *vif)
+static void xenvif_kick_thread(struct xenvif_queue *queue)
  {
-       wake_up(&vif->wq);
+       wake_up(&queue->wq);
  }

-void xenvif_rx_action(struct xenvif *vif)
+void xenvif_rx_action(struct xenvif_queue *queue)
  {
        s8 status;
        u16 flags;
@@ -578,20 +579,19 @@ void xenvif_rx_action(struct xenvif *vif)
        int need_to_notify = 0;

        struct netrx_pending_operations npo = {
-               .copy  = vif->grant_copy_op,
-               .meta  = vif->meta,
+               .copy  = queue->grant_copy_op,
+               .meta  = queue->meta,
        };

        skb_queue_head_init(&rxq);

        count = 0;

-       while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
-               vif = netdev_priv(skb->dev);
+       while ((skb = skb_dequeue(&queue->rx_queue)) != NULL) {
                nr_frags = skb_shinfo(skb)->nr_frags;

                sco = (struct skb_cb_overlay *)skb->cb;
-               sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
+               sco->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);

                count += nr_frags + 1;

@@ -603,28 +603,26 @@ void xenvif_rx_action(struct xenvif *vif)
                        break;
        }

-       BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
+       BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta));

        if (!npo.copy_prod)
                return;

        BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
-       gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
+       gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);

        while ((skb = __skb_dequeue(&rxq)) != NULL) {
                sco = (struct skb_cb_overlay *)skb->cb;

-               vif = netdev_priv(skb->dev);
-
-               if ((1 << vif->meta[npo.meta_cons].gso_type) &
-                   vif->gso_prefix_mask) {
-                       resp = RING_GET_RESPONSE(&vif->rx,
-                                                vif->rx.rsp_prod_pvt++);
+               if ((1 << queue->meta[npo.meta_cons].gso_type) &
+                   queue->vif->gso_prefix_mask) {
+                       resp = RING_GET_RESPONSE(&queue->rx,
+                                                queue->rx.rsp_prod_pvt++);

                        resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;

-                       resp->offset = vif->meta[npo.meta_cons].gso_size;
-                       resp->id = vif->meta[npo.meta_cons].id;
+                       resp->offset = queue->meta[npo.meta_cons].gso_size;
+                       resp->id = queue->meta[npo.meta_cons].id;
                        resp->status = sco->meta_slots_used;

                        npo.meta_cons++;
@@ -632,10 +630,10 @@ void xenvif_rx_action(struct xenvif *vif)
                }


-               vif->dev->stats.tx_bytes += skb->len;
-               vif->dev->stats.tx_packets++;
+               queue->vif->dev->stats.tx_bytes += skb->len;
+               queue->vif->dev->stats.tx_packets++;

-               status = xenvif_check_gop(vif, sco->meta_slots_used, &npo);
+               status = xenvif_check_gop(queue->vif, sco->meta_slots_used, &npo);

                if (sco->meta_slots_used == 1)
                        flags = 0;
@@ -649,22 +647,22 @@ void xenvif_rx_action(struct xenvif *vif)
                        flags |= XEN_NETRXF_data_validated;

                offset = 0;
-               resp = make_rx_response(vif, vif->meta[npo.meta_cons].id,
+               resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
                                        status, offset,
-                                       vif->meta[npo.meta_cons].size,
+                                       queue->meta[npo.meta_cons].size,
                                        flags);

-               if ((1 << vif->meta[npo.meta_cons].gso_type) &
-                   vif->gso_mask) {
+               if ((1 << queue->meta[npo.meta_cons].gso_type) &
+                   queue->vif->gso_mask) {
                        struct xen_netif_extra_info *gso =
                                (struct xen_netif_extra_info *)
-                               RING_GET_RESPONSE(&vif->rx,
-                                                 vif->rx.rsp_prod_pvt++);
+                               RING_GET_RESPONSE(&queue->rx,
+                                                 queue->rx.rsp_prod_pvt++);

                        resp->flags |= XEN_NETRXF_extra_info;

-                       gso->u.gso.type = vif->meta[npo.meta_cons].gso_type;
-                       gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
+                       gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
+                       gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
                        gso->u.gso.pad = 0;
                        gso->u.gso.features = 0;

@@ -672,47 +670,47 @@ void xenvif_rx_action(struct xenvif *vif)
                        gso->flags = 0;
                }

-               xenvif_add_frag_responses(vif, status,
-                                         vif->meta + npo.meta_cons + 1,
+               xenvif_add_frag_responses(queue, status,
+                                         queue->meta + npo.meta_cons + 1,
                                          sco->meta_slots_used);

-               RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
+               RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret);

                if (ret)
                        need_to_notify = 1;

-               xenvif_notify_tx_completion(vif);
+               xenvif_notify_tx_completion(queue);

                npo.meta_cons += sco->meta_slots_used;
                dev_kfree_skb(skb);
        }

        if (need_to_notify)
-               notify_remote_via_irq(vif->rx_irq);
+               notify_remote_via_irq(queue->rx_irq);

        /* More work to do? */
-       if (!skb_queue_empty(&vif->rx_queue))
-               xenvif_kick_thread(vif);
+       if (!skb_queue_empty(&queue->rx_queue))
+               xenvif_kick_thread(queue);
  }

-void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
+void xenvif_queue_tx_skb(struct xenvif_queue *queue, struct sk_buff *skb)
  {
-       skb_queue_tail(&vif->rx_queue, skb);
+       skb_queue_tail(&queue->rx_queue, skb);

-       xenvif_kick_thread(vif);
+       xenvif_kick_thread(queue);
  }

-void xenvif_check_rx_xenvif(struct xenvif *vif)
+void xenvif_check_rx_xenvif(struct xenvif_queue *queue)
  {
        int more_to_do;

-       RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
+       RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);

        if (more_to_do)
-               napi_schedule(&vif->napi);
+               napi_schedule(&queue->napi);
  }

-static void tx_add_credit(struct xenvif *vif)
+static void tx_add_credit(struct xenvif_queue *queue)
  {
        unsigned long max_burst, max_credit;

@@ -720,37 +718,37 @@ static void tx_add_credit(struct xenvif *vif)
         * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
         * Otherwise the interface can seize up due to insufficient credit.
         */
-       max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
+       max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size;
        max_burst = min(max_burst, 131072UL);
-       max_burst = max(max_burst, vif->credit_bytes);
+       max_burst = max(max_burst, queue->credit_bytes);

        /* Take care that adding a new chunk of credit doesn't wrap to zero. */
-       max_credit = vif->remaining_credit + vif->credit_bytes;
-       if (max_credit < vif->remaining_credit)
+       max_credit = queue->remaining_credit + queue->credit_bytes;
+       if (max_credit < queue->remaining_credit)
                max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

-       vif->remaining_credit = min(max_credit, max_burst);
+       queue->remaining_credit = min(max_credit, max_burst);
  }

  static void tx_credit_callback(unsigned long data)
  {
-       struct xenvif *vif = (struct xenvif *)data;
-       tx_add_credit(vif);
-       xenvif_check_rx_xenvif(vif);
+       struct xenvif_queue *queue = (struct xenvif_queue *)data;
+       tx_add_credit(queue);
+       xenvif_check_rx_xenvif(queue);
  }

-static void xenvif_tx_err(struct xenvif *vif,
+static void xenvif_tx_err(struct xenvif_queue *queue,
                          struct xen_netif_tx_request *txp, RING_IDX end)
  {
-       RING_IDX cons = vif->tx.req_cons;
+       RING_IDX cons = queue->tx.req_cons;

        do {
-               make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
+               make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
                if (cons == end)
                        break;
-               txp = RING_GET_REQUEST(&vif->tx, cons++);
+               txp = RING_GET_REQUEST(&queue->tx, cons++);
        } while (1);
-       vif->tx.req_cons = cons;
+       queue->tx.req_cons = cons;
  }

  static void xenvif_fatal_tx_err(struct xenvif *vif)
@@ -759,12 +757,12 @@ static void xenvif_fatal_tx_err(struct xenvif *vif)
        xenvif_carrier_off(vif);
  }

-static int xenvif_count_requests(struct xenvif *vif,
+static int xenvif_count_requests(struct xenvif_queue *queue,
                                 struct xen_netif_tx_request *first,
                                 struct xen_netif_tx_request *txp,
                                 int work_to_do)
  {
-       RING_IDX cons = vif->tx.req_cons;
+       RING_IDX cons = queue->tx.req_cons;
        int slots = 0;
        int drop_err = 0;
        int more_data;
@@ -776,10 +774,10 @@ static int xenvif_count_requests(struct xenvif *vif,
                struct xen_netif_tx_request dropped_tx = { 0 };

                if (slots >= work_to_do) {
-                       netdev_err(vif->dev,
+                       netdev_err(queue->vif->dev,
                                   "Asked for %d slots but exceeds this
limit\n",
                                   work_to_do);
-                       xenvif_fatal_tx_err(vif);
+                       xenvif_fatal_tx_err(queue->vif);
                        return -ENODATA;
                }

@@ -787,10 +785,10 @@ static int xenvif_count_requests(struct xenvif *vif,
                 * considered malicious.
                 */
                if (unlikely(slots >= fatal_skb_slots)) {
-                       netdev_err(vif->dev,
+                       netdev_err(queue->vif->dev,
                                   "Malicious frontend using %d slots,
threshold %u\n",
                                   slots, fatal_skb_slots);
-                       xenvif_fatal_tx_err(vif);
+                       xenvif_fatal_tx_err(queue->vif);
                        return -E2BIG;
                }

@@ -803,7 +801,7 @@ static int xenvif_count_requests(struct xenvif *vif,
                 */
                if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
                        if (net_ratelimit())
-                               netdev_dbg(vif->dev,
+                               netdev_dbg(queue->vif->dev,
                                           "Too many slots (%d) exceeding
limit (%d), dropping packet\n",
                                           slots,
XEN_NETBK_LEGACY_SLOTS_MAX);
                        drop_err = -E2BIG;
@@ -812,7 +810,7 @@ static int xenvif_count_requests(struct xenvif *vif,
                if (drop_err)
                        txp = &dropped_tx;

-               memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
+               memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots),
                       sizeof(*txp));

                /* If the guest submitted a frame >= 64 KiB then
@@ -826,7 +824,7 @@ static int xenvif_count_requests(struct xenvif *vif,
                 */
                if (!drop_err && txp->size > first->size) {
                        if (net_ratelimit())
-                               netdev_dbg(vif->dev,
+                               netdev_dbg(queue->vif->dev,
                                           "Invalid tx request, slot size %u >
remaining size %u\n",
                                           txp->size, first->size);
                        drop_err = -EIO;
@@ -836,9 +834,9 @@ static int xenvif_count_requests(struct xenvif *vif,
                slots++;

                if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
-                       netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
+                       netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
                                 txp->offset, txp->size);
-                       xenvif_fatal_tx_err(vif);
+                       xenvif_fatal_tx_err(queue->vif);
                        return -EINVAL;
                }

@@ -850,14 +848,14 @@ static int xenvif_count_requests(struct xenvif *vif,
        } while (more_data);

        if (drop_err) {
-               xenvif_tx_err(vif, first, cons + slots);
+               xenvif_tx_err(queue, first, cons + slots);
                return drop_err;
        }

        return slots;
  }

-static struct page *xenvif_alloc_page(struct xenvif *vif,
+static struct page *xenvif_alloc_page(struct xenvif_queue *queue,
                                      u16 pending_idx)
  {
        struct page *page;
@@ -865,12 +863,12 @@ static struct page *xenvif_alloc_page(struct xenvif *vif,
        page = alloc_page(GFP_ATOMIC|__GFP_COLD);
        if (!page)
                return NULL;
-       vif->mmap_pages[pending_idx] = page;
+       queue->mmap_pages[pending_idx] = page;

        return page;
  }

-static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
+static struct gnttab_copy *xenvif_get_requests(struct xenvif_queue *queue,
                                               struct sk_buff *skb,
                                               struct xen_netif_tx_request *txp,
                                               struct gnttab_copy *gop)
@@ -901,7 +899,7 @@ static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
        for (shinfo->nr_frags = slot = start; slot < nr_slots;
             shinfo->nr_frags++) {
                struct pending_tx_info *pending_tx_info =
-                       vif->pending_tx_info;
+                       queue->pending_tx_info;

                page = alloc_page(GFP_ATOMIC|__GFP_COLD);
                if (!page)
@@ -913,7 +911,7 @@ static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
                        gop->flags = GNTCOPY_source_gref;

                        gop->source.u.ref = txp->gref;
-                       gop->source.domid = vif->domid;
+                       gop->source.domid = queue->vif->domid;
                        gop->source.offset = txp->offset;

                        gop->dest.domid = DOMID_SELF;
@@ -938,9 +936,9 @@ static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
                                gop->len = txp->size;
                                dst_offset += gop->len;

-                               index = pending_index(vif->pending_cons++);
+                               index = pending_index(queue->pending_cons++);

-                               pending_idx = vif->pending_ring[index];
+                               pending_idx = queue->pending_ring[index];


        memcpy(&pending_tx_info[pending_idx].req, txp,
                                       sizeof(*txp));
@@ -949,7 +947,7 @@ static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
                                 * fields for head tx req will be set
                                 * to correct values after the loop.
                                 */
-                               vif->mmap_pages[pending_idx] = (void *)(~0UL);
+                               queue->mmap_pages[pending_idx] = (void *)(~0UL);
                                pending_tx_info[pending_idx].head =
                                        INVALID_PENDING_RING_IDX;

@@ -969,7 +967,7 @@ static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
                first->req.offset = 0;
                first->req.size = dst_offset;
                first->head = start_idx;
-               vif->mmap_pages[head_idx] = page;
+               queue->mmap_pages[head_idx] = page;
                frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
        }

@@ -979,18 +977,18 @@ static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
  err:
        /* Unwind, freeing all pages and sending error responses. */
        while (shinfo->nr_frags-- > start) {
-               xenvif_idx_release(vif,
+               xenvif_idx_release(queue,
                                frag_get_pending_idx(&frags[shinfo->nr_frags]),
                                XEN_NETIF_RSP_ERROR);
        }
        /* The head too, if necessary. */
        if (start)
-               xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
+               xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);

        return NULL;
  }

-static int xenvif_tx_check_gop(struct xenvif *vif,
+static int xenvif_tx_check_gop(struct xenvif_queue *queue,
                               struct sk_buff *skb,
                               struct gnttab_copy **gopp)
  {
@@ -1005,7 +1003,7 @@ static int xenvif_tx_check_gop(struct xenvif *vif,
        /* Check status of header. */
        err = gop->status;
        if (unlikely(err))
-               xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
+               xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);

        /* Skip first skb fragment if it is on same page as header fragment. */
        start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
@@ -1015,7 +1013,7 @@ static int xenvif_tx_check_gop(struct xenvif *vif,
                pending_ring_idx_t head;

                pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
-               tx_info = &vif->pending_tx_info[pending_idx];
+               tx_info = &queue->pending_tx_info[pending_idx];
                head = tx_info->head;

                /* Check error status: if okay then remember grant handle. */
@@ -1023,19 +1021,19 @@ static int xenvif_tx_check_gop(struct xenvif *vif,
                        newerr = (++gop)->status;
                        if (newerr)
                                break;
-                       peek = vif->pending_ring[pending_index(++head)];
-               } while (!pending_tx_is_head(vif, peek));
+                       peek = queue->pending_ring[pending_index(++head)];
+               } while (!pending_tx_is_head(queue, peek));

                if (likely(!newerr)) {
                        /* Had a previous error? Invalidate this fragment. */
                        if (unlikely(err))
-                               xenvif_idx_release(vif, pending_idx,
+                               xenvif_idx_release(queue, pending_idx,
                                                   XEN_NETIF_RSP_OKAY);
                        continue;
                }

                /* Error on this fragment: respond to client with an error. */
-               xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
+               xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);

                /* Not the first error? Preceding frags already invalidated. */
                if (err)
@@ -1043,10 +1041,10 @@ static int xenvif_tx_check_gop(struct xenvif *vif,

                /* First error: invalidate header and preceding fragments. */
                pending_idx = *((u16 *)skb->data);
-               xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
+               xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY);
                for (j = start; j < i; j++) {
                        pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
-                       xenvif_idx_release(vif, pending_idx,
+                       xenvif_idx_release(queue, pending_idx,
                                           XEN_NETIF_RSP_OKAY);
                }

@@ -1058,7 +1056,7 @@ static int xenvif_tx_check_gop(struct xenvif *vif,
        return err;
  }

-static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
+static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
  {
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        int nr_frags = shinfo->nr_frags;
@@ -1072,46 +1070,46 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)

                pending_idx = frag_get_pending_idx(frag);

-               txp = &vif->pending_tx_info[pending_idx].req;
-               page = virt_to_page(idx_to_kaddr(vif, pending_idx));
+               txp = &queue->pending_tx_info[pending_idx].req;
+               page = virt_to_page(idx_to_kaddr(queue, pending_idx));
                __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
                skb->len += txp->size;
                skb->data_len += txp->size;
                skb->truesize += txp->size;

                /* Take an extra reference to offset xenvif_idx_release */
-               get_page(vif->mmap_pages[pending_idx]);
-               xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
+               get_page(queue->mmap_pages[pending_idx]);
+               xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY);
        }
  }

-static int xenvif_get_extras(struct xenvif *vif,
+static int xenvif_get_extras(struct xenvif_queue *queue,
                                struct xen_netif_extra_info *extras,
                                int work_to_do)
  {
        struct xen_netif_extra_info extra;
-       RING_IDX cons = vif->tx.req_cons;
+       RING_IDX cons = queue->tx.req_cons;

        do {
                if (unlikely(work_to_do-- <= 0)) {
-                       netdev_err(vif->dev, "Missing extra info\n");
-                       xenvif_fatal_tx_err(vif);
+                       netdev_err(queue->vif->dev, "Missing extra info\n");
+                       xenvif_fatal_tx_err(queue->vif);
                        return -EBADR;
                }

-               memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
+               memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons),
                       sizeof(extra));
                if (unlikely(!extra.type ||
                             extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
-                       vif->tx.req_cons = ++cons;
-                       netdev_err(vif->dev,
+                       queue->tx.req_cons = ++cons;
+                       netdev_err(queue->vif->dev,
                                   "Invalid extra type: %d\n", extra.type);
-                       xenvif_fatal_tx_err(vif);
+                       xenvif_fatal_tx_err(queue->vif);
                        return -EINVAL;
                }

                memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
-               vif->tx.req_cons = ++cons;
+               queue->tx.req_cons = ++cons;
        } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

        return work_to_do;
@@ -1424,31 +1422,31 @@ static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
        return err;
  }

-static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
+static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
  {
        u64 now = get_jiffies_64();
-       u64 next_credit = vif->credit_window_start +
-               msecs_to_jiffies(vif->credit_usec / 1000);
+       u64 next_credit = queue->credit_window_start +
+               msecs_to_jiffies(queue->credit_usec / 1000);

        /* Timer could already be pending in rare cases. */
-       if (timer_pending(&vif->credit_timeout))
+       if (timer_pending(&queue->credit_timeout))
                return true;

        /* Passed the point where we can replenish credit? */
        if (time_after_eq64(now, next_credit)) {
-               vif->credit_window_start = now;
-               tx_add_credit(vif);
+               queue->credit_window_start = now;
+               tx_add_credit(queue);
        }

        /* Still too big to send right now? Set a callback. */
-       if (size > vif->remaining_credit) {
-               vif->credit_timeout.data     =
-                       (unsigned long)vif;
-               vif->credit_timeout.function =
+       if (size > queue->remaining_credit) {
+               queue->credit_timeout.data     =
+                       (unsigned long)queue;
+               queue->credit_timeout.function =
                        tx_credit_callback;
-               mod_timer(&vif->credit_timeout,
+               mod_timer(&queue->credit_timeout,
                          next_credit);
-               vif->credit_window_start = next_credit;
+               queue->credit_window_start = next_credit;

                return true;
        }
@@ -1456,15 +1454,15 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
        return false;
  }

-static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
+static unsigned xenvif_tx_build_gops(struct xenvif_queue *queue, int budget)
  {
-       struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
+       struct gnttab_copy *gop = queue->tx_copy_ops, *request_gop;
        struct sk_buff *skb;
        int ret;

-       while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
+       while ((nr_pending_reqs(queue) + XEN_NETBK_LEGACY_SLOTS_MAX
                < MAX_PENDING_REQS) &&
-              (skb_queue_len(&vif->tx_queue) < budget)) {
+              (skb_queue_len(&queue->tx_queue) < budget)) {
                struct xen_netif_tx_request txreq;
                struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
                struct page *page;
@@ -1475,69 +1473,69 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
                unsigned int data_len;
                pending_ring_idx_t index;

-               if (vif->tx.sring->req_prod - vif->tx.req_cons >
+               if (queue->tx.sring->req_prod - queue->tx.req_cons >
                    XEN_NETIF_TX_RING_SIZE) {
-                       netdev_err(vif->dev,
+                       netdev_err(queue->vif->dev,
                                   "Impossible number of requests. "
                                   "req_prod %d, req_cons %d, size %ld\n",
-                                  vif->tx.sring->req_prod, vif->tx.req_cons,
+                                  queue->tx.sring->req_prod, queue->tx.req_cons,
                                   XEN_NETIF_TX_RING_SIZE);
-                       xenvif_fatal_tx_err(vif);
+                       xenvif_fatal_tx_err(queue->vif);
                        continue;
                }

-               work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx);
+               work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
                if (!work_to_do)
                        break;

-               idx = vif->tx.req_cons;
+               idx = queue->tx.req_cons;
                rmb(); /* Ensure that we see the request before we copy it. */
-               memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));
+               memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq));

                /* Credit-based scheduling. */
-               if (txreq.size > vif->remaining_credit &&
-                   tx_credit_exceeded(vif, txreq.size))
+               if (txreq.size > queue->remaining_credit &&
+                   tx_credit_exceeded(queue, txreq.size))
                        break;

-               vif->remaining_credit -= txreq.size;
+               queue->remaining_credit -= txreq.size;

                work_to_do--;
-               vif->tx.req_cons = ++idx;
+               queue->tx.req_cons = ++idx;

                memset(extras, 0, sizeof(extras));
                if (txreq.flags & XEN_NETTXF_extra_info) {
-                       work_to_do = xenvif_get_extras(vif, extras,
+                       work_to_do = xenvif_get_extras(queue, extras,
                                                       work_to_do);
-                       idx = vif->tx.req_cons;
+                       idx = queue->tx.req_cons;
                        if (unlikely(work_to_do < 0))
                                break;
                }

-               ret = xenvif_count_requests(vif, &txreq, txfrags, work_to_do);
+               ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
                if (unlikely(ret < 0))
                        break;

                idx += ret;

                if (unlikely(txreq.size < ETH_HLEN)) {
-                       netdev_dbg(vif->dev,
+                       netdev_dbg(queue->vif->dev,
                                   "Bad packet size: %d\n", txreq.size);
-                       xenvif_tx_err(vif, &txreq, idx);
+                       xenvif_tx_err(queue, &txreq, idx);
                        break;
                }

                /* No crossing a page as the payload mustn't fragment. */
                if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
-                       netdev_err(vif->dev,
+                       netdev_err(queue->vif->dev,
                                   "txreq.offset: %x, size: %u, end: %lu\n",
                                   txreq.offset, txreq.size,
                                   (txreq.offset&~PAGE_MASK) + txreq.size);
-                       xenvif_fatal_tx_err(vif);
+                       xenvif_fatal_tx_err(queue->vif);
                        break;
                }

-               index = pending_index(vif->pending_cons);
-               pending_idx = vif->pending_ring[index];
+               index = pending_index(queue->pending_cons);
+               pending_idx = queue->pending_ring[index];

                data_len = (txreq.size > PKT_PROT_LEN &&
                            ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
@@ -1546,9 +1544,9 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
                skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
                                GFP_ATOMIC | __GFP_NOWARN);
                if (unlikely(skb == NULL)) {
-                       netdev_dbg(vif->dev,
+                       netdev_dbg(queue->vif->dev,
                                   "Can't allocate a skb in start_xmit.\n");
-                       xenvif_tx_err(vif, &txreq, idx);
+                       xenvif_tx_err(queue, &txreq, idx);
                        break;
                }

@@ -1559,7 +1557,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
                        struct xen_netif_extra_info *gso;
                        gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

-                       if (xenvif_set_skb_gso(vif, skb, gso)) {
+                       if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
                                /* Failure in xenvif_set_skb_gso is fatal. */
                                kfree_skb(skb);
                                break;
@@ -1567,15 +1565,15 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
                }

                /* XXX could copy straight to head */
-               page = xenvif_alloc_page(vif, pending_idx);
+               page = xenvif_alloc_page(queue, pending_idx);
                if (!page) {
                        kfree_skb(skb);
-                       xenvif_tx_err(vif, &txreq, idx);
+                       xenvif_tx_err(queue, &txreq, idx);
                        break;
                }

                gop->source.u.ref = txreq.gref;
-               gop->source.domid = vif->domid;
+               gop->source.domid = queue->vif->domid;
                gop->source.offset = txreq.offset;

                gop->dest.u.gmfn = virt_to_mfn(page_address(page));
@@ -1587,9 +1585,9 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)

                gop++;

-               memcpy(&vif->pending_tx_info[pending_idx].req,
+               memcpy(&queue->pending_tx_info[pending_idx].req,
                       &txreq, sizeof(txreq));
-               vif->pending_tx_info[pending_idx].head = index;
+               queue->pending_tx_info[pending_idx].head = index;
                *((u16 *)skb->data) = pending_idx;

                __skb_put(skb, data_len);
@@ -1604,45 +1602,45 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
                                             INVALID_PENDING_IDX);
                }

-               vif->pending_cons++;
+               queue->pending_cons++;

-               request_gop = xenvif_get_requests(vif, skb, txfrags, gop);
+               request_gop = xenvif_get_requests(queue, skb, txfrags, gop);
                if (request_gop == NULL) {
                        kfree_skb(skb);
-                       xenvif_tx_err(vif, &txreq, idx);
+                       xenvif_tx_err(queue, &txreq, idx);
                        break;
                }
                gop = request_gop;

-               __skb_queue_tail(&vif->tx_queue, skb);
+               __skb_queue_tail(&queue->tx_queue, skb);

-               vif->tx.req_cons = idx;
+               queue->tx.req_cons = idx;

-               if ((gop-vif->tx_copy_ops) >= ARRAY_SIZE(vif->tx_copy_ops))
+               if ((gop - queue->tx_copy_ops) >= ARRAY_SIZE(queue->tx_copy_ops))
                        break;
        }

-       return gop - vif->tx_copy_ops;
+       return gop - queue->tx_copy_ops;
  }


-static int xenvif_tx_submit(struct xenvif *vif)
+static int xenvif_tx_submit(struct xenvif_queue *queue)
  {
-       struct gnttab_copy *gop = vif->tx_copy_ops;
+       struct gnttab_copy *gop = queue->tx_copy_ops;
        struct sk_buff *skb;
        int work_done = 0;

-       while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
+       while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
                struct xen_netif_tx_request *txp;
                u16 pending_idx;
                unsigned data_len;

                pending_idx = *((u16 *)skb->data);
-               txp = &vif->pending_tx_info[pending_idx].req;
+               txp = &queue->pending_tx_info[pending_idx].req;

                /* Check the remap error code. */
-               if (unlikely(xenvif_tx_check_gop(vif, skb, &gop))) {
-                       netdev_dbg(vif->dev, "netback grant failed.\n");
+               if (unlikely(xenvif_tx_check_gop(queue, skb, &gop))) {
+                       netdev_dbg(queue->vif->dev, "netback grant failed.\n");
                        skb_shinfo(skb)->nr_frags = 0;
                        kfree_skb(skb);
                        continue;
@@ -1650,7 +1648,7 @@ static int xenvif_tx_submit(struct xenvif *vif)

                data_len = skb->len;
                memcpy(skb->data,
-                      (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset),
+                      (void *)(idx_to_kaddr(queue, pending_idx)|txp->offset),
                       data_len);
                if (data_len < txp->size) {
                        /* Append the packet payload as a fragment. */
@@ -1658,7 +1656,7 @@ static int xenvif_tx_submit(struct xenvif *vif)
                        txp->size -= data_len;
                } else {
                        /* Schedule a response immediately. */
-                       xenvif_idx_release(vif, pending_idx,
+                       xenvif_idx_release(queue, pending_idx,
                                           XEN_NETIF_RSP_OKAY);
                }

@@ -1667,19 +1665,19 @@ static int xenvif_tx_submit(struct xenvif *vif)
                else if (txp->flags & XEN_NETTXF_data_validated)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;

-               xenvif_fill_frags(vif, skb);
+               xenvif_fill_frags(queue, skb);

                if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
                        int target = min_t(int, skb->len, PKT_PROT_LEN);
                        __pskb_pull_tail(skb, target - skb_headlen(skb));
                }

-               skb->dev      = vif->dev;
+               skb->dev      = queue->vif->dev;
                skb->protocol = eth_type_trans(skb, skb->dev);
                skb_reset_network_header(skb);

-               if (checksum_setup(vif, skb)) {
-                       netdev_dbg(vif->dev,
+               if (checksum_setup(queue->vif, skb)) {
+                       netdev_dbg(queue->vif->dev,
                                   "Can't setup checksum in
net_tx_action\n");
                        kfree_skb(skb);
                        continue;
@@ -1687,8 +1685,8 @@ static int xenvif_tx_submit(struct xenvif *vif)

                skb_probe_transport_header(skb, 0);

-               vif->dev->stats.rx_bytes += skb->len;
-               vif->dev->stats.rx_packets++;
+               queue->vif->dev->stats.rx_bytes += skb->len;
+               queue->vif->dev->stats.rx_packets++;


                work_done++;

@@ -1699,53 +1697,53 @@ static int xenvif_tx_submit(struct xenvif *vif)
  }

  /* Called after netfront has transmitted */
-int xenvif_tx_action(struct xenvif *vif, int budget)
+int xenvif_tx_action(struct xenvif_queue *queue, int budget)
  {
        unsigned nr_gops;
        int work_done;

-       if (unlikely(!tx_work_todo(vif)))
+       if (unlikely(!tx_work_todo(queue)))
                return 0;

-       nr_gops = xenvif_tx_build_gops(vif, budget);
+       nr_gops = xenvif_tx_build_gops(queue, budget);

        if (nr_gops == 0)
                return 0;

-       gnttab_batch_copy(vif->tx_copy_ops, nr_gops);
+       gnttab_batch_copy(queue->tx_copy_ops, nr_gops);

-       work_done = xenvif_tx_submit(vif);
+       work_done = xenvif_tx_submit(queue);

        return work_done;
  }

-static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
+static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
                               u8 status)
  {
        struct pending_tx_info *pending_tx_info;
        pending_ring_idx_t head;
        u16 peek; /* peek into next tx request */

-       BUG_ON(vif->mmap_pages[pending_idx] == (void *)(~0UL));
+       BUG_ON(queue->mmap_pages[pending_idx] == (void *)(~0UL));

        /* Already complete? */
-       if (vif->mmap_pages[pending_idx] == NULL)
+       if (queue->mmap_pages[pending_idx] == NULL)
                return;

-       pending_tx_info = &vif->pending_tx_info[pending_idx];
+       pending_tx_info = &queue->pending_tx_info[pending_idx];

        head = pending_tx_info->head;

-       BUG_ON(!pending_tx_is_head(vif, head));
-       BUG_ON(vif->pending_ring[pending_index(head)] != pending_idx);
+       BUG_ON(!pending_tx_is_head(queue, head));
+       BUG_ON(queue->pending_ring[pending_index(head)] != pending_idx);

        do {
                pending_ring_idx_t index;
                pending_ring_idx_t idx = pending_index(head);
-               u16 info_idx = vif->pending_ring[idx];
+               u16 info_idx = queue->pending_ring[idx];

-               pending_tx_info = &vif->pending_tx_info[info_idx];
-               make_tx_response(vif, &pending_tx_info->req, status);
+               pending_tx_info = &queue->pending_tx_info[info_idx];
+               make_tx_response(queue, &pending_tx_info->req, status);

                /* Setting any number other than
                 * INVALID_PENDING_RING_IDX indicates this slot is
@@ -1753,50 +1751,50 @@ static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
                 */
                pending_tx_info->head = 0;

-               index = pending_index(vif->pending_prod++);
-               vif->pending_ring[index] = vif->pending_ring[info_idx];
+               index = pending_index(queue->pending_prod++);
+               queue->pending_ring[index] = queue->pending_ring[info_idx];

-               peek = vif->pending_ring[pending_index(++head)];
+               peek = queue->pending_ring[pending_index(++head)];

-       } while (!pending_tx_is_head(vif, peek));
+       } while (!pending_tx_is_head(queue, peek));

-       put_page(vif->mmap_pages[pending_idx]);
-       vif->mmap_pages[pending_idx] = NULL;
+       put_page(queue->mmap_pages[pending_idx]);
+       queue->mmap_pages[pending_idx] = NULL;
  }


-static void make_tx_response(struct xenvif *vif,
+static void make_tx_response(struct xenvif_queue *queue,
                             struct xen_netif_tx_request *txp,
                             s8       st)
  {
-       RING_IDX i = vif->tx.rsp_prod_pvt;
+       RING_IDX i = queue->tx.rsp_prod_pvt;
        struct xen_netif_tx_response *resp;
        int notify;

-       resp = RING_GET_RESPONSE(&vif->tx, i);
+       resp = RING_GET_RESPONSE(&queue->tx, i);
        resp->id     = txp->id;
        resp->status = st;

        if (txp->flags & XEN_NETTXF_extra_info)
-               RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;
+               RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;

-       vif->tx.rsp_prod_pvt = ++i;
-       RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
+       queue->tx.rsp_prod_pvt = ++i;
+       RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
        if (notify)
-               notify_remote_via_irq(vif->tx_irq);
+               notify_remote_via_irq(queue->tx_irq);
  }

-static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
+static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
                                             u16      id,
                                             s8       st,
                                             u16      offset,
                                             u16      size,
                                             u16      flags)
  {
-       RING_IDX i = vif->rx.rsp_prod_pvt;
+       RING_IDX i = queue->rx.rsp_prod_pvt;
        struct xen_netif_rx_response *resp;

-       resp = RING_GET_RESPONSE(&vif->rx, i);
+       resp = RING_GET_RESPONSE(&queue->rx, i);
        resp->offset     = offset;
        resp->flags      = flags;
        resp->id         = id;
@@ -1804,38 +1802,38 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
        if (st < 0)
                resp->status = (s16)st;

-       vif->rx.rsp_prod_pvt = ++i;
+       queue->rx.rsp_prod_pvt = ++i;

        return resp;
  }

-static inline int rx_work_todo(struct xenvif *vif)
+static inline int rx_work_todo(struct xenvif_queue *queue)
  {
-       return !skb_queue_empty(&vif->rx_queue);
+       return !skb_queue_empty(&queue->rx_queue);
  }

-static inline int tx_work_todo(struct xenvif *vif)
+static inline int tx_work_todo(struct xenvif_queue *queue)
  {

-       if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) &&
-           (nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
+       if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)) &&
+           (nr_pending_reqs(queue) + XEN_NETBK_LEGACY_SLOTS_MAX
             < MAX_PENDING_REQS))
                return 1;

        return 0;
  }

-void xenvif_unmap_frontend_rings(struct xenvif *vif)
+void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
  {
-       if (vif->tx.sring)
-               xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
-                                       vif->tx.sring);
-       if (vif->rx.sring)
-               xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
-                                       vif->rx.sring);
+       if (queue->tx.sring)
+               xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
+                                       queue->tx.sring);
+       if (queue->rx.sring)
+               xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
+                                       queue->rx.sring);
  }

-int xenvif_map_frontend_rings(struct xenvif *vif,
+int xenvif_map_frontend_rings(struct xenvif_queue *queue,
                              grant_ref_t tx_ring_ref,
                              grant_ref_t rx_ring_ref)
  {
@@ -1845,44 +1843,44 @@ int xenvif_map_frontend_rings(struct xenvif *vif,

        int err = -ENOMEM;

-       err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
+       err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
                                     tx_ring_ref, &addr);
        if (err)
                goto err;

        txs = (struct xen_netif_tx_sring *)addr;
-       BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);
+       BACK_RING_INIT(&queue->tx, txs, PAGE_SIZE);

-       err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
+       err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
                                     rx_ring_ref, &addr);
        if (err)
                goto err;

        rxs = (struct xen_netif_rx_sring *)addr;
-       BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
+       BACK_RING_INIT(&queue->rx, rxs, PAGE_SIZE);

-       vif->rx_req_cons_peek = 0;
+       queue->rx_req_cons_peek = 0;

        return 0;

  err:
-       xenvif_unmap_frontend_rings(vif);
+       xenvif_unmap_frontend_rings(queue);
        return err;
  }

  int xenvif_kthread(void *data)
  {
-       struct xenvif *vif = data;
+       struct xenvif_queue *queue = data;

        while (!kthread_should_stop()) {
-               wait_event_interruptible(vif->wq,
-                                        rx_work_todo(vif) ||
+               wait_event_interruptible(queue->wq,
+                                        rx_work_todo(queue) ||
                                         kthread_should_stop());
                if (kthread_should_stop())
                        break;

-               if (rx_work_todo(vif))
-                       xenvif_rx_action(vif);
+               if (rx_work_todo(queue))
+                       xenvif_rx_action(queue);

                cond_resched();
        }
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index f035899..c3332e2 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -20,6 +20,7 @@
  */

  #include "common.h"
+#include <linux/vmalloc.h>

  struct backend_info {
        struct xenbus_device *dev;
@@ -35,8 +36,9 @@ struct backend_info {
        u8 have_hotplug_status_watch:1;
  };

-static int connect_rings(struct backend_info *);
-static void connect(struct backend_info *);
+static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
+static void connect(struct backend_info *be);
+static int read_xenbus_vif_flags(struct backend_info *be);
  static void backend_create_xenvif(struct backend_info *be);
  static void unregister_hotplug_status_watch(struct backend_info *be);
  static void set_backend_state(struct backend_info *be,
@@ -486,10 +488,9 @@ static void connect(struct backend_info *be)
  {
        int err;
        struct xenbus_device *dev = be->dev;
-
-       err = connect_rings(be);
-       if (err)
-               return;
+       unsigned long credit_bytes, credit_usec;
+       unsigned int queue_index;
+       struct xenvif_queue *queue;

        err = xen_net_read_mac(dev, be->vif->fe_dev_addr);
        if (err) {
@@ -497,9 +498,31 @@ static void connect(struct backend_info *be)
                return;
        }

-       xen_net_read_rate(dev, &be->vif->credit_bytes,
-                         &be->vif->credit_usec);
-       be->vif->remaining_credit = be->vif->credit_bytes;
+       xen_net_read_rate(dev, &credit_bytes, &credit_usec);
+       read_xenbus_vif_flags(be);
+
+       be->vif->num_queues = 1;
+       be->vif->queues = vzalloc(be->vif->num_queues *
+                       sizeof(struct xenvif_queue));
+
+       for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
+       {
+               queue = &be->vif->queues[queue_index];
+               queue->vif = be->vif;
+               queue->number = queue_index;
+               snprintf(queue->name, sizeof(queue->name), "%s-q%u",
+                               be->vif->dev->name, queue->number);
+
+               xenvif_init_queue(queue);
+
+               queue->remaining_credit = credit_bytes;
+
+               err = connect_rings(be, queue);
+               if (err)
+                       goto err;
+       }
+
+       xenvif_carrier_on(be->vif);

        unregister_hotplug_status_watch(be);
        err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
@@ -508,18 +531,24 @@ static void connect(struct backend_info *be)
        if (!err)
                be->have_hotplug_status_watch = 1;

-       netif_wake_queue(be->vif->dev);
+       netif_tx_wake_all_queues(be->vif->dev);
+
+       return;
+
+err:
+       vfree(be->vif->queues);
+       be->vif->queues = NULL;
+       be->vif->num_queues = 0;
+       return;
  }


-static int connect_rings(struct backend_info *be)
+static int connect_rings(struct backend_info *be, struct xenvif_queue *queue)
  {
-       struct xenvif *vif = be->vif;
        struct xenbus_device *dev = be->dev;
        unsigned long tx_ring_ref, rx_ring_ref;
-       unsigned int tx_evtchn, rx_evtchn, rx_copy;
+       unsigned int tx_evtchn, rx_evtchn;
        int err;
-       int val;

        err = xenbus_gather(XBT_NIL, dev->otherend,
                            "tx-ring-ref", "%lu", &tx_ring_ref,
@@ -547,6 +576,27 @@ static int connect_rings(struct backend_info *be)
                rx_evtchn = tx_evtchn;
        }

+       /* Map the shared frame, irq etc. */
+       err = xenvif_connect(queue, tx_ring_ref, rx_ring_ref,
+                            tx_evtchn, rx_evtchn);
+       if (err) {
+               xenbus_dev_fatal(dev, err,
+                                "mapping shared-frames %lu/%lu port tx %u rx %u",
+                                tx_ring_ref, rx_ring_ref,
+                                tx_evtchn, rx_evtchn);
+               return err;
+       }
+
+       return 0;
+}
+
+static int read_xenbus_vif_flags(struct backend_info *be)
+{
+       struct xenvif *vif = be->vif;
+       struct xenbus_device *dev = be->dev;
+       unsigned int rx_copy;
+       int err, val;
+
        err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
                           &rx_copy);
        if (err == -ENOENT) {
@@ -622,20 +672,9 @@ static int connect_rings(struct backend_info *be)
                val = 0;
        vif->ipv6_csum = !!val;

-       /* Map the shared frame, irq etc. */
-       err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref,
-                            tx_evtchn, rx_evtchn);
-       if (err) {
-               xenbus_dev_fatal(dev, err,
-                                "mapping shared-frames %lu/%lu port tx %u rx %u",
-                                tx_ring_ref, rx_ring_ref,
-                                tx_evtchn, rx_evtchn);
-               return err;
-       }
        return 0;
  }

-
  /* ** Driver Registration ** */


--
1.7.10.4



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

