
[Xen-devel] [PATCH 2/2] xen-netback: switch to NAPI + kthread 1:1 model



This patch implements the 1:1 model for netback. NAPI and kthread are
used to do the heavy lifting:

 - NAPI is used for guest side TX (host side RX)
 - kthread is used for guest side RX (host side TX)

struct xenvif and struct xen_netbk are merged into a single structure to
reduce code size.

This model provides better scheduling fairness among vifs. It is also a
prerequisite for implementing multiqueue support in Xen netback. A rough
sketch of the two per-vif dispatch paths is shown below.
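
For illustration only, a minimal sketch of the two per-vif paths in
kernel-style C. xenvif_poll() is simplified from the interface.c hunk in
this patch; the body of xenvif_kthread() is not part of this excerpt, so
the loop shown here is only an assumed shape based on the rx_work_todo()
and xenvif_rx_action() declarations the patch adds:

    /* Guest TX (host RX): one NAPI instance per vif, scheduled from the
     * tx interrupt and polled with a budget.  Simplified: the real poll
     * routine also re-checks the ring before completing NAPI.
     */
    static int xenvif_poll(struct napi_struct *napi, int budget)
    {
            struct xenvif *vif = container_of(napi, struct xenvif, napi);
            int work_done = xenvif_tx_action(vif, budget);

            if (work_done < budget)
                    napi_complete(napi);

            return work_done;
    }

    /* Guest RX (host TX): one kthread per vif, woken by
     * xenvif_kick_thread() whenever an skb is queued on vif->rx_queue.
     * Assumed shape only -- the real xenvif_kthread() is not shown in
     * this excerpt.
     */
    int xenvif_kthread(void *data)
    {
            struct xenvif *vif = data;

            while (!kthread_should_stop()) {
                    wait_event_interruptible(vif->wq,
                                             rx_work_todo(vif) ||
                                             kthread_should_stop());
                    if (kthread_should_stop())
                            break;

                    if (rx_work_todo(vif))
                            xenvif_rx_action(vif);

                    cond_resched();
            }

            return 0;
    }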

Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 drivers/net/xen-netback/common.h    |  105 +++--
 drivers/net/xen-netback/interface.c |  122 +++---
 drivers/net/xen-netback/netback.c   |  742 ++++++++++++-----------------------
 3 files changed, 389 insertions(+), 580 deletions(-)

diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 8a4d77e..2e270cf 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -45,15 +45,43 @@
 #include <xen/grant_table.h>
 #include <xen/xenbus.h>
 
-struct xen_netbk;
+typedef unsigned int pending_ring_idx_t;
+#define INVALID_PENDING_RING_IDX (~0U)
+
+struct pending_tx_info {
+       struct xen_netif_tx_request req; /* coalesced tx request */
+       pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
+                                 * if it is head of one or more tx
+                                 * reqs
+                                 */
+};
+
+#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
+#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
+
+struct xenvif_rx_meta {
+       int id;
+       int size;
+       int gso_size;
+};
+
+/* Discriminate from any valid pending_idx value. */
+#define INVALID_PENDING_IDX 0xFFFF
+
+#define MAX_BUFFER_OFFSET PAGE_SIZE
+
+#define MAX_PENDING_REQS 256
 
 struct xenvif {
        /* Unique identifier for this interface. */
        domid_t          domid;
        unsigned int     handle;
 
-       /* Reference to netback processing backend. */
-       struct xen_netbk *netbk;
+       /* Use NAPI for guest TX */
+       struct napi_struct napi;
+       /* Use kthread for guest RX */
+       struct task_struct *task;
+       wait_queue_head_t wq;
 
        u8               fe_dev_addr[6];
 
@@ -64,9 +92,6 @@ struct xenvif {
        char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
        char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
 
-       /* List of frontends to notify after a batch of frames sent. */
-       struct list_head notify_list;
-
        /* The shared rings and indexes. */
        struct xen_netif_tx_back_ring tx;
        struct xen_netif_rx_back_ring rx;
@@ -96,12 +121,33 @@ struct xenvif {
        /* Statistics */
        unsigned long rx_gso_checksum_fixup;
 
+       struct sk_buff_head rx_queue;
+       struct sk_buff_head tx_queue;
+
+       struct page *mmap_pages[MAX_PENDING_REQS];
+
+       pending_ring_idx_t pending_prod;
+       pending_ring_idx_t pending_cons;
+       u16 pending_ring[MAX_PENDING_REQS];
+
+       struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
+
+       /* Coalescing tx requests before copying makes number of grant
+        * copy ops greater or equal to number of slots required. In
+        * worst case a tx request consumes 2 gnttab_copy.
+        */
+       struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];
+
+       /*
+        * Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
+        * head/fragment page uses 2 copy operations because it
+        * straddles two buffers in the frontend.
+        */
+       struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
+       struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
+
        /* Miscellaneous private stuff. */
-       struct list_head schedule_list;
-       atomic_t         refcnt;
        struct net_device *dev;
-
-       wait_queue_head_t waiting_to_free;
 };
 
 static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
@@ -109,9 +155,6 @@ static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
        return to_xenbus_device(vif->dev->dev.parent);
 }
 
-#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
-#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
-
 struct xenvif *xenvif_alloc(struct device *parent,
                            domid_t domid,
                            unsigned int handle);
@@ -121,39 +164,26 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
                   unsigned int rx_evtchn);
 void xenvif_disconnect(struct xenvif *vif);
 
-void xenvif_get(struct xenvif *vif);
-void xenvif_put(struct xenvif *vif);
-
 int xenvif_xenbus_init(void);
 void xenvif_xenbus_fini(void);
 
 int xenvif_schedulable(struct xenvif *vif);
 
-int xen_netbk_rx_ring_full(struct xenvif *vif);
+int xenvif_rx_ring_full(struct xenvif *vif);
 
-int xen_netbk_must_stop_queue(struct xenvif *vif);
+int xenvif_must_stop_queue(struct xenvif *vif);
 
 /* (Un)Map communication rings. */
-void xen_netbk_unmap_frontend_rings(struct xenvif *vif);
-int xen_netbk_map_frontend_rings(struct xenvif *vif,
-                                grant_ref_t tx_ring_ref,
-                                grant_ref_t rx_ring_ref);
-
-/* (De)Register a xenvif with the netback backend. */
-void xen_netbk_add_xenvif(struct xenvif *vif);
-void xen_netbk_remove_xenvif(struct xenvif *vif);
-
-/* (De)Schedule backend processing for a xenvif */
-void xen_netbk_schedule_xenvif(struct xenvif *vif);
-void xen_netbk_deschedule_xenvif(struct xenvif *vif);
+void xenvif_unmap_frontend_rings(struct xenvif *vif);
+int xenvif_map_frontend_rings(struct xenvif *vif,
+                             grant_ref_t tx_ring_ref,
+                             grant_ref_t rx_ring_ref);
 
 /* Check for SKBs from frontend and schedule backend processing */
-void xen_netbk_check_rx_xenvif(struct xenvif *vif);
-/* Receive an SKB from the frontend */
-void xenvif_receive_skb(struct xenvif *vif, struct sk_buff *skb);
+void xenvif_check_rx_xenvif(struct xenvif *vif);
 
 /* Queue an SKB for transmission to the frontend */
-void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
+void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
 /* Notify xenvif that ring now has space to send an skb to the frontend */
 void xenvif_notify_tx_completion(struct xenvif *vif);
 
@@ -161,7 +191,12 @@ void xenvif_notify_tx_completion(struct xenvif *vif);
 void xenvif_carrier_off(struct xenvif *vif);
 
 /* Returns number of ring slots required to send an skb to the frontend */
-unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
+unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
+
+int xenvif_tx_action(struct xenvif *vif, int budget);
+void xenvif_rx_action(struct xenvif *vif);
+
+int xenvif_kthread(void *data);
 
 extern bool separate_tx_rx_irq;
 
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 087d2db..3d30e93 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -30,6 +30,7 @@
 
 #include "common.h"
 
+#include <linux/kthread.h>
 #include <linux/ethtool.h>
 #include <linux/rtnetlink.h>
 #include <linux/if_vlan.h>
@@ -38,17 +39,7 @@
 #include <asm/xen/hypercall.h>
 
 #define XENVIF_QUEUE_LENGTH 32
-
-void xenvif_get(struct xenvif *vif)
-{
-       atomic_inc(&vif->refcnt);
-}
-
-void xenvif_put(struct xenvif *vif)
-{
-       if (atomic_dec_and_test(&vif->refcnt))
-               wake_up(&vif->waiting_to_free);
-}
+#define XENVIF_NAPI_WEIGHT  64
 
 int xenvif_schedulable(struct xenvif *vif)
 {
@@ -57,28 +48,46 @@ int xenvif_schedulable(struct xenvif *vif)
 
 static int xenvif_rx_schedulable(struct xenvif *vif)
 {
-       return xenvif_schedulable(vif) && !xen_netbk_rx_ring_full(vif);
+       return xenvif_schedulable(vif) && !xenvif_rx_ring_full(vif);
 }
 
 static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
 {
        struct xenvif *vif = dev_id;
 
-       if (vif->netbk == NULL)
-               return IRQ_HANDLED;
-
-       xen_netbk_schedule_xenvif(vif);
+       if (RING_HAS_UNCONSUMED_REQUESTS(&vif->tx))
+               napi_schedule(&vif->napi);
 
        return IRQ_HANDLED;
 }
 
+static int xenvif_poll(struct napi_struct *napi, int budget)
+{
+       struct xenvif *vif = container_of(napi, struct xenvif, napi);
+       int work_done;
+
+       work_done = xenvif_tx_action(vif, budget);
+
+       if (work_done < budget) {
+               int more_to_do = 0;
+               unsigned long flags;
+
+               local_irq_save(flags);
+
+               RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
+               if (!more_to_do || work_done < 0)
+                       __napi_complete(napi);
+
+               local_irq_restore(flags);
+       }
+
+       return work_done;
+}
+
 static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
        struct xenvif *vif = dev_id;
 
-       if (vif->netbk == NULL)
-               return IRQ_HANDLED;
-
        if (xenvif_rx_schedulable(vif))
                netif_wake_queue(vif->dev);
 
@@ -99,7 +108,8 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        BUG_ON(skb->dev != dev);
 
-       if (vif->netbk == NULL)
+       /* Drop the packet if vif is not ready */
+       if (vif->task == NULL)
                goto drop;
 
        /* Drop the packet if the target domain has no receive buffers. */
@@ -107,13 +117,12 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
                goto drop;
 
        /* Reserve ring slots for the worst-case number of fragments. */
-       vif->rx_req_cons_peek += xen_netbk_count_skb_slots(vif, skb);
-       xenvif_get(vif);
+       vif->rx_req_cons_peek += xenvif_count_skb_slots(vif, skb);
 
-       if (vif->can_queue && xen_netbk_must_stop_queue(vif))
+       if (vif->can_queue && xenvif_must_stop_queue(vif))
                netif_stop_queue(dev);
 
-       xen_netbk_queue_tx_skb(vif, skb);
+       xenvif_queue_tx_skb(vif, skb);
 
        return NETDEV_TX_OK;
 
@@ -123,11 +132,6 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 }
 
-void xenvif_receive_skb(struct xenvif *vif, struct sk_buff *skb)
-{
-       netif_rx_ni(skb);
-}
-
 void xenvif_notify_tx_completion(struct xenvif *vif)
 {
        if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif))
@@ -142,21 +146,20 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 
 static void xenvif_up(struct xenvif *vif)
 {
-       xen_netbk_add_xenvif(vif);
+       napi_enable(&vif->napi);
        enable_irq(vif->tx_irq);
        if (vif->tx_irq != vif->rx_irq)
                enable_irq(vif->rx_irq);
-       xen_netbk_check_rx_xenvif(vif);
+       xenvif_check_rx_xenvif(vif);
 }
 
 static void xenvif_down(struct xenvif *vif)
 {
+       napi_disable(&vif->napi);
        disable_irq(vif->tx_irq);
        if (vif->tx_irq != vif->rx_irq)
                disable_irq(vif->rx_irq);
        del_timer_sync(&vif->credit_timeout);
-       xen_netbk_deschedule_xenvif(vif);
-       xen_netbk_remove_xenvif(vif);
 }
 
 static int xenvif_open(struct net_device *dev)
@@ -272,11 +275,13 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
        struct net_device *dev;
        struct xenvif *vif;
        char name[IFNAMSIZ] = {};
+       int i;
 
        snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
        dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup);
        if (dev == NULL) {
-               pr_warn("Could not allocate netdev\n");
+               printk(KERN_WARNING "xen-netback: Could not allocate netdev for vif%d.%d\n",
+                      domid, handle);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -285,14 +290,9 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
        vif = netdev_priv(dev);
        vif->domid  = domid;
        vif->handle = handle;
-       vif->netbk  = NULL;
        vif->can_sg = 1;
        vif->csum = 1;
-       atomic_set(&vif->refcnt, 1);
-       init_waitqueue_head(&vif->waiting_to_free);
        vif->dev = dev;
-       INIT_LIST_HEAD(&vif->schedule_list);
-       INIT_LIST_HEAD(&vif->notify_list);
 
        vif->credit_bytes = vif->remaining_credit = ~0UL;
        vif->credit_usec  = 0UL;
@@ -307,6 +307,16 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 
        dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
 
+       skb_queue_head_init(&vif->rx_queue);
+       skb_queue_head_init(&vif->tx_queue);
+
+       vif->pending_cons = 0;
+       vif->pending_prod = MAX_PENDING_REQS;
+       for (i = 0; i < MAX_PENDING_REQS; i++)
+               vif->pending_ring[i] = i;
+       for (i = 0; i < MAX_PENDING_REQS; i++)
+               vif->mmap_pages[i] = NULL;
+
        /*
         * Initialise a dummy MAC address. We choose the numerically
         * largest non-broadcast address to prevent the address getting
@@ -316,6 +326,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
        memset(dev->dev_addr, 0xFF, ETH_ALEN);
        dev->dev_addr[0] &= ~0x01;
 
+       netif_napi_add(dev, &vif->napi, xenvif_poll, XENVIF_NAPI_WEIGHT);
+
        netif_carrier_off(dev);
 
        err = register_netdev(dev);
@@ -341,7 +353,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 
        __module_get(THIS_MODULE);
 
-       err = xen_netbk_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
+       err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
        if (err < 0)
                goto err;
 
@@ -377,7 +389,16 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
                disable_irq(vif->rx_irq);
        }
 
-       xenvif_get(vif);
+       init_waitqueue_head(&vif->wq);
+       vif->task = kthread_create(xenvif_kthread,
+                                  (void *)vif,
+                                  "vif%d.%d", vif->domid, vif->handle);
+       if (IS_ERR(vif->task)) {
+               printk(KERN_WARNING "xen-netback: Could not allocate kthread for vif%d.%d\n",
+                      vif->domid, vif->handle);
+               err = PTR_ERR(vif->task);
+               goto err_rx_unbind;
+       }
 
        rtnl_lock();
        if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
@@ -388,12 +409,18 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
                xenvif_up(vif);
        rtnl_unlock();
 
+       wake_up_process(vif->task);
+
        return 0;
+
+err_rx_unbind:
+       unbind_from_irqhandler(vif->rx_irq, vif);
+       vif->rx_irq = 0;
 err_tx_unbind:
        unbind_from_irqhandler(vif->tx_irq, vif);
        vif->tx_irq = 0;
 err_unmap:
-       xen_netbk_unmap_frontend_rings(vif);
+       xenvif_unmap_frontend_rings(vif);
 err:
        module_put(THIS_MODULE);
        return err;
@@ -408,7 +435,6 @@ void xenvif_carrier_off(struct xenvif *vif)
        if (netif_running(dev))
                xenvif_down(vif);
        rtnl_unlock();
-       xenvif_put(vif);
 }
 
 void xenvif_disconnect(struct xenvif *vif)
@@ -422,9 +448,6 @@ void xenvif_disconnect(struct xenvif *vif)
        if (netif_carrier_ok(vif->dev))
                xenvif_carrier_off(vif);
 
-       atomic_dec(&vif->refcnt);
-       wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
-
        if (vif->tx_irq) {
                if (vif->tx_irq == vif->rx_irq)
                        unbind_from_irqhandler(vif->tx_irq, vif);
@@ -438,9 +461,14 @@ void xenvif_disconnect(struct xenvif *vif)
                need_module_put = 1;
        }
 
+       if (vif->task)
+               kthread_stop(vif->task);
+
+       netif_napi_del(&vif->napi);
+
        unregister_netdev(vif->dev);
 
-       xen_netbk_unmap_frontend_rings(vif);
+       xenvif_unmap_frontend_rings(vif);
 
        free_netdev(vif->dev);
 
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 54853be..09f32f2 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -70,116 +70,25 @@ module_param(fatal_skb_slots, uint, 0444);
  */
 #define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
 
-typedef unsigned int pending_ring_idx_t;
-#define INVALID_PENDING_RING_IDX (~0U)
-
-struct pending_tx_info {
-       struct xen_netif_tx_request req; /* coalesced tx request */
-       struct xenvif *vif;
-       pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
-                                 * if it is head of one or more tx
-                                 * reqs
-                                 */
-};
-
-struct netbk_rx_meta {
-       int id;
-       int size;
-       int gso_size;
-};
-
-#define MAX_PENDING_REQS 256
-
-/* Discriminate from any valid pending_idx value. */
-#define INVALID_PENDING_IDX 0xFFFF
-
-#define MAX_BUFFER_OFFSET PAGE_SIZE
-
-struct xen_netbk {
-       wait_queue_head_t wq;
-       struct task_struct *task;
-
-       struct sk_buff_head rx_queue;
-       struct sk_buff_head tx_queue;
-
-       struct timer_list net_timer;
-
-       struct page *mmap_pages[MAX_PENDING_REQS];
-
-       pending_ring_idx_t pending_prod;
-       pending_ring_idx_t pending_cons;
-       struct list_head net_schedule_list;
-
-       /* Protect the net_schedule_list in netif. */
-       spinlock_t net_schedule_list_lock;
-
-       atomic_t netfront_count;
-
-       struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
-       /* Coalescing tx requests before copying makes number of grant
-        * copy ops greater or equal to number of slots required. In
-        * worst case a tx request consumes 2 gnttab_copy.
-        */
-       struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];
-
-       u16 pending_ring[MAX_PENDING_REQS];
-
-       /*
-        * Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
-        * head/fragment page uses 2 copy operations because it
-        * straddles two buffers in the frontend.
-        */
-       struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
-       struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
-};
-
-static struct xen_netbk *xen_netbk;
-static int xen_netbk_group_nr;
-
 /*
  * If head != INVALID_PENDING_RING_IDX, it means this tx request is head of
  * one or more merged tx requests, otherwise it is the continuation of
  * previous tx request.
  */
-static inline int pending_tx_is_head(struct xen_netbk *netbk, RING_IDX idx)
+static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx)
 {
-       return netbk->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
+       return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
 }
 
-void xen_netbk_add_xenvif(struct xenvif *vif)
-{
-       int i;
-       int min_netfront_count;
-       int min_group = 0;
-       struct xen_netbk *netbk;
-
-       min_netfront_count = atomic_read(&xen_netbk[0].netfront_count);
-       for (i = 0; i < xen_netbk_group_nr; i++) {
-               int netfront_count = atomic_read(&xen_netbk[i].netfront_count);
-               if (netfront_count < min_netfront_count) {
-                       min_group = i;
-                       min_netfront_count = netfront_count;
-               }
-       }
-
-       netbk = &xen_netbk[min_group];
-
-       vif->netbk = netbk;
-       atomic_inc(&netbk->netfront_count);
-}
-
-void xen_netbk_remove_xenvif(struct xenvif *vif)
-{
-       struct xen_netbk *netbk = vif->netbk;
-       vif->netbk = NULL;
-       atomic_dec(&netbk->netfront_count);
-}
-
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
-                                 u8 status);
+static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
+                              u8 status);
 static void make_tx_response(struct xenvif *vif,
                             struct xen_netif_tx_request *txp,
                             s8       st);
+
+static inline int tx_work_todo(struct xenvif *vif);
+static inline int rx_work_todo(struct xenvif *vif);
+
 static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
                                             u16      id,
                                             s8       st,
@@ -187,16 +96,16 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
                                             u16      size,
                                             u16      flags);
 
-static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
+static inline unsigned long idx_to_pfn(struct xenvif *vif,
                                       u16 idx)
 {
-       return page_to_pfn(netbk->mmap_pages[idx]);
+       return page_to_pfn(vif->mmap_pages[idx]);
 }
 
-static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
+static inline unsigned long idx_to_kaddr(struct xenvif *vif,
                                         u16 idx)
 {
-       return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
+       return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
 }
 
 /*
@@ -224,15 +133,10 @@ static inline pending_ring_idx_t pending_index(unsigned i)
        return i & (MAX_PENDING_REQS-1);
 }
 
-static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk)
+static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
 {
        return MAX_PENDING_REQS -
-               netbk->pending_prod + netbk->pending_cons;
-}
-
-static void xen_netbk_kick_thread(struct xen_netbk *netbk)
-{
-       wake_up(&netbk->wq);
+               vif->pending_prod + vif->pending_cons;
 }
 
 static int max_required_rx_slots(struct xenvif *vif)
@@ -246,7 +150,7 @@ static int max_required_rx_slots(struct xenvif *vif)
        return max;
 }
 
-int xen_netbk_rx_ring_full(struct xenvif *vif)
+int xenvif_rx_ring_full(struct xenvif *vif)
 {
        RING_IDX peek   = vif->rx_req_cons_peek;
        RING_IDX needed = max_required_rx_slots(vif);
@@ -255,16 +159,16 @@ int xen_netbk_rx_ring_full(struct xenvif *vif)
               ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
 }
 
-int xen_netbk_must_stop_queue(struct xenvif *vif)
+int xenvif_must_stop_queue(struct xenvif *vif)
 {
-       if (!xen_netbk_rx_ring_full(vif))
+       if (!xenvif_rx_ring_full(vif))
                return 0;
 
        vif->rx.sring->req_event = vif->rx_req_cons_peek +
                max_required_rx_slots(vif);
        mb(); /* request notification /then/ check the queue */
 
-       return xen_netbk_rx_ring_full(vif);
+       return xenvif_rx_ring_full(vif);
 }
 
 /*
@@ -310,9 +214,9 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
 /*
  * Figure out how many ring slots we're going to need to send @skb to
  * the guest. This function is essentially a dry run of
- * netbk_gop_frag_copy.
+ * xenvif_gop_frag_copy.
  */
-unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
+unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
 {
        unsigned int count;
        int i, copy_off;
@@ -364,15 +268,15 @@ struct netrx_pending_operations {
        unsigned copy_prod, copy_cons;
        unsigned meta_prod, meta_cons;
        struct gnttab_copy *copy;
-       struct netbk_rx_meta *meta;
+       struct xenvif_rx_meta *meta;
        int copy_off;
        grant_ref_t copy_gref;
 };
 
-static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif,
-                                               struct netrx_pending_operations *npo)
+static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
+                                                struct netrx_pending_operations *npo)
 {
-       struct netbk_rx_meta *meta;
+       struct xenvif_rx_meta *meta;
        struct xen_netif_rx_request *req;
 
        req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
@@ -392,13 +296,13 @@ static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif,
  * Set up the grant operations for this fragment. If it's a flipping
  * interface, we also set up the unmap request from here.
  */
-static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
-                               struct netrx_pending_operations *npo,
-                               struct page *page, unsigned long size,
-                               unsigned long offset, int *head)
+static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
+                                struct netrx_pending_operations *npo,
+                                struct page *page, unsigned long size,
+                                unsigned long offset, int *head)
 {
        struct gnttab_copy *copy_gop;
-       struct netbk_rx_meta *meta;
+       struct xenvif_rx_meta *meta;
        unsigned long bytes;
 
        /* Data must not cross a page boundary. */
@@ -434,9 +338,9 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 
                copy_gop = npo->copy + npo->copy_prod++;
                copy_gop->flags = GNTCOPY_dest_gref;
+
                copy_gop->source.domid = DOMID_SELF;
                copy_gop->source.u.gmfn = virt_to_mfn(page_address(page));
-
                copy_gop->source.offset = offset;
                copy_gop->dest.domid = vif->domid;
 
@@ -478,14 +382,14 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
  * zero GSO descriptors (for non-GSO packets) or one descriptor (for
  * frontend-side LRO).
  */
-static int netbk_gop_skb(struct sk_buff *skb,
-                        struct netrx_pending_operations *npo)
+static int xenvif_gop_skb(struct sk_buff *skb,
+                         struct netrx_pending_operations *npo)
 {
        struct xenvif *vif = netdev_priv(skb->dev);
        int nr_frags = skb_shinfo(skb)->nr_frags;
        int i;
        struct xen_netif_rx_request *req;
-       struct netbk_rx_meta *meta;
+       struct xenvif_rx_meta *meta;
        unsigned char *data;
        int head = 1;
        int old_meta_prod;
@@ -522,30 +426,30 @@ static int netbk_gop_skb(struct sk_buff *skb,
                if (data + len > skb_tail_pointer(skb))
                        len = skb_tail_pointer(skb) - data;
 
-               netbk_gop_frag_copy(vif, skb, npo,
-                                   virt_to_page(data), len, offset, &head);
+               xenvif_gop_frag_copy(vif, skb, npo,
+                                    virt_to_page(data), len, offset, &head);
                data += len;
        }
 
        for (i = 0; i < nr_frags; i++) {
-               netbk_gop_frag_copy(vif, skb, npo,
-                                   skb_frag_page(&skb_shinfo(skb)->frags[i]),
-                                   skb_frag_size(&skb_shinfo(skb)->frags[i]),
-                                   skb_shinfo(skb)->frags[i].page_offset,
-                                   &head);
+               xenvif_gop_frag_copy(vif, skb, npo,
+                                    skb_frag_page(&skb_shinfo(skb)->frags[i]),
+                                    skb_frag_size(&skb_shinfo(skb)->frags[i]),
+                                    skb_shinfo(skb)->frags[i].page_offset,
+                                    &head);
        }
 
        return npo->meta_prod - old_meta_prod;
 }
 
 /*
- * This is a twin to netbk_gop_skb.  Assume that netbk_gop_skb was
+ * This is a twin to xenvif_gop_skb.  Assume that xenvif_gop_skb was
  * used to set up the operations on the top of
  * netrx_pending_operations, which have since been done.  Check that
  * they didn't give any errors and advance over them.
  */
-static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
-                          struct netrx_pending_operations *npo)
+static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
+                           struct netrx_pending_operations *npo)
 {
        struct gnttab_copy     *copy_op;
        int status = XEN_NETIF_RSP_OKAY;
@@ -564,9 +468,9 @@ static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
        return status;
 }
 
-static void netbk_add_frag_responses(struct xenvif *vif, int status,
-                                    struct netbk_rx_meta *meta,
-                                    int nr_meta_slots)
+static void xenvif_add_frag_responses(struct xenvif *vif, int status,
+                                     struct xenvif_rx_meta *meta,
+                                     int nr_meta_slots)
 {
        int i;
        unsigned long offset;
@@ -594,9 +498,13 @@ struct skb_cb_overlay {
        int meta_slots_used;
 };
 
-static void xen_netbk_rx_action(struct xen_netbk *netbk)
+static void xenvif_kick_thread(struct xenvif *vif)
+{
+       wake_up(&vif->wq);
+}
+
+void xenvif_rx_action(struct xenvif *vif)
 {
-       struct xenvif *vif = NULL, *tmp;
        s8 status;
        u16 flags;
        struct xen_netif_rx_response *resp;
@@ -608,22 +516,23 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
        int count;
        unsigned long offset;
        struct skb_cb_overlay *sco;
+       int need_to_notify = 0;
 
        struct netrx_pending_operations npo = {
-               .copy  = netbk->grant_copy_op,
-               .meta  = netbk->meta,
+               .copy  = vif->grant_copy_op,
+               .meta  = vif->meta,
        };
 
        skb_queue_head_init(&rxq);
 
        count = 0;
 
-       while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
+       while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
                vif = netdev_priv(skb->dev);
                nr_frags = skb_shinfo(skb)->nr_frags;
 
                sco = (struct skb_cb_overlay *)skb->cb;
-               sco->meta_slots_used = netbk_gop_skb(skb, &npo);
+               sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
 
                count += nr_frags + 1;
 
@@ -635,27 +544,27 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
                        break;
        }
 
-       BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));
+       BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
 
        if (!npo.copy_prod)
                return;
 
-       BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
-       gnttab_batch_copy(netbk->grant_copy_op, npo.copy_prod);
+       BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op));
+       gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
 
        while ((skb = __skb_dequeue(&rxq)) != NULL) {
                sco = (struct skb_cb_overlay *)skb->cb;
 
                vif = netdev_priv(skb->dev);
 
-               if (netbk->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
+               if (vif->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
                        resp = RING_GET_RESPONSE(&vif->rx,
-                                               vif->rx.rsp_prod_pvt++);
+                                                vif->rx.rsp_prod_pvt++);
 
                        resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
 
-                       resp->offset = netbk->meta[npo.meta_cons].gso_size;
-                       resp->id = netbk->meta[npo.meta_cons].id;
+                       resp->offset = vif->meta[npo.meta_cons].gso_size;
+                       resp->id = vif->meta[npo.meta_cons].id;
                        resp->status = sco->meta_slots_used;
 
                        npo.meta_cons++;
@@ -666,7 +575,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
                vif->dev->stats.tx_bytes += skb->len;
                vif->dev->stats.tx_packets++;
 
-               status = netbk_check_gop(vif, sco->meta_slots_used, &npo);
+               status = xenvif_check_gop(vif, sco->meta_slots_used, &npo);
 
                if (sco->meta_slots_used == 1)
                        flags = 0;
@@ -680,12 +589,12 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
                        flags |= XEN_NETRXF_data_validated;
 
                offset = 0;
-               resp = make_rx_response(vif, netbk->meta[npo.meta_cons].id,
+               resp = make_rx_response(vif, vif->meta[npo.meta_cons].id,
                                        status, offset,
-                                       netbk->meta[npo.meta_cons].size,
+                                       vif->meta[npo.meta_cons].size,
                                        flags);
 
-               if (netbk->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
+               if (vif->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
                        struct xen_netif_extra_info *gso =
                                (struct xen_netif_extra_info *)
                                RING_GET_RESPONSE(&vif->rx,
@@ -693,7 +602,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
 
                        resp->flags |= XEN_NETRXF_extra_info;
 
-                       gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size;
+                       gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
                        gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
                        gso->u.gso.pad = 0;
                        gso->u.gso.features = 0;
@@ -702,121 +611,43 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
                        gso->flags = 0;
                }
 
-               netbk_add_frag_responses(vif, status,
-                                        netbk->meta + npo.meta_cons + 1,
-                                        sco->meta_slots_used);
+               xenvif_add_frag_responses(vif, status,
+                                         vif->meta + npo.meta_cons + 1,
+                                         sco->meta_slots_used);
 
                RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
-               if (ret && list_empty(&vif->notify_list))
-                       list_add_tail(&vif->notify_list, &notify);
+               if (ret)
+                       need_to_notify = 1;
 
                xenvif_notify_tx_completion(vif);
 
-               xenvif_put(vif);
                npo.meta_cons += sco->meta_slots_used;
                dev_kfree_skb(skb);
        }
 
-       list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
+       if (need_to_notify)
                notify_remote_via_irq(vif->rx_irq);
-               list_del_init(&vif->notify_list);
-       }
 
        /* More work to do? */
-       if (!skb_queue_empty(&netbk->rx_queue) &&
-                       !timer_pending(&netbk->net_timer))
-               xen_netbk_kick_thread(netbk);
-}
-
-void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
-{
-       struct xen_netbk *netbk = vif->netbk;
-
-       skb_queue_tail(&netbk->rx_queue, skb);
-
-       xen_netbk_kick_thread(netbk);
-}
-
-static void xen_netbk_alarm(unsigned long data)
-{
-       struct xen_netbk *netbk = (struct xen_netbk *)data;
-       xen_netbk_kick_thread(netbk);
-}
-
-static int __on_net_schedule_list(struct xenvif *vif)
-{
-       return !list_empty(&vif->schedule_list);
-}
-
-/* Must be called with net_schedule_list_lock held */
-static void remove_from_net_schedule_list(struct xenvif *vif)
-{
-       if (likely(__on_net_schedule_list(vif))) {
-               list_del_init(&vif->schedule_list);
-               xenvif_put(vif);
-       }
+       if (!skb_queue_empty(&vif->rx_queue))
+               xenvif_kick_thread(vif);
 }
 
-static struct xenvif *poll_net_schedule_list(struct xen_netbk *netbk)
+void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
 {
-       struct xenvif *vif = NULL;
-
-       spin_lock_irq(&netbk->net_schedule_list_lock);
-       if (list_empty(&netbk->net_schedule_list))
-               goto out;
-
-       vif = list_first_entry(&netbk->net_schedule_list,
-                              struct xenvif, schedule_list);
-       if (!vif)
-               goto out;
-
-       xenvif_get(vif);
+       skb_queue_tail(&vif->rx_queue, skb);
 
-       remove_from_net_schedule_list(vif);
-out:
-       spin_unlock_irq(&netbk->net_schedule_list_lock);
-       return vif;
+       xenvif_kick_thread(vif);
 }
 
-void xen_netbk_schedule_xenvif(struct xenvif *vif)
-{
-       unsigned long flags;
-       struct xen_netbk *netbk = vif->netbk;
-
-       if (__on_net_schedule_list(vif))
-               goto kick;
-
-       spin_lock_irqsave(&netbk->net_schedule_list_lock, flags);
-       if (!__on_net_schedule_list(vif) &&
-           likely(xenvif_schedulable(vif))) {
-               list_add_tail(&vif->schedule_list, &netbk->net_schedule_list);
-               xenvif_get(vif);
-       }
-       spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags);
-
-kick:
-       smp_mb();
-       if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
-           !list_empty(&netbk->net_schedule_list))
-               xen_netbk_kick_thread(netbk);
-}
-
-void xen_netbk_deschedule_xenvif(struct xenvif *vif)
-{
-       struct xen_netbk *netbk = vif->netbk;
-       spin_lock_irq(&netbk->net_schedule_list_lock);
-       remove_from_net_schedule_list(vif);
-       spin_unlock_irq(&netbk->net_schedule_list_lock);
-}
-
-void xen_netbk_check_rx_xenvif(struct xenvif *vif)
+void xenvif_check_rx_xenvif(struct xenvif *vif)
 {
        int more_to_do;
 
        RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
 
        if (more_to_do)
-               xen_netbk_schedule_xenvif(vif);
+               napi_schedule(&vif->napi);
 }
 
 static void tx_add_credit(struct xenvif *vif)
@@ -843,11 +674,11 @@ static void tx_credit_callback(unsigned long data)
 {
        struct xenvif *vif = (struct xenvif *)data;
        tx_add_credit(vif);
-       xen_netbk_check_rx_xenvif(vif);
+       xenvif_check_rx_xenvif(vif);
 }
 
-static void netbk_tx_err(struct xenvif *vif,
-                        struct xen_netif_tx_request *txp, RING_IDX end)
+static void xenvif_tx_err(struct xenvif *vif,
+                         struct xen_netif_tx_request *txp, RING_IDX end)
 {
        RING_IDX cons = vif->tx.req_cons;
 
@@ -858,21 +689,18 @@ static void netbk_tx_err(struct xenvif *vif,
                txp = RING_GET_REQUEST(&vif->tx, cons++);
        } while (1);
        vif->tx.req_cons = cons;
-       xen_netbk_check_rx_xenvif(vif);
-       xenvif_put(vif);
 }
 
-static void netbk_fatal_tx_err(struct xenvif *vif)
+static void xenvif_fatal_tx_err(struct xenvif *vif)
 {
        netdev_err(vif->dev, "fatal error; disabling device\n");
        xenvif_carrier_off(vif);
-       xenvif_put(vif);
 }
 
-static int netbk_count_requests(struct xenvif *vif,
-                               struct xen_netif_tx_request *first,
-                               struct xen_netif_tx_request *txp,
-                               int work_to_do)
+static int xenvif_count_requests(struct xenvif *vif,
+                                struct xen_netif_tx_request *first,
+                                struct xen_netif_tx_request *txp,
+                                int work_to_do)
 {
        RING_IDX cons = vif->tx.req_cons;
        int slots = 0;
@@ -889,7 +717,7 @@ static int netbk_count_requests(struct xenvif *vif,
                        netdev_err(vif->dev,
                                   "Asked for %d slots but exceeds this 
limit\n",
                                   work_to_do);
-                       netbk_fatal_tx_err(vif);
+                       xenvif_fatal_tx_err(vif);
                        return -ENODATA;
                }
 
@@ -900,7 +728,7 @@ static int netbk_count_requests(struct xenvif *vif,
                        netdev_err(vif->dev,
                                   "Malicious frontend using %d slots, 
threshold %u\n",
                                   slots, fatal_skb_slots);
-                       netbk_fatal_tx_err(vif);
+                       xenvif_fatal_tx_err(vif);
                        return -E2BIG;
                }
 
@@ -948,7 +776,7 @@ static int netbk_count_requests(struct xenvif *vif,
                if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
                        netdev_err(vif->dev, "Cross page boundary, txp->offset: 
%x, size: %u\n",
                                 txp->offset, txp->size);
-                       netbk_fatal_tx_err(vif);
+                       xenvif_fatal_tx_err(vif);
                        return -EINVAL;
                }
 
@@ -960,29 +788,30 @@ static int netbk_count_requests(struct xenvif *vif,
        } while (more_data);
 
        if (drop_err) {
-               netbk_tx_err(vif, first, cons + slots);
+               xenvif_tx_err(vif, first, cons + slots);
                return drop_err;
        }
 
        return slots;
 }
 
-static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
-                                        u16 pending_idx)
+static struct page *xenvif_alloc_page(struct xenvif *vif,
+                                     u16 pending_idx)
 {
        struct page *page;
+
        page = alloc_page(GFP_KERNEL|__GFP_COLD);
        if (!page)
                return NULL;
-       netbk->mmap_pages[pending_idx] = page;
+       vif->mmap_pages[pending_idx] = page;
+
        return page;
 }
 
-static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
-                                                 struct xenvif *vif,
-                                                 struct sk_buff *skb,
-                                                 struct xen_netif_tx_request *txp,
-                                                 struct gnttab_copy *gop)
+static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
+                                              struct sk_buff *skb,
+                                              struct xen_netif_tx_request *txp,
+                                              struct gnttab_copy *gop)
 {
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        skb_frag_t *frags = shinfo->frags;
@@ -1005,12 +834,12 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
 
        /* Coalesce tx requests, at this point the packet passed in
         * should be <= 64K. Any packets larger than 64K have been
-        * handled in netbk_count_requests().
+        * handled in xenvif_count_requests().
         */
        for (shinfo->nr_frags = slot = start; slot < nr_slots;
             shinfo->nr_frags++) {
                struct pending_tx_info *pending_tx_info =
-                       netbk->pending_tx_info;
+                       vif->pending_tx_info;
 
                page = alloc_page(GFP_KERNEL|__GFP_COLD);
                if (!page)
@@ -1047,21 +876,18 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
                                gop->len = txp->size;
                                dst_offset += gop->len;
 
-                               index = pending_index(netbk->pending_cons++);
+                               index = pending_index(vif->pending_cons++);
 
-                               pending_idx = netbk->pending_ring[index];
+                               pending_idx = vif->pending_ring[index];
 
                                memcpy(&pending_tx_info[pending_idx].req, txp,
                                       sizeof(*txp));
-                               xenvif_get(vif);
-
-                               pending_tx_info[pending_idx].vif = vif;
 
                                /* Poison these fields, corresponding
                                 * fields for head tx req will be set
                                 * to correct values after the loop.
                                 */
-                               netbk->mmap_pages[pending_idx] = (void *)(~0UL);
+                               vif->mmap_pages[pending_idx] = (void *)(~0UL);
                                pending_tx_info[pending_idx].head =
                                        INVALID_PENDING_RING_IDX;
 
@@ -1081,7 +907,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
                first->req.offset = 0;
                first->req.size = dst_offset;
                first->head = start_idx;
-               netbk->mmap_pages[head_idx] = page;
+               vif->mmap_pages[head_idx] = page;
                frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
        }
 
@@ -1091,20 +917,20 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
 err:
        /* Unwind, freeing all pages and sending error responses. */
        while (shinfo->nr_frags-- > start) {
-               xen_netbk_idx_release(netbk,
+               xenvif_idx_release(vif,
                                frag_get_pending_idx(&frags[shinfo->nr_frags]),
                                XEN_NETIF_RSP_ERROR);
        }
        /* The head too, if necessary. */
        if (start)
-               xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+               xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
 
        return NULL;
 }
 
-static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
-                                 struct sk_buff *skb,
-                                 struct gnttab_copy **gopp)
+static int xenvif_tx_check_gop(struct xenvif *vif,
+                              struct sk_buff *skb,
+                              struct gnttab_copy **gopp)
 {
        struct gnttab_copy *gop = *gopp;
        u16 pending_idx = *((u16 *)skb->data);
@@ -1117,7 +943,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
        /* Check status of header. */
        err = gop->status;
        if (unlikely(err))
-               xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+               xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
 
        /* Skip first skb fragment if it is on same page as header fragment. */
        start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
@@ -1127,7 +953,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
                pending_ring_idx_t head;
 
                pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
-               tx_info = &netbk->pending_tx_info[pending_idx];
+               tx_info = &vif->pending_tx_info[pending_idx];
                head = tx_info->head;
 
                /* Check error status: if okay then remember grant handle. */
@@ -1135,18 +961,18 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
                        newerr = (++gop)->status;
                        if (newerr)
                                break;
-                       peek = netbk->pending_ring[pending_index(++head)];
-               } while (!pending_tx_is_head(netbk, peek));
+                       peek = vif->pending_ring[pending_index(++head)];
+               } while (!pending_tx_is_head(vif, peek));
 
                if (likely(!newerr)) {
                        /* Had a previous error? Invalidate this fragment. */
                        if (unlikely(err))
-                               xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+                               xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
                        continue;
                }
 
                /* Error on this fragment: respond to client with an error. */
-               xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+               xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
 
                /* Not the first error? Preceding frags already invalidated. */
                if (err)
@@ -1154,10 +980,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 
                /* First error: invalidate header and preceding fragments. */
                pending_idx = *((u16 *)skb->data);
-               xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+               xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
                for (j = start; j < i; j++) {
                        pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
-                       xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+                       xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
                }
 
                /* Remember the error: invalidate all subsequent fragments. */
@@ -1168,7 +994,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
        return err;
 }
 
-static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
+static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
 {
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        int nr_frags = shinfo->nr_frags;
@@ -1182,20 +1008,20 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
 
                pending_idx = frag_get_pending_idx(frag);
 
-               txp = &netbk->pending_tx_info[pending_idx].req;
-               page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
+               txp = &vif->pending_tx_info[pending_idx].req;
+               page = virt_to_page(idx_to_kaddr(vif, pending_idx));
                __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
                skb->len += txp->size;
                skb->data_len += txp->size;
                skb->truesize += txp->size;
 
-               /* Take an extra reference to offset xen_netbk_idx_release */
-               get_page(netbk->mmap_pages[pending_idx]);
-               xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+               /* Take an extra reference to offset xenvif_idx_release */
+               get_page(vif->mmap_pages[pending_idx]);
+               xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
        }
 }
 
-static int xen_netbk_get_extras(struct xenvif *vif,
+static int xenvif_get_extras(struct xenvif *vif,
                                struct xen_netif_extra_info *extras,
                                int work_to_do)
 {
@@ -1205,7 +1031,7 @@ static int xen_netbk_get_extras(struct xenvif *vif,
        do {
                if (unlikely(work_to_do-- <= 0)) {
                        netdev_err(vif->dev, "Missing extra info\n");
-                       netbk_fatal_tx_err(vif);
+                       xenvif_fatal_tx_err(vif);
                        return -EBADR;
                }
 
@@ -1216,7 +1042,7 @@ static int xen_netbk_get_extras(struct xenvif *vif,
                        vif->tx.req_cons = ++cons;
                        netdev_err(vif->dev,
                                   "Invalid extra type: %d\n", extra.type);
-                       netbk_fatal_tx_err(vif);
+                       xenvif_fatal_tx_err(vif);
                        return -EINVAL;
                }
 
@@ -1227,20 +1053,20 @@ static int xen_netbk_get_extras(struct xenvif *vif,
        return work_to_do;
 }
 
-static int netbk_set_skb_gso(struct xenvif *vif,
-                            struct sk_buff *skb,
-                            struct xen_netif_extra_info *gso)
+static int xenvif_set_skb_gso(struct xenvif *vif,
+                             struct sk_buff *skb,
+                             struct xen_netif_extra_info *gso)
 {
        if (!gso->u.gso.size) {
                netdev_err(vif->dev, "GSO size must not be zero.\n");
-               netbk_fatal_tx_err(vif);
+               xenvif_fatal_tx_err(vif);
                return -EINVAL;
        }
 
        /* Currently only TCPv4 S.O. is supported. */
        if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
                netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
-               netbk_fatal_tx_err(vif);
+               xenvif_fatal_tx_err(vif);
                return -EINVAL;
        }
 
@@ -1351,16 +1177,14 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
        return false;
 }
 
-static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
+static unsigned xenvif_tx_build_gops(struct xenvif *vif)
 {
-       struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop;
+       struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
        struct sk_buff *skb;
        int ret;
 
-       while ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX
-               < MAX_PENDING_REQS) &&
-               !list_empty(&netbk->net_schedule_list)) {
-               struct xenvif *vif;
+       while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
+               < MAX_PENDING_REQS)) {
                struct xen_netif_tx_request txreq;
                struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
                struct page *page;
@@ -1371,16 +1195,6 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
                unsigned int data_len;
                pending_ring_idx_t index;
 
-               /* Get a netif from the list with work to do. */
-               vif = poll_net_schedule_list(netbk);
-               /* This can sometimes happen because the test of
-                * list_empty(net_schedule_list) at the top of the
-                * loop is unlocked.  Just go back and have another
-                * look.
-                */
-               if (!vif)
-                       continue;
-
                if (vif->tx.sring->req_prod - vif->tx.req_cons >
                    XEN_NETIF_TX_RING_SIZE) {
                        netdev_err(vif->dev,
@@ -1388,15 +1202,13 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
                                   "req_prod %d, req_cons %d, size %ld\n",
                                   vif->tx.sring->req_prod, vif->tx.req_cons,
                                   XEN_NETIF_TX_RING_SIZE);
-                       netbk_fatal_tx_err(vif);
+                       xenvif_fatal_tx_err(vif);
                        continue;
                }
 
                RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
-               if (!work_to_do) {
-                       xenvif_put(vif);
-                       continue;
-               }
+               if (!work_to_do)
+                       break;
 
                idx = vif->tx.req_cons;
                rmb(); /* Ensure that we see the request before we copy it. */
@@ -1404,10 +1216,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 
                /* Credit-based scheduling. */
                if (txreq.size > vif->remaining_credit &&
-                   tx_credit_exceeded(vif, txreq.size)) {
-                       xenvif_put(vif);
-                       continue;
-               }
+                   tx_credit_exceeded(vif, txreq.size))
+                       break;
 
                vif->remaining_credit -= txreq.size;
 
@@ -1416,24 +1226,24 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 
                memset(extras, 0, sizeof(extras));
                if (txreq.flags & XEN_NETTXF_extra_info) {
-                       work_to_do = xen_netbk_get_extras(vif, extras,
+                       work_to_do = xenvif_get_extras(vif, extras,
                                                          work_to_do);
                        idx = vif->tx.req_cons;
                        if (unlikely(work_to_do < 0))
-                               continue;
+                               break;
                }
 
-               ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
+               ret = xenvif_count_requests(vif, &txreq, txfrags, work_to_do);
                if (unlikely(ret < 0))
-                       continue;
+                       break;
 
                idx += ret;
 
                if (unlikely(txreq.size < ETH_HLEN)) {
                        netdev_dbg(vif->dev,
                                   "Bad packet size: %d\n", txreq.size);
-                       netbk_tx_err(vif, &txreq, idx);
-                       continue;
+                       xenvif_tx_err(vif, &txreq, idx);
+                       break;
                }
 
                /* No crossing a page as the payload mustn't fragment. */
@@ -1442,12 +1252,12 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
                                   "txreq.offset: %x, size: %u, end: %lu\n",
                                   txreq.offset, txreq.size,
                                   (txreq.offset&~PAGE_MASK) + txreq.size);
-                       netbk_fatal_tx_err(vif);
-                       continue;
+                       xenvif_fatal_tx_err(vif);
+                       break;
                }
 
-               index = pending_index(netbk->pending_cons);
-               pending_idx = netbk->pending_ring[index];
+               index = pending_index(vif->pending_cons);
+               pending_idx = vif->pending_ring[index];
 
                data_len = (txreq.size > PKT_PROT_LEN &&
                            ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
@@ -1458,7 +1268,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
                if (unlikely(skb == NULL)) {
                        netdev_dbg(vif->dev,
                                   "Can't allocate a skb in start_xmit.\n");
-                       netbk_tx_err(vif, &txreq, idx);
+                       xenvif_tx_err(vif, &txreq, idx);
                        break;
                }
 
@@ -1469,19 +1279,20 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
                        struct xen_netif_extra_info *gso;
                        gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
 
-                       if (netbk_set_skb_gso(vif, skb, gso)) {
-                               /* Failure in netbk_set_skb_gso is fatal. */
+                       if (xenvif_set_skb_gso(vif, skb, gso)) {
+                               /* Failure in xenvif_set_skb_gso is fatal. */
                                kfree_skb(skb);
-                               continue;
+                               /* XXX: break or continue? */
+                               break;
                        }
                }
 
                /* XXX could copy straight to head */
-               page = xen_netbk_alloc_page(netbk, pending_idx);
+               page = xenvif_alloc_page(vif, pending_idx);
                if (!page) {
                        kfree_skb(skb);
-                       netbk_tx_err(vif, &txreq, idx);
-                       continue;
+                       xenvif_tx_err(vif, &txreq, idx);
+                       break;
                }
 
                gop->source.u.ref = txreq.gref;
@@ -1497,10 +1308,9 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 
                gop++;
 
-               memcpy(&netbk->pending_tx_info[pending_idx].req,
+               memcpy(&vif->pending_tx_info[pending_idx].req,
                       &txreq, sizeof(txreq));
-               netbk->pending_tx_info[pending_idx].vif = vif;
-               netbk->pending_tx_info[pending_idx].head = index;
+               vif->pending_tx_info[pending_idx].head = index;
                *((u16 *)skb->data) = pending_idx;
 
                __skb_put(skb, data_len);
@@ -1515,46 +1325,45 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
                                             INVALID_PENDING_IDX);
                }
 
-               netbk->pending_cons++;
+               vif->pending_cons++;
 
-               request_gop = xen_netbk_get_requests(netbk, vif,
-                                                    skb, txfrags, gop);
+               request_gop = xenvif_get_requests(vif, skb, txfrags, gop);
                if (request_gop == NULL) {
                        kfree_skb(skb);
-                       netbk_tx_err(vif, &txreq, idx);
-                       continue;
+                       xenvif_tx_err(vif, &txreq, idx);
+                       break;
                }
                gop = request_gop;
 
-               __skb_queue_tail(&netbk->tx_queue, skb);
+               __skb_queue_tail(&vif->tx_queue, skb);
 
                vif->tx.req_cons = idx;
-               xen_netbk_check_rx_xenvif(vif);
 
-               if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops))
+               if ((gop-vif->tx_copy_ops) >= ARRAY_SIZE(vif->tx_copy_ops))
                        break;
        }
 
-       return gop - netbk->tx_copy_ops;
+       return gop - vif->tx_copy_ops;
 }
 
-static void xen_netbk_tx_submit(struct xen_netbk *netbk)
+
+static int xenvif_tx_submit(struct xenvif *vif, int budget)
 {
-       struct gnttab_copy *gop = netbk->tx_copy_ops;
+       struct gnttab_copy *gop = vif->tx_copy_ops;
        struct sk_buff *skb;
+       int work_done = 0;
 
-       while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) {
+       while (work_done < budget &&
+              (skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
                struct xen_netif_tx_request *txp;
-               struct xenvif *vif;
                u16 pending_idx;
                unsigned data_len;
 
                pending_idx = *((u16 *)skb->data);
-               vif = netbk->pending_tx_info[pending_idx].vif;
-               txp = &netbk->pending_tx_info[pending_idx].req;
+               txp = &vif->pending_tx_info[pending_idx].req;
 
                /* Check the remap error code. */
-               if (unlikely(xen_netbk_tx_check_gop(netbk, skb, &gop))) {
+               if (unlikely(xenvif_tx_check_gop(vif, skb, &gop))) {
                        netdev_dbg(vif->dev, "netback grant failed.\n");
                        skb_shinfo(skb)->nr_frags = 0;
                        kfree_skb(skb);
@@ -1563,7 +1372,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
 
                data_len = skb->len;
                memcpy(skb->data,
-                      (void *)(idx_to_kaddr(netbk, pending_idx)|txp->offset),
+                      (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset),
                       data_len);
                if (data_len < txp->size) {
                        /* Append the packet payload as a fragment. */
@@ -1571,7 +1380,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
                        txp->size -= data_len;
                } else {
                        /* Schedule a response immediately. */
-                       xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+                       xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
                }
 
                if (txp->flags & XEN_NETTXF_csum_blank)
@@ -1579,7 +1388,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
                else if (txp->flags & XEN_NETTXF_data_validated)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-               xen_netbk_fill_frags(netbk, skb);
+               xenvif_fill_frags(vif, skb);
 
                /*
                 * If the initial fragment was < PKT_PROT_LEN then
@@ -1607,53 +1416,61 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
                vif->dev->stats.rx_bytes += skb->len;
                vif->dev->stats.rx_packets++;
 
-               xenvif_receive_skb(vif, skb);
+               work_done++;
+
+               netif_receive_skb(skb);
        }
+
+       return work_done;
 }
 
 /* Called after netfront has transmitted */
-static void xen_netbk_tx_action(struct xen_netbk *netbk)
+int xenvif_tx_action(struct xenvif *vif, int budget)
 {
        unsigned nr_gops;
+       int work_done;
+
+       if (unlikely(!tx_work_todo(vif)))
+               return 0;
 
-       nr_gops = xen_netbk_tx_build_gops(netbk);
+       nr_gops = xenvif_tx_build_gops(vif);
 
        if (nr_gops == 0)
-               return;
+               return 0;
+
+       gnttab_batch_copy(vif->tx_copy_ops, nr_gops);
 
-       gnttab_batch_copy(netbk->tx_copy_ops, nr_gops);
+       work_done = xenvif_tx_submit(vif, nr_gops);
 
-       xen_netbk_tx_submit(netbk);
+       return work_done;
 }
 
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
                                  u8 status)
 {
-       struct xenvif *vif;
        struct pending_tx_info *pending_tx_info;
        pending_ring_idx_t head;
        u16 peek; /* peek into next tx request */
 
-       BUG_ON(netbk->mmap_pages[pending_idx] == (void *)(~0UL));
+       BUG_ON(vif->mmap_pages[pending_idx] == (void *)(~0UL));
 
        /* Already complete? */
-       if (netbk->mmap_pages[pending_idx] == NULL)
+       if (vif->mmap_pages[pending_idx] == NULL)
                return;
 
-       pending_tx_info = &netbk->pending_tx_info[pending_idx];
+       pending_tx_info = &vif->pending_tx_info[pending_idx];
 
-       vif = pending_tx_info->vif;
        head = pending_tx_info->head;
 
-       BUG_ON(!pending_tx_is_head(netbk, head));
-       BUG_ON(netbk->pending_ring[pending_index(head)] != pending_idx);
+       BUG_ON(!pending_tx_is_head(vif, head));
+       BUG_ON(vif->pending_ring[pending_index(head)] != pending_idx);
 
        do {
                pending_ring_idx_t index;
                pending_ring_idx_t idx = pending_index(head);
-               u16 info_idx = netbk->pending_ring[idx];
+               u16 info_idx = vif->pending_ring[idx];
 
-               pending_tx_info = &netbk->pending_tx_info[info_idx];
+               pending_tx_info = &vif->pending_tx_info[info_idx];
                make_tx_response(vif, &pending_tx_info->req, status);
 
                /* Setting any number other than
@@ -1662,18 +1479,15 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
                 */
                pending_tx_info->head = 0;
 
-               index = pending_index(netbk->pending_prod++);
-               netbk->pending_ring[index] = netbk->pending_ring[info_idx];
+               index = pending_index(vif->pending_prod++);
+               vif->pending_ring[index] = vif->pending_ring[info_idx];
 
-               xenvif_put(vif);
+               peek = vif->pending_ring[pending_index(++head)];
 
-               peek = netbk->pending_ring[pending_index(++head)];
+       } while (!pending_tx_is_head(vif, peek));
 
-       } while (!pending_tx_is_head(netbk, peek));
-
-       netbk->mmap_pages[pending_idx]->mapping = 0;
-       put_page(netbk->mmap_pages[pending_idx]);
-       netbk->mmap_pages[pending_idx] = NULL;
+       put_page(vif->mmap_pages[pending_idx]);
+       vif->mmap_pages[pending_idx] = NULL;
 }
 
 
@@ -1721,46 +1535,23 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
        return resp;
 }
 
-static inline int rx_work_todo(struct xen_netbk *netbk)
+static inline int rx_work_todo(struct xenvif *vif)
 {
-       return !skb_queue_empty(&netbk->rx_queue);
+       return !skb_queue_empty(&vif->rx_queue);
 }
 
-static inline int tx_work_todo(struct xen_netbk *netbk)
+static inline int tx_work_todo(struct xenvif *vif)
 {
 
-       if ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX
-            < MAX_PENDING_REQS) &&
-            !list_empty(&netbk->net_schedule_list))
+       if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) &&
+           (nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
+            < MAX_PENDING_REQS))
                return 1;
 
        return 0;
 }
 
-static int xen_netbk_kthread(void *data)
-{
-       struct xen_netbk *netbk = data;
-       while (!kthread_should_stop()) {
-               wait_event_interruptible(netbk->wq,
-                               rx_work_todo(netbk) ||
-                               tx_work_todo(netbk) ||
-                               kthread_should_stop());
-               cond_resched();
-
-               if (kthread_should_stop())
-                       break;
-
-               if (rx_work_todo(netbk))
-                       xen_netbk_rx_action(netbk);
-
-               if (tx_work_todo(netbk))
-                       xen_netbk_tx_action(netbk);
-       }
-
-       return 0;
-}
-
-void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
+void xenvif_unmap_frontend_rings(struct xenvif *vif)
 {
        if (vif->tx.sring)
                xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
@@ -1770,9 +1561,9 @@ void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
                                        vif->rx.sring);
 }
 
-int xen_netbk_map_frontend_rings(struct xenvif *vif,
-                                grant_ref_t tx_ring_ref,
-                                grant_ref_t rx_ring_ref)
+int xenvif_map_frontend_rings(struct xenvif *vif,
+                             grant_ref_t tx_ring_ref,
+                             grant_ref_t rx_ring_ref)
 {
        void *addr;
        struct xen_netif_tx_sring *txs;
@@ -1801,15 +1592,33 @@ int xen_netbk_map_frontend_rings(struct xenvif *vif,
        return 0;
 
 err:
-       xen_netbk_unmap_frontend_rings(vif);
+       xenvif_unmap_frontend_rings(vif);
        return err;
 }
 
+int xenvif_kthread(void *data)
+{
+       struct xenvif *vif = data;
+
+       while (!kthread_should_stop()) {
+               wait_event_interruptible(vif->wq,
+                                        rx_work_todo(vif) ||
+                                        kthread_should_stop());
+               cond_resched();
+
+               if (kthread_should_stop())
+                       break;
+
+               if (rx_work_todo(vif))
+                       xenvif_rx_action(vif);
+       }
+
+       return 0;
+}
+
 static int __init netback_init(void)
 {
-       int i;
        int rc = 0;
-       int group;
 
        if (!xen_domain())
                return -ENODEV;
@@ -1821,48 +1630,6 @@ static int __init netback_init(void)
                fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
        }
 
-       xen_netbk_group_nr = num_online_cpus();
-       xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
-       if (!xen_netbk)
-               return -ENOMEM;
-
-       for (group = 0; group < xen_netbk_group_nr; group++) {
-               struct xen_netbk *netbk = &xen_netbk[group];
-               skb_queue_head_init(&netbk->rx_queue);
-               skb_queue_head_init(&netbk->tx_queue);
-
-               init_timer(&netbk->net_timer);
-               netbk->net_timer.data = (unsigned long)netbk;
-               netbk->net_timer.function = xen_netbk_alarm;
-
-               netbk->pending_cons = 0;
-               netbk->pending_prod = MAX_PENDING_REQS;
-               for (i = 0; i < MAX_PENDING_REQS; i++)
-                       netbk->pending_ring[i] = i;
-
-               init_waitqueue_head(&netbk->wq);
-               netbk->task = kthread_create(xen_netbk_kthread,
-                                            (void *)netbk,
-                                            "netback/%u", group);
-
-               if (IS_ERR(netbk->task)) {
-                       printk(KERN_ALERT "kthread_create() fails at netback\n");
-                       del_timer(&netbk->net_timer);
-                       rc = PTR_ERR(netbk->task);
-                       goto failed_init;
-               }
-
-               kthread_bind(netbk->task, group);
-
-               INIT_LIST_HEAD(&netbk->net_schedule_list);
-
-               spin_lock_init(&netbk->net_schedule_list_lock);
-
-               atomic_set(&netbk->netfront_count, 0);
-
-               wake_up_process(netbk->task);
-       }
-
        rc = xenvif_xenbus_init();
        if (rc)
                goto failed_init;
@@ -1870,35 +1637,14 @@ static int __init netback_init(void)
        return 0;
 
 failed_init:
-       while (--group >= 0) {
-               struct xen_netbk *netbk = &xen_netbk[group];
-               del_timer(&netbk->net_timer);
-               kthread_stop(netbk->task);
-       }
-       vfree(xen_netbk);
        return rc;
-
 }
 
 module_init(netback_init);
 
 static void __exit netback_fini(void)
 {
-       int i, j;
-
        xenvif_xenbus_fini();
-
-       for (i = 0; i < xen_netbk_group_nr; i++) {
-               struct xen_netbk *netbk = &xen_netbk[i];
-               del_timer_sync(&netbk->net_timer);
-               kthread_stop(netbk->task);
-               for (j = 0; j < MAX_PENDING_REQS; j++) {
-                       if (netbk->mmap_pages[i])
-                               __free_page(netbk->mmap_pages[i]);
-               }
-       }
-
-       vfree(xen_netbk);
 }
 module_exit(netback_fini);
 
-- 
1.7.10.4

