|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH RFC] xen-netback: remove guest RX path dependence on MAX_SKB_FRAGS
This dependence is undesirable and logically incorrect.
It's undesirable because the Xen network protocol should not depend on an
OS-specific constant.
It's incorrect because the number of ring slots required doesn't correspond
to the number of frags an SKB has (consider compound page frags).
This patch removes this dependence by correctly counting the ring slots
required.
Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
drivers/net/xen-netback/interface.c | 12 +++++++---
drivers/net/xen-netback/netback.c | 41 +++++++++++++----------------------
2 files changed, 24 insertions(+), 29 deletions(-)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 087d2db..5c10e87 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -96,18 +96,24 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
+ unsigned int ring_slots_required;
BUG_ON(skb->dev != dev);
if (vif->netbk == NULL)
goto drop;
+ ring_slots_required = xen_netbk_count_skb_slots(vif, skb);
+
+ /* Reserve ring slots for the worst-case number of fragments. */
+ vif->rx_req_cons_peek += ring_slots_required;
+
/* Drop the packet if the target domain has no receive buffers. */
- if (!xenvif_rx_schedulable(vif))
+ if (!xenvif_rx_schedulable(vif)) {
+ vif->rx_req_cons_peek -= ring_slots_required;
goto drop;
+ }
- /* Reserve ring slots for the worst-case number of fragments. */
- vif->rx_req_cons_peek += xen_netbk_count_skb_slots(vif, skb);
xenvif_get(vif);
if (vif->can_queue && xen_netbk_must_stop_queue(vif))
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 64828de..1b2b71b 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -289,24 +289,12 @@ static void xen_netbk_kick_thread(struct xen_netbk *netbk)
wake_up(&netbk->wq);
}
-static int max_required_rx_slots(struct xenvif *vif)
-{
- int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
-
- /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
- if (vif->can_sg || vif->gso || vif->gso_prefix)
- max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
-
- return max;
-}
-
int xen_netbk_rx_ring_full(struct xenvif *vif)
{
- RING_IDX peek = vif->rx_req_cons_peek;
- RING_IDX needed = max_required_rx_slots(vif);
+ RING_IDX peek = vif->rx_req_cons_peek;
- return ((vif->rx.sring->req_prod - peek) < needed) ||
- ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) <
needed);
+ return ((vif->rx.sring->req_prod < peek) ||
+ (vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE < peek));
}
int xen_netbk_must_stop_queue(struct xenvif *vif)
@@ -314,8 +302,7 @@ int xen_netbk_must_stop_queue(struct xenvif *vif)
if (!xen_netbk_rx_ring_full(vif))
return 0;
- vif->rx.sring->req_event = vif->rx_req_cons_peek +
- max_required_rx_slots(vif);
+ vif->rx.sring->req_event = vif->rx_req_cons_peek + 1;
mb(); /* request notification /then/ check the queue */
return xen_netbk_rx_ring_full(vif);
@@ -675,7 +662,6 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
struct sk_buff *skb;
LIST_HEAD(notify);
int ret;
- int nr_frags;
int count;
unsigned long offset;
struct skb_cb_overlay *sco;
@@ -690,20 +676,23 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
count = 0;
while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
+ unsigned int ring_slots_required;
vif = netdev_priv(skb->dev);
- nr_frags = skb_shinfo(skb)->nr_frags;
sco = (struct skb_cb_overlay *)skb->cb;
- sco->meta_slots_used = netbk_gop_skb(skb, &npo);
-
- count += nr_frags + 1;
- __skb_queue_tail(&rxq, skb);
+ ring_slots_required = xen_netbk_count_skb_slots(vif, skb);
- /* Filled the batch queue? */
- /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
- if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
+ if (count + ring_slots_required >= XEN_NETIF_RX_RING_SIZE) {
+ skb_queue_head(&netbk->rx_queue, skb);
break;
+ }
+
+ count += ring_slots_required;
+
+ sco->meta_slots_used = netbk_gop_skb(skb, &npo);
+
+ __skb_queue_tail(&rxq, skb);
}
BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));
--
1.7.10.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |