Displaying 3 results from an estimated 3 matches for "meta_slots_used".
2013 Jul 02
3
[PATCH RFC] xen-netback: remove guest RX path dependence on MAX_SKB_FRAGS
...xen_netbk_rx_action(struct xen_netbk *netbk)
count = 0;
while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
+ unsigned int ring_slots_required;
vif = netdev_priv(skb->dev);
- nr_frags = skb_shinfo(skb)->nr_frags;
sco = (struct skb_cb_overlay *)skb->cb;
- sco->meta_slots_used = netbk_gop_skb(skb, &npo);
-
- count += nr_frags + 1;
- __skb_queue_tail(&rxq, skb);
+ ring_slots_required = xen_netbk_count_skb_slots(vif, skb);
- /* Filled the batch queue? */
- /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
- if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_R...
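The excerpt above replaces the fixed MAX_SKB_FRAGS worst-case reservation with a per-skb slot count. A minimal sketch of what the changed batching loop could look like, reusing the identifiers visible in the diff; the requeue handling and exact bound are assumptions for illustration, not the actual netback code:

/* Illustrative only: batch skbs by the ring slots they actually need,
 * instead of reserving MAX_SKB_FRAGS slots for every skb. */
while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
	unsigned int ring_slots_required;

	vif = netdev_priv(skb->dev);
	ring_slots_required = xen_netbk_count_skb_slots(vif, skb);

	/* Stop filling the batch once this skb no longer fits. */
	if (count + ring_slots_required > XEN_NETIF_RX_RING_SIZE) {
		skb_queue_head(&netbk->rx_queue, skb); /* requeue for later */
		break;
	}

	count += ring_slots_required;
	__skb_queue_tail(&rxq, skb);
}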
2013 Jul 09
20
[PATCH 1/1] xen/netback: correctly calculate required slots of skb.
When counting the required slots for an skb, netback directly uses DIV_ROUND_UP to get
the slots required by the header data. This is wrong when the offset of the header data
within its page is not zero, and it is also inconsistent with the subsequent calculation
of required slots in netbk_gop_skb.
In netbk_gop_skb, the required slots are calculated from the offset and len of the header
data within the page. It is possible that required slots
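To see why the offset matters: len bytes starting at a non-zero offset within a page can straddle one more page boundary than len alone suggests. A self-contained sketch (PAGE_SIZE and DIV_ROUND_UP defined locally for illustration; this is not the actual netback helper):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Slots needed when 'len' bytes start at 'offset' within a page. */
static unsigned long slots_with_offset(unsigned long offset, unsigned long len)
{
	return DIV_ROUND_UP(offset + len, PAGE_SIZE);
}

int main(void)
{
	unsigned long offset = 2048, len = 4096;

	/* Naive count ignores the offset and reports 1 slot... */
	printf("naive:   %lu\n", DIV_ROUND_UP(len, PAGE_SIZE));
	/* ...but the data actually crosses a page boundary: 2 slots. */
	printf("correct: %lu\n", slots_with_offset(offset, len));
	return 0;
}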
2013 Feb 15
1
[PATCH 7/8] netback: split event channels support
...bk_rx_action(struct xen_netbk *netbk)
{
struct xenvif *vif = NULL, *tmp;
s8 status;
- u16 irq, flags;
+ u16 flags;
struct xen_netif_rx_response *resp;
struct sk_buff_head rxq;
struct sk_buff *skb;
@@ -750,7 +750,6 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
sco->meta_slots_used);
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
- irq = vif->irq;
if (ret && list_empty(&vif->notify_list))
list_add_tail(&vif->notify_list, &notify);
@@ -762,7 +761,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
}
list_...
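The diff above drops the single cached irq because, with split event channels, RX notifications go to a per-direction event channel rather than one shared interrupt. A rough sketch of how the deferred notification loop could look afterwards (the rx_irq field name is assumed from the split-event-channel design, not quoted from this excerpt):

/* Illustrative only: after pushing all RX responses, notify each vif
 * that asked for it, using its RX-specific event channel irq. */
list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
	notify_remote_via_irq(vif->rx_irq);
	list_del_init(&vif->notify_list);
}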