Zoltan Kiss
2013-Dec-12 23:48 UTC
[PATCH net-next v2 1/9] xen-netback: Introduce TX grant map definitions
This patch contains the new definitions necessary for grant mapping.

v2:
- move unmapping to separate thread. The NAPI instance has to be scheduled
  even from thread context, which can cause huge delays
- that causes unfortunately bigger struct xenvif
- store grant handle after checking validity

Signed-off-by: Zoltan Kiss <zoltan.kiss@citrix.com>

---
 drivers/net/xen-netback/common.h    |   30 ++++++-
 drivers/net/xen-netback/interface.c |    1 +
 drivers/net/xen-netback/netback.c   |  164 +++++++++++++++++++++++++++++++++++
 3 files changed, 194 insertions(+), 1 deletion(-)

diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index ba30a6d..33cb12c 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -79,6 +79,11 @@ struct pending_tx_info {
				  * if it is head of one or more tx
				  * reqs
				  */
+	/* callback data for released SKBs. The callback is always
+	 * xenvif_zerocopy_callback, ctx points to the next fragment, desc
+	 * contains the pending_idx
+	 */
+	struct ubuf_info callback_struct;
 };
 
 #define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
@@ -101,6 +106,8 @@ struct xenvif_rx_meta {
 
 #define MAX_PENDING_REQS 256
 
+#define NETBACK_INVALID_HANDLE -1
+
 struct xenvif {
 	/* Unique identifier for this interface. */
 	domid_t domid;
@@ -119,13 +126,26 @@ struct xenvif {
 	pending_ring_idx_t pending_cons;
 	u16 pending_ring[MAX_PENDING_REQS];
 	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
+	grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
 
 	/* Coalescing tx requests before copying makes number of grant
 	 * copy ops greater or equal to number of slots required. In
 	 * worst case a tx request consumes 2 gnttab_copy.
 	 */
 	struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];
-
+	struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
+	struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
+	/* passed to gnttab_[un]map_refs with pages under (un)mapping */
+	struct page *pages_to_map[MAX_PENDING_REQS];
+	struct page *pages_to_unmap[MAX_PENDING_REQS];
+
+	spinlock_t dealloc_lock;
+	spinlock_t response_lock;
+	pending_ring_idx_t dealloc_prod;
+	pending_ring_idx_t dealloc_cons;
+	u16 dealloc_ring[MAX_PENDING_REQS];
+	struct task_struct *dealloc_task;
+	wait_queue_head_t dealloc_wq;
 
 	/* Use kthread for guest RX */
 	struct task_struct *task;
@@ -215,6 +235,8 @@ int xenvif_tx_action(struct xenvif *vif, int budget);
 int xenvif_kthread(void *data);
 void xenvif_kick_thread(struct xenvif *vif);
 
+int xenvif_dealloc_kthread(void *data);
+
 /* Determine whether the needed number of slots (req) are available,
  * and set req_event if not.
  */
@@ -222,6 +244,12 @@ bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed);
 
 void xenvif_stop_queue(struct xenvif *vif);
 
+/* Callback from stack when TX packet can be released */
+void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
+
+/* Unmap a pending page, usually has to be called before xenvif_idx_release */
+void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx);
+
 extern bool separate_tx_rx_irq;
 
 #endif /* __XEN_NETBACK__COMMON_H__ */
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 1dcb960..1c27e9e 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -37,6 +37,7 @@
 
 #include <xen/events.h>
 #include <asm/xen/hypercall.h>
+#include <xen/balloon.h>
 
 #define XENVIF_QUEUE_LENGTH 32
 #define XENVIF_NAPI_WEIGHT 64
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index c1b7a42..3ddc474 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -772,6 +772,20 @@ static struct page *xenvif_alloc_page(struct xenvif *vif,
 	return page;
 }
 
+static inline void xenvif_tx_create_gop(struct xenvif *vif, u16 pending_idx,
+					struct xen_netif_tx_request *txp,
+					struct gnttab_map_grant_ref *gop)
+{
+	vif->pages_to_map[gop-vif->tx_map_ops] = vif->mmap_pages[pending_idx];
+	gnttab_set_map_op(gop, idx_to_kaddr(vif, pending_idx),
+			  GNTMAP_host_map | GNTMAP_readonly,
+			  txp->gref, vif->domid);
+
+	memcpy(&vif->pending_tx_info[pending_idx].req, txp,
+	       sizeof(*txp));
+
+}
+
 static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
					       struct sk_buff *skb,
					       struct xen_netif_tx_request *txp,
@@ -1593,6 +1607,106 @@ static int xenvif_tx_submit(struct xenvif *vif)
 	return work_done;
 }
 
+void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
+{
+	unsigned long flags;
+	pending_ring_idx_t index;
+	u16 pending_idx = ubuf->desc;
+	struct pending_tx_info *temp =
+		container_of(ubuf, struct pending_tx_info, callback_struct);
+	struct xenvif *vif =
+		container_of(temp - pending_idx, struct xenvif,
+			     pending_tx_info[0]);
+
+	spin_lock_irqsave(&vif->dealloc_lock, flags);
+	do {
+		pending_idx = ubuf->desc;
+		ubuf = (struct ubuf_info *) ubuf->ctx;
+		index = pending_index(vif->dealloc_prod);
+		vif->dealloc_ring[index] = pending_idx;
+		/* Sync with xenvif_tx_action_dealloc:
+		 * insert idx then incr producer.
+		 */
+		smp_wmb();
+		vif->dealloc_prod++;
+	} while (ubuf);
+	wake_up(&vif->dealloc_wq);
+	spin_unlock_irqrestore(&vif->dealloc_lock, flags);
+}
+
+static inline void xenvif_tx_dealloc_action(struct xenvif *vif)
+{
+	struct gnttab_unmap_grant_ref *gop;
+	pending_ring_idx_t dc, dp;
+	u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
+	unsigned int i = 0;
+
+	dc = vif->dealloc_cons;
+	gop = vif->tx_unmap_ops;
+
+	/* Free up any grants we have finished using */
+	do {
+		dp = vif->dealloc_prod;
+
+		/* Ensure we see all indices enqueued by netif_idx_release(). */
+		smp_rmb();
+
+		while (dc != dp) {
+			pending_idx =
+				vif->dealloc_ring[pending_index(dc++)];
+
+			/* Already unmapped? */
+			if (vif->grant_tx_handle[pending_idx] ==
+				NETBACK_INVALID_HANDLE) {
+				netdev_err(vif->dev,
+					   "Trying to unmap invalid handle! "
+					   "pending_idx: %x\n", pending_idx);
+				continue;
+			}
+
+			pending_idx_release[gop-vif->tx_unmap_ops] =
+				pending_idx;
+			vif->pages_to_unmap[gop-vif->tx_unmap_ops] =
+				vif->mmap_pages[pending_idx];
+			gnttab_set_unmap_op(gop,
+					    idx_to_kaddr(vif, pending_idx),
+					    GNTMAP_host_map,
+					    vif->grant_tx_handle[pending_idx]);
+			vif->grant_tx_handle[pending_idx] =
+				NETBACK_INVALID_HANDLE;
+			++gop;
+		}
+
+	} while (dp != vif->dealloc_prod);
+
+	vif->dealloc_cons = dc;
+
+	if (gop - vif->tx_unmap_ops > 0) {
+		int ret;
+		ret = gnttab_unmap_refs(vif->tx_unmap_ops,
+					NULL,
+					vif->pages_to_unmap,
+					gop - vif->tx_unmap_ops);
+		if (ret) {
+			netdev_err(vif->dev, "Unmap fail: nr_ops %x ret %d\n",
+				   gop - vif->tx_unmap_ops, ret);
+			for (i = 0; i < gop - vif->tx_unmap_ops; ++i) {
+				netdev_err(vif->dev,
+					   " host_addr: %llx handle: %x status: %d\n",
+					   gop[i].host_addr,
+					   gop[i].handle,
+					   gop[i].status);
+			}
+			BUG();
+		}
+	}
+
+	for (i = 0; i < gop - vif->tx_unmap_ops; ++i)
+		xenvif_idx_release(vif, pending_idx_release[i],
+				   XEN_NETIF_RSP_OKAY);
+}
+
+
 /* Called after netfront has transmitted */
 int xenvif_tx_action(struct xenvif *vif, int budget)
 {
@@ -1659,6 +1773,26 @@ static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
 	vif->mmap_pages[pending_idx] = NULL;
 }
 
+void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx)
+{
+	int ret;
+	if (vif->grant_tx_handle[pending_idx] == NETBACK_INVALID_HANDLE) {
+		netdev_err(vif->dev,
+			   "Trying to unmap invalid handle! pending_idx: %x\n",
+			   pending_idx);
+		return;
+	}
+	gnttab_set_unmap_op(&vif->tx_unmap_ops[0],
+			    idx_to_kaddr(vif, pending_idx),
+			    GNTMAP_host_map,
+			    vif->grant_tx_handle[pending_idx]);
+	ret = gnttab_unmap_refs(vif->tx_unmap_ops,
+				NULL,
+				&vif->mmap_pages[pending_idx],
+				1);
+	BUG_ON(ret);
+	vif->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
+}
 
 static void make_tx_response(struct xenvif *vif,
			     struct xen_netif_tx_request *txp,
@@ -1720,6 +1854,14 @@ static inline int tx_work_todo(struct xenvif *vif)
 	return 0;
 }
 
+static inline int tx_dealloc_work_todo(struct xenvif *vif)
+{
+	if (vif->dealloc_cons != vif->dealloc_prod)
+		return 1;
+
+	return 0;
+}
+
 void xenvif_unmap_frontend_rings(struct xenvif *vif)
 {
 	if (vif->tx.sring)
@@ -1808,6 +1950,28 @@ int xenvif_kthread(void *data)
 	return 0;
 }
 
+int xenvif_dealloc_kthread(void *data)
+{
+	struct xenvif *vif = data;
+
+	while (!kthread_should_stop()) {
+		wait_event_interruptible(vif->dealloc_wq,
+					 tx_dealloc_work_todo(vif) ||
+					 kthread_should_stop());
+		if (kthread_should_stop())
+			break;
+
+		xenvif_tx_dealloc_action(vif);
+		cond_resched();
+	}
+
+	/* Unmap anything remaining */
+	if (tx_dealloc_work_todo(vif))
+		xenvif_tx_dealloc_action(vif);
+
+	return 0;
+}
+
 static int __init netback_init(void)
 {
 	int rc = 0;
Wei Liu
2013-Dec-13 15:31 UTC
Re: [PATCH net-next v2 1/9] xen-netback: Introduce TX grant map definitions
On Thu, Dec 12, 2013 at 11:48:09PM +0000, Zoltan Kiss wrote:
> This patch contains the new definitions necessary for grant mapping.
>
> v2:
> - move unmapping to separate thread. The NAPI instance has to be scheduled
>   even from thread context, which can cause huge delays
> - that causes unfortunately bigger struct xenvif
> - store grant handle after checking validity
>

If the size of xenvif really becomes a problem, you can try to make
scratch space like struct gnttab_copy per-cpu. The downside is that this
approach requires a lot of coding and careful guarding against race
conditions. You would need to consider cost vs. benefit.

> Signed-off-by: Zoltan Kiss <zoltan.kiss@citrix.com>
>
> ---
[...]
>  #define XENVIF_QUEUE_LENGTH 32
>  #define XENVIF_NAPI_WEIGHT 64
> diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
> index c1b7a42..3ddc474 100644
> --- a/drivers/net/xen-netback/netback.c
> +++ b/drivers/net/xen-netback/netback.c
> @@ -772,6 +772,20 @@ static struct page *xenvif_alloc_page(struct xenvif *vif,
>  	return page;
>  }
>
> +static inline void xenvif_tx_create_gop(struct xenvif *vif, u16 pending_idx,
> +					struct xen_netif_tx_request *txp,
> +					struct gnttab_map_grant_ref *gop)
> +{
> +	vif->pages_to_map[gop-vif->tx_map_ops] = vif->mmap_pages[pending_idx];
> +	gnttab_set_map_op(gop, idx_to_kaddr(vif, pending_idx),
> +			  GNTMAP_host_map | GNTMAP_readonly,
> +			  txp->gref, vif->domid);
> +
> +	memcpy(&vif->pending_tx_info[pending_idx].req, txp,
> +	       sizeof(*txp));
> +
> +}
> +

This helper function is not used until the next patch. Probably you can
move it to the second patch.

The same applies to other helper functions as well. Move them to the
patch where they are used. It would be easier for people to review.

>  static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
>  					       struct sk_buff *skb,
>  					       struct xen_netif_tx_request *txp,
> @@ -1593,6 +1607,106 @@ static int xenvif_tx_submit(struct xenvif *vif)
>  	return work_done;
>  }
>
> +void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
> +{

Do we care about zerocopy_success? I don't see it used in this function.

> +	unsigned long flags;
> +	pending_ring_idx_t index;
> +	u16 pending_idx = ubuf->desc;
> +	struct pending_tx_info *temp =
> +		container_of(ubuf, struct pending_tx_info, callback_struct);
> +	struct xenvif *vif =
> +		container_of(temp - pending_idx, struct xenvif,
> +			     pending_tx_info[0]);
> +

The third parameter to container_of should be the name of the member
within the struct.

> +	spin_lock_irqsave(&vif->dealloc_lock, flags);
> +	do {
> +		pending_idx = ubuf->desc;
> +		ubuf = (struct ubuf_info *) ubuf->ctx;
> +		index = pending_index(vif->dealloc_prod);
> +		vif->dealloc_ring[index] = pending_idx;
> +		/* Sync with xenvif_tx_action_dealloc:
> +		 * insert idx then incr producer.
> +		 */
> +		smp_wmb();
> +		vif->dealloc_prod++;
> +	} while (ubuf);
> +	wake_up(&vif->dealloc_wq);
> +	spin_unlock_irqrestore(&vif->dealloc_lock, flags);
> +}
> +
> +static inline void xenvif_tx_dealloc_action(struct xenvif *vif)
> +{
> +	struct gnttab_unmap_grant_ref *gop;
> +	pending_ring_idx_t dc, dp;
> +	u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
> +	unsigned int i = 0;
> +
> +	dc = vif->dealloc_cons;
> +	gop = vif->tx_unmap_ops;
> +
> +	/* Free up any grants we have finished using */
> +	do {
> +		dp = vif->dealloc_prod;
> +
> +		/* Ensure we see all indices enqueued by netif_idx_release(). */

There is no netif_idx_release in netback code. :-)

> +		smp_rmb();
> +
> +		while (dc != dp) {
> +			pending_idx =
> +				vif->dealloc_ring[pending_index(dc++)];
> +
> +			/* Already unmapped? */
> +			if (vif->grant_tx_handle[pending_idx] ==
> +				NETBACK_INVALID_HANDLE) {
> +				netdev_err(vif->dev,
> +					   "Trying to unmap invalid handle! "
> +					   "pending_idx: %x\n", pending_idx);
> +				continue;
> +			}

Should this be BUG_ON? AIUI this kthread should be the only one doing
unmap, right?

> +
> +			pending_idx_release[gop-vif->tx_unmap_ops] =
> +				pending_idx;
> +			vif->pages_to_unmap[gop-vif->tx_unmap_ops] =
> +				vif->mmap_pages[pending_idx];
> +			gnttab_set_unmap_op(gop,
> +					    idx_to_kaddr(vif, pending_idx),
> +					    GNTMAP_host_map,
> +					    vif->grant_tx_handle[pending_idx]);
> +			vif->grant_tx_handle[pending_idx] =
> +				NETBACK_INVALID_HANDLE;
> +			++gop;
> +		}
> +
[...]
> +}
>
>  static void make_tx_response(struct xenvif *vif,
>  			     struct xen_netif_tx_request *txp,
> @@ -1720,6 +1854,14 @@ static inline int tx_work_todo(struct xenvif *vif)
>  	return 0;
>  }
>
> +static inline int tx_dealloc_work_todo(struct xenvif *vif)

static inline bool

Wei.
Zoltan Kiss
2013-Dec-13 18:22 UTC
Re: [PATCH net-next v2 1/9] xen-netback: Introduce TX grant map definitions
On 13/12/13 15:31, Wei Liu wrote:
> On Thu, Dec 12, 2013 at 11:48:09PM +0000, Zoltan Kiss wrote:
>> This patch contains the new definitions necessary for grant mapping.
>>
>> v2:
>> - move unmapping to separate thread. The NAPI instance has to be scheduled
>>   even from thread context, which can cause huge delays
>> - that causes unfortunately bigger struct xenvif
>> - store grant handle after checking validity
>>
>
> If the size of xenvif really becomes a problem, you can try to make
> scratch space like struct gnttab_copy per-cpu. The downside is that this
> approach requires a lot of coding and careful guarding against race
> conditions. You would need to consider cost vs. benefit.

I mentioned this because for the first series I had comments that I
should be more vigilant about this. At that time there was a problem
with struct xenvif allocation, which has been solved by now. My quick
calculation showed this patch will increase the size by ~15kb.

>
>> Signed-off-by: Zoltan Kiss <zoltan.kiss@citrix.com>
>>
>> ---
> [...]
>>  #define XENVIF_QUEUE_LENGTH 32
>>  #define XENVIF_NAPI_WEIGHT 64
>> diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
>> index c1b7a42..3ddc474 100644
>> --- a/drivers/net/xen-netback/netback.c
>> +++ b/drivers/net/xen-netback/netback.c
>> @@ -772,6 +772,20 @@ static struct page *xenvif_alloc_page(struct xenvif *vif,
>>  	return page;
>>  }
>>
>> +static inline void xenvif_tx_create_gop(struct xenvif *vif, u16 pending_idx,
>> +					struct xen_netif_tx_request *txp,
>> +					struct gnttab_map_grant_ref *gop)
>> +{
>> +	vif->pages_to_map[gop-vif->tx_map_ops] = vif->mmap_pages[pending_idx];
>> +	gnttab_set_map_op(gop, idx_to_kaddr(vif, pending_idx),
>> +			  GNTMAP_host_map | GNTMAP_readonly,
>> +			  txp->gref, vif->domid);
>> +
>> +	memcpy(&vif->pending_tx_info[pending_idx].req, txp,
>> +	       sizeof(*txp));
>> +
>> +}
>> +
>
> This helper function is not used until the next patch. Probably you can
> move it to the second patch.
>
> The same applies to other helper functions as well. Move them to the
> patch where they are used. It would be easier for people to review.

I just moved them here because the second patch is already huge, and I
couldn't find a way to split it up while keeping it bisectable and
logically consistent. As I mentioned, I welcome ideas about that.

>
>>  static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
>>  					       struct sk_buff *skb,
>>  					       struct xen_netif_tx_request *txp,
>> @@ -1593,6 +1607,106 @@ static int xenvif_tx_submit(struct xenvif *vif)
>>  	return work_done;
>>  }
>>
>> +void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
>> +{
>
> Do we care about zerocopy_success? I don't see it used in this function.

It will be used in the 5th patch. Anyway, it's part of the definition of
the zerocopy callback.

>
>> +	unsigned long flags;
>> +	pending_ring_idx_t index;
>> +	u16 pending_idx = ubuf->desc;
>> +	struct pending_tx_info *temp =
>> +		container_of(ubuf, struct pending_tx_info, callback_struct);
>> +	struct xenvif *vif =
>> +		container_of(temp - pending_idx, struct xenvif,
>> +			     pending_tx_info[0]);
>> +
>
> The third parameter to container_of should be the name of the member
> within the struct.

Here we have the pending_idx, so we get a pointer to the holding struct
pending_tx_info, then to the beginning of the pending_tx_info array
(temp - pending_idx), and from that to the struct xenvif. It's a bit
tricky and not straightforward, I admit :)

>
>> +	spin_lock_irqsave(&vif->dealloc_lock, flags);
>> +	do {
>> +		pending_idx = ubuf->desc;
>> +		ubuf = (struct ubuf_info *) ubuf->ctx;
>> +		index = pending_index(vif->dealloc_prod);
>> +		vif->dealloc_ring[index] = pending_idx;
>> +		/* Sync with xenvif_tx_action_dealloc:
>> +		 * insert idx then incr producer.
>> +		 */
>> +		smp_wmb();
>> +		vif->dealloc_prod++;
>> +	} while (ubuf);
>> +	wake_up(&vif->dealloc_wq);
>> +	spin_unlock_irqrestore(&vif->dealloc_lock, flags);
>> +}
>> +
>> +static inline void xenvif_tx_dealloc_action(struct xenvif *vif)
>> +{
>> +	struct gnttab_unmap_grant_ref *gop;
>> +	pending_ring_idx_t dc, dp;
>> +	u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
>> +	unsigned int i = 0;
>> +
>> +	dc = vif->dealloc_cons;
>> +	gop = vif->tx_unmap_ops;
>> +
>> +	/* Free up any grants we have finished using */
>> +	do {
>> +		dp = vif->dealloc_prod;
>> +
>> +		/* Ensure we see all indices enqueued by netif_idx_release(). */
>
> There is no netif_idx_release in netback code. :-)

Oh yes, that's from the classic code; it should say
xenvif_zerocopy_callback. I will fix it.

>
>> +		smp_rmb();
>> +
>> +		while (dc != dp) {
>> +			pending_idx =
>> +				vif->dealloc_ring[pending_index(dc++)];
>> +
>> +			/* Already unmapped? */
>> +			if (vif->grant_tx_handle[pending_idx] ==
>> +				NETBACK_INVALID_HANDLE) {
>> +				netdev_err(vif->dev,
>> +					   "Trying to unmap invalid handle! "
>> +					   "pending_idx: %x\n", pending_idx);
>> +				continue;
>> +			}
>
> Should this be BUG_ON? AIUI this kthread should be the only one doing
> unmap, right?

The NAPI instance can do it as well if it is a small packet that fits
into PKT_PROT_LEN. But still, this scenario shouldn't really happen; I
was just not sure we have to crash immediately. Maybe handle it as a
fatal error and destroy the vif?
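Unrolled step by step, the recovery Zoltan describes reads like this (the same logic as the patch, with hypothetical intermediate names added for clarity):

u16 pending_idx = ubuf->desc;

/* 1) ubuf is the callback_struct member of one pending_tx_info, so
 * container_of() recovers the surrounding array element
 */
struct pending_tx_info *info =
	container_of(ubuf, struct pending_tx_info, callback_struct);

/* 2) that element sits at index pending_idx, so stepping back
 * pending_idx elements lands on &vif->pending_tx_info[0]
 */
struct pending_tx_info *first = info - pending_idx;

/* 3) pending_tx_info[0] is itself a member of struct xenvif, so a
 * second container_of() recovers the enclosing vif; the third
 * argument here really is a member name, as the macro expects
 */
struct xenvif *vif =
	container_of(first, struct xenvif, pending_tx_info[0]);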
Wei Liu
2013-Dec-13 19:14 UTC
Re: [PATCH net-next v2 1/9] xen-netback: Introduce TX grant map definitions
On Fri, Dec 13, 2013 at 06:22:38PM +0000, Zoltan Kiss wrote:
> On 13/12/13 15:31, Wei Liu wrote:
> >On Thu, Dec 12, 2013 at 11:48:09PM +0000, Zoltan Kiss wrote:
> >>This patch contains the new definitions necessary for grant mapping.
> >>
> >>v2:
> >>- move unmapping to separate thread. The NAPI instance has to be scheduled
> >>  even from thread context, which can cause huge delays
> >>- that causes unfortunately bigger struct xenvif
> >>- store grant handle after checking validity
> >>
> >
> >If the size of xenvif really becomes a problem, you can try to make
> >scratch space like struct gnttab_copy per-cpu. The downside is that this
> >approach requires a lot of coding and careful guarding against race
> >conditions. You would need to consider cost vs. benefit.
>
> I mentioned this because for the first series I had comments that I
> should be more vigilant about this. At that time there was a problem
> with struct xenvif allocation, which has been solved by now. My quick
> calculation showed this patch will increase the size by ~15kb.
>

15kb doesn't seem a lot. And the fragmentation problem causing
allocation failure was fixed. So I guess this won't be a problem.

> >
> >>Signed-off-by: Zoltan Kiss <zoltan.kiss@citrix.com>
> >>
> >>---
> >[...]
> >>  #define XENVIF_QUEUE_LENGTH 32
> >>  #define XENVIF_NAPI_WEIGHT 64
> >>diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
> >>index c1b7a42..3ddc474 100644
> >>--- a/drivers/net/xen-netback/netback.c
> >>+++ b/drivers/net/xen-netback/netback.c
> >>@@ -772,6 +772,20 @@ static struct page *xenvif_alloc_page(struct xenvif *vif,
> >> 	return page;
> >> }
> >>
> >>+static inline void xenvif_tx_create_gop(struct xenvif *vif, u16 pending_idx,
> >>+					struct xen_netif_tx_request *txp,
> >>+					struct gnttab_map_grant_ref *gop)
> >>+{
> >>+	vif->pages_to_map[gop-vif->tx_map_ops] = vif->mmap_pages[pending_idx];
> >>+	gnttab_set_map_op(gop, idx_to_kaddr(vif, pending_idx),
> >>+			  GNTMAP_host_map | GNTMAP_readonly,
> >>+			  txp->gref, vif->domid);
> >>+
> >>+	memcpy(&vif->pending_tx_info[pending_idx].req, txp,
> >>+	       sizeof(*txp));
> >>+
> >>+}
> >>+
> >
> >This helper function is not used until the next patch. Probably you can
> >move it to the second patch.
> >
> >The same applies to other helper functions as well. Move them to the
> >patch where they are used. It would be easier for people to review.
>
> I just moved them here because the second patch is already huge, and I
> couldn't find a way to split it up while keeping it bisectable and
> logically consistent. As I mentioned, I welcome ideas about that.
>

My personal view is that for a new feature it is not the size of a
patch that matters most, it's the logic. So I'm fine with long
consolidated patches. The incremental changes you made make the patches
hard to review IMHO.

That's merely my personal opinion; let's see what other people say.

> >
> >>  static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
> >>  					       struct sk_buff *skb,
> >>  					       struct xen_netif_tx_request *txp,
> >>@@ -1593,6 +1607,106 @@ static int xenvif_tx_submit(struct xenvif *vif)
> >>  	return work_done;
> >>  }
> >>
> >>+void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
> >>+{
> >
> >Do we care about zerocopy_success? I don't see it used in this function.
>
> It will be used in the 5th patch. Anyway, it's part of the definition of
> the zerocopy callback.
>

Oh right, it is actually used. Then you should rearrange your series.
Move the patches that add stats before this one, then provide a single
patch for these functions instead of making incremental changes.

> >
> >>+	unsigned long flags;
> >>+	pending_ring_idx_t index;
> >>+	u16 pending_idx = ubuf->desc;
> >>+	struct pending_tx_info *temp =
> >>+		container_of(ubuf, struct pending_tx_info, callback_struct);
> >>+	struct xenvif *vif =
> >>+		container_of(temp - pending_idx, struct xenvif,
> >>+			     pending_tx_info[0]);
> >>+
> >
> >The third parameter to container_of should be the name of the member
> >within the struct.
>
> Here we have the pending_idx, so we get a pointer to the holding struct
> pending_tx_info, then to the beginning of the pending_tx_info array
> (temp - pending_idx), and from that to the struct xenvif. It's a bit
> tricky and not straightforward, I admit :)
>

Well, macro trick. :-)

> >
> >>+	spin_lock_irqsave(&vif->dealloc_lock, flags);
> >>+	do {
> >>+		pending_idx = ubuf->desc;
> >>+		ubuf = (struct ubuf_info *) ubuf->ctx;
> >>+		index = pending_index(vif->dealloc_prod);
> >>+		vif->dealloc_ring[index] = pending_idx;
> >>+		/* Sync with xenvif_tx_action_dealloc:
> >>+		 * insert idx then incr producer.
> >>+		 */
> >>+		smp_wmb();
> >>+		vif->dealloc_prod++;
> >>+	} while (ubuf);
> >>+	wake_up(&vif->dealloc_wq);
> >>+	spin_unlock_irqrestore(&vif->dealloc_lock, flags);
[...]
> >
> >>+		smp_rmb();
> >>+
> >>+		while (dc != dp) {
> >>+			pending_idx =
> >>+				vif->dealloc_ring[pending_index(dc++)];
> >>+
> >>+			/* Already unmapped? */
> >>+			if (vif->grant_tx_handle[pending_idx] ==
> >>+				NETBACK_INVALID_HANDLE) {
> >>+				netdev_err(vif->dev,
> >>+					   "Trying to unmap invalid handle! "
> >>+					   "pending_idx: %x\n", pending_idx);
> >>+				continue;
> >>+			}
> >
> >Should this be BUG_ON? AIUI this kthread should be the only one doing
> >unmap, right?
>
> The NAPI instance can do it as well if it is a small packet that fits
> into PKT_PROT_LEN. But still, this scenario shouldn't really happen; I
> was just not sure we have to crash immediately. Maybe handle it as a
> fatal error and destroy the vif?
>

It depends. If this is within the trust boundary, i.e. everything at
this stage should already have been sanitized, then we should BUG_ON,
because there's clearly a bug somewhere in the sanitization process or
in the interaction of the various backend routines.

Wei.
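For reference, the per-cpu scratch space idea discussed in this thread could look roughly like the sketch below. This is hypothetical: xenvif_copy_scratch and xenvif_get_copy_scratch() are made-up names, and the posted v2 series keeps the arrays inside struct xenvif instead.

/* Hypothetical per-cpu scratch area for grant copy ops, shared by all
 * vifs rather than embedded in each struct xenvif. Assumes the netback
 * definitions of struct gnttab_copy and MAX_PENDING_REQS are in scope.
 */
#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(struct gnttab_copy,
		      xenvif_copy_scratch[2 * MAX_PENDING_REQS]);

/* Borrow this CPU's scratch array; get_cpu() disables preemption, so
 * nothing else can reuse the buffer until the caller runs put_cpu().
 */
static struct gnttab_copy *xenvif_get_copy_scratch(void)
{
	int cpu = get_cpu();

	return per_cpu(xenvif_copy_scratch, cpu);
}

This is the kind of race-condition guarding Wei mentions: without the get_cpu()/put_cpu() pairing, two contexts on the same CPU could clobber each other's copy ops, and since NAPI runs in softirq context, local_bh_disable() would likely be needed around each use as well.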