search for: peek_split_desc

Displaying 20 results from an estimated 37 matches for "peek_split_desc".

2020 Jun 04
1
[PATCH RFC 07/13] vhost: format-independent API for used buffers
...> Both vhost_desc and vhost_buf can work for split and packed. > > Do you mean we should add packed ring support based on this? > For sure, this is one of the motivators for the patchset. > Somehow. But the reason I ask is that I see the "split" suffix used in patch 1 as: peek_split_desc(), pop_split_desc(), push_split_desc(). But that suffix is not used for the new used ring API introduced in this patch. Thanks
2020 Jun 03
2
[PATCH RFC 01/13] vhost: option to fetch descriptors through an independent struct
...if (!vq->indirect || !vq->log || !vq->heads || !vq->descs) > goto err_nomem; > } > return 0; > @@ -2277,6 +2285,293 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, > } > EXPORT_SYMBOL_GPL(vhost_get_vq_desc); > > +static struct vhost_desc *peek_split_desc(struct vhost_virtqueue *vq) > +{ > + BUG_ON(!vq->ndescs); > + return &vq->descs[vq->ndescs - 1]; > +} > + > +static void pop_split_desc(struct vhost_virtqueue *vq) > +{ > + BUG_ON(!vq->ndescs); > + --vq->ndescs; > +} > + > +#define VHOST_DESC_F...
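In the hunk above, vq->descs[] behaves like a small stack of fetched descriptors: push_split_desc() (visible in other hits below) appends an entry, peek_split_desc() returns the most recently stored one, and pop_split_desc() drops it. A minimal userspace sketch of that pattern follows; the demo_* names, the fixed capacity and the assert()-based checks are made up for illustration and are not taken from the patch.

#include <assert.h>
#include <stdint.h>

/* Simplified stand-in for struct vhost_desc: only the fields the
 * peek/pop snippet above relies on. */
struct demo_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t flags;
	uint16_t id;
};

struct demo_vq {
	struct demo_desc descs[64];	/* buffer of fetched descriptors */
	int ndescs;			/* number of valid entries */
};

/* Append a fetched descriptor; -1 when the buffer is full. */
static int push_desc(struct demo_vq *vq, struct demo_desc d)
{
	if (vq->ndescs >= 64)
		return -1;
	vq->descs[vq->ndescs++] = d;
	return 0;
}

/* Most recently pushed descriptor, i.e. the tail of the current chain. */
static struct demo_desc *peek_desc(struct demo_vq *vq)
{
	assert(vq->ndescs);
	return &vq->descs[vq->ndescs - 1];
}

/* Drop the most recently pushed descriptor. */
static void pop_desc(struct demo_vq *vq)
{
	assert(vq->ndescs);
	--vq->ndescs;
}

int main(void)
{
	struct demo_vq vq = { .ndescs = 0 };
	struct demo_desc d = { .addr = 0x1000, .len = 4096, .flags = 0, .id = 1 };

	push_desc(&vq, d);
	assert(peek_desc(&vq)->id == 1);
	pop_desc(&vq);
	return 0;
}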
2019 Oct 12
2
[PATCH RFC v1 2/2] vhost: batching fetches
...then some number of input descriptors, it's actually two > @@ -2562,6 +2588,8 @@ int vhost_get_vq_desc_batch(struct vhost_virtqueue *vq, > if (ret) > return ret; > > + /* Note: indirect descriptors are not batched */ > + /* TODO: batch up to a limit */ > last = peek_split_desc(vq); > id = last->id; > > @@ -2584,12 +2612,12 @@ int vhost_get_vq_desc_batch(struct vhost_virtqueue *vq, > if (unlikely(log)) > *log_num = 0; > > - for (i = 0; i < vq->ndescs; ++i) { > + for (i = vq->first_desc; i < vq->ndescs; ++i) { >...
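The change above makes the consumption loop start at vq->first_desc rather than 0, so one batched fetch can serve several vhost_get_vq_desc_batch() calls before the buffer is refilled. Below is a rough, self-contained sketch of that cursor idea; the batch_vq/fetch_batch/get_desc names are invented and the fetch is stubbed out rather than reading a real ring.

#include <stdio.h>

#define BATCH 8

struct batch_vq {
	int descs[BATCH];	/* stand-in for fetched descriptors */
	int first_desc;		/* next entry to hand out */
	int ndescs;		/* entries filled by the last batched fetch */
	int next_avail;		/* stand-in for the ring's avail index */
};

/* Pretend to read a whole batch of descriptors from the ring in one pass. */
static void fetch_batch(struct batch_vq *vq)
{
	for (vq->ndescs = 0; vq->ndescs < BATCH; vq->ndescs++)
		vq->descs[vq->ndescs] = vq->next_avail++;
	vq->first_desc = 0;
}

/* Hand out one descriptor per call; refetch only when the batch runs dry. */
static int get_desc(struct batch_vq *vq)
{
	if (vq->first_desc >= vq->ndescs)
		fetch_batch(vq);
	return vq->descs[vq->first_desc++];
}

int main(void)
{
	struct batch_vq vq = { 0 };

	for (int i = 0; i < 20; i++)
		printf("got descriptor %d\n", get_desc(&vq));
	return 0;
}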
2019 Oct 12
2
[PATCH RFC v2 0/2] vhost: ring format independence
This adds infrastructure required for supporting multiple ring formats. The idea is as follows: we convert descriptors to an independent format first, and process that, converting to iov later. The point is that we have a tight loop that fetches descriptors, which is good for cache utilization. This will also allow all kinds of batching tricks - e.g. it seems possible to keep SMAP disabled while
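The cover letter describes a two-phase flow: a tight loop first copies ring descriptors into a format-independent structure, and only afterwards are those entries turned into an iovec for the data path. The userspace illustration below is a simplification under that reading; struct flat_desc, fetch_flat() and flat_to_iov() are invented names, and the real code translates guest addresses instead of casting them to pointers.

#include <stdint.h>
#include <stdio.h>
#include <sys/uio.h>

/* Format-independent descriptor: split and packed rings would both be
 * converted into this shape before any iov work happens. */
struct flat_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t flags;
	uint16_t id;
};

/* Phase 1: tight fetch loop that only touches descriptor memory. */
static size_t fetch_flat(const struct flat_desc *ring, size_t avail,
			 struct flat_desc *out, size_t cap)
{
	size_t n = avail < cap ? avail : cap;

	for (size_t i = 0; i < n; i++)
		out[i] = ring[i];	/* real code parses the vring layout here */
	return n;
}

/* Phase 2: translate the flat descriptors into an iovec, separately. */
static size_t flat_to_iov(const struct flat_desc *descs, size_t n,
			  struct iovec *iov, size_t cap)
{
	size_t k = n < cap ? n : cap;

	for (size_t i = 0; i < k; i++) {
		iov[i].iov_base = (void *)(uintptr_t)descs[i].addr;
		iov[i].iov_len = descs[i].len;
	}
	return k;
}

int main(void)
{
	static char buf[16];
	struct flat_desc ring[1] = {
		{ .addr = (uintptr_t)buf, .len = sizeof(buf), .flags = 0, .id = 0 },
	};
	struct flat_desc flat[4];
	struct iovec iov[4];

	printf("%zu iov entries\n", flat_to_iov(flat, fetch_flat(ring, 1, flat, 4), iov, 4));
	return 0;
}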
2020 Jun 03
1
[PATCH RFC 01/13] vhost: option to fetch descriptors through an independent struct
...lit_desc(vq, &desc, head); >>> + if (unlikely(ret)) { >>> + vq_err(vq, "Failed to save descriptor: idx %d\n", i); >>> + return -EINVAL; >>> + } >>> + } while ((i = next_desc(vq, &desc)) != -1); >>> + >>> + last = peek_split_desc(vq); >>> + if (unlikely(last->flags & VRING_DESC_F_INDIRECT)) { >>> + pop_split_desc(vq); >>> + ret = fetch_indirect_descs(vq, last, head); >> >> Note that this means we don't support chained indirect descriptors, which complies with the spec...
2020 Jun 02
0
[PATCH RFC 01/13] vhost: option to fetch descriptors through an independent struct
...>indirect || !vq->log || !vq->heads) + if (!vq->indirect || !vq->log || !vq->heads || !vq->descs) goto err_nomem; } return 0; @@ -2277,6 +2285,293 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, } EXPORT_SYMBOL_GPL(vhost_get_vq_desc); +static struct vhost_desc *peek_split_desc(struct vhost_virtqueue *vq) +{ + BUG_ON(!vq->ndescs); + return &vq->descs[vq->ndescs - 1]; +} + +static void pop_split_desc(struct vhost_virtqueue *vq) +{ + BUG_ON(!vq->ndescs); + --vq->ndescs; +} + +#define VHOST_DESC_FLAGS (VRING_DESC_F_INDIRECT | VRING_DESC_F_WRITE | \ + V...
2020 Apr 07
0
[PATCH v7 17/19] vhost: option to fetch descriptors through an independent struct
...>indirect || !vq->log || !vq->heads) + if (!vq->indirect || !vq->log || !vq->heads || !vq->descs) goto err_nomem; } return 0; @@ -2277,6 +2285,293 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, } EXPORT_SYMBOL_GPL(vhost_get_vq_desc); +static struct vhost_desc *peek_split_desc(struct vhost_virtqueue *vq) +{ + BUG_ON(!vq->ndescs); + return &vq->descs[vq->ndescs - 1]; +} + +static void pop_split_desc(struct vhost_virtqueue *vq) +{ + BUG_ON(!vq->ndescs); + --vq->ndescs; +} + +#define VHOST_DESC_FLAGS (VRING_DESC_F_INDIRECT | VRING_DESC_F_WRITE | \ + V...
2020 Apr 07
0
[PATCH v8 17/19] vhost: option to fetch descriptors through an independent struct
...>indirect || !vq->log || !vq->heads) + if (!vq->indirect || !vq->log || !vq->heads || !vq->descs) goto err_nomem; } return 0; @@ -2277,6 +2285,293 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, } EXPORT_SYMBOL_GPL(vhost_get_vq_desc); +static struct vhost_desc *peek_split_desc(struct vhost_virtqueue *vq) +{ + BUG_ON(!vq->ndescs); + return &vq->descs[vq->ndescs - 1]; +} + +static void pop_split_desc(struct vhost_virtqueue *vq) +{ + BUG_ON(!vq->ndescs); + --vq->ndescs; +} + +#define VHOST_DESC_FLAGS (VRING_DESC_F_INDIRECT | VRING_DESC_F_WRITE | \ + V...
2020 Jun 03
0
[PATCH RFC 01/13] vhost: option to fetch descriptors through an independent struct
...vq->log || !vq->heads || !vq->descs) > > goto err_nomem; > > } > > return 0; > > @@ -2277,6 +2285,293 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, > > } > > EXPORT_SYMBOL_GPL(vhost_get_vq_desc); > > +static struct vhost_desc *peek_split_desc(struct vhost_virtqueue *vq) > > +{ > > + BUG_ON(!vq->ndescs); > > + return &vq->descs[vq->ndescs - 1]; > > +} > > + > > +static void pop_split_desc(struct vhost_virtqueue *vq) > > +{ > > + BUG_ON(!vq->ndescs); > > + --vq->ndes...
2019 Oct 11
0
[PATCH RFC v1 1/2] vhost: option to fetch descriptors through an independent struct
...>indirect || !vq->log || !vq->heads) + if (!vq->indirect || !vq->log || !vq->heads || !vq->descs) goto err_nomem; } return 0; @@ -2346,6 +2354,295 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, } EXPORT_SYMBOL_GPL(vhost_get_vq_desc); +static struct vhost_desc *peek_split_desc(struct vhost_virtqueue *vq) +{ + BUG_ON(!vq->ndescs); + return &vq->descs[vq->ndescs - 1]; +} + +static void pop_split_desc(struct vhost_virtqueue *vq) +{ + BUG_ON(!vq->ndescs); + --vq->ndescs; +} + +static int push_split_desc(struct vhost_virtqueue *vq, struct vring_desc *desc,...
2019 Oct 13
4
[PATCH RFC v3 0/4] vhost: ring format independence
This adds infrastructure required for supporting multiple ring formats. The idea is as follows: we convert descriptors to an independent format first, and process that, converting to iov later. The point is that we have a tight loop that fetches descriptors, which is good for cache utilization. This will also allow all kinds of batching tricks - e.g. it seems possible to keep SMAP disabled while
2019 Oct 11
8
[PATCH RFC v1 0/2] vhost: ring format independence
So the idea is as follows: we convert descriptors to an independent format first, and process that, converting to iov later. The point is that we have a tight loop that fetches descriptors, which is good for cache utilization. This will also allow all kinds of batching tricks - e.g. it seems possible to keep SMAP disabled while we are fetching multiple descriptors. And perhaps more importantly,
2019 Oct 11
0
[vhost:vhost 6/6] drivers/vhost/vhost.c:2672:9: error: 'desc' undeclared; did you mean 'rdtsc'?
...g, unsigned int *log_num) 2582 { 2583 int ret = fetch_descs(vq); 2584 struct vhost_desc *last; 2585 u16 id; 2586 int i; 2587 2588 if (ret) 2589 return ret; 2590 2591 /* Note: indirect descriptors are not batched */ 2592 /* TODO: batch up to a limit */ 2593 last = peek_split_desc(vq); 2594 id = last->id; 2595 2596 if (last->flags & VRING_DESC_F_INDIRECT) { 2597 int r; 2598 2599 pop_split_desc(vq); 2600 r = fetch_indirect_descs(vq, last, id); 2601 if (unlikely(r < 0)) { 2602 if (r != -EAGAIN) 2603 vq_err(vq, "Fai...
2019 Oct 11
0
[vhost:vhost 6/6] drivers/vhost/vhost.c:2672:9: error: 'desc' undeclared
...g, unsigned int *log_num) 2582 { 2583 int ret = fetch_descs(vq); 2584 struct vhost_desc *last; 2585 u16 id; 2586 int i; 2587 2588 if (ret) 2589 return ret; 2590 2591 /* Note: indirect descriptors are not batched */ 2592 /* TODO: batch up to a limit */ 2593 last = peek_split_desc(vq); 2594 id = last->id; 2595 2596 if (last->flags & VRING_DESC_F_INDIRECT) { 2597 int r; 2598 2599 pop_split_desc(vq); 2600 r = fetch_indirect_descs(vq, last, id); 2601 if (unlikely(r < 0)) { 2602 if (r != -EAGAIN) 2603 vq_err(vq, "Fai...
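The numbered excerpts above also show how the batched path handles indirect descriptors: peek at the last fetched descriptor, remember its id, and if VRING_DESC_F_INDIRECT is set, pop it before expanding the indirect table in its place. The toy fragment below mirrors just that branch; the toy_* names and flag value are invented, and the actual table expansion (fetch_indirect_descs() in the patch) is left as a stub.

#include <stdio.h>

#define TOY_F_INDIRECT 0x4u	/* stand-in for VRING_DESC_F_INDIRECT */

struct toy_desc {
	unsigned int flags;
	unsigned int id;
};

/* Peek at the last fetched descriptor; if it is indirect, pop it so the
 * entries of the indirect table could be pushed in its place. */
static int toy_get_chain(struct toy_desc *descs, int *ndescs)
{
	struct toy_desc *last = &descs[*ndescs - 1];	/* peek_split_desc() */
	unsigned int id = last->id;

	if (last->flags & TOY_F_INDIRECT) {
		--*ndescs;				/* pop_split_desc() */
		/* fetch_indirect_descs() would refill descs[] here */
	}
	return (int)id;
}

int main(void)
{
	struct toy_desc descs[2] = { { 0, 3 }, { TOY_F_INDIRECT, 7 } };
	int ndescs = 2;
	int head = toy_get_chain(descs, &ndescs);

	printf("head id %d, ndescs now %d\n", head, ndescs);
	return 0;
}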
2019 Oct 11
0
[PATCH RFC v1 2/2] vhost: batching fetches
...s consist of some * number of output then some number of input descriptors, it's actually two @@ -2562,6 +2588,8 @@ int vhost_get_vq_desc_batch(struct vhost_virtqueue *vq, if (ret) return ret; + /* Note: indirect descriptors are not batched */ + /* TODO: batch up to a limit */ last = peek_split_desc(vq); id = last->id; @@ -2584,12 +2612,12 @@ int vhost_get_vq_desc_batch(struct vhost_virtqueue *vq, if (unlikely(log)) *log_num = 0; - for (i = 0; i < vq->ndescs; ++i) { + for (i = vq->first_desc; i < vq->ndescs; ++i) { unsigned iov_count = *in_num + *out_num; str...
2019 Oct 12
0
[PATCH RFC v1 2/2] vhost: batching fetches
...put descriptors, it's actually two > > @@ -2562,6 +2588,8 @@ int vhost_get_vq_desc_batch(struct vhost_virtqueue *vq, > > if (ret) > > return ret; > > + /* Note: indirect descriptors are not batched */ > > + /* TODO: batch up to a limit */ > > last = peek_split_desc(vq); > > id = last->id; > > @@ -2584,12 +2612,12 @@ int vhost_get_vq_desc_batch(struct vhost_virtqueue *vq, > > if (unlikely(log)) > > *log_num = 0; > > - for (i = 0; i < vq->ndescs; ++i) { > > + for (i = vq->first_desc; i < vq->ndescs...
2019 Oct 13
6
[PATCH RFC v4 0/5] vhost: ring format independence
This adds infrastructure required for supporting multiple ring formats. The idea is as follows: we convert descriptors to an independent format first, and process that, converting to iov later. The point is that we have a tight loop that fetches descriptors, which is good for cache utilization. This will also allow all kinds of batching tricks - e.g. it seems possible to keep SMAP disabled while