Displaying 20 results from an estimated 38 matches for "fetch_indirect_descs".
2020 Jun 03
1
[PATCH RFC 01/13] vhost: option to fetch descriptors through an independent struct
On 2020/6/3 5:48 PM, Michael S. Tsirkin wrote:
> On Wed, Jun 03, 2020 at 03:13:56PM +0800, Jason Wang wrote:
>> On 2020/6/2 9:05 PM, Michael S. Tsirkin wrote:
[...]
>>> +
>>> +static int fetch_indirect_descs(struct vhost_virtqueue *vq,
>>> +				struct vhost_desc *indirect,
>>> +				u16 head)
>>> +{
>>> +	struct vring_desc desc;
>>> +	unsigned int i = 0, count, found = 0;
>>> +	u32 len = indirect->len;
>>> +	struct iov_iter from;
>>...
2020 Jun 03
2
[PATCH RFC 01/13] vhost: option to fetch descriptors through an independent struct
...&vq->descs[vq->ndescs++];
> +	h->addr = vhost64_to_cpu(vq, desc->addr);
> +	h->len = vhost32_to_cpu(vq, desc->len);
> +	h->flags = vhost16_to_cpu(vq, desc->flags) & VHOST_DESC_FLAGS;
> +	h->id = id;
> +
> +	return 0;
> +}
> +
> +static int fetch_indirect_descs(struct vhost_virtqueue *vq,
> +				struct vhost_desc *indirect,
> +				u16 head)
> +{
> +	struct vring_desc desc;
> +	unsigned int i = 0, count, found = 0;
> +	u32 len = indirect->len;
> +	struct iov_iter from;
> +	int ret;
> +
> +	/* Sanity check */
> +	if (unli...
2020 Jun 03
2
[PATCH RFC 01/13] vhost: option to fetch descriptors through an independent struct
...&vq->descs[vq->ndescs++];
> +	h->addr = vhost64_to_cpu(vq, desc->addr);
> +	h->len = vhost32_to_cpu(vq, desc->len);
> +	h->flags = vhost16_to_cpu(vq, desc->flags) & VHOST_DESC_FLAGS;
> +	h->id = id;
> +
> +	return 0;
> +}
> +
> +static int fetch_indirect_descs(struct vhost_virtqueue *vq,
> +				struct vhost_desc *indirect,
> +				u16 head)
> +{
> +	struct vring_desc desc;
> +	unsigned int i = 0, count, found = 0;
> +	u32 len = indirect->len;
> +	struct iov_iter from;
> +	int ret;
> +
> +	/* Sanity check */
> +	if (unli...
2019 Oct 13
4
[PATCH RFC v3 0/4] vhost: ring format independence
This adds infrastructure required for supporting
multiple ring formats.
The idea is as follows: we first convert descriptors to an
independent format, and only convert that to an iov later.
The point is that we have a tight loop that fetches
descriptors, which is good for cache utilization.
This will also allow all kinds of batching tricks -
e.g. it seems possible to keep SMAP disabled while
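As a rough sketch of the two-pass scheme described above - fetch
descriptors into an independent, cache-friendly struct in a tight
loop, then translate that to an iov in a second pass - the following
standalone C program illustrates the idea.  The names (ring_desc,
cached_desc, fetch_phase, convert_phase) are made up for the example
and are not the vhost API.

#include <stdint.h>
#include <stdio.h>
#include <sys/uio.h>

struct ring_desc {		/* stand-in for the guest-visible descriptor */
	uint64_t addr;
	uint32_t len;
	uint16_t flags;
	uint16_t next;
};

struct cached_desc {		/* independent, ring-format-agnostic copy */
	uint64_t addr;
	uint32_t len;
	uint16_t flags;
	uint16_t id;
};

/* Pass 1: tight loop, nothing but loads and stores. */
static unsigned int fetch_phase(const struct ring_desc *ring, unsigned int n,
				struct cached_desc *out)
{
	for (unsigned int i = 0; i < n; i++) {
		out[i].addr  = ring[i].addr;
		out[i].len   = ring[i].len;
		out[i].flags = ring[i].flags;
		out[i].id    = (uint16_t)i;
	}
	return n;
}

/* Pass 2: translate the cached copies into an iovec. */
static void convert_phase(const struct cached_desc *descs, unsigned int n,
			  struct iovec *iov)
{
	for (unsigned int i = 0; i < n; i++) {
		/* addresses are only carried around here, never dereferenced */
		iov[i].iov_base = (void *)(uintptr_t)descs[i].addr;
		iov[i].iov_len  = descs[i].len;
	}
}

int main(void)
{
	struct ring_desc ring[2] = {
		{ .addr = 0x1000, .len = 64 },
		{ .addr = 0x2000, .len = 128 },
	};
	struct cached_desc cache[2];
	struct iovec iov[2];

	convert_phase(cache, fetch_phase(ring, 2, cache), iov);
	printf("iov[1] covers %zu bytes\n", iov[1].iov_len);
	return 0;
}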
2019 Oct 12
2
[PATCH RFC v1 2/2] vhost: batching fetches
...u(vq, desc->addr);
>   	h->len = vhost32_to_cpu(vq, desc->len);
> -	h->flags = vhost16_to_cpu(vq, desc->flags);
> +	h->flags = vhost16_to_cpu(vq, desc->flags) & VHOST_DESC_FLAGS;
>   	h->id = id;
>   
>   	return 0;
> @@ -2450,7 +2454,7 @@ static int fetch_indirect_descs(struct vhost_virtqueue *vq,
>   	return 0;
>   }
>   
> -static int fetch_descs(struct vhost_virtqueue *vq)
> +static int fetch_buf(struct vhost_virtqueue *vq)
>   {
>   	struct vring_desc desc;
>   	unsigned int i, head, found = 0;
> @@ -2462,7 +2466,11 @@ static int fet...
2019 Oct 12
2
[PATCH RFC v1 2/2] vhost: batching fetches
...u(vq, desc->addr);
>   	h->len = vhost32_to_cpu(vq, desc->len);
> -	h->flags = vhost16_to_cpu(vq, desc->flags);
> +	h->flags = vhost16_to_cpu(vq, desc->flags) & VHOST_DESC_FLAGS;
>   	h->id = id;
>   
>   	return 0;
> @@ -2450,7 +2454,7 @@ static int fetch_indirect_descs(struct vhost_virtqueue *vq,
>   	return 0;
>   }
>   
> -static int fetch_descs(struct vhost_virtqueue *vq)
> +static int fetch_buf(struct vhost_virtqueue *vq)
>   {
>   	struct vring_desc desc;
>   	unsigned int i, head, found = 0;
> @@ -2462,7 +2466,11 @@ static int fet...
2019 Oct 12
2
[PATCH RFC v2 0/2] vhost: ring format independence
This adds infrastructure required for supporting
multiple ring formats.
The idea is as follows: we first convert descriptors to an
independent format, and only convert that to an iov later.
The point is that we have a tight loop that fetches
descriptors, which is good for cache utilization.
This will also allow all kinds of batching tricks -
e.g. it seems possible to keep SMAP disabled while
2020 Jun 02
0
[PATCH RFC 01/13] vhost: option to fetch descriptors through an independent struct
...q->max_descs))
+		return -EINVAL;
+	h = &vq->descs[vq->ndescs++];
+	h->addr = vhost64_to_cpu(vq, desc->addr);
+	h->len = vhost32_to_cpu(vq, desc->len);
+	h->flags = vhost16_to_cpu(vq, desc->flags) & VHOST_DESC_FLAGS;
+	h->id = id;
+
+	return 0;
+}
+
+static int fetch_indirect_descs(struct vhost_virtqueue *vq,
+				struct vhost_desc *indirect,
+				u16 head)
+{
+	struct vring_desc desc;
+	unsigned int i = 0, count, found = 0;
+	u32 len = indirect->len;
+	struct iov_iter from;
+	int ret;
+
+	/* Sanity check */
+	if (unlikely(len % sizeof desc)) {
+		vq_err(vq, "Invalid...
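The "Sanity check" visible in the excerpt above rejects an indirect
table whose byte length is not an exact multiple of the descriptor
size; the descriptor count then falls out of the division.  A small
standalone illustration of that check follows; the struct layout and
the limit are made up for the example and are not the vhost code.

#include <stdint.h>
#include <stdio.h>

struct example_desc {		/* same layout idea as struct vring_desc */
	uint64_t addr;
	uint32_t len;
	uint16_t flags;
	uint16_t next;
};

#define EXAMPLE_MAX_INDIRECT 128	/* arbitrary cap for the example */

/* Returns the descriptor count on success, -1 on a malformed length. */
static int indirect_count(uint32_t len)
{
	if (len % sizeof(struct example_desc))
		return -1;	/* not a whole number of descriptors */
	if (len / sizeof(struct example_desc) > EXAMPLE_MAX_INDIRECT)
		return -1;	/* refuse oversized tables */
	return (int)(len / sizeof(struct example_desc));
}

int main(void)
{
	printf("%d\n", indirect_count(64));	/* 4 descriptors of 16 bytes */
	printf("%d\n", indirect_count(70));	/* -1: not a multiple of 16 */
	return 0;
}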
2020 Apr 07
0
[PATCH v7 17/19] vhost: option to fetch descriptors through an independent struct
...q->max_descs))
+		return -EINVAL;
+	h = &vq->descs[vq->ndescs++];
+	h->addr = vhost64_to_cpu(vq, desc->addr);
+	h->len = vhost32_to_cpu(vq, desc->len);
+	h->flags = vhost16_to_cpu(vq, desc->flags) & VHOST_DESC_FLAGS;
+	h->id = id;
+
+	return 0;
+}
+
+static int fetch_indirect_descs(struct vhost_virtqueue *vq,
+				struct vhost_desc *indirect,
+				u16 head)
+{
+	struct vring_desc desc;
+	unsigned int i = 0, count, found = 0;
+	u32 len = indirect->len;
+	struct iov_iter from;
+	int ret;
+
+	/* Sanity check */
+	if (unlikely(len % sizeof desc)) {
+		vq_err(vq, "Invalid...
2020 Apr 07
0
[PATCH v8 17/19] vhost: option to fetch descriptors through an independent struct
...q->max_descs))
+		return -EINVAL;
+	h = &vq->descs[vq->ndescs++];
+	h->addr = vhost64_to_cpu(vq, desc->addr);
+	h->len = vhost32_to_cpu(vq, desc->len);
+	h->flags = vhost16_to_cpu(vq, desc->flags) & VHOST_DESC_FLAGS;
+	h->id = id;
+
+	return 0;
+}
+
+static int fetch_indirect_descs(struct vhost_virtqueue *vq,
+				struct vhost_desc *indirect,
+				u16 head)
+{
+	struct vring_desc desc;
+	unsigned int i = 0, count, found = 0;
+	u32 len = indirect->len;
+	struct iov_iter from;
+	int ret;
+
+	/* Sanity check */
+	if (unlikely(len % sizeof desc)) {
+		vq_err(vq, "Invalid...
2020 Jun 02
0
[PATCH RFC 04/13] vhost: cleanup fetch_buf return code handling
...>
---
 drivers/vhost/vhost.c | 24 ++++++++++++++++--------
 1 file changed, 16 insertions(+), 8 deletions(-)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index aca2a5b0d078..bd52b44b0d23 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -2146,6 +2146,8 @@ static int fetch_indirect_descs(struct vhost_virtqueue *vq,
 	return 0;
 }
 
+/* This function returns a value > 0 if a descriptor was found, or 0 if none were found.
+ * A negative code is returned on error. */
 static int fetch_buf(struct vhost_virtqueue *vq)
 {
 	unsigned int i, head, found = 0;
@@ -2162,7 +2164,7 @@ static...
2020 Jun 03
0
[PATCH RFC 01/13] vhost: option to fetch descriptors through an independent struct
...h->addr = vhost64_to_cpu(vq, desc->addr);
> > +	h->len = vhost32_to_cpu(vq, desc->len);
> > +	h->flags = vhost16_to_cpu(vq, desc->flags) & VHOST_DESC_FLAGS;
> > +	h->id = id;
> > +
> > +	return 0;
> > +}
> > +
> > +static int fetch_indirect_descs(struct vhost_virtqueue *vq,
> > +				struct vhost_desc *indirect,
> > +				u16 head)
> > +{
> > +	struct vring_desc desc;
> > +	unsigned int i = 0, count, found = 0;
> > +	u32 len = indirect->len;
> > +	struct iov_iter from;
> > +	int ret;
> >...
2019 Oct 11
0
[PATCH RFC v1 1/2] vhost: option to fetch descriptors through an independent struct
...y(vq->ndescs >= vq->max_descs))
+		return -EINVAL;
+	h = &vq->descs[vq->ndescs++];
+	h->addr = vhost64_to_cpu(vq, desc->addr);
+	h->len = vhost32_to_cpu(vq, desc->len);
+	h->flags = vhost16_to_cpu(vq, desc->flags);
+	h->id = id;
+
+	return 0;
+}
+
+static int fetch_indirect_descs(struct vhost_virtqueue *vq,
+				struct vhost_desc *indirect,
+				u16 head)
+{
+	struct vring_desc desc;
+	unsigned int i = 0, count, found = 0;
+	u32 len = indirect->len;
+	struct iov_iter from;
+	int ret;
+
+	/* Sanity check */
+	if (unlikely(len % sizeof desc)) {
+		vq_err(vq, "Invalid...
2020 Jun 03
2
[PATCH RFC 03/13] vhost: batching fetches
...rg
>   		vq->last_avail_idx = s.num;
>   		/* Forget the cached index value. */
>   		vq->avail_idx = vq->last_avail_idx;
> +		vq->ndescs = vq->first_desc = 0;
>   		break;
>   	case VHOST_GET_VRING_BASE:
>   		s.index = idx;
> @@ -2136,7 +2146,7 @@ static int fetch_indirect_descs(struct vhost_virtqueue *vq,
>   	return 0;
>   }
>   
> -static int fetch_descs(struct vhost_virtqueue *vq)
> +static int fetch_buf(struct vhost_virtqueue *vq)
>   {
>   	unsigned int i, head, found = 0;
>   	struct vhost_desc *last;
> @@ -2149,7 +2159,11 @@ static int fe...
2020 Jun 03
2
[PATCH RFC 03/13] vhost: batching fetches
...rg
>   		vq->last_avail_idx = s.num;
>   		/* Forget the cached index value. */
>   		vq->avail_idx = vq->last_avail_idx;
> +		vq->ndescs = vq->first_desc = 0;
>   		break;
>   	case VHOST_GET_VRING_BASE:
>   		s.index = idx;
> @@ -2136,7 +2146,7 @@ static int fetch_indirect_descs(struct vhost_virtqueue *vq,
>   	return 0;
>   }
>   
> -static int fetch_descs(struct vhost_virtqueue *vq)
> +static int fetch_buf(struct vhost_virtqueue *vq)
>   {
>   	unsigned int i, head, found = 0;
>   	struct vhost_desc *last;
> @@ -2149,7 +2159,11 @@ static int fe...
2019 Oct 11
8
[PATCH RFC v1 0/2] vhost: ring format independence
So the idea is as follows: we first convert descriptors to
an independent format, and only convert that to an iov later.
The point is that we have a tight loop that fetches
descriptors, which is good for cache utilization.
This will also allow all kinds of batching tricks -
e.g. it seems possible to keep SMAP disabled while
we are fetching multiple descriptors.
And perhaps more importantly,
2019 Oct 11
8
[PATCH RFC v1 0/2] vhost: ring format independence
So the idea is as follows: we first convert descriptors to
an independent format, and only convert that to an iov later.
The point is that we have a tight loop that fetches
descriptors, which is good for cache utilization.
This will also allow all kinds of batching tricks -
e.g. it seems possible to keep SMAP disabled while
we are fetching multiple descriptors.
And perhaps more importantly,
2019 Oct 13
6
[PATCH RFC v4 0/5] vhost: ring format independence
This adds infrastructure required for supporting
multiple ring formats.
The idea is as follows: we first convert descriptors to an
independent format, and only convert that to an iov later.
The point is that we have a tight loop that fetches
descriptors, which is good for cache utilization.
This will also allow all kinds of batching tricks -
e.g. it seems possible to keep SMAP disabled while
2019 Oct 11
0
[vhost:vhost 6/6] drivers/vhost/vhost.c:2672:9: error: 'desc' undeclared; did you mean 'rdtsc'?
...2590	
  2591		/* Note: indirect descriptors are not batched */
  2592		/* TODO: batch up to a limit */
  2593		last = peek_split_desc(vq);
  2594		id = last->id;
  2595	
  2596		if (last->flags & VRING_DESC_F_INDIRECT) {
  2597				int r;
  2598	
  2599				pop_split_desc(vq);
  2600				r = fetch_indirect_descs(vq, last, id);
  2601				if (unlikely(r < 0)) {
  2602					if (r != -EAGAIN)
  2603						vq_err(vq, "Failure detected "
  2604						       "in indirect descriptor at idx %d\n", id);
  2605					return ret;
  2606				}
  2607		}
  2608	
  2609		/* Now convert to IOV */
  2610...
2019 Oct 11
0
[vhost:vhost 6/6] drivers/vhost/vhost.c:2672:9: error: 'desc' undeclared
...2590	
  2591		/* Note: indirect descriptors are not batched */
  2592		/* TODO: batch up to a limit */
  2593		last = peek_split_desc(vq);
  2594		id = last->id;
  2595	
  2596		if (last->flags & VRING_DESC_F_INDIRECT) {
  2597				int r;
  2598	
  2599				pop_split_desc(vq);
  2600				r = fetch_indirect_descs(vq, last, id);
  2601				if (unlikely(r < 0)) {
  2602					if (r != -EAGAIN)
  2603						vq_err(vq, "Failure detected "
  2604						       "in indirect descriptor at idx %d\n", id);
  2605					return ret;
  2606				}
  2607		}
  2608	
  2609		/* Now convert to IOV */
  2610...