Displaying 20 results from an estimated 52 matches for "max_descs".
2019 Oct 12
2
[PATCH RFC v1 1/2] vhost: option to fetch descriptors through an independent struct
...> vq->desc = NULL;
> vq->avail = NULL;
> vq->used = NULL;
> @@ -369,6 +370,9 @@ static int vhost_worker(void *data)
>
> static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
> {
> + kfree(vq->descs);
> + vq->descs = NULL;
> + vq->max_descs = 0;
> kfree(vq->indirect);
> vq->indirect = NULL;
> kfree(vq->log);
> @@ -385,6 +389,10 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
>
> for (i = 0; i < dev->nvqs; ++i) {
> vq = dev->vqs[i];
> + vq->max_descs = dev->iov_limit;
2019 Oct 12
2
[PATCH RFC v1 1/2] vhost: option to fetch descriptors through an independent struct
...> vq->desc = NULL;
> vq->avail = NULL;
> vq->used = NULL;
> @@ -369,6 +370,9 @@ static int vhost_worker(void *data)
>
> static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
> {
> + kfree(vq->descs);
> + vq->descs = NULL;
> + vq->max_descs = 0;
> kfree(vq->indirect);
> vq->indirect = NULL;
> kfree(vq->log);
> @@ -385,6 +389,10 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
>
> for (i = 0; i < dev->nvqs; ++i) {
> vq = dev->vqs[i];
> + vq->max_descs = dev->iov_limit;
2019 Oct 14
2
[PATCH RFC v1 1/2] vhost: option to fetch descriptors through an independent struct
...ULL;
>>> vq->used = NULL;
>>> @@ -369,6 +370,9 @@ static int vhost_worker(void *data)
>>> static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
>>> {
>>> + kfree(vq->descs);
>>> + vq->descs = NULL;
>>> + vq->max_descs = 0;
>>> kfree(vq->indirect);
>>> vq->indirect = NULL;
>>> kfree(vq->log);
>>> @@ -385,6 +389,10 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
>>> for (i = 0; i < dev->nvqs; ++i) {
>>> vq = dev-...
2019 Oct 14
2
[PATCH RFC v1 1/2] vhost: option to fetch descriptors through an independent struct
...ULL;
>>> vq->used = NULL;
>>> @@ -369,6 +370,9 @@ static int vhost_worker(void *data)
>>> static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
>>> {
>>> + kfree(vq->descs);
>>> + vq->descs = NULL;
>>> + vq->max_descs = 0;
>>> kfree(vq->indirect);
>>> vq->indirect = NULL;
>>> kfree(vq->log);
>>> @@ -385,6 +389,10 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
>>> for (i = 0; i < dev->nvqs; ++i) {
>>> vq = dev-...
2020 Jun 03
2
[PATCH RFC 03/13] vhost: batching fetches
...> + vq->first_desc = 0;
> vq->desc = NULL;
> vq->avail = NULL;
> vq->used = NULL;
> @@ -367,6 +368,11 @@ static int vhost_worker(void *data)
> return 0;
> }
>
> +static int vhost_vq_num_batch_descs(struct vhost_virtqueue *vq)
> +{
> + return vq->max_descs - UIO_MAXIOV;
> +}
One descriptor does not mean one iov; e.g., userspace may pass several
1-byte-length memory regions for us to translate.
> +
> static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
> {
> kfree(vq->descs);
> @@ -389,6 +395,9 @@ static long vhost_d...
2020 Jun 03
2
[PATCH RFC 03/13] vhost: batching fetches
...> + vq->first_desc = 0;
> vq->desc = NULL;
> vq->avail = NULL;
> vq->used = NULL;
> @@ -367,6 +368,11 @@ static int vhost_worker(void *data)
> return 0;
> }
>
> +static int vhost_vq_num_batch_descs(struct vhost_virtqueue *vq)
> +{
> + return vq->max_descs - UIO_MAXIOV;
> +}
One descriptor does not mean one iov; e.g., userspace may pass several
1-byte-length memory regions for us to translate.
> +
> static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
> {
> kfree(vq->descs);
> @@ -389,6 +395,9 @@ static long vhost_d...
2020 Jun 17
4
[PATCH RFC v8 02/11] vhost: use batched get_vq_desc version
...Michael S. Tsirkin wrote:
> static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
> {
> kfree(vq->descs);
> @@ -394,6 +400,9 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
> for (i = 0; i < dev->nvqs; ++i) {
> vq = dev->vqs[i];
> vq->max_descs = dev->iov_limit;
> + if (vhost_vq_num_batch_descs(vq) < 0) {
> + return -EINVAL;
> + }
This check breaks vDPA, which sets iov_limit to zero. Considering iov_limit
is meaningless to vDPA, I wonder if we can skip the test when the device
doesn't use a worker.
Thanks
2020 Jun 17
4
[PATCH RFC v8 02/11] vhost: use batched get_vq_desc version
...Michael S. Tsirkin wrote:
> static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
> {
> kfree(vq->descs);
> @@ -394,6 +400,9 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
> for (i = 0; i < dev->nvqs; ++i) {
> vq = dev->vqs[i];
> vq->max_descs = dev->iov_limit;
> + if (vhost_vq_num_batch_descs(vq) < 0) {
> + return -EINVAL;
> + }
This check breaks vDPA, which sets iov_limit to zero. Considering iov_limit
is meaningless to vDPA, I wonder if we can skip the test when the device
doesn't use a worker.
Thanks
2020 Jun 03
2
[PATCH RFC 01/13] vhost: option to fetch descriptors through an independent struct
...> vq->desc = NULL;
> vq->avail = NULL;
> vq->used = NULL;
> @@ -368,6 +369,9 @@ static int vhost_worker(void *data)
>
> static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
> {
> + kfree(vq->descs);
> + vq->descs = NULL;
> + vq->max_descs = 0;
> kfree(vq->indirect);
> vq->indirect = NULL;
> kfree(vq->log);
> @@ -384,6 +388,10 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
>
> for (i = 0; i < dev->nvqs; ++i) {
> vq = dev->vqs[i];
> + vq->max_descs = dev->iov_limit;
2020 Jun 03
2
[PATCH RFC 01/13] vhost: option to fetch descriptors through an independent struct
...> vq->desc = NULL;
> vq->avail = NULL;
> vq->used = NULL;
> @@ -368,6 +369,9 @@ static int vhost_worker(void *data)
>
> static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
> {
> + kfree(vq->descs);
> + vq->descs = NULL;
> + vq->max_descs = 0;
> kfree(vq->indirect);
> vq->indirect = NULL;
> kfree(vq->log);
> @@ -384,6 +388,10 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
>
> for (i = 0; i < dev->nvqs; ++i) {
> vq = dev->vqs[i];
> + vq->max_descs = dev->iov_limit;
2020 Jun 03
0
[PATCH RFC 01/13] vhost: option to fetch descriptors through an independent struct
...> > vq->avail = NULL;
> > vq->used = NULL;
> > @@ -368,6 +369,9 @@ static int vhost_worker(void *data)
> > static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
> > {
> > + kfree(vq->descs);
> > + vq->descs = NULL;
> > + vq->max_descs = 0;
> > kfree(vq->indirect);
> > vq->indirect = NULL;
> > kfree(vq->log);
> > @@ -384,6 +388,10 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
> > for (i = 0; i < dev->nvqs; ++i) {
> > vq = dev->vqs[i];
> > +...
2019 Oct 11
8
[PATCH RFC v1 0/2] vhost: ring format independence
So the idea is as follows: we convert descriptors to an
independent format first, and process that converting to
iov later.
The point is that we have a tight loop that fetches
descriptors, which is good for cache utilization.
This will also allow all kind of batching tricks -
e.g. it seems possible to keep SMAP disabled while
we are fetching multiple descriptors.
And perhaps more importantly,
2019 Oct 11
8
[PATCH RFC v1 0/2] vhost: ring format independence
So the idea is as follows: we convert descriptors to an
independent format first, and process that converting to
iov later.
The point is that we have a tight loop that fetches
descriptors, which is good for cache utilization.
This will also allow all kind of batching tricks -
e.g. it seems possible to keep SMAP disabled while
we are fetching multiple descriptors.
And perhaps more importantly,
2019 Oct 12
2
[PATCH RFC v1 2/2] vhost: batching fetches
...> vq->ndescs = 0;
> + vq->first_desc = 0;
> vq->desc = NULL;
> vq->avail = NULL;
> vq->used = NULL;
> @@ -390,6 +391,7 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
> for (i = 0; i < dev->nvqs; ++i) {
> vq = dev->vqs[i];
> vq->max_descs = dev->iov_limit;
> + vq->batch_descs = dev->iov_limit - UIO_MAXIOV;
> vq->descs = kmalloc_array(vq->max_descs,
> sizeof(*vq->descs),
> GFP_KERNEL);
> @@ -2366,6 +2368,8 @@ static void pop_split_desc(struct vhost_virtqueue *vq)
> --vq->ndescs;
2019 Oct 12
2
[PATCH RFC v1 2/2] vhost: batching fetches
...> vq->ndescs = 0;
> + vq->first_desc = 0;
> vq->desc = NULL;
> vq->avail = NULL;
> vq->used = NULL;
> @@ -390,6 +391,7 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
> for (i = 0; i < dev->nvqs; ++i) {
> vq = dev->vqs[i];
> vq->max_descs = dev->iov_limit;
> + vq->batch_descs = dev->iov_limit - UIO_MAXIOV;
> vq->descs = kmalloc_array(vq->max_descs,
> sizeof(*vq->descs),
> GFP_KERNEL);
> @@ -2366,6 +2368,8 @@ static void pop_split_desc(struct vhost_virtqueue *vq)
> --vq->ndescs;
2019 Oct 12
2
[PATCH RFC v2 0/2] vhost: ring format independence
This adds infrastructure required for supporting
multiple ring formats.
The idea is as follows: we convert descriptors to an
independent format first, and process that converting to
iov later.
The point is that we have a tight loop that fetches
descriptors, which is good for cache utilization.
This will also allow all kind of batching tricks -
e.g. it seems possible to keep SMAP disabled while
2020 Apr 07
0
[PATCH v7 19/19] vhost: batching fetches
...struct vhost_dev *dev,
{
vq->num = 1;
vq->ndescs = 0;
+ vq->first_desc = 0;
vq->desc = NULL;
vq->avail = NULL;
vq->used = NULL;
@@ -367,6 +368,11 @@ static int vhost_worker(void *data)
return 0;
}
+static int vhost_vq_num_batch_descs(struct vhost_virtqueue *vq)
+{
+ return vq->max_descs - UIO_MAXIOV;
+}
+
static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
kfree(vq->descs);
@@ -389,6 +395,9 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
for (i = 0; i < dev->nvqs; ++i) {
vq = dev->vqs[i];
vq->max_descs = dev->iov_limit;
+ if (...
2020 Apr 07
0
[PATCH v8 19/19] vhost: batching fetches
...struct vhost_dev *dev,
{
vq->num = 1;
vq->ndescs = 0;
+ vq->first_desc = 0;
vq->desc = NULL;
vq->avail = NULL;
vq->used = NULL;
@@ -367,6 +368,11 @@ static int vhost_worker(void *data)
return 0;
}
+static int vhost_vq_num_batch_descs(struct vhost_virtqueue *vq)
+{
+ return vq->max_descs - UIO_MAXIOV;
+}
+
static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
kfree(vq->descs);
@@ -389,6 +395,9 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
for (i = 0; i < dev->nvqs; ++i) {
vq = dev->vqs[i];
vq->max_descs = dev->iov_limit;
+ if (...
2020 Jun 02
0
[PATCH RFC 03/13] vhost: batching fetches
...struct vhost_dev *dev,
{
vq->num = 1;
vq->ndescs = 0;
+ vq->first_desc = 0;
vq->desc = NULL;
vq->avail = NULL;
vq->used = NULL;
@@ -367,6 +368,11 @@ static int vhost_worker(void *data)
return 0;
}
+static int vhost_vq_num_batch_descs(struct vhost_virtqueue *vq)
+{
+ return vq->max_descs - UIO_MAXIOV;
+}
+
static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
kfree(vq->descs);
@@ -389,6 +395,9 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
for (i = 0; i < dev->nvqs; ++i) {
vq = dev->vqs[i];
vq->max_descs = dev->iov_limit;
+ if (...
2020 Jun 02
0
[PATCH RFC 01/13] vhost: option to fetch descriptors through an independent struct
...struct vhost_virtqueue *vq)
{
vq->num = 1;
+ vq->ndescs = 0;
vq->desc = NULL;
vq->avail = NULL;
vq->used = NULL;
@@ -368,6 +369,9 @@ static int vhost_worker(void *data)
static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
+ kfree(vq->descs);
+ vq->descs = NULL;
+ vq->max_descs = 0;
kfree(vq->indirect);
vq->indirect = NULL;
kfree(vq->log);
@@ -384,6 +388,10 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
for (i = 0; i < dev->nvqs; ++i) {
vq = dev->vqs[i];
+ vq->max_descs = dev->iov_limit;
+ vq->descs = kmalloc_array(v...