Displaying 20 results from an estimated 360 matches for "vhost_poll_queue".
2018 Jul 23
3
[PATCH net-next v6 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
...et_busy_poll_vq_check(struct vhost_net *net,
> +                                       struct vhost_virtqueue *rvq,
> +                                       struct vhost_virtqueue *tvq,
> +                                       bool rx)
> +{
> +        struct socket *sock = rvq->private_data;
> +
> +        if (rx) {
> +                if (!vhost_vq_avail_empty(&net->dev, tvq)) {
> +                        vhost_poll_queue(&tvq->poll);
> +                } else if (unlikely(vhost_enable_notify(&net->dev, tvq))) {
> +                        vhost_disable_notify(&net->dev, tvq);
> +                        vhost_poll_queue(&tvq->poll);
> +                }
> +        } else if ((sock && sk_has_rx_data(sock->sk)) &&
> +                   !vhost_...
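The snippet cuts off mid-condition, but the rx branch it does show is the standard vhost check/enable-notify/recheck pattern. Below is a minimal sketch of the whole helper for orientation; the rx branch mirrors the quoted hunk, while the tx branch (truncated after "!vhost_...") is an assumption, not the posted code:

/* Sketch only: the rx branch follows the quoted hunk; the tx branch
 * (cut off above) is assumed to poll the rx queue when the socket
 * already has data pending. */
static void busy_poll_vq_check_sketch(struct vhost_net *net,
                                      struct vhost_virtqueue *rvq,
                                      struct vhost_virtqueue *tvq,
                                      bool rx)
{
        struct socket *sock = rvq->private_data;

        if (rx) {
                /* The guest queued tx buffers while we busy-polled rx:
                 * schedule the tx handler directly. */
                if (!vhost_vq_avail_empty(&net->dev, tvq)) {
                        vhost_poll_queue(&tvq->poll);
                } else if (unlikely(vhost_enable_notify(&net->dev, tvq))) {
                        /* Buffers arrived between the emptiness check and
                         * re-enabling notification: suppress the guest kick
                         * and run the handler anyway. */
                        vhost_disable_notify(&net->dev, tvq);
                        vhost_poll_queue(&tvq->poll);
                }
        } else if (sock && sk_has_rx_data(sock->sk)) {
                /* Assumed tx-side counterpart (truncated in the snippet). */
                vhost_poll_queue(&rvq->poll);
        }
}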
2018 Jul 24
3
[PATCH net-next v6 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
...bool rx)
>>>>> +{
>>>>> +        struct socket *sock = rvq->private_data;
>>>>> +
>>>>> +        if (rx) {
>>>>> +                if (!vhost_vq_avail_empty(&net->dev, tvq)) {
>>>>> +                        vhost_poll_queue(&tvq->poll);
>>>>> +                } else if (unlikely(vhost_enable_notify(&net->dev, tvq))) {
>>>>> +                        vhost_disable_notify(&net->dev, tvq);
>>>>> +                        vhost_poll_queue(&tvq->poll);
>>&...
2019 Apr 25
2
[PATCH net] vhost_net: fix possible infinite loop
...VHOST_NET_BATCH)
@@ -845,11 +846,10 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
                vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
                vq->heads[nvq->done_idx].len = 0;
                ++nvq->done_idx;
-                if (vhost_exceeds_weight(++sent_pkts, total_len)) {
-                        vhost_poll_queue(&vq->poll);
-                        break;
-                }
-        }
+        } while (!(next_round = vhost_exceeds_weight(++sent_pkts, total_len)));
+
+        if (next_round)
+                vhost_poll_queue(&vq->poll);
        vhost_tx_batch(net, nvq, sock, &msg);
 }
@@ -873,8 +873,9 @@ static void handle_tx_zerocopy(struct vhost_net *net, stru...
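The shape of the fix: the weight check moves from an early break into the loop condition, so it runs once per packet, and the requeue happens after the loop rather than inside it. A skeleton of the before/after control flow; do_one_tx() is a placeholder for the copy logic, not a real vhost function:

/* Before: exceeding the weight breaks out mid-loop. */
for (;;) {
        if (!do_one_tx())                        /* e.g. avail ring drained */
                break;
        if (vhost_exceeds_weight(++sent_pkts, total_len)) {
                vhost_poll_queue(&vq->poll);     /* reschedule ourselves */
                break;
        }
}

/* After: the weight check is the loop condition, and the requeue
 * moves below the loop. */
bool next_round = false;

do {
        if (!do_one_tx())
                break;                           /* next_round stays false */
} while (!(next_round = vhost_exceeds_weight(++sent_pkts, total_len)));

if (next_round)
        vhost_poll_queue(&vq->poll);             /* finish on the next run */

vhost_tx_batch(net, nvq, sock, &msg);            /* always flush the batch */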
2014 Feb 12
2
[PATCH V2 5/6] vhost_net: poll vhost queue after marking DMA is done
...gger polling thread if guest stopped submitting new buffers:
>          * in this case, the refcount after decrement will eventually reach 1
> @@ -318,10 +323,6 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
>          */
>         if (cnt <= 2 || !(cnt % 16))
>                 vhost_poll_queue(&vq->poll);
> -       /* set len to mark this desc buffers done DMA */
> -       vq->heads[ubuf->desc].len = success ?
> -               VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
> -       vhost_net_ubuf_put(ubufs);
>  }
>
>  /* Expects to be always run from workqueue - which acts as
>
wit...
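The removed lines are the tail of the old ordering: the callback used to wake the worker first and only afterwards mark the descriptor done and drop the ubuf reference. Per the patch title, those steps move ahead of the wakeup. A sketch of the resulting callback, assuming vhost_net_ubuf_put() returns the post-decrement refcount as it did in kernels of that era:

/* Sketch of the reordered callback: publish completion state first,
 * only then decide whether to wake the worker. */
static void zerocopy_callback_sketch(struct ubuf_info *ubuf, bool success)
{
        struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
        struct vhost_virtqueue *vq = ubufs->vq;
        int cnt;

        /* Mark this desc's buffers done (or failed) DMA *before* any
         * wakeup, so the worker never sees a stale len. */
        vq->heads[ubuf->desc].len = success ?
                VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
        cnt = vhost_net_ubuf_put(ubufs);         /* assumed to return count */

        /* Trigger the polling thread if the guest stopped submitting new
         * buffers, and periodically every 16 packets otherwise. */
        if (cnt <= 2 || !(cnt % 16))
                vhost_poll_queue(&vq->poll);
}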
2019 Apr 26
2
[PATCH net] vhost_net: fix possible infinite loop
...id handle_tx_copy(struct vhost_net *net, struct socket *sock)
>>                 vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
>>                 vq->heads[nvq->done_idx].len = 0;
>>                 ++nvq->done_idx;
>> -                if (vhost_exceeds_weight(++sent_pkts, total_len)) {
>> -                        vhost_poll_queue(&vq->poll);
>> -                        break;
>> -                }
>> -        }
>> +        } while (!(next_round = vhost_exceeds_weight(++sent_pkts, total_len)));
>> +
>> +        if (next_round)
>> +                vhost_poll_queue(&vq->poll);
>>
>>         vhost_tx_batch(net, nvq, sock, &msg)...
2019 May 12
2
[PATCH net] vhost_net: fix possible infinite loop
...        vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
> > > >                 vq->heads[nvq->done_idx].len = 0;
> > > >                 ++nvq->done_idx;
> > > > -                if (vhost_exceeds_weight(++sent_pkts, total_len)) {
> > > > -                        vhost_poll_queue(&vq->poll);
> > > > -                        break;
> > > > -                }
> > > > -        }
> > > > +        } while (!(next_round = vhost_exceeds_weight(++sent_pkts,
> > > > total_len)));
> > > > +
> > > > +        if (next_round)
>...
2019 Apr 25
0
[PATCH net] vhost_net: fix possible infinite loop
...11 +846,10 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
>                 vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
>                 vq->heads[nvq->done_idx].len = 0;
>                 ++nvq->done_idx;
> -                if (vhost_exceeds_weight(++sent_pkts, total_len)) {
> -                        vhost_poll_queue(&vq->poll);
> -                        break;
> -                }
> -        }
> +        } while (!(next_round = vhost_exceeds_weight(++sent_pkts, total_len)));
> +
> +        if (next_round)
> +                vhost_poll_queue(&vq->poll);
>
>         vhost_tx_batch(net, nvq, sock, &msg);
> }
> @@ -873,8 +873,9 @@ sta...
2019 May 05
0
[PATCH net] vhost_net: fix possible infinite loop
...t *sock)
>>>                 vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
>>>                 vq->heads[nvq->done_idx].len = 0;
>>>                 ++nvq->done_idx;
>>> -                if (vhost_exceeds_weight(++sent_pkts, total_len)) {
>>> -                        vhost_poll_queue(&vq->poll);
>>> -                        break;
>>> -                }
>>> -        }
>>> +        } while (!(next_round = vhost_exceeds_weight(++sent_pkts,
>>> total_len)));
>>> +
>>> +        if (next_round)
>>> +                vhost_poll_queue(&vq->...
2018 Nov 30
3
[PATCH] vhost: fix IOTLB locking
...otlb. vhost_iotlb_notify_vq() now takes vq->mutex while holding
the device's IOTLB spinlock; on the vhost_iotlb_miss() path, the locks
are taken in the opposite order: the spinlock while holding vq->mutex.
As long as we hold dev->mutex to prevent an ioctl from modifying
vq->poll concurrently, we can safely call vhost_poll_queue() without
holding vq->mutex. Since vhost_process_iotlb_msg() holds dev->mutex when
calling vhost_iotlb_notify_vq(), avoid the deadlock by not taking
vq->mutex.
Fixes: 78139c94dc8c ("net: vhost: lock the vqs one by one")
Signed-off-by: Jean-Philippe Brucker <jean-philippe.bruc...
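This is a textbook ABBA inversion. Reduced to just the two lock orders (the iotlb_lock field name is taken from struct vhost_dev; the bodies are elided):

/* Path A: vhost_process_iotlb_msg() -> vhost_iotlb_notify_vq().
 * Holds the IOTLB spinlock and, before the fix, then took vq->mutex. */
spin_lock(&d->iotlb_lock);                  /* lock 1 */
/* mutex_lock(&vq->mutex);  <- removed by the fix */
vhost_poll_queue(&vq->poll);                /* safe: dev->mutex is held for
                                             * the whole ioctl, so vq->poll
                                             * cannot change under us */
spin_unlock(&d->iotlb_lock);

/* Path B: vhost_iotlb_miss(), entered with vq->mutex already held,
 * then takes the spinlock: the opposite order. */
mutex_lock(&vq->mutex);                     /* lock 2 */
spin_lock(&d->iotlb_lock);                  /* lock 1 after lock 2 */
/* ... queue the miss request ... */
spin_unlock(&d->iotlb_lock);
mutex_unlock(&vq->mutex);

Each path can end up holding one lock while waiting for the other; dropping the vq->mutex acquisition from path A breaks the cycle.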
2019 May 13
0
[PATCH net] vhost_net: fix possible infinite loop
...->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
>>>>>                 vq->heads[nvq->done_idx].len = 0;
>>>>>                 ++nvq->done_idx;
>>>>> -                if (vhost_exceeds_weight(++sent_pkts, total_len)) {
>>>>> -                        vhost_poll_queue(&vq->poll);
>>>>> -                        break;
>>>>> -                }
>>>>> -        }
>>>>> +        } while (!(next_round = vhost_exceeds_weight(++sent_pkts,
>>>>> total_len)));
>>>>> +
>>>>> +        if (next_ro...
2018 Jul 23
0
[PATCH net-next v6 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
...bool rx)
> >>> +{
> >>> +        struct socket *sock = rvq->private_data;
> >>> +
> >>> +        if (rx) {
> >>> +                if (!vhost_vq_avail_empty(&net->dev, tvq)) {
> >>> +                        vhost_poll_queue(&tvq->poll);
> >>> +                } else if (unlikely(vhost_enable_notify(&net->dev, tvq))) {
> >>> +                        vhost_disable_notify(&net->dev, tvq);
> >>> +                        vhost_poll_queue(&tvq->poll);
> >>>...
2018 Jul 03
2
[PATCH net-next v4 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
..._rx_data(sock->sk)) &&
> +               vhost_vq_avail_empty(tvq->dev, tvq))
> +                cpu_relax();
> +        preempt_enable();
> +
> +        if ((rx && !vhost_vq_avail_empty(&net->dev, vq)) ||
> +            (!rx && (sock && sk_has_rx_data(sock->sk)))) {
> +                vhost_poll_queue(&vq->poll);
> +        } else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
One last question: do we need this for rx? This check will always be
true under light or medium load.
Thanks
> +                vhost_disable_notify(&net->dev, vq);
> +                vhost_poll_queue(&vq->poll)...
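For reference, the post-spin dispatch quoted above, reduced and annotated (vq is the queue being checked: tvq on the rx path, rvq on the tx path):

/* After busy polling ends: is there work for the other direction? */
if ((rx && !vhost_vq_avail_empty(&net->dev, vq)) ||
    (!rx && (sock && sk_has_rx_data(sock->sk)))) {
        vhost_poll_queue(&vq->poll);             /* yes: run it now */
} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
        /* Work slipped in while notifications were off: cancel the
         * guest kick and handle it ourselves. */
        vhost_disable_notify(&net->dev, vq);
        vhost_poll_queue(&vq->poll);
}

The question above targets the last branch: on the rx side, vhost_enable_notify() reports pending buffers whenever the guest keeps the ring populated, so under light or medium load the fallback fires nearly every time.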
2018 Jul 03
2
[PATCH net-next v4 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
..._rx_data(sock->sk)) &&
> + vhost_vq_avail_empty(tvq->dev, tvq))
> + cpu_relax();
> + preempt_enable();
> +
> + if ((rx && !vhost_vq_avail_empty(&net->dev, vq)) ||
> + (!rx && (sock && sk_has_rx_data(sock->sk)))) {
> + vhost_poll_queue(&vq->poll);
> + } else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
One last question, do we need this for rx? This check will be always
true under light or medium load.
Thanks
> + vhost_disable_notify(&net->dev, vq);
> + vhost_poll_queue(&vq->poll)...