Displaying 10 results from an estimated 10 matches for "nvq_tx".
Did you mean:
vq_tx
2018 Jul 02
1
[PATCH net-next v3 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
...*nvq = &net->vqs[VHOST_NET_VQ_TX];
> - struct vhost_virtqueue *vq = &nvq->vq;
> - unsigned long uninitialized_var(endtime);
> - int len = peek_head_len(rvq, sk);
> + struct vhost_net_virtqueue *nvq_rx = &net->vqs[VHOST_NET_VQ_RX];
> + struct vhost_net_virtqueue *nvq_tx = &net->vqs[VHOST_NET_VQ_TX];
It looks to me that rnvq and tnvq are slightly better.
Other looks good to me.
Thanks
>
> - if (!len && vq->busyloop_timeout) {
> - /* Flush batched heads first */
> - vhost_rx_signal_used(rvq);
> - /* Both tx vq and rx socket were...
2018 Jun 30
0
[PATCH net-next v3 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
...truct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
- struct vhost_virtqueue *vq = &nvq->vq;
- unsigned long uninitialized_var(endtime);
- int len = peek_head_len(rvq, sk);
+ struct vhost_net_virtqueue *nvq_rx = &net->vqs[VHOST_NET_VQ_RX];
+ struct vhost_net_virtqueue *nvq_tx = &net->vqs[VHOST_NET_VQ_TX];
- if (!len && vq->busyloop_timeout) {
- /* Flush batched heads first */
- vhost_rx_signal_used(rvq);
- /* Both tx vq and rx socket were polled here */
- mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX);
- vhost_disable_notify(&net->...
2018 Dec 10
0
[PATCH net 2/4] vhost_net: rework on the lock ordering for busy polling
...mutex_unlock(&vq_rx->mutex);
mutex_unlock(&vq->mutex);
}
@@ -1060,7 +1065,9 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
static void handle_rx(struct vhost_net *net)
{
struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
+ struct vhost_net_virtqueue *nvq_tx = &net->vqs[VHOST_NET_VQ_TX];
struct vhost_virtqueue *vq = &nvq->vq;
+ struct vhost_virtqueue *vq_tx = &nvq_tx->vq;
unsigned uninitialized_var(in), log;
struct vhost_log *vq_log;
struct msghdr msg = {
@@ -1086,6 +1093,9 @@ static void handle_rx(struct vhost_net *net)...
2018 Jun 30
9
[PATCH net-next v3 0/4] net: vhost: improve performance when enable busyloop
From: Tonghao Zhang <xiangxia.m.yue at gmail.com>
These patches improve the guest receive and transmit performance.
On the handle_tx side, we poll the sock receive queue at the same time.
handle_rx do that in the same way.
These patches are split from the previous big patch:
http://patchwork.ozlabs.org/patch/934673/
For more performance report, see patch 4.
Tonghao Zhang (4):
net: vhost:
2018 Jun 26
3
[PATCH net-next v2] net: vhost: improve performance when enable busyloop
...loop_timeout;
-
- while (vhost_can_busy_poll(&net->dev, endtime) &&
- !sk_has_rx_data(sk) &&
- vhost_vq_avail_empty(&net->dev, vq))
- cpu_relax();
+ struct vhost_net_virtqueue *nvq_rx = &net->vqs[VHOST_NET_VQ_RX];
+ struct vhost_net_virtqueue *nvq_tx = &net->vqs[VHOST_NET_VQ_TX];
- preempt_enable();
+ int len = peek_head_len(nvq_rx, sk);
- if (!vhost_vq_avail_empty(&net->dev, vq))
- vhost_poll_queue(&vq->poll);
- else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
- vhost_disable_notify(&net->d...
2018 Jun 26
3
[PATCH net-next v2] net: vhost: improve performance when enable busyloop
...loop_timeout;
-
- while (vhost_can_busy_poll(&net->dev, endtime) &&
- !sk_has_rx_data(sk) &&
- vhost_vq_avail_empty(&net->dev, vq))
- cpu_relax();
+ struct vhost_net_virtqueue *nvq_rx = &net->vqs[VHOST_NET_VQ_RX];
+ struct vhost_net_virtqueue *nvq_tx = &net->vqs[VHOST_NET_VQ_TX];
- preempt_enable();
+ int len = peek_head_len(nvq_rx, sk);
- if (!vhost_vq_avail_empty(&net->dev, vq))
- vhost_poll_queue(&vq->poll);
- else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
- vhost_disable_notify(&net->d...
2018 Dec 11
2
[PATCH net 2/4] vhost_net: rework on the lock ordering for busy polling
...at
all times, let's just use a single mutex.
> @@ -1060,7 +1065,9 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
> static void handle_rx(struct vhost_net *net)
> {
> struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
> + struct vhost_net_virtqueue *nvq_tx = &net->vqs[VHOST_NET_VQ_TX];
> struct vhost_virtqueue *vq = &nvq->vq;
> + struct vhost_virtqueue *vq_tx = &nvq_tx->vq;
> unsigned uninitialized_var(in), log;
> struct vhost_log *vq_log;
> struct msghdr msg = {
> @@ -1086,6 +1093,9 @@ static void handl...
2018 Dec 11
2
[PATCH net 2/4] vhost_net: rework on the lock ordering for busy polling
...at
all times, let's just use a single mutex.
> @@ -1060,7 +1065,9 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
> static void handle_rx(struct vhost_net *net)
> {
> struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
> + struct vhost_net_virtqueue *nvq_tx = &net->vqs[VHOST_NET_VQ_TX];
> struct vhost_virtqueue *vq = &nvq->vq;
> + struct vhost_virtqueue *vq_tx = &nvq_tx->vq;
> unsigned uninitialized_var(in), log;
> struct vhost_log *vq_log;
> struct msghdr msg = {
> @@ -1086,6 +1093,9 @@ static void handl...
2018 Dec 10
9
[PATCH net 0/4] Fix various issue of vhost
Hi:
This series tries to fix various issues of vhost:
- Patch 1 adds a missing write barrier between used idx updating and
logging.
- Patch 2-3 brings back the protection of device IOTLB through vq
mutex, this fixes possible use after free in device IOTLB entries.
- Patch 4 fixes the dirty page logging when device IOTLB is
enabled. This should be done through GPA instead of GIOVA; this was done
2018 Dec 10
9
[PATCH net 0/4] Fix various issue of vhost
Hi:
This series tries to fix various issues of vhost:
- Patch 1 adds a missing write barrier between used idx updating and
logging.
- Patch 2-3 brings back the protection of device IOTLB through vq
mutex, this fixes possible use after free in device IOTLB entries.
- Patch 4 fixes the dirty page logging when device IOTLB is
enabled. This should be done through GPA instead of GIOVA; this was done