Displaying 15 results from an estimated 15 matches for "nvq_rx".
Did you mean:
vq_rx
2018 Jul 02
1
[PATCH net-next v3 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
...eue *rvq = &net->vqs[VHOST_NET_VQ_RX];
> - struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
> - struct vhost_virtqueue *vq = &nvq->vq;
> - unsigned long uninitialized_var(endtime);
> - int len = peek_head_len(rvq, sk);
> + struct vhost_net_virtqueue *nvq_rx = &net->vqs[VHOST_NET_VQ_RX];
> + struct vhost_net_virtqueue *nvq_tx = &net->vqs[VHOST_NET_VQ_TX];
It looks to me rnvq and tnvq is slightly better.
Other looks good to me.
Thanks
>
> - if (!len && vq->busyloop_timeout) {
> - /* Flush batched heads first...
2018 Jun 30
0
[PATCH net-next v3 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
...- struct vhost_net_virtqueue *rvq = &net->vqs[VHOST_NET_VQ_RX];
- struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
- struct vhost_virtqueue *vq = &nvq->vq;
- unsigned long uninitialized_var(endtime);
- int len = peek_head_len(rvq, sk);
+ struct vhost_net_virtqueue *nvq_rx = &net->vqs[VHOST_NET_VQ_RX];
+ struct vhost_net_virtqueue *nvq_tx = &net->vqs[VHOST_NET_VQ_TX];
- if (!len && vq->busyloop_timeout) {
- /* Flush batched heads first */
- vhost_rx_signal_used(rvq);
- /* Both tx vq and rx socket were polled here */
- mutex_lock_nested(...
2018 Jun 30
9
[PATCH net-next v3 0/4] net: vhost: improve performance when enable busyloop
From: Tonghao Zhang <xiangxia.m.yue at gmail.com>
These patches improve the guest receive and transmit performance.
On the handle_tx side, we poll the sock receive queue at the same time.
handle_rx do that in the same way.
These patches are split from a previous big patch:
http://patchwork.ozlabs.org/patch/934673/
For more performance report, see patch 4.
Tonghao Zhang (4):
net: vhost:
2018 Jun 26
3
[PATCH net-next v2] net: vhost: improve performance when enable busyloop
...vq->mutex);
+}
+
static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
struct vhost_virtqueue *vq,
struct iovec iov[], unsigned int iov_size,
unsigned int *out_num, unsigned int *in_num)
{
- unsigned long uninitialized_var(endtime);
+ struct vhost_net_virtqueue *nvq_rx = &net->vqs[VHOST_NET_VQ_RX];
+
int r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
out_num, in_num, NULL, NULL);
if (r == vq->num && vq->busyloop_timeout) {
- preempt_disable();
- endtime = busy_clock() + vq->busyloop_timeout;
- while (vhost_ca...
2018 Jun 26
3
[PATCH net-next v2] net: vhost: improve performance when enable busyloop
...vq->mutex);
+}
+
static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
struct vhost_virtqueue *vq,
struct iovec iov[], unsigned int iov_size,
unsigned int *out_num, unsigned int *in_num)
{
- unsigned long uninitialized_var(endtime);
+ struct vhost_net_virtqueue *nvq_rx = &net->vqs[VHOST_NET_VQ_RX];
+
int r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
out_num, in_num, NULL, NULL);
if (r == vq->num && vq->busyloop_timeout) {
- preempt_disable();
- endtime = busy_clock() + vq->busyloop_timeout;
- while (vhost_ca...
2018 Dec 11
2
[PATCH net 2/4] vhost_net: rework on the lock ordering for busy polling
...desc(struct vhost_net *net,
> @@ -913,10 +910,16 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
> static void handle_tx(struct vhost_net *net)
> {
> struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
> + struct vhost_net_virtqueue *nvq_rx = &net->vqs[VHOST_NET_VQ_RX];
> struct vhost_virtqueue *vq = &nvq->vq;
> + struct vhost_virtqueue *vq_rx = &nvq_rx->vq;
> struct socket *sock;
>
> + mutex_lock_nested(&vq_rx->mutex, VHOST_NET_VQ_RX);
> mutex_lock_nested(&vq->mutex, VHOST_...
2018 Dec 11
2
[PATCH net 2/4] vhost_net: rework on the lock ordering for busy polling
...desc(struct vhost_net *net,
> @@ -913,10 +910,16 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
> static void handle_tx(struct vhost_net *net)
> {
> struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
> + struct vhost_net_virtqueue *nvq_rx = &net->vqs[VHOST_NET_VQ_RX];
> struct vhost_virtqueue *vq = &nvq->vq;
> + struct vhost_virtqueue *vq_rx = &nvq_rx->vq;
> struct socket *sock;
>
> + mutex_lock_nested(&vq_rx->mutex, VHOST_NET_VQ_RX);
> mutex_lock_nested(&vq->mutex, VHOST_...
2018 Dec 11
2
[PATCH net 2/4] vhost_net: rework on the lock ordering for busy polling
...+910,16 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
> > > static void handle_tx(struct vhost_net *net)
> > > {
> > > struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
> > > + struct vhost_net_virtqueue *nvq_rx = &net->vqs[VHOST_NET_VQ_RX];
> > > struct vhost_virtqueue *vq = &nvq->vq;
> > > + struct vhost_virtqueue *vq_rx = &nvq_rx->vq;
> > > struct socket *sock;
> > > + mutex_lock_nested(&vq_rx->mutex, VHOST_NET_VQ_RX);
> > >...
2018 Dec 11
2
[PATCH net 2/4] vhost_net: rework on the lock ordering for busy polling
...+910,16 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
> > > static void handle_tx(struct vhost_net *net)
> > > {
> > > struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
> > > + struct vhost_net_virtqueue *nvq_rx = &net->vqs[VHOST_NET_VQ_RX];
> > > struct vhost_virtqueue *vq = &nvq->vq;
> > > + struct vhost_virtqueue *vq_rx = &nvq_rx->vq;
> > > struct socket *sock;
> > > + mutex_lock_nested(&vq_rx->mutex, VHOST_NET_VQ_RX);
> > >...
2018 Jun 30
0
[PATCH net-next v3 4/4] net: vhost: add rx busy polling in tx path
...ost/net.c
+++ b/drivers/vhost/net.c
@@ -478,17 +478,13 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
struct iovec iov[], unsigned int iov_size,
unsigned int *out_num, unsigned int *in_num)
{
- unsigned long uninitialized_var(endtime);
+ struct vhost_net_virtqueue *nvq_rx = &net->vqs[VHOST_NET_VQ_RX];
int r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
out_num, in_num, NULL, NULL);
if (r == vq->num && vq->busyloop_timeout) {
- preempt_disable();
- endtime = busy_clock() + vq->busyloop_timeout;
- while (vhost_can_...
2018 Dec 10
0
[PATCH net 2/4] vhost_net: rework on the lock ordering for busy polling
...int vhost_net_tx_get_vq_desc(struct vhost_net *net,
@@ -913,10 +910,16 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
static void handle_tx(struct vhost_net *net)
{
struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
+ struct vhost_net_virtqueue *nvq_rx = &net->vqs[VHOST_NET_VQ_RX];
struct vhost_virtqueue *vq = &nvq->vq;
+ struct vhost_virtqueue *vq_rx = &nvq_rx->vq;
struct socket *sock;
+ mutex_lock_nested(&vq_rx->mutex, VHOST_NET_VQ_RX);
mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX);
+ if (!vq->busy...
2018 Dec 11
0
[PATCH net 2/4] vhost_net: rework on the lock ordering for busy polling
...net,
>> @@ -913,10 +910,16 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
>> static void handle_tx(struct vhost_net *net)
>> {
>> struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
>> + struct vhost_net_virtqueue *nvq_rx = &net->vqs[VHOST_NET_VQ_RX];
>> struct vhost_virtqueue *vq = &nvq->vq;
>> + struct vhost_virtqueue *vq_rx = &nvq_rx->vq;
>> struct socket *sock;
>>
>> + mutex_lock_nested(&vq_rx->mutex, VHOST_NET_VQ_RX);
>> mutex_lock_nest...
2018 Dec 12
0
[PATCH net 2/4] vhost_net: rework on the lock ordering for busy polling
...static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
>>>> static void handle_tx(struct vhost_net *net)
>>>> {
>>>> struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
>>>> + struct vhost_net_virtqueue *nvq_rx = &net->vqs[VHOST_NET_VQ_RX];
>>>> struct vhost_virtqueue *vq = &nvq->vq;
>>>> + struct vhost_virtqueue *vq_rx = &nvq_rx->vq;
>>>> struct socket *sock;
>>>> + mutex_lock_nested(&vq_rx->mutex, VHOST_NET_VQ_RX);
>&...
2018 Dec 10
9
[PATCH net 0/4] Fix various issue of vhost
Hi:
This series tries to fix various issues of vhost:
- Patch 1 adds a missing write barrier between used idx updating and
logging.
- Patch 2-3 brings back the protection of device IOTLB through vq
mutex, this fixes possible use after free in device IOTLB entries.
- Patch 4 fixes the dirty page logging when device IOTLB is
enabled. This should be done through GPA instead of GIOVA, this was done
2018 Dec 10
9
[PATCH net 0/4] Fix various issue of vhost
Hi:
This series tries to fix various issues of vhost:
- Patch 1 adds a missing write barrier between used idx updating and
logging.
- Patch 2-3 brings back the protection of device IOTLB through vq
mutex, this fixes possible use after free in device IOTLB entries.
- Patch 4 fixes the dirty page logging when device IOTLB is
enabled. This should be done through GPA instead of GIOVA, this was done