Displaying 15 results from an estimated 15 matches for "last_avail_wrap_counter".
2018 Oct 12 · 2 · [PATCH net-next V2 6/8] vhost: packed ring support
...397,48 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
> vq->last_avail_idx = s.num;
> /* Forget the cached index value. */
> vq->avail_idx = vq->last_avail_idx;
> + if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
> + vq->last_avail_wrap_counter = wrap_counter;
> + vq->avail_wrap_counter = vq->last_avail_wrap_counter;
> + }
> break;
> case VHOST_GET_VRING_BASE:
> s.index = idx;
> s.num = vq->last_avail_idx;
> + if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED))
> + s.num |= vq->last_avail_...
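The snippet is cut off by the search view, but the visible lines show the idea:
with VIRTIO_F_RING_PACKED negotiated, VHOST_GET_VRING_BASE folds the wrap
counter into the same s.num value that already carries last_avail_idx, and
VHOST_SET_VRING_BASE restores it from a wrap_counter value apparently parsed in
the part of the hunk the search view elides. A rough, self-contained
illustration of such an encoding follows; the bit position (bit 15) and the
helper names are assumptions for the sketch, not taken from the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: 15-bit last_avail_idx in bits 0..14, wrap counter
 * in bit 15.  The exact layout used by the patch is elided above. */
#define WRAP_COUNTER_BIT 15

static uint32_t pack_vring_base(uint16_t last_avail_idx, bool wrap_counter)
{
	/* assumes the index fits in 15 bits under this layout */
	return (last_avail_idx & 0x7fff) |
	       ((uint32_t)wrap_counter << WRAP_COUNTER_BIT);
}

static void unpack_vring_base(uint32_t num, uint16_t *last_avail_idx,
			      bool *wrap_counter)
{
	*last_avail_idx = num & 0x7fff;
	*wrap_counter = !!(num & (1u << WRAP_COUNTER_BIT));
}

int main(void)
{
	uint16_t idx;
	bool wrap;

	unpack_vring_base(pack_vring_base(42, true), &idx, &wrap);
	printf("idx=%u wrap=%d\n", idx, wrap); /* idx=42 wrap=1 */
	return 0;
}

The only point is that a single u32 can round-trip both the index and the
one-bit counter through the existing VHOST_GET/SET_VRING_BASE interface.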
2018 Oct 15 · 2 · [PATCH net-next V2 6/8] vhost: packed ring support
...st_dev *d, unsigned int ioctl, void __user *arg
>>> vq->last_avail_idx = s.num;
>>> /* Forget the cached index value. */
>>> vq->avail_idx = vq->last_avail_idx;
>>> + if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
>>> + vq->last_avail_wrap_counter = wrap_counter;
>>> + vq->avail_wrap_counter = vq->last_avail_wrap_counter;
>>> + }
>>> break;
>>> case VHOST_GET_VRING_BASE:
>>> s.index = idx;
>>> s.num = vq->last_avail_idx;
>>> + if (vhost_has_feature(vq,...
2018 Oct 15 · 1 · [PATCH net-next V2 6/8] vhost: packed ring support
...*arg
>>>>> vq->last_avail_idx = s.num;
>>>>> /* Forget the cached index value. */
>>>>> vq->avail_idx = vq->last_avail_idx;
>>>>> + if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
>>>>> + vq->last_avail_wrap_counter = wrap_counter;
>>>>> + vq->avail_wrap_counter = vq->last_avail_wrap_counter;
>>>>> + }
>>>>> break;
>>>>> case VHOST_GET_VRING_BASE:
>>>>> s.index = idx;
>>>>> s.num = vq->last_...
2018 Oct 12 · 0 · [PATCH net-next V2 6/8] vhost: packed ring support
...g_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
> > vq->last_avail_idx = s.num;
> > /* Forget the cached index value. */
> > vq->avail_idx = vq->last_avail_idx;
> > + if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
> > + vq->last_avail_wrap_counter = wrap_counter;
> > + vq->avail_wrap_counter = vq->last_avail_wrap_counter;
> > + }
> > break;
> > case VHOST_GET_VRING_BASE:
> > s.index = idx;
> > s.num = vq->last_avail_idx;
> > + if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED))
> > + s.num |= vq->last_avail_...
2018 Oct 15 · 0 · [PATCH net-next V2 6/8] vhost: packed ring support
...__user *arg
> > > > vq->last_avail_idx = s.num;
> > > > /* Forget the cached index value. */
> > > > vq->avail_idx = vq->last_avail_idx;
> > > > + if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
> > > > + vq->last_avail_wrap_counter = wrap_counter;
> > > > + vq->avail_wrap_counter = vq->last_avail_wrap_counter;
> > > > + }
> > > > break;
> > > > case VHOST_GET_VRING_BASE:
> > > > s.index = idx;
> > > > s.num = vq->last_avail_idx;...
2018 Jul 16 · 0 · [PATCH net-next V2 6/8] vhost: packed ring support
...st.c
index 060a431..63b79e8 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -323,6 +323,9 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vhost_reset_is_le(vq);
vhost_disable_cross_endian(vq);
vq->busyloop_timeout = 0;
+ vq->last_used_wrap_counter = true;
+ vq->last_avail_wrap_counter = true;
+ vq->avail_wrap_counter = true;
vq->umem = NULL;
vq->iotlb = NULL;
__vhost_vq_meta_reset(vq);
@@ -1106,11 +1109,22 @@ static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
return 0;
}
-static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned...
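The hunk above initializes all three wrap counters to true in vhost_vq_reset(),
matching the virtio 1.1 rule that packed-ring wrap counters start at 1 and flip
each time the corresponding index wraps past the ring size. A minimal
standalone sketch of that behaviour follows; the field names mirror the patch,
but the struct and helper are illustrative, not code from the series.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative ring state, not the vhost_virtqueue from the patch. */
struct ring_state {
	uint16_t num;                 /* ring size */
	uint16_t last_avail_idx;      /* next descriptor slot to examine */
	bool last_avail_wrap_counter; /* starts at true, flips on wrap */
};

/* Advance the avail index by one; toggle the wrap counter when the
 * index wraps back to the start of the ring. */
static void advance_avail(struct ring_state *r)
{
	if (++r->last_avail_idx >= r->num) {
		r->last_avail_idx = 0;
		r->last_avail_wrap_counter = !r->last_avail_wrap_counter;
	}
}

int main(void)
{
	struct ring_state r = {
		.num = 4, .last_avail_idx = 3, .last_avail_wrap_counter = true,
	};

	advance_avail(&r);
	/* prints idx=0 wrap=0: the counter flipped on wrap-around */
	printf("idx=%u wrap=%d\n", r.last_avail_idx, r.last_avail_wrap_counter);
	return 0;
}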
2018 Jul 16 · 11 · [PATCH net-next V2 0/8] Packed virtqueue support for vhost
Hi all:
This series implements packed virtqueues. The code was tested with
Tiwei's guest driver series at https://patchwork.ozlabs.org/cover/942297/
Pktgen tests for both RX and TX do not show an obvious difference from
split virtqueues. The main bottleneck is the guest Linux driver, since
it cannot stress vhost to 100% CPU utilization. A full TCP
benchmark is ongoing. Will test
2018 May 29 · 9 · [RFC V5 PATCH 0/8] Packed ring layout for vhost
Hi all:
This RFC implements the packed ring layout. The code was tested with
Tiwei's RFC V5 at https://lkml.org/lkml/2018/5/22/138. Some fixups and
tweaks were needed on top of Tiwei's code to make it work with event
index.
Pktgen reports about a 20% improvement in TX PPS when doing pktgen from
guest to host. No obvious improvement in RX PPS. We can do lots of
optimizations on top but for simple
2018 Jul 03 · 12 · [PATCH net-next 0/8] Packed virtqueue for vhost
Hi all:
This series implements packed virtqueues. The code was tested with
Tiwei's RFC V6 at https://lkml.org/lkml/2018/6/5/120.
Pktgen tests for both RX and TX do not show an obvious difference from
split virtqueues. The main bottleneck is the guest Linux driver, since
it cannot stress vhost to 100% CPU utilization. A full TCP
benchmark is ongoing. Will test virtio-net pmd as well when
2019 Jul 17 · 17 · [PATCH V3 00/15] Packed virtqueue support for vhost
Hi all:
This series implements packed virtqueues, which were described
at [1]. In this version we try to address the performance regression
seen with V2. The root cause is that packed virtqueues need more
userspace memory accesses, which turn out to be very
expensive. Thanks to the help of 7f466032dc9e ("vhost: access vq
metadata through kernel virtual address"), such overhead could be
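As background for the threads above: in the packed ring layout a device decides
whether a descriptor is available by comparing its AVAIL and USED flag bits
against its avail wrap counter, which is why these patches carry the counter
alongside last_avail_idx. A standalone sketch of that check follows, assuming
the standard virtio 1.1 flag bit positions; it is not code from this series.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Flag bit positions defined by the virtio 1.1 packed ring layout. */
#define VRING_PACKED_DESC_F_AVAIL 7
#define VRING_PACKED_DESC_F_USED  15

/* A descriptor is available to the device when its AVAIL bit matches the
 * device's avail wrap counter and its USED bit does not. */
static bool desc_is_avail(uint16_t flags, bool avail_wrap_counter)
{
	bool avail = !!(flags & (1u << VRING_PACKED_DESC_F_AVAIL));
	bool used  = !!(flags & (1u << VRING_PACKED_DESC_F_USED));

	return avail == avail_wrap_counter && used != avail_wrap_counter;
}

int main(void)
{
	/* First pass around the ring: driver set AVAIL=1, USED=0, and the
	 * device's wrap counter is still true, so the descriptor is seen. */
	printf("%d\n", desc_is_avail(1u << VRING_PACKED_DESC_F_AVAIL, true));
	return 0;
}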