Displaying 20 results from an estimated 26 matches for "desc_event_off".
2018 Feb 27
3
[PATCH RFC 1/2] virtio: introduce packed ring defines
...fer. It's unreliable, so it's simply an optimization. Guest
> * will still kick if it's out of buffers. */
>@@ -104,6 +107,36 @@ struct vring {
> struct vring_used *used;
> };
>
>+struct vring_packed_desc_event {
>+ /* Descriptor Event Offset */
>+ __virtio16 desc_event_off : 15,
>+ /* Descriptor Event Wrap Counter */
>+ desc_event_wrap : 1;
>+ /* Descriptor Event Flags */
>+ __virtio16 desc_event_flags : 2;
>+};
Where would the virtqueue number go in driver notifications?
regards,
Jens
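
For context on the question above: in the eventual virtio 1.1 spec the virtqueue number does not live in vring_packed_desc_event at all; with VIRTIO_F_NOTIFICATION_DATA it travels in the driver's notification write itself, together with the next available offset and wrap counter. A rough sketch of that encoding (the helper name is illustrative, not from the patch):

#include <stdint.h>

/* Sketch: a driver notification is a single 32-bit write carrying the
 * virtqueue index plus the next available offset and wrap counter. */
static inline uint32_t notify_data(uint16_t vqn, uint16_t next_off,
                                   uint8_t next_wrap)
{
        return (uint32_t)vqn |                          /* bits 0..15: vq index */
               ((uint32_t)(next_off & 0x7fff) << 16) |  /* bits 16..30: offset  */
               ((uint32_t)(next_wrap & 0x1) << 31);     /* bit 31: wrap counter */
}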
2018 Mar 30
1
[RFC PATCH V2 8/8] vhost: event suppression for packed ring
...h can be ORed or ANDed. Instead, they
are defined as values:
0x0 enable
0x1 disable
0x2 desc
0x3 reserved
> +
> + /* Read desc event flags before event_off and event_wrap */
> + smp_rmb();
> +
> + if (vhost_get_avail(vq, event_off_wrap,
> + &vq->driver_event->desc_event_off_warp) < 0) {
> + vq_err(vq, "Failed to get driver desc_event_off/wrap");
> + return true;
> + }
> +
> + off = vhost16_to_cpu(vq, event_off_wrap);
> +
> + wrap = off & 0x1;
> + off >>= 1;
Based on the definitions below from the spec, the wrap counter is
the mo...
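
Two points from this review are worth making concrete: the event flags are plain values rather than ORable bits, and the spec definitions the reviewer points at appear to place the wrap counter in the most significant bit of the 16-bit off_wrap word, whereas the quoted wrap = off & 0x1; off >>= 1; extraction takes it from bit 0. A minimal sketch, with illustrative constant names:

#include <stdint.h>

/* The event-suppression flags are values, not ORable bits. */
enum {
        EVENT_FLAGS_ENABLE   = 0x0,  /* notifications enabled        */
        EVENT_FLAGS_DISABLE  = 0x1,  /* notifications disabled       */
        EVENT_FLAGS_DESC     = 0x2,  /* notify at a given descriptor */
        EVENT_FLAGS_RESERVED = 0x3,
};

/* Sketch of off_wrap decoding with the wrap counter in bit 15. */
static inline void decode_off_wrap(uint16_t off_wrap,
                                   uint16_t *off, uint16_t *wrap)
{
        *off  = off_wrap & 0x7fff;  /* bits 0..14: event offset    */
        *wrap = off_wrap >> 15;     /* bit 15: event wrap counter  */
}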
2018 Feb 27
1
[PATCH RFC 1/2] virtio: introduce packed ring defines
From: Tiwei Bie
> Sent: 23 February 2018 11:18
...
> +struct vring_packed_desc_event {
> + /* Descriptor Event Offset */
> + __virtio16 desc_event_off : 15,
> + /* Descriptor Event Wrap Counter */
> + desc_event_wrap : 1;
> + /* Descriptor Event Flags */
> + __virtio16 desc_event_flags : 2;
> +};
This looks like you are assuming that a bit-field has a defined
layout and can be used to map a 'hardware' structure.
The...
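
The concern is well founded: C leaves bit-field ordering and padding to the compiler and ABI, so a structure shared with a device (or across the guest/host boundary) is normally declared with fixed-width words and decoded with explicit shifts and masks. A sketch of that alternative, with illustrative names rather than the final kernel definition:

#include <stdint.h>

/* Layout-stable alternative to the bit-field struct: keep the raw
 * little-endian words and extract fields explicitly. */
struct packed_desc_event {
        uint16_t off_wrap;  /* bits 0..14: event offset, bit 15: wrap */
        uint16_t flags;     /* bits 0..1: event flags                 */
};

#define DESC_EVENT_OFF(w)   ((w) & 0x7fff)
#define DESC_EVENT_WRAP(w)  (((w) >> 15) & 0x1)
#define DESC_EVENT_FLAGS(w) ((w) & 0x3)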
2018 Feb 23
0
[PATCH RFC 1/2] virtio: introduce packed ring defines
...kick me when
* you add a buffer. It's unreliable, so it's simply an optimization. Guest
* will still kick if it's out of buffers. */
@@ -104,6 +107,36 @@ struct vring {
struct vring_used *used;
};
+struct vring_packed_desc_event {
+ /* Descriptor Event Offset */
+ __virtio16 desc_event_off : 15,
+ /* Descriptor Event Wrap Counter */
+ desc_event_wrap : 1;
+ /* Descriptor Event Flags */
+ __virtio16 desc_event_flags : 2;
+};
+
+struct vring_packed_desc {
+ /* Buffer Address. */
+ __virtio64 addr;
+ /* Buffer Length. */
+ __virtio32 len;
+ /* Buffer ID. */
+ __virtio16 id;
+ /*...
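
For reference, the descriptor the excerpt truncates is a fixed 16-byte record in the packed-ring draft linked elsewhere in these threads; a sketch with a compile-time size check (plain C integer types stand in for the __virtioNN wrappers):

#include <assert.h>
#include <stdint.h>

/* Sketch of the packed descriptor: 16 bytes, updated in place by both
 * driver and device. */
struct packed_desc {
        uint64_t addr;   /* buffer guest-physical address           */
        uint32_t len;    /* buffer length in bytes                  */
        uint16_t id;     /* buffer id, echoed back when it is used  */
        uint16_t flags;  /* AVAIL/USED wrap bits, WRITE, etc.       */
};

static_assert(sizeof(struct packed_desc) == 16,
              "packed descriptor must stay 16 bytes");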
2018 May 16
0
[RFC V4 PATCH 8/8] vhost: event suppression for packed ring
...if (event_flags == cpu_to_vhost16(vq, RING_EVENT_FLAGS_ENABLE))
+ return true;
+
+ /* Read desc event flags before event_off and event_wrap */
+ smp_rmb();
+
+ if (vhost_get_avail(vq, event_off_wrap,
+ &vq->driver_event->off_warp) < 0) {
+ vq_err(vq, "Failed to get driver desc_event_off/wrap");
+ return true;
+ }
+
+ off_wrap = vhost16_to_cpu(vq, event_off_wrap);
+
+ old = vq->signalled_used;
+ v = vq->signalled_used_valid;
+ new = vq->signalled_used = vq->last_used_idx;
+ vq->signalled_used_valid = true;
+
+ if (unlikely(!v))
+ return true;
+
+ return vhost_...
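
The call truncated at the end feeds old, new and the decoded offset into an event-index comparison. The split-ring helper it builds on is worth keeping in mind; a minimal restatement of the vring_need_event() test from the virtio spec, here with plain C types:

#include <stdint.h>

/* Notify only if new_idx has passed event_idx since the last
 * notification at old_idx; unsigned 16-bit arithmetic makes the
 * comparison immune to index wrap-around. */
static inline int need_event(uint16_t event_idx, uint16_t new_idx,
                             uint16_t old_idx)
{
        return (uint16_t)(new_idx - event_idx - 1) <
               (uint16_t)(new_idx - old_idx);
}

The packed-ring variants in these patches reuse the same comparison, taking the event index from off_wrap and adjusting the indices by the ring size when the wrap counters disagree.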
2018 Mar 26
0
[RFC PATCH V2 8/8] vhost: event suppression for packed ring
...event_flags & cpu_to_vhost16(vq, RING_EVENT_FLAGS_DESC)))
+ return event_flags ==
+ cpu_to_vhost16(vq, RING_EVENT_FLAGS_ENABLE);
+
+ /* Read desc event flags before event_off and event_wrap */
+ smp_rmb();
+
+ if (vhost_get_avail(vq, event_off_wrap,
+ &vq->driver_event->desc_event_off_warp) < 0) {
+ vq_err(vq, "Failed to get driver desc_event_off/wrap");
+ return true;
+ }
+
+ off = vhost16_to_cpu(vq, event_off_wrap);
+
+ wrap = off & 0x1;
+ off >>= 1;
+
+ old = vq->signalled_used;
+ v = vq->signalled_used_valid;
+ new = vq->signalled_used = vq-...
2018 Jul 03
0
[PATCH net-next 8/8] vhost: event suppression for packed ring
...+ return event_flags !=
+ cpu_to_vhost16(vq, RING_EVENT_FLAGS_DISABLE);
+
+ /* Read desc event flags before event_off and event_wrap */
+ smp_rmb();
+
+ if (vhost_get_avail(vq, event_off_wrap,
+ &vq->driver_event->off_wrap) < 0) {
+ vq_err(vq, "Failed to get driver desc_event_off/wrap");
+ return true;
+ }
+
+ off_wrap = vhost16_to_cpu(vq, event_off_wrap);
+
+ if (unlikely(!v))
+ return true;
+
+ return vhost_vring_packed_need_event(vq, vq->last_used_wrap_counter,
+ off_wrap, new, old);
+}
+
+static bool vhost_notify(struct vhost_dev *dev, struct vhost_vi...
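
The smp_rmb() in this excerpt pairs with a write barrier on the driver side: the driver has to publish the new off_wrap before switching flags to the descriptor-event mode, or the device could evaluate a stale offset. A hedged sketch of that counterpart ordering (illustrative names; a real guest driver would use a virtio write barrier rather than a plain fence):

#include <stdint.h>

#define EVENT_FLAGS_DESC 0x2

struct desc_event {
        uint16_t off_wrap;  /* bits 0..14: offset, bit 15: wrap counter */
        uint16_t flags;     /* 0x0 enable, 0x1 disable, 0x2 desc        */
};

/* Driver-side counterpart of the device's "read flags, rmb, read
 * off_wrap" sequence: store the offset first, fence, then expose it by
 * switching the flags. */
static void publish_desc_event(struct desc_event *ev,
                               uint16_t off, uint16_t wrap)
{
        ev->off_wrap = (uint16_t)(off | (uint16_t)(wrap << 15));
        __atomic_thread_fence(__ATOMIC_RELEASE);  /* pairs with the smp_rmb() */
        ev->flags = EVENT_FLAGS_DESC;
}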
2018 May 29
0
[RFC V5 PATCH 8/8] vhost: event suppression for packed ring
...+ return event_flags !=
+ cpu_to_vhost16(vq, RING_EVENT_FLAGS_DISABLE);
+
+ /* Read desc event flags before event_off and event_wrap */
+ smp_rmb();
+
+ if (vhost_get_avail(vq, event_off_wrap,
+ &vq->driver_event->off_wrap) < 0) {
+ vq_err(vq, "Failed to get driver desc_event_off/wrap");
+ return true;
+ }
+
+ off_wrap = vhost16_to_cpu(vq, event_off_wrap);
+
+ if (unlikely(!v))
+ return true;
+
+ return vhost_vring_packed_need_event(vq, vq->used_wrap_counter,
+ off_wrap, new, old);
+}
+
+static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtque...
2018 May 30
2
[RFC V5 PATCH 8/8] vhost: event suppression for packed ring
...cpu_to_vhost16(vq, RING_EVENT_FLAGS_DISABLE);
> +
> + /* Read desc event flags before event_off and event_wrap */
> + smp_rmb();
> +
> + if (vhost_get_avail(vq, event_off_wrap,
> + &vq->driver_event->off_wrap) < 0) {
> + vq_err(vq, "Failed to get driver desc_event_off/wrap");
> + return true;
> + }
> +
> + off_wrap = vhost16_to_cpu(vq, event_off_wrap);
> +
> + if (unlikely(!v))
> + return true;
> +
> + return vhost_vring_packed_need_event(vq, vq->used_wrap_counter,
> + off_wrap, new, old);
> +}
> +
> +sta...
2018 Jul 04
2
[PATCH net-next 8/8] vhost: event suppression for packed ring
...cpu_to_vhost16(vq, RING_EVENT_FLAGS_DISABLE);
> +
> + /* Read desc event flags before event_off and event_wrap */
> + smp_rmb();
> +
> + if (vhost_get_avail(vq, event_off_wrap,
> + &vq->driver_event->off_wrap) < 0) {
> + vq_err(vq, "Failed to get driver desc_event_off/wrap");
> + return true;
> + }
> +
> + off_wrap = vhost16_to_cpu(vq, event_off_wrap);
> +
> + if (unlikely(!v))
> + return true;
> +
> + return vhost_vring_packed_need_event(vq, vq->last_used_wrap_counter,
> + off_wrap, new, old);
> +}
> +
>...
2018 Feb 23
5
[PATCH RFC 0/2] Packed ring for virtio
Hello everyone,
This RFC implements a subset of the packed ring layout, which is described at
https://github.com/oasis-tcs/virtio-docs/blob/master/virtio-v1.1-packed-wd08.pdf
The code was tested with DPDK vhost (testpmd/vhost-PMD) implemented
by Jens at http://dpdk.org/ml/archives/dev/2018-January/089417.html
Minor changes are needed for the vhost code, e.g. to kick the guest.
It's not a complete
2018 Mar 26
12
[RFC PATCH V2 0/8] Packed ring for vhost
Hi all:
This RFC implements the packed ring layout. The code was tested with the
PMD implemented by Jens at
http://dpdk.org/ml/archives/dev/2018-January/089417.html. A minor change
was needed in the PMD code to kick the virtqueue, since it assumes a busy
polling backend.
Tests were done between localhost and guest. Testpmd (rxonly) in the guest
reports 2.4 Mpps. Testpmd (txonly) reports about 2.1 Mpps.
Notes: The event
2018 May 16
12
[RFC V4 PATCH 0/8] Packed ring layout for vhost
Hi all:
This RFC implements the packed ring layout. The code was tested with
Tiwei's RFC V3 at https://lkml.org/lkml/2018/4/25/34. Some fixups and
tweaks were needed on top of Tiwei's code to make it run with event
index.
Pktgen reports about a 20% improvement in PPS (with event index off). More
testing is ongoing.
Notes for testers:
- Starting from this version, vhost needs qemu co-operation to work
2018 May 29
9
[RFC V5 PATCH 0/8] Packed ring layout for vhost
Hi all:
This RFC implements the packed ring layout. The code was tested with
Tiwei's RFC V5 at https://lkml.org/lkml/2018/5/22/138. Some fixups and
tweaks were needed on top of Tiwei's code to make it run with event
index.
Pktgen reports about a 20% improvement in TX PPS when running pktgen from
guest to host. No obvious improvement in RX PPS. We can do lots of
optimizations on top, but for simple
2018 Jul 03
12
[PATCH net-next 0/8] Packed virtqueue for vhost
Hi all:
This series implements packed virtqueues. The code was tested with
Tiwei's RFC V6 at https://lkml.org/lkml/2018/6/5/120.
Pktgen tests for both RX and TX do not show an obvious difference from
split virtqueues. The main bottleneck is the guest Linux driver, since
it cannot stress vhost to 100% CPU utilization. A full TCP
benchmark is ongoing. Will test the virtio-net PMD as well when