Displaying 10 results from an estimated 10 matches for "virtqueue_enable_cb_prepare_urg".
2014 Oct 13
1
[PATCH net-next RFC 1/3] virtio: support for urgent descriptors
...>> drivers/virtio/virtio_ring.c | 75 +++++++++++++++++++++++++++++++++++++---
>> include/linux/virtio.h | 14 ++++++++
>> include/uapi/linux/virtio_ring.h | 5 ++-
>> 3 files changed, 89 insertions(+), 5 deletions(-)
>>
[...]
>>
>> +unsigned virtqueue_enable_cb_prepare_urgent(struct virtqueue *_vq)
>> +{
>> + struct vring_virtqueue *vq = to_vvq(_vq);
>> + u16 last_used_idx;
>> +
>> + START_USE(vq);
>> + vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_URGENT_INTERRUPT;
>> + last_used_idx = vq->last_used_idx;
>>...
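The diffstat above shows patch 1/3 also touching include/uapi/linux/virtio_ring.h, presumably to add a second interrupt-suppression bit alongside the existing VRING_AVAIL_F_NO_INTERRUPT. A minimal sketch of what that header change could look like (the exact bit value is an assumption, not taken from the patch):

/* Existing flag: the driver asks the device to skip used-buffer
 * interrupts altogether. */
#define VRING_AVAIL_F_NO_INTERRUPT        1
/* RFC addition (value assumed): suppress interrupts for ordinary
 * descriptors, but still deliver them for descriptors the driver
 * marked urgent. */
#define VRING_AVAIL_F_NO_URGENT_INTERRUPT 2

Read this way, the snippet above clears only the urgent bit, so virtqueue_enable_cb_prepare_urgent() re-enables interrupts for urgent descriptors while ordinary completions can stay silent.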
2014 Oct 11
2
[PATCH net-next RFC 1/3] virtio: support for urgent descriptors
...eue_disable_cb_urgent);
+
/**
* virtqueue_enable_cb_prepare - restart callbacks after disable_cb
* @vq: the struct virtqueue we're talking about.
@@ -626,6 +672,19 @@ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
+unsigned virtqueue_enable_cb_prepare_urgent(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ u16 last_used_idx;
+
+ START_USE(vq);
+ vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_URGENT_INTERRUPT;
+ last_used_idx = vq->last_used_idx;
+ END_USE(vq);
+ return last_used_idx;
+}
+EXPORT_SYMBOL_GPL(virtqueue_e...
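The _prepare/_poll split mirrors the stock virtqueue_enable_cb_prepare(): the caller re-arms callbacks, then polls once to close the race against a completion that arrived before the flag write became visible. A hypothetical caller (restart_urgent_cb() is an illustrative name, not part of the series), assuming virtqueue_poll() from virtio_ring.c and the series' virtqueue_disable_cb_urgent():

static bool restart_urgent_cb(struct virtqueue *vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare_urgent(vq);

	/* virtqueue_poll() returns true if used buffers appeared
	 * after last_used_idx, i.e. we raced and must keep polling. */
	if (unlikely(virtqueue_poll(vq, last_used_idx))) {
		virtqueue_disable_cb_urgent(vq);
		return false;	/* more work pending, stay in polling mode */
	}
	return true;		/* urgent callbacks armed */
}

Patch 3/3 applies exactly this shape from its tx NAPI handler, as the later results show.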
2014 Oct 12
0
[PATCH net-next RFC 1/3] virtio: support for urgent descriptors
...* virtqueue_enable_cb_prepare - restart callbacks after disable_cb
> * @vq: the struct virtqueue we're talking about.
> @@ -626,6 +672,19 @@ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
> }
> EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
>
> +unsigned virtqueue_enable_cb_prepare_urgent(struct virtqueue *_vq)
> +{
> + struct vring_virtqueue *vq = to_vvq(_vq);
> + u16 last_used_idx;
> +
> + START_USE(vq);
> + vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_URGENT_INTERRUPT;
> + last_used_idx = vq->last_used_idx;
> + END_USE(vq);
> + return las...
2014 Oct 11
10
[PATCH net-next RFC 0/3] virtio-net: Conditionally enable tx interrupt
Hello all:
We currently free old transmitted packets in ndo_start_xmit(), so every
packet must also be orphaned there. This was done to reduce the overhead of
tx interrupts and achieve better performance, but it may not work for some
protocols such as TCP streams. TCP depends on the value of sk_wmem_alloc to
implement various optimizations for small-packet streams, such as TCP small
queues and auto
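A minimal sketch of the status quo the cover letter describes (illustrative only, not code from the series): because completed skbs are reaped only inside ndo_start_xmit(), the driver has to orphan each skb at transmit time, which drops sk_wmem_alloc before the data has actually left the device and so starves byte-accounting heuristics like TCP small queues:

static netdev_tx_t xmit_orphan_sketch(struct sk_buff *skb,
				      struct net_device *dev)
{
	/* Detach skb from its socket: sk_wmem_alloc is decremented
	 * here, so TSQ/auto-corking see zero bytes in flight even
	 * though the packet still sits in the tx ring. */
	skb_orphan(skb);
	/* ... free old completions, post skb to the tx ring ... */
	return NETDEV_TX_OK;
}

Freeing skbs from a tx-interrupt-driven NAPI handler instead keeps them charged to the socket until the device consumes them, at the cost of interrupt overhead, which is why the series enables tx interrupts only conditionally via urgent descriptors.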
2014 Oct 14
0
[PATCH net-next RFC 3/3] virtio-net: conditionally enable tx interrupt
...>priv;
> + struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
> + unsigned int r, sent = 0;
> +
> +again:
> + __netif_tx_lock(txq, smp_processor_id());
> + sent += free_old_xmit_skbs(sq, budget - sent);
> +
> + if (sent < budget) {
> + r = virtqueue_enable_cb_prepare_urgent(sq->vq);
> + napi_complete(napi);
> + __netif_tx_unlock(txq);
> + if (unlikely(virtqueue_poll(sq->vq, r)) &&
> + napi_schedule_prep(napi)) {
> + virtqueue_disable_cb_urgent(sq->vq);
> + __napi_schedule(napi);
> + goto again;
> + }
> + }...
2014 Oct 11
2
[PATCH net-next RFC 3/3] virtio-net: conditionally enable tx interrupt
...ruct virtnet_info *vi = sq->vq->vdev->priv;
+ struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
+ unsigned int r, sent = 0;
+
+again:
+ __netif_tx_lock(txq, smp_processor_id());
+ sent += free_old_xmit_skbs(sq, budget - sent);
+
+ if (sent < budget) {
+ r = virtqueue_enable_cb_prepare_urgent(sq->vq);
+ napi_complete(napi);
+ __netif_tx_unlock(txq);
+ if (unlikely(virtqueue_poll(sq->vq, r)) &&
+ napi_schedule_prep(napi)) {
+ virtqueue_disable_cb_urgent(sq->vq);
+ __napi_schedule(napi);
+ goto again;
+ }
+ } else {
+ __netif_tx_unlock(txq);
+ }
+
+ net...
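The missing half of this pattern is the virtqueue callback that schedules the NAPI handler above. A sketch, assuming patch 3/3 wires it roughly like the stock skb_xmit_done() in drivers/net/virtio_net.c with the urgent variant swapped in, and that send_queue gained a napi member (as the handler above implies):

static void skb_xmit_done_sketch(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;
	struct send_queue *sq = &vi->sq[vq2txq(vq)];

	if (napi_schedule_prep(&sq->napi)) {
		/* Mask further urgent interrupts while the NAPI poll
		 * loop above frees the completed skbs. */
		virtqueue_disable_cb_urgent(vq);
		__napi_schedule(&sq->napi);
	}
}

Note how the poll handler, once under budget, re-arms with virtqueue_enable_cb_prepare_urgent() and then uses virtqueue_poll() to catch a completion that raced with napi_complete().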