在 2022/2/27 下午9:41, Eugenio Pérez 写道:> First half of the buffers forwarding part, preparing vhost-vdpa
> callbacks to SVQ to offer it. QEMU cannot enable it at this moment, so
> this is effectively dead code at the moment, but it helps to reduce
> patch size.
>
> Signed-off-by: Eugenio Pérez <eperezma at redhat.com>
> ---
> hw/virtio/vhost-vdpa.c | 84 ++++++++++++++++++++++++++++++++++++------
> 1 file changed, 73 insertions(+), 11 deletions(-)
>
> diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> index d614c435f3..b2c4e92fcf 100644
> --- a/hw/virtio/vhost-vdpa.c
> +++ b/hw/virtio/vhost-vdpa.c
> @@ -344,6 +344,16 @@ static bool vhost_vdpa_one_time_request(struct
vhost_dev *dev)
> return v->index != 0;
> }
>
> +static int vhost_vdpa_get_dev_features(struct vhost_dev *dev,
> + uint64_t *features)
> +{
> + int ret;
> +
> + ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
> + trace_vhost_vdpa_get_features(dev, *features);
> + return ret;
> +}
> +
> static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa
*v,
> Error **errp)
> {
> @@ -356,7 +366,7 @@ static int vhost_vdpa_init_svq(struct vhost_dev *hdev,
struct vhost_vdpa *v,
> return 0;
> }
>
> - r = hdev->vhost_ops->vhost_get_features(hdev,
&dev_features);
> + r = vhost_vdpa_get_dev_features(hdev, &dev_features);
> if (r != 0) {
> error_setg_errno(errp, -r, "Can't get vdpa device
features");
> return r;
> @@ -583,12 +593,26 @@ static int vhost_vdpa_set_mem_table(struct vhost_dev
*dev,
> static int vhost_vdpa_set_features(struct vhost_dev *dev,
> uint64_t features)
> {
> + struct vhost_vdpa *v = dev->opaque;
> int ret;
>
> if (vhost_vdpa_one_time_request(dev)) {
> return 0;
> }
>
> + if (v->shadow_vqs_enabled) {
> + uint64_t features_ok = features;
> + bool ok;
> +
> + ok = vhost_svq_valid_features(&features_ok);
> + if (unlikely(!ok)) {
> + error_report(
> + "Invalid guest acked feature flag, acked: 0x%"
> + PRIx64", ok: 0x%"PRIx64, features, features_ok);
> + return -EINVAL;
> + }
> + }
> +
> trace_vhost_vdpa_set_features(dev, features);
> ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
> if (ret) {
> @@ -735,6 +759,13 @@ static int vhost_vdpa_get_config(struct vhost_dev
*dev, uint8_t *config,
> return ret;
> }
>
> +static int vhost_vdpa_set_dev_vring_base(struct vhost_dev *dev,
> + struct vhost_vring_state *ring)
> +{
> + trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
> + return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
> +}
> +
> static int vhost_vdpa_set_vring_dev_kick(struct vhost_dev *dev,
> struct vhost_vring_file *file)
> {
> @@ -749,6 +780,18 @@ static int vhost_vdpa_set_vring_dev_call(struct
vhost_dev *dev,
> return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
> }
>
> +static int vhost_vdpa_set_vring_dev_addr(struct vhost_dev *dev,
> + struct vhost_vring_addr *addr)
> +{
> + trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
> + addr->desc_user_addr,
addr->used_user_addr,
> + addr->avail_user_addr,
> + addr->log_guest_addr);
> +
> + return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
> +
> +}
> +
> /**
> * Set the shadow virtqueue descriptors to the device
> *
> @@ -859,11 +902,17 @@ static int vhost_vdpa_set_log_base(struct vhost_dev
*dev, uint64_t base,
> static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
> struct vhost_vring_addr *addr)
> {
> - trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
> - addr->desc_user_addr,
addr->used_user_addr,
> - addr->avail_user_addr,
> - addr->log_guest_addr);
> - return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
> + struct vhost_vdpa *v = dev->opaque;
> +
> + if (v->shadow_vqs_enabled) {
> + /*
> + * Device vring addr was set at device start. SVQ base is handled
by
> + * VirtQueue code.
> + */
> + return 0;
> + }
> +
> + return vhost_vdpa_set_vring_dev_addr(dev, addr);
> }
>
> static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
> @@ -876,8 +925,17 @@ static int vhost_vdpa_set_vring_num(struct vhost_dev
*dev,
> static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
> struct vhost_vring_state *ring)
> {
> - trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
> - return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
> + struct vhost_vdpa *v = dev->opaque;
> +
> + if (v->shadow_vqs_enabled) {
> + /*
> + * Device vring base was set at device start. SVQ base is handled
by
> + * VirtQueue code.
> + */
> + return 0;
> + }
> +
> + return vhost_vdpa_set_dev_vring_base(dev, ring);
> }
>
> static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
> @@ -924,10 +982,14 @@ static int vhost_vdpa_set_vring_call(struct vhost_dev
*dev,
> static int vhost_vdpa_get_features(struct vhost_dev *dev,
> uint64_t *features)
> {
> - int ret;
> + struct vhost_vdpa *v = dev->opaque;
> + int ret = vhost_vdpa_get_dev_features(dev, features);
> +
> + if (ret == 0 && v->shadow_vqs_enabled) {
> + /* Filter only features that SVQ can offer to guest */
> + vhost_svq_valid_features(features);
I think it's better not to silently clear features here (e.g. the feature
could be explicitly enabled via the cli). This may cause more trouble for
future cross-vendor/backend live migration.
We can simply fail during vhost_vdpa init instead.
Thanks
> + }
>
> - ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
> - trace_vhost_vdpa_get_features(dev, *features);
> return ret;
> }
>