Hi,

Now that virtio-rpmsg endianness fixes have been merged we can
proceed with the next step.

v4:
- add endianness conversions to comply with the VirtIO standard

v3:
- address several checkpatch warnings
- address comments from Mathieu Poirier

v2:
- update patch #5 with a correct vhost_dev_init() prototype
- drop patch #6 - it depends on a different patch, that is currently
  an RFC
- address comments from Pierre-Louis Bossart:
  * remove "default n" from Kconfig

Linux supports RPMsg over VirtIO for "remote processor" / AMP use
cases. It can however also be used for virtualisation scenarios,
e.g. when using KVM to run Linux on both the host and the guests.
This patch set adds a wrapper API to facilitate writing vhost
drivers for such RPMsg-based solutions. The first use case is an
audio DSP virtualisation project, currently under development, ready
for review and submission, available at
https://github.com/thesofproject/linux/pull/1501/commits

Thanks
Guennadi

Guennadi Liakhovetski (4):
  vhost: convert VHOST_VSOCK_SET_RUNNING to a generic ioctl
  rpmsg: move common structures and defines to headers
  rpmsg: update documentation
  vhost: add an RPMsg API

 Documentation/rpmsg.txt          |   6 +-
 drivers/rpmsg/virtio_rpmsg_bus.c |  78 +------
 drivers/vhost/Kconfig            |   7 +
 drivers/vhost/Makefile           |   3 +
 drivers/vhost/rpmsg.c            | 375 +++++++++++++++++++++++++++++++
 drivers/vhost/vhost_rpmsg.h      |  74 ++++++
 include/linux/virtio_rpmsg.h     |  83 +++++++
 include/uapi/linux/rpmsg.h       |   3 +
 include/uapi/linux/vhost.h       |   4 +-
 9 files changed, 553 insertions(+), 80 deletions(-)
 create mode 100644 drivers/vhost/rpmsg.c
 create mode 100644 drivers/vhost/vhost_rpmsg.h
 create mode 100644 include/linux/virtio_rpmsg.h

--
2.27.0
Guennadi Liakhovetski
2020-Jul-22 15:09 UTC
[PATCH v4 1/4] vhost: convert VHOST_VSOCK_SET_RUNNING to a generic ioctl
VHOST_VSOCK_SET_RUNNING is used by the vhost vsock driver to perform
crucial VirtQueue initialisation, like assigning .private fields and
calling vhost_vq_init_access(), and clean-up. However, this ioctl is
actually extremely useful for any vhost driver that doesn't have a
side channel to inform it of a status change, e.g. upon a guest
reboot. This patch makes that ioctl generic, while preserving its
numeric value and also keeping the original alias.

Signed-off-by: Guennadi Liakhovetski <guennadi.liakhovetski at linux.intel.com>
---
 include/uapi/linux/vhost.h | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index 0c2349612e77..5d9254e2a6b6 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -95,6 +95,8 @@
#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)

+#define VHOST_SET_RUNNING _IOW(VHOST_VIRTIO, 0x61, int)
+
/* VHOST_NET specific defines */

/* Attach virtio net ring to a raw socket, or tap device.
@@ -116,7 +118,7 @@
/* VHOST_VSOCK specific defines */

#define VHOST_VSOCK_SET_GUEST_CID _IOW(VHOST_VIRTIO, 0x60, __u64)
-#define VHOST_VSOCK_SET_RUNNING _IOW(VHOST_VIRTIO, 0x61, int)
+#define VHOST_VSOCK_SET_RUNNING VHOST_SET_RUNNING

/* VHOST_VDPA specific defines */

--
2.27.0
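For readers unfamiliar with the vhost ioctl flow, the sketch below shows how user space (e.g. a VMM) might drive the now-generic ioctl. The /dev/vhost-rpmsg node name and the omitted queue setup are assumptions for illustration; only VHOST_SET_RUNNING itself comes from this patch.

/* Hypothetical user-space sketch: only VHOST_SET_RUNNING is defined by this
 * patch; the device node name and the skipped queue setup are assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

int main(void)
{
	int running = 1;
	int fd = open("/dev/vhost-rpmsg", O_RDWR);	/* assumed node name */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* ... VHOST_SET_OWNER, memory table and vring setup go here ... */

	/* Tell the vhost driver to initialise and start its virtqueues */
	if (ioctl(fd, VHOST_SET_RUNNING, &running) < 0)
		perror("VHOST_SET_RUNNING");

	return 0;
}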
Guennadi Liakhovetski
2020-Jul-22 15:09 UTC
[PATCH v4 2/4] rpmsg: move common structures and defines to headers
virtio_rpmsg_bus.c keeps RPMsg protocol structure declarations and
common defines, like the ones needed for name-space announcements,
internal. Move them to common headers instead.
Signed-off-by: Guennadi Liakhovetski <guennadi.liakhovetski at linux.intel.com>
---
drivers/rpmsg/virtio_rpmsg_bus.c | 78 +-----------------------------
include/linux/virtio_rpmsg.h | 83 ++++++++++++++++++++++++++++++++
include/uapi/linux/rpmsg.h | 3 ++
3 files changed, 88 insertions(+), 76 deletions(-)
create mode 100644 include/linux/virtio_rpmsg.h
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 9006fc7f73d0..9d5dd3f0a648 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -26,7 +26,9 @@
#include <linux/virtio_byteorder.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
+#include <linux/virtio_rpmsg.h>
#include <linux/wait.h>
+#include <uapi/linux/rpmsg.h>
#include "rpmsg_internal.h"
@@ -70,58 +72,6 @@ struct virtproc_info {
struct rpmsg_endpoint *ns_ept;
};
-/* The feature bitmap for virtio rpmsg */
-#define VIRTIO_RPMSG_F_NS 0 /* RP supports name service notifications */
-
-/**
- * struct rpmsg_hdr - common header for all rpmsg messages
- * @src: source address
- * @dst: destination address
- * @reserved: reserved for future use
- * @len: length of payload (in bytes)
- * @flags: message flags
- * @data: @len bytes of message payload data
- *
- * Every message sent(/received) on the rpmsg bus begins with this header.
- */
-struct rpmsg_hdr {
- __virtio32 src;
- __virtio32 dst;
- __virtio32 reserved;
- __virtio16 len;
- __virtio16 flags;
- u8 data[];
-} __packed;
-
-/**
- * struct rpmsg_ns_msg - dynamic name service announcement message
- * @name: name of remote service that is published
- * @addr: address of remote service that is published
- * @flags: indicates whether service is created or destroyed
- *
- * This message is sent across to publish a new service, or announce
- * about its removal. When we receive these messages, an appropriate
- * rpmsg channel (i.e device) is created/destroyed. In turn, the ->probe()
- * or ->remove() handler of the appropriate rpmsg driver will be invoked
- * (if/as-soon-as one is registered).
- */
-struct rpmsg_ns_msg {
- char name[RPMSG_NAME_SIZE];
- __virtio32 addr;
- __virtio32 flags;
-} __packed;
-
-/**
- * enum rpmsg_ns_flags - dynamic name service announcement flags
- *
- * @RPMSG_NS_CREATE: a new remote service was just created
- * @RPMSG_NS_DESTROY: a known remote service was just destroyed
- */
-enum rpmsg_ns_flags {
- RPMSG_NS_CREATE = 0,
- RPMSG_NS_DESTROY = 1,
-};
-
/**
* @vrp: the remote processor this channel belongs to
*/
@@ -134,27 +84,6 @@ struct virtio_rpmsg_channel {
#define to_virtio_rpmsg_channel(_rpdev) \
container_of(_rpdev, struct virtio_rpmsg_channel, rpdev)
-/*
- * We're allocating buffers of 512 bytes each for communications. The
- * number of buffers will be computed from the number of buffers supported
- * by the vring, upto a maximum of 512 buffers (256 in each direction).
- *
- * Each buffer will have 16 bytes for the msg header and 496 bytes for
- * the payload.
- *
- * This will utilize a maximum total space of 256KB for the buffers.
- *
- * We might also want to add support for user-provided buffers in time.
- * This will allow bigger buffer size flexibility, and can also be used
- * to achieve zero-copy messaging.
- *
- * Note that these numbers are purely a decision of this driver - we
- * can change this without changing anything in the firmware of the remote
- * processor.
- */
-#define MAX_RPMSG_NUM_BUFS (512)
-#define MAX_RPMSG_BUF_SIZE (512)
-
/*
* Local addresses are dynamically allocated on-demand.
* We do not dynamically assign addresses from the low 1024 range,
@@ -162,9 +91,6 @@ struct virtio_rpmsg_channel {
*/
#define RPMSG_RESERVED_ADDRESSES (1024)
-/* Address 53 is reserved for advertising remote services */
-#define RPMSG_NS_ADDR (53)
-
static void virtio_rpmsg_destroy_ept(struct rpmsg_endpoint *ept);
static int virtio_rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len);
static int virtio_rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len,
diff --git a/include/linux/virtio_rpmsg.h b/include/linux/virtio_rpmsg.h
new file mode 100644
index 000000000000..fcb523831e73
--- /dev/null
+++ b/include/linux/virtio_rpmsg.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_VIRTIO_RPMSG_H
+#define _LINUX_VIRTIO_RPMSG_H
+
+#include <linux/mod_devicetable.h>
+#include <linux/types.h>
+#include <linux/virtio_types.h>
+
+/**
+ * struct rpmsg_hdr - common header for all rpmsg messages
+ * @src: source address
+ * @dst: destination address
+ * @reserved: reserved for future use
+ * @len: length of payload (in bytes)
+ * @flags: message flags
+ * @data: @len bytes of message payload data
+ *
+ * Every message sent(/received) on the rpmsg bus begins with this header.
+ */
+struct rpmsg_hdr {
+ __virtio32 src;
+ __virtio32 dst;
+ __virtio32 reserved;
+ __virtio16 len;
+ __virtio16 flags;
+ u8 data[];
+} __packed;
+
+/**
+ * struct rpmsg_ns_msg - dynamic name service announcement message
+ * @name: name of remote service that is published
+ * @addr: address of remote service that is published
+ * @flags: indicates whether service is created or destroyed
+ *
+ * This message is sent across to publish a new service, or announce
+ * about its removal. When we receive these messages, an appropriate
+ * rpmsg channel (i.e device) is created/destroyed. In turn, the ->probe()
+ * or ->remove() handler of the appropriate rpmsg driver will be invoked
+ * (if/as-soon-as one is registered).
+ */
+struct rpmsg_ns_msg {
+ char name[RPMSG_NAME_SIZE];
+ __virtio32 addr;
+ __virtio32 flags;
+} __packed;
+
+/**
+ * enum rpmsg_ns_flags - dynamic name service announcement flags
+ *
+ * @RPMSG_NS_CREATE: a new remote service was just created
+ * @RPMSG_NS_DESTROY: a known remote service was just destroyed
+ */
+enum rpmsg_ns_flags {
+ RPMSG_NS_CREATE = 0,
+ RPMSG_NS_DESTROY = 1,
+};
+
+/*
+ * We're allocating buffers of 512 bytes each for communications. The
+ * number of buffers will be computed from the number of buffers supported
+ * by the vring, upto a maximum of 512 buffers (256 in each direction).
+ *
+ * Each buffer will have 16 bytes for the msg header and 496 bytes for
+ * the payload.
+ *
+ * This will utilize a maximum total space of 256KB for the buffers.
+ *
+ * We might also want to add support for user-provided buffers in time.
+ * This will allow bigger buffer size flexibility, and can also be used
+ * to achieve zero-copy messaging.
+ *
+ * Note that these numbers are purely a decision of this driver - we
+ * can change this without changing anything in the firmware of the remote
+ * processor.
+ */
+#define MAX_RPMSG_NUM_BUFS 512
+#define MAX_RPMSG_BUF_SIZE 512
+
+/* Address 53 is reserved for advertising remote services */
+#define RPMSG_NS_ADDR 53
+
+#endif
diff --git a/include/uapi/linux/rpmsg.h b/include/uapi/linux/rpmsg.h
index e14c6dab4223..d669c04ef289 100644
--- a/include/uapi/linux/rpmsg.h
+++ b/include/uapi/linux/rpmsg.h
@@ -24,4 +24,7 @@ struct rpmsg_endpoint_info {
#define RPMSG_CREATE_EPT_IOCTL _IOW(0xb5, 0x1, struct rpmsg_endpoint_info)
#define RPMSG_DESTROY_EPT_IOCTL _IO(0xb5, 0x2)
+/* The feature bitmap for virtio rpmsg */
+#define VIRTIO_RPMSG_F_NS 0 /* RP supports name service notifications */
+
#endif
--
2.27.0
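To show how the now-shared definitions fit together on the wire, here is a hedged sketch of building a name-service announcement with them. The fill_ns_announcement() helper and its vdev/buffer arguments are illustrative assumptions; the structures and constants come from the headers added above, and the buffer is assumed to hold at least sizeof(struct rpmsg_hdr) + sizeof(struct rpmsg_ns_msg) bytes.

/* Illustrative sketch only: the helper name and its arguments are made up,
 * the structures and constants come from include/linux/virtio_rpmsg.h and
 * include/uapi/linux/rpmsg.h.
 */
#include <linux/string.h>
#include <linux/virtio_config.h>
#include <linux/virtio_rpmsg.h>
#include <uapi/linux/rpmsg.h>

static void fill_ns_announcement(struct virtio_device *vdev, void *buf,
				 const char *name, u32 ept_addr)
{
	struct rpmsg_hdr *hdr = buf;
	struct rpmsg_ns_msg *ns = (struct rpmsg_ns_msg *)hdr->data;

	/* Header fields are stored in VirtIO (guest) endianness */
	hdr->src = cpu_to_virtio32(vdev, ept_addr);
	hdr->dst = cpu_to_virtio32(vdev, RPMSG_NS_ADDR);
	hdr->reserved = 0;
	hdr->len = cpu_to_virtio16(vdev, sizeof(*ns));
	hdr->flags = 0;

	strscpy(ns->name, name, sizeof(ns->name));
	ns->addr = cpu_to_virtio32(vdev, ept_addr);
	ns->flags = cpu_to_virtio32(vdev, RPMSG_NS_CREATE);
}

This roughly mirrors what virtio_rpmsg_bus.c does when announcing a channel, just written against the relocated definitions.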
rpmsg_create_ept() takes struct rpmsg_channel_info chinfo as its last
argument, not a u32 value. The first two arguments are also updated.

Signed-off-by: Guennadi Liakhovetski <guennadi.liakhovetski at linux.intel.com>
Reviewed-by: Mathieu Poirier <mathieu.poirier at linaro.org>
---
 Documentation/rpmsg.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/Documentation/rpmsg.txt b/Documentation/rpmsg.txt
index 24b7a9e1a5f9..1ce353cb232a 100644
--- a/Documentation/rpmsg.txt
+++ b/Documentation/rpmsg.txt
@@ -192,9 +192,9 @@ Returns 0 on success and an appropriate error value on failure.

::

-	struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *rpdev,
-		void (*cb)(struct rpmsg_channel *, void *, int, void *, u32),
-		void *priv, u32 addr);
+	struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *rpdev,
+					rpmsg_rx_cb_t cb, void *priv,
+					struct rpmsg_channel_info chinfo);

every rpmsg address in the system is bound to an rx callback (so when
inbound messages arrive, they are dispatched by the rpmsg bus using the
--
2.27.0
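As a quick illustration of the corrected prototype, here is a hedged sketch of a probe routine. The callback body, channel name and error handling are made-up examples; only rpmsg_create_ept(), rpmsg_rx_cb_t and struct rpmsg_channel_info come from the documented API.

/* Sketch only: names and behaviour here are illustrative. */
#include <linux/errno.h>
#include <linux/rpmsg.h>
#include <linux/string.h>

static int my_rpmsg_cb(struct rpmsg_device *rpdev, void *data, int len,
		       void *priv, u32 src)
{
	dev_info(&rpdev->dev, "received %d bytes from 0x%x\n", len, src);
	return 0;
}

static int my_rpmsg_probe(struct rpmsg_device *rpdev)
{
	struct rpmsg_channel_info chinfo = {
		.src = RPMSG_ADDR_ANY,
		.dst = RPMSG_ADDR_ANY,
	};
	struct rpmsg_endpoint *ept;

	strscpy(chinfo.name, "my-example-channel", sizeof(chinfo.name));

	ept = rpmsg_create_ept(rpdev, my_rpmsg_cb, NULL, chinfo);
	if (!ept)
		return -ENOMEM;

	return 0;
}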
Linux supports running the RPMsg protocol over the VirtIO transport
protocol, but currently there is only support for VirtIO clients and
no support for a VirtIO server. This patch adds a vhost-based RPMsg
server implementation.
Signed-off-by: Guennadi Liakhovetski <guennadi.liakhovetski at linux.intel.com>
---
drivers/vhost/Kconfig | 7 +
drivers/vhost/Makefile | 3 +
drivers/vhost/rpmsg.c | 375 ++++++++++++++++++++++++++++++++++++
drivers/vhost/vhost_rpmsg.h | 74 +++++++
4 files changed, 459 insertions(+)
create mode 100644 drivers/vhost/rpmsg.c
create mode 100644 drivers/vhost/vhost_rpmsg.h
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
index d3688c6afb87..602421bf1d03 100644
--- a/drivers/vhost/Kconfig
+++ b/drivers/vhost/Kconfig
@@ -38,6 +38,13 @@ config VHOST_NET
To compile this driver as a module, choose M here: the module will
be called vhost_net.
+config VHOST_RPMSG
+ tristate
+ depends on VHOST
+ help
+ Vhost RPMsg API allows vhost drivers to communicate with VirtIO
+ drivers, using the RPMsg over VirtIO protocol.
+
config VHOST_SCSI
tristate "VHOST_SCSI TCM fabric driver"
depends on TARGET_CORE && EVENTFD
diff --git a/drivers/vhost/Makefile b/drivers/vhost/Makefile
index f3e1897cce85..9cf459d59f97 100644
--- a/drivers/vhost/Makefile
+++ b/drivers/vhost/Makefile
@@ -2,6 +2,9 @@
obj-$(CONFIG_VHOST_NET) += vhost_net.o
vhost_net-y := net.o
+obj-$(CONFIG_VHOST_RPMSG) += vhost_rpmsg.o
+vhost_rpmsg-y := rpmsg.o
+
obj-$(CONFIG_VHOST_SCSI) += vhost_scsi.o
vhost_scsi-y := scsi.o
diff --git a/drivers/vhost/rpmsg.c b/drivers/vhost/rpmsg.c
new file mode 100644
index 000000000000..d7ab48414224
--- /dev/null
+++ b/drivers/vhost/rpmsg.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright(c) 2020 Intel Corporation. All rights reserved.
+ *
+ * Author: Guennadi Liakhovetski <guennadi.liakhovetski at linux.intel.com>
+ *
+ * Vhost RPMsg VirtIO interface. It provides a set of functions to match the
+ * guest side RPMsg VirtIO API, provided by drivers/rpmsg/virtio_rpmsg_bus.c
+ * These functions handle creation of 2 virtual queues, handling of endpoint
+ * addresses, sending a name-space announcement to the guest as well as any
+ * user messages. This API can be used by any vhost driver to handle RPMsg
+ * specific processing.
+ * Specific vhost drivers, using this API will use their own VirtIO device
+ * IDs, that should then also be added to the ID table in virtio_rpmsg_bus.c
+ */
+
+#include <linux/compat.h>
+#include <linux/file.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/vhost.h>
+#include <linux/virtio_rpmsg.h>
+#include <uapi/linux/rpmsg.h>
+
+#include "vhost.h"
+#include "vhost_rpmsg.h"
+
+/*
+ * All virtio-rpmsg virtual queue kicks always come with just one buffer -
+ * either input or output
+ */
+static int vhost_rpmsg_get_single(struct vhost_virtqueue *vq)
+{
+ struct vhost_rpmsg *vr = container_of(vq->dev, struct vhost_rpmsg, dev);
+ unsigned int out, in;
+ int head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov), &out, &in,
+ NULL, NULL);
+ if (head < 0) {
+ vq_err(vq, "%s(): error %d getting buffer\n",
+ __func__, head);
+ return head;
+ }
+
+ /* Nothing new? */
+ if (head == vq->num)
+ return head;
+
+ if (vq == &vr->vq[VIRTIO_RPMSG_RESPONSE] && (out || in != 1)) {
+ vq_err(vq,
+ "%s(): invalid %d input and %d output in response queue\n",
+ __func__, in, out);
+ goto return_buf;
+ }
+
+ if (vq == &vr->vq[VIRTIO_RPMSG_REQUEST] && (in || out != 1)) {
+ vq_err(vq,
+ "%s(): invalid %d input and %d output in request queue\n",
+ __func__, in, out);
+ goto return_buf;
+ }
+
+ return head;
+
+return_buf:
+ /*
+ * FIXME: might need to return the buffer using vhost_add_used()
+ * or vhost_discard_vq_desc(). vhost_discard_vq_desc() is
+ * described as "being useful for error handling," but it makes
+ * the thus discarded buffers "unseen," so next time we look we
+ * retrieve them again?
+ */
+ return -EINVAL;
+}
+
+static const struct vhost_rpmsg_ept *vhost_rpmsg_ept_find(struct vhost_rpmsg *vr, int addr)
+{
+ unsigned int i;
+
+ for (i = 0; i < vr->n_epts; i++)
+ if (vr->ept[i].addr == addr)
+ return vr->ept + i;
+
+ return NULL;
+}
+
+/*
+ * if len < 0, then for reading a request, the complete virtual queue buffer
+ * size is prepared, for sending a response, the length in the iterator is used
+ */
+int vhost_rpmsg_start_lock(struct vhost_rpmsg *vr, struct vhost_rpmsg_iter *iter,
+ unsigned int qid, ssize_t len)
+ __acquires(vq->mutex)
+{
+ struct vhost_virtqueue *vq = vr->vq + qid;
+ size_t tmp;
+
+ if (qid >= VIRTIO_RPMSG_NUM_OF_VQS)
+ return -EINVAL;
+
+ iter->vq = vq;
+
+ mutex_lock(&vq->mutex);
+ vhost_disable_notify(&vr->dev, vq);
+
+ iter->head = vhost_rpmsg_get_single(vq);
+ if (iter->head == vq->num)
+ iter->head = -EAGAIN;
+
+ if (iter->head < 0)
+ goto unlock;
+
+ tmp = vq->iov[0].iov_len;
+ if (tmp < sizeof(iter->rhdr)) {
+ vq_err(vq, "%s(): size %zu too small\n", __func__, tmp);
+ iter->head = -ENOBUFS;
+ goto return_buf;
+ }
+
+ switch (qid) {
+ case VIRTIO_RPMSG_REQUEST:
+ if (len < 0) {
+ len = tmp - sizeof(iter->rhdr);
+ } else if (tmp < sizeof(iter->rhdr) + len) {
+ iter->head = -ENOBUFS;
+ goto return_buf;
+ }
+
+ /* len is now the size of the payload */
+ iov_iter_init(&iter->iov_iter, WRITE,
+ vq->iov, 1, sizeof(iter->rhdr) + len);
+
+ /* Read the RPMSG header with endpoint addresses */
+ tmp = copy_from_iter(&iter->rhdr, sizeof(iter->rhdr), &iter->iov_iter);
+ if (tmp != sizeof(iter->rhdr)) {
+ vq_err(vq, "%s(): got %zu instead of %zu\n", __func__,
+ tmp, sizeof(iter->rhdr));
+ iter->head = -EIO;
+ goto return_buf;
+ }
+
+ iter->ept = vhost_rpmsg_ept_find(vr, vhost32_to_cpu(vq, iter->rhdr.dst));
+ if (!iter->ept) {
+ vq_err(vq, "%s(): no endpoint with address %d\n",
+ __func__, vhost32_to_cpu(vq, iter->rhdr.dst));
+ iter->head = -ENOENT;
+ goto return_buf;
+ }
+
+ /* Let the endpoint read the payload */
+ if (iter->ept->read) {
+ ssize_t ret = iter->ept->read(vr, iter);
+
+ if (ret < 0) {
+ iter->head = ret;
+ goto return_buf;
+ }
+
+ iter->rhdr.len = cpu_to_vhost16(vq, ret);
+ } else {
+ iter->rhdr.len = 0;
+ }
+
+ /* Prepare for the response phase */
+ iter->rhdr.dst = iter->rhdr.src;
+ iter->rhdr.src = cpu_to_vhost32(vq, iter->ept->addr);
+
+ break;
+ case VIRTIO_RPMSG_RESPONSE:
+ if (!iter->ept && iter->rhdr.dst != cpu_to_vhost32(vq, RPMSG_NS_ADDR)) {
+ /*
+ * Usually the iterator is configured when processing a
+ * message on the request queue, but it's also possible
+ * to send a message on the response queue without a
+ * preceding request, in that case the iterator must
+ * contain source and destination addresses.
+ */
+ iter->ept = vhost_rpmsg_ept_find(vr, vhost32_to_cpu(vq, iter->rhdr.src));
+ if (!iter->ept) {
+ iter->head = -ENOENT;
+ goto return_buf;
+ }
+ }
+
+ if (len < 0) {
+ len = tmp - sizeof(iter->rhdr);
+ } else if (tmp < sizeof(iter->rhdr) + len) {
+ iter->head = -ENOBUFS;
+ goto return_buf;
+ } else {
+ iter->rhdr.len = cpu_to_vhost16(vq, len);
+ }
+
+ /* len is now the size of the payload */
+ iov_iter_init(&iter->iov_iter, READ, vq->iov, 1, sizeof(iter->rhdr) + len);
+
+ /* Write the RPMSG header with endpoint addresses */
+ tmp = copy_to_iter(&iter->rhdr, sizeof(iter->rhdr), &iter->iov_iter);
+ if (tmp != sizeof(iter->rhdr)) {
+ iter->head = -EIO;
+ goto return_buf;
+ }
+
+ /* Let the endpoint write the payload */
+ if (iter->ept && iter->ept->write) {
+ ssize_t ret = iter->ept->write(vr, iter);
+
+ if (ret < 0) {
+ iter->head = ret;
+ goto return_buf;
+ }
+ }
+
+ break;
+ }
+
+ return 0;
+
+return_buf:
+ /*
+ * FIXME: vhost_discard_vq_desc() or vhost_add_used(), see comment in
+ * vhost_rpmsg_get_single()
+ */
+unlock:
+ vhost_enable_notify(&vr->dev, vq);
+ mutex_unlock(&vq->mutex);
+
+ return iter->head;
+}
+EXPORT_SYMBOL_GPL(vhost_rpmsg_start_lock);
+
+size_t vhost_rpmsg_copy(struct vhost_rpmsg *vr, struct vhost_rpmsg_iter *iter,
+ void *data, size_t size)
+{
+ /*
+ * We could check for excess data, but copy_{to,from}_iter() don't do
+ * that either
+ */
+ if (iter->vq == vr->vq + VIRTIO_RPMSG_RESPONSE)
+ return copy_to_iter(data, size, &iter->iov_iter);
+
+ return copy_from_iter(data, size, &iter->iov_iter);
+}
+EXPORT_SYMBOL_GPL(vhost_rpmsg_copy);
+
+int vhost_rpmsg_finish_unlock(struct vhost_rpmsg *vr,
+ struct vhost_rpmsg_iter *iter)
+ __releases(vq->mutex)
+{
+ if (iter->head >= 0)
+ vhost_add_used_and_signal(iter->vq->dev, iter->vq, iter->head,
+ vhost16_to_cpu(iter->vq, iter->rhdr.len) +
+ sizeof(iter->rhdr));
+
+ vhost_enable_notify(&vr->dev, iter->vq);
+ mutex_unlock(&iter->vq->mutex);
+
+ return iter->head;
+}
+EXPORT_SYMBOL_GPL(vhost_rpmsg_finish_unlock);
+
+/*
+ * Return false to terminate the external loop only if we fail to obtain either
+ * a request or a response buffer
+ */
+static bool handle_rpmsg_req_single(struct vhost_rpmsg *vr,
+ struct vhost_virtqueue *vq)
+{
+ struct vhost_rpmsg_iter iter;
+ int ret = vhost_rpmsg_start_lock(vr, &iter, VIRTIO_RPMSG_REQUEST, -EINVAL);
+ if (!ret)
+ ret = vhost_rpmsg_finish_unlock(vr, &iter);
+ if (ret < 0) {
+ if (ret != -EAGAIN)
+ vq_err(vq, "%s(): RPMSG processing failed %d\n",
+ __func__, ret);
+ return false;
+ }
+
+ if (!iter.ept->write)
+ return true;
+
+ ret = vhost_rpmsg_start_lock(vr, &iter, VIRTIO_RPMSG_RESPONSE, -EINVAL);
+ if (!ret)
+ ret = vhost_rpmsg_finish_unlock(vr, &iter);
+ if (ret < 0) {
+ vq_err(vq, "%s(): RPMSG finalising failed %d\n", __func__, ret);
+ return false;
+ }
+
+ return true;
+}
+
+static void handle_rpmsg_req_kick(struct vhost_work *work)
+{
+ struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+ poll.work);
+ struct vhost_rpmsg *vr = container_of(vq->dev, struct vhost_rpmsg, dev);
+
+ while (handle_rpmsg_req_single(vr, vq))
+ ;
+}
+
+/*
+ * initialise two virtqueues with an array of endpoints,
+ * request and response callbacks
+ */
+void vhost_rpmsg_init(struct vhost_rpmsg *vr, const struct vhost_rpmsg_ept *ept,
+ unsigned int n_epts)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(vr->vq); i++)
+ vr->vq_p[i] = &vr->vq[i];
+
+ /* vq[0]: host -> guest, vq[1]: host <- guest */
+ vr->vq[VIRTIO_RPMSG_REQUEST].handle_kick = handle_rpmsg_req_kick;
+ vr->vq[VIRTIO_RPMSG_RESPONSE].handle_kick = NULL;
+
+ vr->ept = ept;
+ vr->n_epts = n_epts;
+
+ vhost_dev_init(&vr->dev, vr->vq_p, VIRTIO_RPMSG_NUM_OF_VQS,
+ UIO_MAXIOV, 0, 0, true, NULL);
+}
+EXPORT_SYMBOL_GPL(vhost_rpmsg_init);
+
+void vhost_rpmsg_destroy(struct vhost_rpmsg *vr)
+{
+ if (vhost_dev_has_owner(&vr->dev))
+ vhost_poll_flush(&vr->vq[VIRTIO_RPMSG_REQUEST].poll);
+
+ vhost_dev_cleanup(&vr->dev);
+}
+EXPORT_SYMBOL_GPL(vhost_rpmsg_destroy);
+
+/* send namespace */
+int vhost_rpmsg_ns_announce(struct vhost_rpmsg *vr, const char *name, unsigned int src)
+{
+ struct vhost_virtqueue *vq = &vr->vq[VIRTIO_RPMSG_RESPONSE];
+ struct vhost_rpmsg_iter iter = {
+ .rhdr = {
+ .src = 0,
+ .dst = cpu_to_vhost32(vq, RPMSG_NS_ADDR),
+ .flags = cpu_to_vhost16(vq, RPMSG_NS_CREATE), /* rpmsg_recv_single() */
+ },
+ };
+ struct rpmsg_ns_msg ns = {
+ .addr = cpu_to_vhost32(vq, src),
+ .flags = cpu_to_vhost32(vq, RPMSG_NS_CREATE), /* for rpmsg_ns_cb() */
+ };
+ int ret = vhost_rpmsg_start_lock(vr, &iter, VIRTIO_RPMSG_RESPONSE, sizeof(ns));
+
+ if (ret < 0)
+ return ret;
+
+ strlcpy(ns.name, name, sizeof(ns.name));
+
+ ret = vhost_rpmsg_copy(vr, &iter, &ns, sizeof(ns));
+ if (ret != sizeof(ns))
+ vq_err(iter.vq, "%s(): added %d instead of %zu bytes\n",
+ __func__, ret, sizeof(ns));
+
+ ret = vhost_rpmsg_finish_unlock(vr, &iter);
+ if (ret < 0)
+ vq_err(iter.vq, "%s(): namespace announcement failed: %d\n",
+ __func__, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vhost_rpmsg_ns_announce);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Intel, Inc.");
+MODULE_DESCRIPTION("Vhost RPMsg API");
diff --git a/drivers/vhost/vhost_rpmsg.h b/drivers/vhost/vhost_rpmsg.h
new file mode 100644
index 000000000000..30072cecb8a0
--- /dev/null
+++ b/drivers/vhost/vhost_rpmsg.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright(c) 2020 Intel Corporation. All rights reserved.
+ *
+ * Author: Guennadi Liakhovetski <guennadi.liakhovetski at linux.intel.com>
+ */
+
+#ifndef VHOST_RPMSG_H
+#define VHOST_RPMSG_H
+
+#include <linux/uio.h>
+#include <linux/virtio_rpmsg.h>
+
+#include "vhost.h"
+
+/* RPMsg uses two VirtQueues: one for each direction */
+enum {
+ VIRTIO_RPMSG_RESPONSE, /* RPMsg response (host->guest) buffers */
+ VIRTIO_RPMSG_REQUEST, /* RPMsg request (guest->host) buffers */
+ /* Keep last */
+ VIRTIO_RPMSG_NUM_OF_VQS,
+};
+
+struct vhost_rpmsg_ept;
+
+struct vhost_rpmsg_iter {
+ struct iov_iter iov_iter;
+ struct rpmsg_hdr rhdr;
+ struct vhost_virtqueue *vq;
+ const struct vhost_rpmsg_ept *ept;
+ int head;
+ void *priv;
+};
+
+struct vhost_rpmsg {
+ struct vhost_dev dev;
+ struct vhost_virtqueue vq[VIRTIO_RPMSG_NUM_OF_VQS];
+ struct vhost_virtqueue *vq_p[VIRTIO_RPMSG_NUM_OF_VQS];
+ const struct vhost_rpmsg_ept *ept;
+ unsigned int n_epts;
+};
+
+struct vhost_rpmsg_ept {
+ ssize_t (*read)(struct vhost_rpmsg *, struct vhost_rpmsg_iter *);
+ ssize_t (*write)(struct vhost_rpmsg *, struct vhost_rpmsg_iter *);
+ int addr;
+};
+
+static inline size_t vhost_rpmsg_iter_len(const struct vhost_rpmsg_iter *iter)
+{
+ return iter->rhdr.len;
+}
+
+#define VHOST_RPMSG_ITER(_vq, _src, _dst) { \
+ .rhdr = { \
+ .src = cpu_to_vhost32(_vq, _src), \
+ .dst = cpu_to_vhost32(_vq, _dst), \
+ }, \
+ }
+
+void vhost_rpmsg_init(struct vhost_rpmsg *vr, const struct vhost_rpmsg_ept *ept,
+ unsigned int n_epts);
+void vhost_rpmsg_destroy(struct vhost_rpmsg *vr);
+int vhost_rpmsg_ns_announce(struct vhost_rpmsg *vr, const char *name,
+ unsigned int src);
+int vhost_rpmsg_start_lock(struct vhost_rpmsg *vr,
+ struct vhost_rpmsg_iter *iter,
+ unsigned int qid, ssize_t len);
+size_t vhost_rpmsg_copy(struct vhost_rpmsg *vr, struct vhost_rpmsg_iter *iter,
+ void *data, size_t size);
+int vhost_rpmsg_finish_unlock(struct vhost_rpmsg *vr,
+ struct vhost_rpmsg_iter *iter);
+
+#endif
--
2.27.0
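To make the intended use of the API more concrete, here is a hedged sketch of how a hypothetical client driver could wire up a single endpoint. The endpoint address, service name, payload handling and the surrounding driver glue (misc device, ioctl plumbing, VHOST_SET_RUNNING handling) are assumptions and not part of this patch; only the vhost_rpmsg_* calls and types come from the series, and the file is assumed to sit next to drivers/vhost/vhost_rpmsg.h.

/* Illustrative sketch of a vhost_rpmsg API client; names are made up. */
#include <linux/errno.h>
#include <linux/kernel.h>
#include "vhost_rpmsg.h"

#define MY_EPT_ADDR	0x400	/* arbitrary example address */

static struct vhost_rpmsg my_vr;

/* Called from the request queue with the payload available in @iter */
static ssize_t my_ept_read(struct vhost_rpmsg *vr,
			   struct vhost_rpmsg_iter *iter)
{
	char buf[64];
	size_t len = min(vhost_rpmsg_iter_len(iter), sizeof(buf));

	if (vhost_rpmsg_copy(vr, iter, buf, len) != len)
		return -EIO;

	/* ...process the request in buf... */

	return 0;	/* zero-length payload in the reply header */
}

static const struct vhost_rpmsg_ept my_ept[] = {
	{ .addr = MY_EPT_ADDR, .read = my_ept_read, .write = NULL },
};

static int my_driver_start(void)
{
	vhost_rpmsg_init(&my_vr, my_ept, ARRAY_SIZE(my_ept));

	/*
	 * Once user space has configured the queues (e.g. via the generic
	 * VHOST_SET_RUNNING from patch 1), announce the service so that the
	 * guest creates a matching rpmsg channel:
	 */
	return vhost_rpmsg_ns_announce(&my_vr, "my-example-service",
				       MY_EPT_ADDR);
}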
Stefano Garzarella
2020-Jul-23 08:34 UTC
[PATCH v4 1/4] vhost: convert VHOST_VSOCK_SET_RUNNING to a generic ioctl
On Wed, Jul 22, 2020 at 05:09:24PM +0200, Guennadi Liakhovetski wrote:
> VHOST_VSOCK_SET_RUNNING is used by the vhost vsock driver to perform
> crucial VirtQueue initialisation, like assigning .private fields and
> calling vhost_vq_init_access(), and clean up. However, this ioctl is
> actually extremely useful for any vhost driver, that doesn't have a
> side channel to inform it of a status change, e.g. upon a guest
> reboot. This patch makes that ioctl generic, while preserving its
> numeric value and also keeping the original alias.
>
> Signed-off-by: Guennadi Liakhovetski <guennadi.liakhovetski at linux.intel.com>
> ---
>  include/uapi/linux/vhost.h | 4 +++-
>  1 file changed, 3 insertions(+), 1 deletion(-)
>
> diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
> index 0c2349612e77..5d9254e2a6b6 100644
> --- a/include/uapi/linux/vhost.h
> +++ b/include/uapi/linux/vhost.h
> @@ -95,6 +95,8 @@
> #define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
> #define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
>
> +#define VHOST_SET_RUNNING _IOW(VHOST_VIRTIO, 0x61, int)
> +
> /* VHOST_NET specific defines */
>
> /* Attach virtio net ring to a raw socket, or tap device.
> @@ -116,7 +118,7 @@
> /* VHOST_VSOCK specific defines */
>
> #define VHOST_VSOCK_SET_GUEST_CID _IOW(VHOST_VIRTIO, 0x60, __u64)
> -#define VHOST_VSOCK_SET_RUNNING _IOW(VHOST_VIRTIO, 0x61, int)
> +#define VHOST_VSOCK_SET_RUNNING VHOST_SET_RUNNING
>
> /* VHOST_VDPA specific defines */
>
> --
> 2.27.0
>

Reviewed-by: Stefano Garzarella <sgarzare at redhat.com>
On Wed, Jul 22, 2020 at 05:09:23PM +0200, Guennadi Liakhovetski wrote:
> Hi,
>
> Now that virtio-rpmsg endianness fixes have been merged we can
> proceed with the next step.

Which tree is this for?

> v4:
> - add endianness conversions to comply with the VirtIO standard
>
> v3:
> - address several checkpatch warnings
> - address comments from Mathieu Poirier
>
> v2:
> - update patch #5 with a correct vhost_dev_init() prototype
> - drop patch #6 - it depends on a different patch, that is currently
>   an RFC
> - address comments from Pierre-Louis Bossart:
>   * remove "default n" from Kconfig
>
> Linux supports RPMsg over VirtIO for "remote processor" / AMP use
> cases. It can however also be used for virtualisation scenarios,
> e.g. when using KVM to run Linux on both the host and the guests.
> This patch set adds a wrapper API to facilitate writing vhost
> drivers for such RPMsg-based solutions. The first use case is an
> audio DSP virtualisation project, currently under development, ready
> for review and submission, available at
> https://github.com/thesofproject/linux/pull/1501/commits
>
> Thanks
> Guennadi
>
> Guennadi Liakhovetski (4):
>   vhost: convert VHOST_VSOCK_SET_RUNNING to a generic ioctl
>   rpmsg: move common structures and defines to headers
>   rpmsg: update documentation
>   vhost: add an RPMsg API
>
>  Documentation/rpmsg.txt          |   6 +-
>  drivers/rpmsg/virtio_rpmsg_bus.c |  78 +------
>  drivers/vhost/Kconfig            |   7 +
>  drivers/vhost/Makefile           |   3 +
>  drivers/vhost/rpmsg.c            | 375 +++++++++++++++++++++++++++++++
>  drivers/vhost/vhost_rpmsg.h      |  74 ++++++
>  include/linux/virtio_rpmsg.h     |  83 +++++++
>  include/uapi/linux/rpmsg.h       |   3 +
>  include/uapi/linux/vhost.h       |   4 +-
>  9 files changed, 553 insertions(+), 80 deletions(-)
>  create mode 100644 drivers/vhost/rpmsg.c
>  create mode 100644 drivers/vhost/vhost_rpmsg.h
>  create mode 100644 include/linux/virtio_rpmsg.h
>
> --
> 2.27.0
>
Hi Michael, On Thu, Jul 30, 2020 at 12:08:29PM -0400, Michael S. Tsirkin wrote:> On Wed, Jul 22, 2020 at 05:09:23PM +0200, Guennadi Liakhovetski wrote: > > Hi, > > > > Now that virtio-rpmsg endianness fixes have been merged we can > > proceed with the next step. > > Which tree is this for?The essential part of this series is for drivers/vhost, so, I presume that should be the target tree as well. There is however a small part for the drivers/rpmsg, should I split this series in two or shall we first review is as a whole to make its goals clearer? Thanks Guennadi> > v4: > > - add endianness conversions to comply with the VirtIO standard > > > > v3: > > - address several checkpatch warnings > > - address comments from Mathieu Poirier > > > > v2: > > - update patch #5 with a correct vhost_dev_init() prototype > > - drop patch #6 - it depends on a different patch, that is currently > > an RFC > > - address comments from Pierre-Louis Bossart: > > * remove "default n" from Kconfig > > > > Linux supports RPMsg over VirtIO for "remote processor" / AMP use > > cases. It can however also be used for virtualisation scenarios, > > e.g. when using KVM to run Linux on both the host and the guests. > > This patch set adds a wrapper API to facilitate writing vhost > > drivers for such RPMsg-based solutions. The first use case is an > > audio DSP virtualisation project, currently under development, ready > > for review and submission, available at > > https://github.com/thesofproject/linux/pull/1501/commits > > > > Thanks > > Guennadi > > > > Guennadi Liakhovetski (4): > > vhost: convert VHOST_VSOCK_SET_RUNNING to a generic ioctl > > rpmsg: move common structures and defines to headers > > rpmsg: update documentation > > vhost: add an RPMsg API > > > > Documentation/rpmsg.txt | 6 +- > > drivers/rpmsg/virtio_rpmsg_bus.c | 78 +------ > > drivers/vhost/Kconfig | 7 + > > drivers/vhost/Makefile | 3 + > > drivers/vhost/rpmsg.c | 375 +++++++++++++++++++++++++++++++ > > drivers/vhost/vhost_rpmsg.h | 74 ++++++ > > include/linux/virtio_rpmsg.h | 83 +++++++ > > include/uapi/linux/rpmsg.h | 3 + > > include/uapi/linux/vhost.h | 4 +- > > 9 files changed, 553 insertions(+), 80 deletions(-) > > create mode 100644 drivers/vhost/rpmsg.c > > create mode 100644 drivers/vhost/vhost_rpmsg.h > > create mode 100644 include/linux/virtio_rpmsg.h > > > > -- > > 2.27.0 >
On Mon, Aug 03, 2020 at 07:25:24AM -0600, Mathieu Poirier wrote:> On Thu, 30 Jul 2020 at 23:47, Guennadi Liakhovetski > <guennadi.liakhovetski at linux.intel.com> wrote: > > > > Hi Michael, > > > > On Thu, Jul 30, 2020 at 12:08:29PM -0400, Michael S. Tsirkin wrote: > > > On Wed, Jul 22, 2020 at 05:09:23PM +0200, Guennadi Liakhovetski wrote: > > > > Hi, > > > > > > > > Now that virtio-rpmsg endianness fixes have been merged we can > > > > proceed with the next step. > > > > > > Which tree is this for? > > > > The essential part of this series is for drivers/vhost, so, I presume > > that should be the target tree as well. There is however a small part > > for the drivers/rpmsg, should I split this series in two or shall we > > first review is as a whole to make its goals clearer? > > I suggest to keep it whole for now.Ok can I get some acks please? Also, I put this in my linux-next branch on https://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost.git there were some conflicts - could you pls test and report it's ok?> > > > Thanks > > Guennadi > > > > > > v4: > > > > - add endianness conversions to comply with the VirtIO standard > > > > > > > > v3: > > > > - address several checkpatch warnings > > > > - address comments from Mathieu Poirier > > > > > > > > v2: > > > > - update patch #5 with a correct vhost_dev_init() prototype > > > > - drop patch #6 - it depends on a different patch, that is currently > > > > an RFC > > > > - address comments from Pierre-Louis Bossart: > > > > * remove "default n" from Kconfig > > > > > > > > Linux supports RPMsg over VirtIO for "remote processor" / AMP use > > > > cases. It can however also be used for virtualisation scenarios, > > > > e.g. when using KVM to run Linux on both the host and the guests. > > > > This patch set adds a wrapper API to facilitate writing vhost > > > > drivers for such RPMsg-based solutions. The first use case is an > > > > audio DSP virtualisation project, currently under development, ready > > > > for review and submission, available at > > > > https://github.com/thesofproject/linux/pull/1501/commits > > > > > > > > Thanks > > > > Guennadi > > > > > > > > Guennadi Liakhovetski (4): > > > > vhost: convert VHOST_VSOCK_SET_RUNNING to a generic ioctl > > > > rpmsg: move common structures and defines to headers > > > > rpmsg: update documentation > > > > vhost: add an RPMsg API > > > > > > > > Documentation/rpmsg.txt | 6 +- > > > > drivers/rpmsg/virtio_rpmsg_bus.c | 78 +------ > > > > drivers/vhost/Kconfig | 7 + > > > > drivers/vhost/Makefile | 3 + > > > > drivers/vhost/rpmsg.c | 375 +++++++++++++++++++++++++++++++ > > > > drivers/vhost/vhost_rpmsg.h | 74 ++++++ > > > > include/linux/virtio_rpmsg.h | 83 +++++++ > > > > include/uapi/linux/rpmsg.h | 3 + > > > > include/uapi/linux/vhost.h | 4 +- > > > > 9 files changed, 553 insertions(+), 80 deletions(-) > > > > create mode 100644 drivers/vhost/rpmsg.c > > > > create mode 100644 drivers/vhost/vhost_rpmsg.h > > > > create mode 100644 include/linux/virtio_rpmsg.h > > > > > > > > -- > > > > 2.27.0 > > >
On Wed, Jul 22, 2020 at 05:09:23PM +0200, Guennadi Liakhovetski wrote:> Hi, > > Now that virtio-rpmsg endianness fixes have been merged we can > proceed with the next step.OK my attempts to resolve conflicts just created a mess. I dropped these for now, could you pls rebase on top of linux-next branch in my tree, and repost? Thanks!> v4: > - add endianness conversions to comply with the VirtIO standard > > v3: > - address several checkpatch warnings > - address comments from Mathieu Poirier > > v2: > - update patch #5 with a correct vhost_dev_init() prototype > - drop patch #6 - it depends on a different patch, that is currently > an RFC > - address comments from Pierre-Louis Bossart: > * remove "default n" from Kconfig > > Linux supports RPMsg over VirtIO for "remote processor" / AMP use > cases. It can however also be used for virtualisation scenarios, > e.g. when using KVM to run Linux on both the host and the guests. > This patch set adds a wrapper API to facilitate writing vhost > drivers for such RPMsg-based solutions. The first use case is an > audio DSP virtualisation project, currently under development, ready > for review and submission, available at > https://github.com/thesofproject/linux/pull/1501/commits > > Thanks > Guennadi > > Guennadi Liakhovetski (4): > vhost: convert VHOST_VSOCK_SET_RUNNING to a generic ioctl > rpmsg: move common structures and defines to headers > rpmsg: update documentation > vhost: add an RPMsg API > > Documentation/rpmsg.txt | 6 +- > drivers/rpmsg/virtio_rpmsg_bus.c | 78 +------ > drivers/vhost/Kconfig | 7 + > drivers/vhost/Makefile | 3 + > drivers/vhost/rpmsg.c | 375 +++++++++++++++++++++++++++++++ > drivers/vhost/vhost_rpmsg.h | 74 ++++++ > include/linux/virtio_rpmsg.h | 83 +++++++ > include/uapi/linux/rpmsg.h | 3 + > include/uapi/linux/vhost.h | 4 +- > 9 files changed, 553 insertions(+), 80 deletions(-) > create mode 100644 drivers/vhost/rpmsg.c > create mode 100644 drivers/vhost/vhost_rpmsg.h > create mode 100644 include/linux/virtio_rpmsg.h > > -- > 2.27.0
Hi Michael, On Tue, Aug 04, 2020 at 08:26:53AM -0400, Michael S. Tsirkin wrote:> On Wed, Jul 22, 2020 at 05:09:23PM +0200, Guennadi Liakhovetski wrote: > > Hi, > > > > Now that virtio-rpmsg endianness fixes have been merged we can > > proceed with the next step. > > OK my attempts to resolve conflicts just created a mess.You just need to apply my previous patch for virtio-rpmsg first https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git/commit/drivers/rpmsg/virtio_rpmsg_bus.c?id=111d1089700cdb752681ef44f54ab6137736f5c2 Then this series should apply cleanly. Thanks Guennadi> I dropped these for now, could you pls rebase on top > of linux-next branch in my tree, and repost? > Thanks! > > > > v4: > > - add endianness conversions to comply with the VirtIO standard > > > > v3: > > - address several checkpatch warnings > > - address comments from Mathieu Poirier > > > > v2: > > - update patch #5 with a correct vhost_dev_init() prototype > > - drop patch #6 - it depends on a different patch, that is currently > > an RFC > > - address comments from Pierre-Louis Bossart: > > * remove "default n" from Kconfig > > > > Linux supports RPMsg over VirtIO for "remote processor" / AMP use > > cases. It can however also be used for virtualisation scenarios, > > e.g. when using KVM to run Linux on both the host and the guests. > > This patch set adds a wrapper API to facilitate writing vhost > > drivers for such RPMsg-based solutions. The first use case is an > > audio DSP virtualisation project, currently under development, ready > > for review and submission, available at > > https://github.com/thesofproject/linux/pull/1501/commits > > > > Thanks > > Guennadi > > > > Guennadi Liakhovetski (4): > > vhost: convert VHOST_VSOCK_SET_RUNNING to a generic ioctl > > rpmsg: move common structures and defines to headers > > rpmsg: update documentation > > vhost: add an RPMsg API > > > > Documentation/rpmsg.txt | 6 +- > > drivers/rpmsg/virtio_rpmsg_bus.c | 78 +------ > > drivers/vhost/Kconfig | 7 + > > drivers/vhost/Makefile | 3 + > > drivers/vhost/rpmsg.c | 375 +++++++++++++++++++++++++++++++ > > drivers/vhost/vhost_rpmsg.h | 74 ++++++ > > include/linux/virtio_rpmsg.h | 83 +++++++ > > include/uapi/linux/rpmsg.h | 3 + > > include/uapi/linux/vhost.h | 4 +- > > 9 files changed, 553 insertions(+), 80 deletions(-) > > create mode 100644 drivers/vhost/rpmsg.c > > create mode 100644 drivers/vhost/vhost_rpmsg.h > > create mode 100644 include/linux/virtio_rpmsg.h > > > > -- > > 2.27.0 >
On Tue, Aug 04, 2020 at 07:37:49AM -0600, Mathieu Poirier wrote:> On Mon, 3 Aug 2020 at 14:47, Michael S. Tsirkin <mst at redhat.com> wrote: > > > > On Mon, Aug 03, 2020 at 07:25:24AM -0600, Mathieu Poirier wrote: > > > On Thu, 30 Jul 2020 at 23:47, Guennadi Liakhovetski > > > <guennadi.liakhovetski at linux.intel.com> wrote: > > > > > > > > Hi Michael, > > > > > > > > On Thu, Jul 30, 2020 at 12:08:29PM -0400, Michael S. Tsirkin wrote: > > > > > On Wed, Jul 22, 2020 at 05:09:23PM +0200, Guennadi Liakhovetski wrote: > > > > > > Hi, > > > > > > > > > > > > Now that virtio-rpmsg endianness fixes have been merged we can > > > > > > proceed with the next step. > > > > > > > > > > Which tree is this for? > > > > > > > > The essential part of this series is for drivers/vhost, so, I presume > > > > that should be the target tree as well. There is however a small part > > > > for the drivers/rpmsg, should I split this series in two or shall we > > > > first review is as a whole to make its goals clearer? > > > > > > I suggest to keep it whole for now. > > > > > > Ok can I get some acks please? > > Yes, as soon as I have the opportunity to review the work. There is a > lot of volume on the linux-remoteproc mailing list lately and > patchsets are reviewed in the order they have been received.Well the merge window is open, I guess I'll merge this and any issues can be addressed later then?> > Also, I put this in my linux-next branch on > > > > https://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost.git > > > > there were some conflicts - could you pls test and report it's ok? > > > > > > > > > > Thanks > > > > Guennadi > > > > > > > > > > v4: > > > > > > - add endianness conversions to comply with the VirtIO standard > > > > > > > > > > > > v3: > > > > > > - address several checkpatch warnings > > > > > > - address comments from Mathieu Poirier > > > > > > > > > > > > v2: > > > > > > - update patch #5 with a correct vhost_dev_init() prototype > > > > > > - drop patch #6 - it depends on a different patch, that is currently > > > > > > an RFC > > > > > > - address comments from Pierre-Louis Bossart: > > > > > > * remove "default n" from Kconfig > > > > > > > > > > > > Linux supports RPMsg over VirtIO for "remote processor" / AMP use > > > > > > cases. It can however also be used for virtualisation scenarios, > > > > > > e.g. when using KVM to run Linux on both the host and the guests. > > > > > > This patch set adds a wrapper API to facilitate writing vhost > > > > > > drivers for such RPMsg-based solutions. 
The first use case is an > > > > > > audio DSP virtualisation project, currently under development, ready > > > > > > for review and submission, available at > > > > > > https://github.com/thesofproject/linux/pull/1501/commits > > > > > > > > > > > > Thanks > > > > > > Guennadi > > > > > > > > > > > > Guennadi Liakhovetski (4): > > > > > > vhost: convert VHOST_VSOCK_SET_RUNNING to a generic ioctl > > > > > > rpmsg: move common structures and defines to headers > > > > > > rpmsg: update documentation > > > > > > vhost: add an RPMsg API > > > > > > > > > > > > Documentation/rpmsg.txt | 6 +- > > > > > > drivers/rpmsg/virtio_rpmsg_bus.c | 78 +------ > > > > > > drivers/vhost/Kconfig | 7 + > > > > > > drivers/vhost/Makefile | 3 + > > > > > > drivers/vhost/rpmsg.c | 375 +++++++++++++++++++++++++++++++ > > > > > > drivers/vhost/vhost_rpmsg.h | 74 ++++++ > > > > > > include/linux/virtio_rpmsg.h | 83 +++++++ > > > > > > include/uapi/linux/rpmsg.h | 3 + > > > > > > include/uapi/linux/vhost.h | 4 +- > > > > > > 9 files changed, 553 insertions(+), 80 deletions(-) > > > > > > create mode 100644 drivers/vhost/rpmsg.c > > > > > > create mode 100644 drivers/vhost/vhost_rpmsg.h > > > > > > create mode 100644 include/linux/virtio_rpmsg.h > > > > > > > > > > > > -- > > > > > > 2.27.0 > > > > > > >
On Wed, Jul 22, 2020 at 05:09:27PM +0200, Guennadi Liakhovetski wrote:> Linux supports running the RPMsg protocol over the VirtIO transport > protocol, but currently there is only support for VirtIO clients and > no support for a VirtIO server. This patch adds a vhost-based RPMsg > server implementation. > > Signed-off-by: Guennadi Liakhovetski <guennadi.liakhovetski at linux.intel.com> > --- > drivers/vhost/Kconfig | 7 + > drivers/vhost/Makefile | 3 + > drivers/vhost/rpmsg.c | 375 ++++++++++++++++++++++++++++++++++++ > drivers/vhost/vhost_rpmsg.h | 74 +++++++ > 4 files changed, 459 insertions(+) > create mode 100644 drivers/vhost/rpmsg.c > create mode 100644 drivers/vhost/vhost_rpmsg.h > > diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig > index d3688c6afb87..602421bf1d03 100644 > --- a/drivers/vhost/Kconfig > +++ b/drivers/vhost/Kconfig > @@ -38,6 +38,13 @@ config VHOST_NET > To compile this driver as a module, choose M here: the module will > be called vhost_net. > > +config VHOST_RPMSG > + tristateSo this lacks a description line so it does not appear in menuconfig. How is user supposed to set it? I added a one-line description.> + depends on VHOSTOther drivers select VHOST instead. Any reason not to do it like this here?> + help > + Vhost RPMsg API allows vhost drivers to communicate with VirtIO > + drivers, using the RPMsg over VirtIO protocol. > +> config VHOST_SCSI > tristate "VHOST_SCSI TCM fabric driver" > depends on TARGET_CORE && EVENTFD > diff --git a/drivers/vhost/Makefile b/drivers/vhost/Makefile > index f3e1897cce85..9cf459d59f97 100644 > --- a/drivers/vhost/Makefile > +++ b/drivers/vhost/Makefile > @@ -2,6 +2,9 @@ > obj-$(CONFIG_VHOST_NET) += vhost_net.o > vhost_net-y := net.o > > +obj-$(CONFIG_VHOST_RPMSG) += vhost_rpmsg.o > +vhost_rpmsg-y := rpmsg.o > + > obj-$(CONFIG_VHOST_SCSI) += vhost_scsi.o > vhost_scsi-y := scsi.o > > diff --git a/drivers/vhost/rpmsg.c b/drivers/vhost/rpmsg.c > new file mode 100644 > index 000000000000..d7ab48414224 > --- /dev/null > +++ b/drivers/vhost/rpmsg.c > @@ -0,0 +1,375 @@ > +// SPDX-License-Identifier: GPL-2.0-only > +/* > + * Copyright(c) 2020 Intel Corporation. All rights reserved. > + * > + * Author: Guennadi Liakhovetski <guennadi.liakhovetski at linux.intel.com> > + * > + * Vhost RPMsg VirtIO interface. It provides a set of functions to match the > + * guest side RPMsg VirtIO API, provided by drivers/rpmsg/virtio_rpmsg_bus.c > + * These functions handle creation of 2 virtual queues, handling of endpoint > + * addresses, sending a name-space announcement to the guest as well as any > + * user messages. This API can be used by any vhost driver to handle RPMsg > + * specific processing. 
> + * Specific vhost drivers, using this API will use their own VirtIO device > + * IDs, that should then also be added to the ID table in virtio_rpmsg_bus.c > + */ > + > +#include <linux/compat.h> > +#include <linux/file.h> > +#include <linux/miscdevice.h> > +#include <linux/module.h> > +#include <linux/mutex.h> > +#include <linux/vhost.h> > +#include <linux/virtio_rpmsg.h> > +#include <uapi/linux/rpmsg.h> > + > +#include "vhost.h" > +#include "vhost_rpmsg.h" > + > +/* > + * All virtio-rpmsg virtual queue kicks always come with just one buffer - > + * either input or output > + */ > +static int vhost_rpmsg_get_single(struct vhost_virtqueue *vq) > +{ > + struct vhost_rpmsg *vr = container_of(vq->dev, struct vhost_rpmsg, dev); > + unsigned int out, in; > + int head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov), &out, &in, > + NULL, NULL); > + if (head < 0) { > + vq_err(vq, "%s(): error %d getting buffer\n", > + __func__, head); > + return head; > + } > + > + /* Nothing new? */ > + if (head == vq->num) > + return head; > + > + if (vq == &vr->vq[VIRTIO_RPMSG_RESPONSE] && (out || in != 1)) {This in != 1 looks like a dependency on a specific message layout. virtio spec says to avoid these. Using iov iters it's not too hard to do ...> + vq_err(vq, > + "%s(): invalid %d input and %d output in response queue\n", > + __func__, in, out); > + goto return_buf; > + } > + > + if (vq == &vr->vq[VIRTIO_RPMSG_REQUEST] && (in || out != 1)) { > + vq_err(vq, > + "%s(): invalid %d input and %d output in request queue\n", > + __func__, in, out); > + goto return_buf; > + } > + > + return head; > + > +return_buf: > + /* > + * FIXME: might need to return the buffer using vhost_add_used() > + * or vhost_discard_vq_desc(). vhost_discard_vq_desc() is > + * described as "being useful for error handling," but it makes > + * the thus discarded buffers "unseen," so next time we look we > + * retrieve them again?Yes. It's your decision what to do on error. 
if you also signal an eventfd using vq_err, then discarding will make it so userspace can poke at ring and hopefully fix it ...> + */ > + return -EINVAL; > +} > + > +static const struct vhost_rpmsg_ept *vhost_rpmsg_ept_find(struct vhost_rpmsg *vr, int addr) > +{ > + unsigned int i; > + > + for (i = 0; i < vr->n_epts; i++) > + if (vr->ept[i].addr == addr) > + return vr->ept + i; > + > + return NULL; > +} > + > +/* > + * if len < 0, then for reading a request, the complete virtual queue buffer > + * size is prepared, for sending a response, the length in the iterator is used > + */ > +int vhost_rpmsg_start_lock(struct vhost_rpmsg *vr, struct vhost_rpmsg_iter *iter, > + unsigned int qid, ssize_t len) > + __acquires(vq->mutex) > +{ > + struct vhost_virtqueue *vq = vr->vq + qid; > + size_t tmp; > + > + if (qid >= VIRTIO_RPMSG_NUM_OF_VQS) > + return -EINVAL; > + > + iter->vq = vq; > + > + mutex_lock(&vq->mutex); > + vhost_disable_notify(&vr->dev, vq); > + > + iter->head = vhost_rpmsg_get_single(vq); > + if (iter->head == vq->num) > + iter->head = -EAGAIN; > + > + if (iter->head < 0) > + goto unlock; > + > + tmp = vq->iov[0].iov_len; > + if (tmp < sizeof(iter->rhdr)) { > + vq_err(vq, "%s(): size %zu too small\n", __func__, tmp); > + iter->head = -ENOBUFS; > + goto return_buf; > + } > + > + switch (qid) { > + case VIRTIO_RPMSG_REQUEST: > + if (len < 0) { > + len = tmp - sizeof(iter->rhdr); > + } else if (tmp < sizeof(iter->rhdr) + len) { > + iter->head = -ENOBUFS; > + goto return_buf; > + } > + > + /* len is now the size of the payload */ > + iov_iter_init(&iter->iov_iter, WRITE, > + vq->iov, 1, sizeof(iter->rhdr) + len); > + > + /* Read the RPMSG header with endpoint addresses */ > + tmp = copy_from_iter(&iter->rhdr, sizeof(iter->rhdr), &iter->iov_iter); > + if (tmp != sizeof(iter->rhdr)) { > + vq_err(vq, "%s(): got %zu instead of %zu\n", __func__, > + tmp, sizeof(iter->rhdr)); > + iter->head = -EIO; > + goto return_buf; > + } > + > + iter->ept = vhost_rpmsg_ept_find(vr, vhost32_to_cpu(vq, iter->rhdr.dst)); > + if (!iter->ept) { > + vq_err(vq, "%s(): no endpoint with address %d\n", > + __func__, vhost32_to_cpu(vq, iter->rhdr.dst)); > + iter->head = -ENOENT; > + goto return_buf; > + } > + > + /* Let the endpoint read the payload */ > + if (iter->ept->read) { > + ssize_t ret = iter->ept->read(vr, iter); > + > + if (ret < 0) { > + iter->head = ret; > + goto return_buf; > + } > + > + iter->rhdr.len = cpu_to_vhost16(vq, ret); > + } else { > + iter->rhdr.len = 0; > + } > + > + /* Prepare for the response phase */ > + iter->rhdr.dst = iter->rhdr.src; > + iter->rhdr.src = cpu_to_vhost32(vq, iter->ept->addr); > + > + break; > + case VIRTIO_RPMSG_RESPONSE: > + if (!iter->ept && iter->rhdr.dst != cpu_to_vhost32(vq, RPMSG_NS_ADDR)) { > + /* > + * Usually the iterator is configured when processing a > + * message on the request queue, but it's also possible > + * to send a message on the response queue without a > + * preceding request, in that case the iterator must > + * contain source and destination addresses. 
> + */ > + iter->ept = vhost_rpmsg_ept_find(vr, vhost32_to_cpu(vq, iter->rhdr.src)); > + if (!iter->ept) { > + iter->head = -ENOENT; > + goto return_buf; > + } > + } > + > + if (len < 0) { > + len = tmp - sizeof(iter->rhdr); > + } else if (tmp < sizeof(iter->rhdr) + len) { > + iter->head = -ENOBUFS; > + goto return_buf; > + } else { > + iter->rhdr.len = cpu_to_vhost16(vq, len); > + } > + > + /* len is now the size of the payload */ > + iov_iter_init(&iter->iov_iter, READ, vq->iov, 1, sizeof(iter->rhdr) + len); > + > + /* Write the RPMSG header with endpoint addresses */ > + tmp = copy_to_iter(&iter->rhdr, sizeof(iter->rhdr), &iter->iov_iter); > + if (tmp != sizeof(iter->rhdr)) { > + iter->head = -EIO; > + goto return_buf; > + } > + > + /* Let the endpoint write the payload */ > + if (iter->ept && iter->ept->write) { > + ssize_t ret = iter->ept->write(vr, iter); > + > + if (ret < 0) { > + iter->head = ret; > + goto return_buf; > + } > + } > + > + break; > + } > + > + return 0; > + > +return_buf: > + /* > + * FIXME: vhost_discard_vq_desc() or vhost_add_used(), see comment in > + * vhost_rpmsg_get_single() > + */What's to be done with this FIXME?> +unlock: > + vhost_enable_notify(&vr->dev, vq); > + mutex_unlock(&vq->mutex); > + > + return iter->head; > +} > +EXPORT_SYMBOL_GPL(vhost_rpmsg_start_lock); > + > +size_t vhost_rpmsg_copy(struct vhost_rpmsg *vr, struct vhost_rpmsg_iter *iter, > + void *data, size_t size) > +{ > + /* > + * We could check for excess data, but copy_{to,from}_iter() don't do > + * that either > + */ > + if (iter->vq == vr->vq + VIRTIO_RPMSG_RESPONSE) > + return copy_to_iter(data, size, &iter->iov_iter); > + > + return copy_from_iter(data, size, &iter->iov_iter); > +} > +EXPORT_SYMBOL_GPL(vhost_rpmsg_copy); > + > +int vhost_rpmsg_finish_unlock(struct vhost_rpmsg *vr, > + struct vhost_rpmsg_iter *iter) > + __releases(vq->mutex) > +{ > + if (iter->head >= 0) > + vhost_add_used_and_signal(iter->vq->dev, iter->vq, iter->head, > + vhost16_to_cpu(iter->vq, iter->rhdr.len) + > + sizeof(iter->rhdr)); > + > + vhost_enable_notify(&vr->dev, iter->vq); > + mutex_unlock(&iter->vq->mutex); > + > + return iter->head; > +} > +EXPORT_SYMBOL_GPL(vhost_rpmsg_finish_unlock); > + > +/* > + * Return false to terminate the external loop only if we fail to obtain either > + * a request or a response buffer > + */ > +static bool handle_rpmsg_req_single(struct vhost_rpmsg *vr, > + struct vhost_virtqueue *vq) > +{ > + struct vhost_rpmsg_iter iter; > + int ret = vhost_rpmsg_start_lock(vr, &iter, VIRTIO_RPMSG_REQUEST, -EINVAL); > + if (!ret) > + ret = vhost_rpmsg_finish_unlock(vr, &iter); > + if (ret < 0) { > + if (ret != -EAGAIN) > + vq_err(vq, "%s(): RPMSG processing failed %d\n", > + __func__, ret); > + return false; > + } > + > + if (!iter.ept->write) > + return true; > + > + ret = vhost_rpmsg_start_lock(vr, &iter, VIRTIO_RPMSG_RESPONSE, -EINVAL); > + if (!ret) > + ret = vhost_rpmsg_finish_unlock(vr, &iter); > + if (ret < 0) { > + vq_err(vq, "%s(): RPMSG finalising failed %d\n", __func__, ret); > + return false; > + } > + > + return true; > +} > + > +static void handle_rpmsg_req_kick(struct vhost_work *work) > +{ > + struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, > + poll.work); > + struct vhost_rpmsg *vr = container_of(vq->dev, struct vhost_rpmsg, dev); > + > + while (handle_rpmsg_req_single(vr, vq)) > + ; > +} > + > +/* > + * initialise two virtqueues with an array of endpoints, > + * request and response callbacks > + */ > +void vhost_rpmsg_init(struct vhost_rpmsg *vr, 
const struct vhost_rpmsg_ept *ept, > + unsigned int n_epts) > +{ > + unsigned int i; > + > + for (i = 0; i < ARRAY_SIZE(vr->vq); i++) > + vr->vq_p[i] = &vr->vq[i]; > + > + /* vq[0]: host -> guest, vq[1]: host <- guest */ > + vr->vq[VIRTIO_RPMSG_REQUEST].handle_kick = handle_rpmsg_req_kick; > + vr->vq[VIRTIO_RPMSG_RESPONSE].handle_kick = NULL; > + > + vr->ept = ept; > + vr->n_epts = n_epts; > + > + vhost_dev_init(&vr->dev, vr->vq_p, VIRTIO_RPMSG_NUM_OF_VQS, > + UIO_MAXIOV, 0, 0, true, NULL); > +} > +EXPORT_SYMBOL_GPL(vhost_rpmsg_init); > + > +void vhost_rpmsg_destroy(struct vhost_rpmsg *vr) > +{ > + if (vhost_dev_has_owner(&vr->dev)) > + vhost_poll_flush(&vr->vq[VIRTIO_RPMSG_REQUEST].poll); > + > + vhost_dev_cleanup(&vr->dev); > +} > +EXPORT_SYMBOL_GPL(vhost_rpmsg_destroy); > + > +/* send namespace */ > +int vhost_rpmsg_ns_announce(struct vhost_rpmsg *vr, const char *name, unsigned int src) > +{ > + struct vhost_virtqueue *vq = &vr->vq[VIRTIO_RPMSG_RESPONSE]; > + struct vhost_rpmsg_iter iter = { > + .rhdr = { > + .src = 0, > + .dst = cpu_to_vhost32(vq, RPMSG_NS_ADDR), > + .flags = cpu_to_vhost16(vq, RPMSG_NS_CREATE), /* rpmsg_recv_single() */ > + }, > + }; > + struct rpmsg_ns_msg ns = { > + .addr = cpu_to_vhost32(vq, src), > + .flags = cpu_to_vhost32(vq, RPMSG_NS_CREATE), /* for rpmsg_ns_cb() */ > + }; > + int ret = vhost_rpmsg_start_lock(vr, &iter, VIRTIO_RPMSG_RESPONSE, sizeof(ns)); > + > + if (ret < 0) > + return ret; > + > + strlcpy(ns.name, name, sizeof(ns.name)); > + > + ret = vhost_rpmsg_copy(vr, &iter, &ns, sizeof(ns)); > + if (ret != sizeof(ns)) > + vq_err(iter.vq, "%s(): added %d instead of %zu bytes\n", > + __func__, ret, sizeof(ns)); > + > + ret = vhost_rpmsg_finish_unlock(vr, &iter); > + if (ret < 0) > + vq_err(iter.vq, "%s(): namespace announcement failed: %d\n", > + __func__, ret); > + > + return ret; > +} > +EXPORT_SYMBOL_GPL(vhost_rpmsg_ns_announce); > + > +MODULE_LICENSE("GPL v2"); > +MODULE_AUTHOR("Intel, Inc."); > +MODULE_DESCRIPTION("Vhost RPMsg API"); > diff --git a/drivers/vhost/vhost_rpmsg.h b/drivers/vhost/vhost_rpmsg.h > new file mode 100644 > index 000000000000..30072cecb8a0 > --- /dev/null > +++ b/drivers/vhost/vhost_rpmsg.h > @@ -0,0 +1,74 @@ > +/* SPDX-License-Identifier: GPL-2.0 */ > +/* > + * Copyright(c) 2020 Intel Corporation. All rights reserved. 
> + * > + * Author: Guennadi Liakhovetski <guennadi.liakhovetski at linux.intel.com> > + */ > + > +#ifndef VHOST_RPMSG_H > +#define VHOST_RPMSG_H > + > +#include <linux/uio.h> > +#include <linux/virtio_rpmsg.h> > + > +#include "vhost.h" > + > +/* RPMsg uses two VirtQueues: one for each direction */ > +enum { > + VIRTIO_RPMSG_RESPONSE, /* RPMsg response (host->guest) buffers */ > + VIRTIO_RPMSG_REQUEST, /* RPMsg request (guest->host) buffers */ > + /* Keep last */ > + VIRTIO_RPMSG_NUM_OF_VQS, > +}; > + > +struct vhost_rpmsg_ept; > + > +struct vhost_rpmsg_iter { > + struct iov_iter iov_iter; > + struct rpmsg_hdr rhdr; > + struct vhost_virtqueue *vq; > + const struct vhost_rpmsg_ept *ept; > + int head; > + void *priv; > +}; > + > +struct vhost_rpmsg { > + struct vhost_dev dev; > + struct vhost_virtqueue vq[VIRTIO_RPMSG_NUM_OF_VQS]; > + struct vhost_virtqueue *vq_p[VIRTIO_RPMSG_NUM_OF_VQS]; > + const struct vhost_rpmsg_ept *ept; > + unsigned int n_epts; > +}; > + > +struct vhost_rpmsg_ept { > + ssize_t (*read)(struct vhost_rpmsg *, struct vhost_rpmsg_iter *); > + ssize_t (*write)(struct vhost_rpmsg *, struct vhost_rpmsg_iter *); > + int addr; > +}; > + > +static inline size_t vhost_rpmsg_iter_len(const struct vhost_rpmsg_iter *iter) > +{ > + return iter->rhdr.len; > +} > + > +#define VHOST_RPMSG_ITER(_vq, _src, _dst) { \ > + .rhdr = { \ > + .src = cpu_to_vhost32(_vq, _src), \ > + .dst = cpu_to_vhost32(_vq, _dst), \ > + }, \ > + } > + > +void vhost_rpmsg_init(struct vhost_rpmsg *vr, const struct vhost_rpmsg_ept *ept, > + unsigned int n_epts); > +void vhost_rpmsg_destroy(struct vhost_rpmsg *vr); > +int vhost_rpmsg_ns_announce(struct vhost_rpmsg *vr, const char *name, > + unsigned int src); > +int vhost_rpmsg_start_lock(struct vhost_rpmsg *vr, > + struct vhost_rpmsg_iter *iter, > + unsigned int qid, ssize_t len); > +size_t vhost_rpmsg_copy(struct vhost_rpmsg *vr, struct vhost_rpmsg_iter *iter, > + void *data, size_t size); > +int vhost_rpmsg_finish_unlock(struct vhost_rpmsg *vr, > + struct vhost_rpmsg_iter *iter); > + > +#endif > -- > 2.27.0
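For what it's worth, one way to drop the single-buffer assumption flagged in the review above would be to build the iov_iter over every writable segment returned by vhost_get_vq_desc() instead of vq->iov[0] only. A rough sketch of the response side follows; the helper name is hypothetical and this is not part of the posted series.

/* Rough sketch, not from the posted series: prepare the response iterator
 * over all 'in' segments, so header and payload may span descriptors.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/uio.h>
#include "vhost.h"
#include "vhost_rpmsg.h"

static int vhost_rpmsg_prepare_response(struct vhost_virtqueue *vq,
					struct vhost_rpmsg_iter *iter,
					unsigned int in)
{
	size_t total = iov_length(vq->iov, in);

	if (total < sizeof(iter->rhdr))
		return -ENOBUFS;

	/* Cover all 'in' segments instead of assuming a single buffer */
	iov_iter_init(&iter->iov_iter, READ, vq->iov, in, total);

	if (copy_to_iter(&iter->rhdr, sizeof(iter->rhdr), &iter->iov_iter) !=
	    sizeof(iter->rhdr))
		return -EIO;

	return 0;
}

The request side would do the same over the readable segments with copy_from_iter(), mirroring the direction convention the patch already uses.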
On Tue, Aug 04, 2020 at 01:30:32PM -0600, Mathieu Poirier wrote:> On Tue, 4 Aug 2020 at 08:07, Michael S. Tsirkin <mst at redhat.com> wrote: > > > > On Tue, Aug 04, 2020 at 07:37:49AM -0600, Mathieu Poirier wrote: > > > On Mon, 3 Aug 2020 at 14:47, Michael S. Tsirkin <mst at redhat.com> wrote: > > > > > > > > On Mon, Aug 03, 2020 at 07:25:24AM -0600, Mathieu Poirier wrote: > > > > > On Thu, 30 Jul 2020 at 23:47, Guennadi Liakhovetski > > > > > <guennadi.liakhovetski at linux.intel.com> wrote: > > > > > > > > > > > > Hi Michael, > > > > > > > > > > > > On Thu, Jul 30, 2020 at 12:08:29PM -0400, Michael S. Tsirkin wrote: > > > > > > > On Wed, Jul 22, 2020 at 05:09:23PM +0200, Guennadi Liakhovetski wrote: > > > > > > > > Hi, > > > > > > > > > > > > > > > > Now that virtio-rpmsg endianness fixes have been merged we can > > > > > > > > proceed with the next step. > > > > > > > > > > > > > > Which tree is this for? > > > > > > > > > > > > The essential part of this series is for drivers/vhost, so, I presume > > > > > > that should be the target tree as well. There is however a small part > > > > > > for the drivers/rpmsg, should I split this series in two or shall we > > > > > > first review is as a whole to make its goals clearer? > > > > > > > > > > I suggest to keep it whole for now. > > > > > > > > > > > > Ok can I get some acks please? > > > > > > Yes, as soon as I have the opportunity to review the work. There is a > > > lot of volume on the linux-remoteproc mailing list lately and > > > patchsets are reviewed in the order they have been received. > > > > Well the merge window is open, I guess I'll merge this and > > any issues can be addressed later then? > > Please don't do that. I prefer to miss a merge window than impacting > upstream consumers. This patch will be reviewed, just not in time for > this merge window.OK then.> > > > > > Also, I put this in my linux-next branch on > > > > > > > > https://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost.git > > > > > > > > there were some conflicts - could you pls test and report it's ok? > > > > > > > > > > > > > > > > Thanks > > > > > > Guennadi > > > > > > > > > > > > > > v4: > > > > > > > > - add endianness conversions to comply with the VirtIO standard > > > > > > > > > > > > > > > > v3: > > > > > > > > - address several checkpatch warnings > > > > > > > > - address comments from Mathieu Poirier > > > > > > > > > > > > > > > > v2: > > > > > > > > - update patch #5 with a correct vhost_dev_init() prototype > > > > > > > > - drop patch #6 - it depends on a different patch, that is currently > > > > > > > > an RFC > > > > > > > > - address comments from Pierre-Louis Bossart: > > > > > > > > * remove "default n" from Kconfig > > > > > > > > > > > > > > > > Linux supports RPMsg over VirtIO for "remote processor" / AMP use > > > > > > > > cases. It can however also be used for virtualisation scenarios, > > > > > > > > e.g. when using KVM to run Linux on both the host and the guests. > > > > > > > > This patch set adds a wrapper API to facilitate writing vhost > > > > > > > > drivers for such RPMsg-based solutions. 
The first use case is an > > > > > > > > audio DSP virtualisation project, currently under development, ready > > > > > > > > for review and submission, available at > > > > > > > > https://github.com/thesofproject/linux/pull/1501/commits > > > > > > > > > > > > > > > > Thanks > > > > > > > > Guennadi > > > > > > > > > > > > > > > > Guennadi Liakhovetski (4): > > > > > > > > vhost: convert VHOST_VSOCK_SET_RUNNING to a generic ioctl > > > > > > > > rpmsg: move common structures and defines to headers > > > > > > > > rpmsg: update documentation > > > > > > > > vhost: add an RPMsg API > > > > > > > > > > > > > > > > Documentation/rpmsg.txt | 6 +- > > > > > > > > drivers/rpmsg/virtio_rpmsg_bus.c | 78 +------ > > > > > > > > drivers/vhost/Kconfig | 7 + > > > > > > > > drivers/vhost/Makefile | 3 + > > > > > > > > drivers/vhost/rpmsg.c | 375 +++++++++++++++++++++++++++++++ > > > > > > > > drivers/vhost/vhost_rpmsg.h | 74 ++++++ > > > > > > > > include/linux/virtio_rpmsg.h | 83 +++++++ > > > > > > > > include/uapi/linux/rpmsg.h | 3 + > > > > > > > > include/uapi/linux/vhost.h | 4 +- > > > > > > > > 9 files changed, 553 insertions(+), 80 deletions(-) > > > > > > > > create mode 100644 drivers/vhost/rpmsg.c > > > > > > > > create mode 100644 drivers/vhost/vhost_rpmsg.h > > > > > > > > create mode 100644 include/linux/virtio_rpmsg.h > > > > > > > > > > > > > > > > -- > > > > > > > > 2.27.0 > > > > > > > > > > > > >