Displaying 8 results from an estimated 11 matches for "in_iov".
2018 Nov 02
0
[PATCH 1/1] Add vhost_blk driver
... > +
> +enum {
> + VHOST_BLK_VQ_MAX = 16,
> + VHOST_BLK_VQ_MAX_REQS = 128,
> +};
> +
> +struct vhost_blk_req {
> + struct llist_node list;
> + int index;
> + struct vhost_blk_queue *q;
> + struct virtio_blk_outhdr hdr;
> + struct iovec *out_iov;
> + struct iovec *in_iov;
> + u8 out_num;
> + u8 in_num;
> + long len;
> + struct kiocb iocb;
> + struct iov_iter i;
> + int res;
> + void __user *status;
> +};
> +
> +struct vhost_blk_queue {
> + int index;
> + struct vhost_blk *blk;
> + struct vhost_virtqueue vq;
> + struct vhost...
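For context on how the fields above are typically filled, here is a minimal
sketch (not the submitted patch) of dequeueing one request with the existing
vhost helper vhost_get_vq_desc(); the handle_req() wrapper and its error
handling are hypothetical:

static void handle_req(struct vhost_blk_queue *q, struct vhost_blk_req *req)
{
	struct vhost_virtqueue *vq = &q->vq;
	unsigned int out = 0, in = 0;
	int head;

	/* Split the descriptor chain into driver-readable (out) and
	 * driver-writable (in) segments; out also covers the outhdr. */
	head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
				 &out, &in, NULL, NULL);
	if (head < 0 || head == vq->num)
		return;			/* error, or no buffer available */

	req->index = head;
	req->out_num = out;
	req->in_num = in;
	req->out_iov = &vq->iov[0];	/* header plus write payload */
	req->in_iov = &vq->iov[out];	/* read payload plus status byte */
}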
2018 Nov 03
0
[PATCH 1/1] Add vhost_blk driver
...ent->pid, current->comm,
	 write ? "WRITE" : "READ", req->hdr.sector, len);

req->len = len;
rem_len = len;
iov_iter_init(&req->i, (write ? WRITE : READ),
	      write ? &req->out_iov[0] : &req->in_iov[0],
	      nr_seg, len);

req->iocb.ki_pos = sector << 9;
req->iocb.ki_filp = blk->backend;
req->iocb.ki_complete = vhost_blk_iocb_complete;
req->iocb.ki_flags = IOCB_DIRECT;

if (write)
	ret = call_wr...
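The excerpt above submits direct I/O on the backend through a kiocb. The
completion callback it installs is not shown in this result, so the following
is only a plausible shape: container_of() recovers the request embedding the
kiocb, and completed_reqs is a hypothetical list for the vq handler to drain.
(On kernels of that era, ki_complete took three arguments.)

static LLIST_HEAD(completed_reqs);	/* hypothetical completion list */

static void vhost_blk_iocb_complete(struct kiocb *iocb, long ret, long ret2)
{
	struct vhost_blk_req *req = container_of(iocb, struct vhost_blk_req,
						 iocb);

	req->res = ret;	/* bytes transferred on success, -errno on failure */
	/* The real driver would now arrange to mark the buffer used on
	 * req->q and write the status byte; that code is not in the
	 * excerpt. */
	llist_add(&req->list, &completed_reqs);
}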
2008 Apr 05
11
[PATCH RFC 1/5] vringfd syscall
...ee_vr;
+
+ fd_install(fd, filp);
+ return fd;
+
+free_vr:
+ kfree(vr);
+put_filp:
+ put_filp(filp);
+put_fd:
+ put_unused_fd(fd);
+out:
+ return err;
+}
+
+/* Returns an error, or 0 (no buffers), or an id for vring_used_buffer() */
+int vring_get_buffer(struct vring_info *vr,
+ struct iovec *in_iov,
+ unsigned int *num_in, unsigned long *in_len,
+ struct iovec *out_iov,
+ unsigned int *num_out, unsigned long *out_len)
+{
+ unsigned int i, in = 0, out = 0;
+ unsigned long dummy;
+ u16 head;
+ struct vring_desc d;
+
+ if (unlikely(get_user(head, &vr->ring.avail->idx)...
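Given the signature and the return convention documented above, a caller
might look like this hedged sketch; the in/out semantics of num_in/num_out
(capacity on entry, count on return) are assumed, and the iovec array sizes
are arbitrary:

static int service_one_buffer(struct vring_info *vr)
{
	struct iovec in_iov[16], out_iov[16];
	unsigned int num_in = 16, num_out = 16;
	unsigned long in_len, out_len;
	int id;

	id = vring_get_buffer(vr, in_iov, &num_in, &in_len,
			      out_iov, &num_out, &out_len);
	if (id <= 0)
		return id;	/* negative: error; 0: no buffers pending */

	/* ... process num_out out_iov entries, fill num_in in_iov ... */

	return id;	/* to be handed to vring_used_buffer() when done */
}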
2018 Nov 06
0
[PATCH 1/1] Add vhost_blk driver
...+ VHOST_BLK_VQ_MAX_REQS = 128,
> +};
These limits seem arbitrary and probably too low; one way to make them tunable is sketched after this excerpt.
> +
> +struct vhost_blk_req {
> + struct llist_node list;
> + int index;
> + struct vhost_blk_queue *q;
> + struct virtio_blk_outhdr hdr;
> + struct iovec *out_iov;
> + struct iovec *in_iov;
> + u8 out_num;
> + u8 in_num;
> + long len;
> + struct kiocb iocb;
> + struct iov_iter i;
> + int res;
> + void __user *status;
> +};
> +
> +struct vhost_blk_queue {
> + int index;
> + struct vhost_blk *blk;
> + struct vhost_virtqueue vq;
> + struct vhost...
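One conventional way to avoid hard-coding such limits, offered here only as
an illustrative sketch rather than the reviewer's suggestion, is to expose
them as module parameters with the current values as defaults:

static unsigned int vq_max = 16;		/* was VHOST_BLK_VQ_MAX */
module_param(vq_max, uint, 0444);
MODULE_PARM_DESC(vq_max, "Maximum number of virtqueues per device");

static unsigned int vq_max_reqs = 128;		/* was VHOST_BLK_VQ_MAX_REQS */
module_param(vq_max_reqs, uint, 0444);
MODULE_PARM_DESC(vq_max_reqs, "Maximum in-flight requests per virtqueue");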
2023 Apr 07
2
[PATCH 0/2] vdpa_sim_blk: support shared backend
This series is mainly for testing live migration between two vdpa_sim_blk
devices.
The first patch is preparatory and moves the buffer allocation into the
devices; the second adds the `shared_backend` module parameter to
vdpa_sim_blk so that all devices use the same ramdisk.
Tested with QEMU v8.0.0-rc2 in this way:
modprobe vhost_vdpa
modprobe vdpa_sim_blk shared_backend=true
vdpa dev add mgmtdev
2015 Sep 10
6
[RFC PATCH 0/2] virtio nvme
Hi all,
These two patches add virtio-nvme to the kernel and QEMU; they are largely
adapted from the virtio-blk and NVMe code.
As the title says, comments are requested.
Try it in QEMU with:
-drive file=disk.img,format=raw,if=none,id=D22 \
-device virtio-nvme-pci,drive=D22,serial=1234,num_queues=4
The goal is to have a full NVMe stack from the VM guest (virtio-nvme)
through the host (vhost_nvme) to LIO NVMe-over-Fabrics
2023 Mar 28
12
[PATCH v6 00/11] vhost: multiple worker support
The following patches were built on top of linux-next, which contains
various vhost patches from mst's tree and the vhost_task patchset from
Christian Brauner's tree:
git://git.kernel.org/pub/scm/linux/kernel/git/brauner/linux.git
kernel.user_worker branch:
https://git.kernel.org/pub/scm/linux/kernel/git/brauner/linux.git/log/?h=kernel.user_worker
The latter patchset handles the review comment
2017 Apr 07
34
[RFC 0/3] virtio-iommu: a paravirtualized IOMMU
This is the initial proposal for a paravirtualized IOMMU device using
virtio transport. It contains a description of the device, a Linux driver,
and a toy implementation in kvmtool. With this prototype, you can
translate DMA to guest memory from emulated (virtio) or passed-through
(VFIO) devices.
In its simplest form, implemented here, the device handles map/unmap
requests from the guest. Future
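For orientation, a MAP request in the virtio-iommu interface as it was later
standardized carries a domain identifier, an IOVA range, a physical base,
and permission flags; the 2017 RFC's exact layout may differ from this
sketch (fields are little-endian on the wire):

struct virtio_iommu_req_map {
	u8	type;		/* VIRTIO_IOMMU_T_MAP */
	u8	reserved[3];
	u32	domain;		/* which address space to map into */
	u64	virt_start;	/* first IOVA of the mapping */
	u64	virt_end;	/* last IOVA (inclusive) */
	u64	phys_start;	/* guest-physical base address */
	u32	flags;		/* VIRTIO_IOMMU_MAP_F_READ/WRITE/MMIO */
};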