Displaying 19 results from an estimated 19 matches for "vhost_attach_cgroups".
2010 Sep 05
0
[PATCH] vhost: fix attach to cgroups regression
..._poll *poll)
+{
+ vhost_work_queue(poll->dev, &poll->work);
+}
+
static void vhost_vq_reset(struct vhost_dev *dev,
struct vhost_virtqueue *vq)
{
@@ -236,6 +247,29 @@ long vhost_dev_check_owner(struct vhost_dev *dev)
return dev->mm == current->mm ? 0 : -EPERM;
}
+struct vhost_attach_cgroups_struct {
+ struct vhost_work work;
+ struct task_struct *owner;
+ int ret;
+};
+
+static void vhost_attach_cgroups_work(struct vhost_work *work)
+{
+ struct vhost_attach_cgroups_struct *s;
+ s = container_of(work, struct vhost_attach_cgroups_struct, work);
+...
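The excerpt stops just before the interesting part. As a hedged sketch of how the pattern completes (modelled on the way the mainline fix is usually quoted; treat the exact calls as assumptions rather than a verbatim copy of the patch), the queued work runs on the vhost worker thread and moves that thread into the owner's cgroups, and the submitter waits for it so the return value is valid:

    /* Runs on the vhost worker thread, so "current" is the worker itself. */
    static void vhost_attach_cgroups_work(struct vhost_work *work)
    {
            struct vhost_attach_cgroups_struct *s;

            s = container_of(work, struct vhost_attach_cgroups_struct, work);
            /* Attach the worker (current) to every cgroup of the owner task. */
            s->ret = cgroup_attach_task_all(s->owner, current);
    }

    static int vhost_attach_cgroups(struct vhost_dev *dev)
    {
            struct vhost_attach_cgroups_struct attach;

            attach.owner = current; /* the task issuing VHOST_SET_OWNER */
            vhost_work_init(&attach.work, vhost_attach_cgroups_work);
            vhost_work_queue(dev, &attach.work);
            /* Wait for the work to run so attach.ret is valid before returning. */
            vhost_work_flush(dev, &attach.work);
            return attach.ret;
    }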
2020 May 29
0
[PATCH 1/6] vhost: allow device that does not depend on vhost worker
...%d", current->pid);
+ if (IS_ERR(worker)) {
+ err = PTR_ERR(worker);
+ goto err_worker;
+ }
- dev->worker = worker;
- wake_up_process(worker); /* avoid contributing to loadavg */
+ dev->worker = worker;
+ wake_up_process(worker); /* avoid contributing to loadavg */
- err = vhost_attach_cgroups(dev);
- if (err)
- goto err_cgroup;
+ err = vhost_attach_cgroups(dev);
+ if (err)
+ goto err_cgroup;
+ }
err = vhost_dev_alloc_iovecs(dev);
if (err)
@@ -568,8 +578,10 @@ long vhost_dev_set_owner(struct vhost_dev *dev)
return 0;
err_cgroup:
- kthread_stop(worker);
- dev->worker =...
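Reading around the truncation, the hunk re-indents the worker setup because it is being wrapped in a conditional, so that devices which do not need a worker kthread can skip its creation and the cgroup attach entirely. A rough sketch of the resulting flow in vhost_dev_set_owner() (the use_worker flag name comes from that series but is an assumption here, and this is condensed, not a verbatim hunk):

    if (dev->use_worker) {
            worker = kthread_create(vhost_worker, dev,
                                    "vhost-%d", current->pid);
            if (IS_ERR(worker)) {
                    err = PTR_ERR(worker);
                    goto err_worker;
            }

            dev->worker = worker;
            wake_up_process(worker);        /* avoid contributing to loadavg */

            err = vhost_attach_cgroups(dev);
            if (err)
                    goto err_cgroup;
    }

    err = vhost_dev_alloc_iovecs(dev);
    if (err)
            goto err_cgroup;

The truncated error path presumably changes to match: the worker is only stopped and cleared when one was actually created.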
2016 Aug 22
4
[PATCH] CodingStyle: add some more error handling guidelines
On Mon, Aug 22, 2016 at 08:16:17AM -0600, Jonathan Corbet wrote:
> On Mon, 22 Aug 2016 16:57:46 +0300
> "Michael S. Tsirkin" <mst at redhat.com> wrote:
>
> > commit ea04036032edda6f771c1381d03832d2ed0f6c31 ("CodingStyle:
> > add some more error handling guidelines") suggests never naming goto
> > labels after the goto location - that is
2016 Aug 22
0
[PATCH] CodingStyle: add some more error handling guidelines
...if (IS_ERR(worker)) {
489 err = PTR_ERR(worker);
490 goto err_worker;
491 }
492
493 dev->worker = worker;
494 wake_up_process(worker); /* avoid contributing to loadavg */
495
496 err = vhost_attach_cgroups(dev);
497 if (err)
498 goto err_cgroup;
499
500 err = vhost_dev_alloc_iovecs(dev);
501 if (err)
502 goto err_cgroup;
This name doesn't make sense because it's a come-from label which is
used twice. Some peopl...
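To make the objection concrete: the guideline prefers naming the label after the unwind action it performs rather than after the place the goto came from, so the same error path reads correctly from every failure point that uses it. A sketch of the preferred shape (the stop_worker name is purely illustrative, not from the patch):

    err = vhost_attach_cgroups(dev);
    if (err)
            goto stop_worker;

    err = vhost_dev_alloc_iovecs(dev);
    if (err)
            goto stop_worker;
    ...
    stop_worker:
            kthread_stop(worker);
            dev->worker = NULL;

One label, one cleanup action, and the name stays honest no matter how many gotos target it.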
2013 Jun 06
0
[PATCH net 1/2] vhost: check owner before we overwrite ubuf_info
..._dev_has_owner(&n->dev)) {
+ r = -EBUSY;
+ goto out;
+ }
r = vhost_net_set_ubuf_info(n);
if (r)
goto out;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index beee7f5..60aa5ad 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -344,13 +344,19 @@ static int vhost_attach_cgroups(struct vhost_dev *dev)
}
/* Caller should have device mutex */
+bool vhost_dev_has_owner(struct vhost_dev *dev)
+{
+ return dev->mm;
+}
+
+/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
{
struct task_struct *worker;
int err;
/* Is there an own...
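The two visible hunks give the shape of the fix: vhost.c gains an exported ownership check, and the net ioctl path calls it before (re)allocating ubuf_info. A minimal sketch of the caller side, assuming it sits in the VHOST_SET_OWNER handler in drivers/vhost/net.c (paraphrased from the hunk, not quoted in full):

    /* Refuse to redo owner setup: it would overwrite the ubuf_info that
     * the existing owner is still using. */
    if (vhost_dev_has_owner(&n->dev)) {
            r = -EBUSY;
            goto out;
    }

    r = vhost_net_set_ubuf_info(n);
    if (r)
            goto out;

    r = vhost_dev_set_owner(&n->dev);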
2020 Sep 22
0
[PATCH 7/8] vhost: remove work arg from vhost_work_flush
...hat are also used by the callback. */
> void vhost_poll_flush(struct vhost_poll *poll)
> {
> - vhost_work_flush(poll->dev, &poll->work);
> + vhost_work_dev_flush(poll->dev);
> }
> EXPORT_SYMBOL_GPL(vhost_poll_flush);
>
> @@ -542,7 +542,7 @@ static int vhost_attach_cgroups(struct vhost_dev *dev)
> attach.owner = current;
> vhost_work_init(&attach.work, vhost_attach_cgroups_work);
> vhost_work_queue(dev, &attach.work);
> - vhost_work_flush(dev, &attach.work);
> + vhost_work_dev_flush(dev);
> return attach.ret;
> }
>...
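The rename works because the flush never needed the specific work item: flushing the device already waits for every work queued before the flush, so the argument was dead weight. A hedged sketch of what such a device-wide flush typically looks like (struct and field names here are illustrative, not lifted from the patch):

    struct vhost_flush_struct {
            struct vhost_work work;
            struct completion wait_event;
    };

    static void vhost_flush_work(struct vhost_work *work)
    {
            struct vhost_flush_struct *s;

            s = container_of(work, struct vhost_flush_struct, work);
            complete(&s->wait_event);
    }

    /* Queue a marker work and wait for it; everything queued before the
     * marker has necessarily run by the time it completes. */
    void vhost_work_dev_flush(struct vhost_dev *dev)
    {
            struct vhost_flush_struct flush;

            init_completion(&flush.wait_event);
            vhost_work_init(&flush.work, vhost_flush_work);
            vhost_work_queue(dev, &flush.work);
            wait_for_completion(&flush.wait_event);
    }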
2013 Jun 06
5
[PATCH net 0/2] vhost fixes for 3.10
Two patches fixing the fallout from the vhost cleanup in 3.10.
Thanks to Tommi Rantala, who reported the issue.
Tommi, could you please confirm this fixes the crashes for you?
Michael S. Tsirkin (2):
vhost: check owner before we overwrite ubuf_info
vhost: fix ubuf_info cleanup
drivers/vhost/net.c | 26 +++++++++++---------------
drivers/vhost/vhost.c | 8 +++++++-
drivers/vhost/vhost.h |
2023 Jun 01
1
[syzbot] [kvm?] [net?] [virt?] general protection fault in vhost_work_queue
On Wed, May 31, 2023 at 11:27:12AM -0500, Mike Christie wrote:
>On 5/31/23 10:15 AM, Mike Christie wrote:
>>>> rcu would work for your case and for what Jason had requested.
>>> Yeah, so you already have some patches?
>>>
>>> Do you want to send it to solve this problem?
>>>
>> Yeah, I'll break them out and send them later today when I
2011 Nov 11
10
[RFC] [ver3 PATCH 0/6] Implement multiqueue virtio-net
This patch series resurrects the earlier multiple TX/RX queue
functionality for virtio_net and addresses the issues pointed
out. It also includes an API to share IRQs, e.g. amongst the
TX vqs.
I plan to run TCP/UDP STREAM and RR tests for local->host and
local->remote, and send the results in the next couple of days.
patch #1: Introduce VIRTIO_NET_F_MULTIQUEUE
patch #2: Move
2014 Aug 10
0
[PATCH] vhost: Add polling mode
..._init(struct vhost_dev *dev,
> vhost_vq_reset(dev, vq);
> if (vq->handle_kick)
> vhost_poll_init(&vq->poll, vq->handle_kick,
> - POLLIN, dev);
> + POLLIN, vq);
> }
> }
> EXPORT_SYMBOL_GPL(vhost_dev_init);
> @@ -350,7 +532,7 @@ static int vhost_attach_cgroups(struct vhost_dev *dev)
> struct vhost_attach_cgroups_struct attach;
>
> attach.owner = current;
> - vhost_work_init(&attach.work, vhost_attach_cgroups_work);
> + vhost_work_init(&attach.work, NULL, vhost_attach_cgroups_work);
> vhost_work_queue(dev, &attach.wor...
2020 May 29
12
[PATCH 0/6] vDPA: doorbell mapping
Hi all:
This series introduces basic doorbell mapping support for vhost-vDPA.
A userspace program may use mmap() to map the doorbell of a specific
virtqueue into its address space. This helps to reduce syscall or
vmexit overhead.
A new vdpa_config_ops was introduced to report the location of the
doorbell; vhost_vdpa may then choose to map the doorbell when:
- The doorbell
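As a rough illustration of what "map the doorbell of a specific virtqueue" means in practice, here is a hedged userspace sketch. The device node path and the offset convention (virtqueue index times page size) are assumptions for illustration, not taken from the series:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            long page = sysconf(_SC_PAGESIZE);
            unsigned int qindex = 0;                        /* virtqueue to kick */

            int fd = open("/dev/vhost-vdpa-0", O_RDWR);     /* assumed device node */
            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            /* Assumption: the doorbell page of virtqueue N is exposed at
             * mmap offset N * page size. */
            void *db = mmap(NULL, page, PROT_WRITE, MAP_SHARED, fd,
                            (off_t)qindex * page);
            if (db == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }

            /* Writing the queue index kicks the device directly, with no
             * ioctl or eventfd round trip (hence fewer syscalls/vmexits). */
            *(volatile uint16_t *)db = qindex;

            munmap(db, page);
            close(fd);
            return 0;
    }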
2014 Aug 20
0
[PATCH] vhost: Add polling mode
..._init(struct vhost_dev *dev,
> vhost_vq_reset(dev, vq);
> if (vq->handle_kick)
> vhost_poll_init(&vq->poll, vq->handle_kick,
> - POLLIN, dev);
> + POLLIN, vq);
> }
> }
> EXPORT_SYMBOL_GPL(vhost_dev_init);
> @@ -350,7 +532,7 @@ static int vhost_attach_cgroups(struct vhost_dev *dev)
> struct vhost_attach_cgroups_struct attach;
>
> attach.owner = current;
> - vhost_work_init(&attach.work, vhost_attach_cgroups_work);
> + vhost_work_init(&attach.work, NULL, vhost_attach_cgroups_work);
> vhost_work_queue(dev, &attach.wor...
2014 Aug 10
7
[PATCH] vhost: Add polling mode
...qs; ++i) {
@@ -318,7 +500,7 @@ void vhost_dev_init(struct vhost_dev *dev,
vhost_vq_reset(dev, vq);
if (vq->handle_kick)
vhost_poll_init(&vq->poll, vq->handle_kick,
- POLLIN, dev);
+ POLLIN, vq);
}
}
EXPORT_SYMBOL_GPL(vhost_dev_init);
@@ -350,7 +532,7 @@ static int vhost_attach_cgroups(struct vhost_dev *dev)
struct vhost_attach_cgroups_struct attach;
attach.owner = current;
- vhost_work_init(&attach.work, vhost_attach_cgroups_work);
+ vhost_work_init(&attach.work, NULL, vhost_attach_cgroups_work);
vhost_work_queue(dev, &attach.work);
vhost_work_flush(dev, &a...