Displaying 20 results from an estimated 61 matches for "work_lock".
2016 Apr 26
2
[PATCH 1/2] vhost: simplify work flushing
...;
}
EXPORT_SYMBOL_GPL(vhost_work_init);
@@ -211,31 +222,17 @@ void vhost_poll_stop(struct vhost_poll *poll)
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);
-static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
- unsigned seq)
-{
- int left;
-
- spin_lock_irq(&dev->work_lock);
- left = seq - work->done_seq;
- spin_unlock_irq(&dev->work_lock);
- return left <= 0;
-}
-
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
- unsigned seq;
- int flushing;
+ struct vhost_flush_struct flush;
+
+ if (dev->worker) {
+ init_completion(&f...
2016 Apr 26
2
[PATCH 1/2] vhost: simplify work flushing
...;
}
EXPORT_SYMBOL_GPL(vhost_work_init);
@@ -211,31 +222,17 @@ void vhost_poll_stop(struct vhost_poll *poll)
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);
-static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
- unsigned seq)
-{
- int left;
-
- spin_lock_irq(&dev->work_lock);
- left = seq - work->done_seq;
- spin_unlock_irq(&dev->work_lock);
- return left <= 0;
-}
-
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
- unsigned seq;
- int flushing;
+ struct vhost_flush_struct flush;
+
+ if (dev->worker) {
+ init_completion(&f...
2010 Sep 05
0
[PATCH] vhost: fix attach to cgroups regression
...locks that are also used by the callback. */
-void vhost_poll_flush(struct vhost_poll *poll)
+void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
- struct vhost_work *work = &poll->work;
unsigned seq;
int left;
int flushing;
- spin_lock_irq(&poll->dev->work_lock);
+ spin_lock_irq(&dev->work_lock);
seq = work->queue_seq;
work->flushing++;
- spin_unlock_irq(&poll->dev->work_lock);
+ spin_unlock_irq(&dev->work_lock);
wait_event(work->done, ({
- spin_lock_irq(&poll->dev->work_lock);
+ spin_lock_irq(&...
2010 Sep 05
0
[PATCH] vhost: fix attach to cgroups regression
...locks that are also used by the callback. */
-void vhost_poll_flush(struct vhost_poll *poll)
+void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
- struct vhost_work *work = &poll->work;
unsigned seq;
int left;
int flushing;
- spin_lock_irq(&poll->dev->work_lock);
+ spin_lock_irq(&dev->work_lock);
seq = work->queue_seq;
work->flushing++;
- spin_unlock_irq(&poll->dev->work_lock);
+ spin_unlock_irq(&dev->work_lock);
wait_event(work->done, ({
- spin_lock_irq(&poll->dev->work_lock);
+ spin_lock_irq(&...
2011 Jan 10
0
[PATCH] vhost: fix signed/unsigned comparison
...c
+++ b/drivers/vhost/vhost.c
@@ -98,22 +98,26 @@ void vhost_poll_stop(struct vhost_poll *poll)
remove_wait_queue(poll->wqh, &poll->wait);
}
+static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
+ unsigned seq)
+{
+ int left;
+ spin_lock_irq(&dev->work_lock);
+ left = seq - work->done_seq;
+ spin_unlock_irq(&dev->work_lock);
+ return left <= 0;
+}
+
static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
unsigned seq;
- int left;
int flushing;
spin_lock_irq(&dev->work_lock);
seq = work->queue_s...
2011 Jan 10
0
[PATCH] vhost: fix signed/unsigned comparison
...c
+++ b/drivers/vhost/vhost.c
@@ -98,22 +98,26 @@ void vhost_poll_stop(struct vhost_poll *poll)
remove_wait_queue(poll->wqh, &poll->wait);
}
+static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
+ unsigned seq)
+{
+ int left;
+ spin_lock_irq(&dev->work_lock);
+ left = seq - work->done_seq;
+ spin_unlock_irq(&dev->work_lock);
+ return left <= 0;
+}
+
static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
unsigned seq;
- int left;
int flushing;
spin_lock_irq(&dev->work_lock);
seq = work->queue_s...
2020 Jun 22
0
[RFC v5 02/10] drm/vblank: Add vblank works
...ock_irqrestore(&dev->vbl_lock, irqflags);
}
+static void drm_vblank_work_release(struct drm_vblank_crtc *vblank)
+{
+ struct kthread_worker *worker = vblank->worker;
+ struct drm_vblank_work *work, *tmp;
+ bool wake = false;
+
+ if (!worker)
+ return;
+
+ spin_lock_irq(&vblank->work_lock);
+ vblank->worker = NULL;
+
+ list_for_each_entry_safe(work, tmp, &vblank->pending_work, node) {
+ drm_vblank_put(vblank->dev, vblank->pipe);
+ list_del(&work->node);
+
+ if (!--work->pending) {
+ write_seqcount_invalidate(&work->seqcount);
+ wake = true;
+...
2016 Apr 26
0
[PATCH 2/2] vhost: lockless enqueuing
...lags);
work->fn = fn;
init_waitqueue_head(&work->done);
}
@@ -246,15 +246,16 @@ EXPORT_SYMBOL_GPL(vhost_poll_flush);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
- unsigned long flags;
+ if (!dev->worker)
+ return;
- spin_lock_irqsave(&dev->work_lock, flags);
- if (list_empty(&work->node)) {
- list_add_tail(&work->node, &dev->work_list);
- spin_unlock_irqrestore(&dev->work_lock, flags);
+ if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
+ /* We can only add the work to the list after we're
+...
2020 May 08
0
[RFC v4 04/12] drm/vblank: Add vblank works
...>vblank[drm_crtc_index(crtc)];
+}
+EXPORT_SYMBOL(drm_vblank_work_init);
+
+static int vblank_worker_init(struct drm_vblank_crtc *vblank)
+{
+ struct sched_param param = {
+ .sched_priority = MAX_RT_PRIO - 1,
+ };
+
+ INIT_LIST_HEAD(&vblank->pending_work);
+ spin_lock_init(&vblank->work_lock);
+ vblank->worker = kthread_create_worker(0, "card%d-crtc%d",
+ vblank->dev->primary->index,
+ vblank->pipe);
+ if (IS_ERR(vblank->worker))
+ return PTR_ERR(vblank->worker);
+
+ return sched_setscheduler(vblank->worker->task, SCHED_FIFO,...
2016 Apr 26
2
[PATCH 2/2] vhost: lockless enqueuing
...&work->done);
> }
> @@ -246,15 +246,16 @@ EXPORT_SYMBOL_GPL(vhost_poll_flush);
>
> void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
> {
> - unsigned long flags;
> + if (!dev->worker)
> + return;
>
> - spin_lock_irqsave(&dev->work_lock, flags);
> - if (list_empty(&work->node)) {
> - list_add_tail(&work->node, &dev->work_list);
> - spin_unlock_irqrestore(&dev->work_lock, flags);
> + if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
> + /* We can only add the work to the...
2016 Apr 26
2
[PATCH 2/2] vhost: lockless enqueuing
...&work->done);
> }
> @@ -246,15 +246,16 @@ EXPORT_SYMBOL_GPL(vhost_poll_flush);
>
> void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
> {
> - unsigned long flags;
> + if (!dev->worker)
> + return;
>
> - spin_lock_irqsave(&dev->work_lock, flags);
> - if (list_empty(&work->node)) {
> - list_add_tail(&work->node, &dev->work_list);
> - spin_unlock_irqrestore(&dev->work_lock, flags);
> + if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
> + /* We can only add the work to the...
2010 Jul 29
1
[PATCH] vhost: locking/rcu cleanup
...@@ -99,40 +104,16 @@ void vhost_poll_stop(struct vhost_poll *poll)
* locks that are also used by the callback. */
void vhost_poll_flush(struct vhost_poll *poll)
{
- struct vhost_work *work = &poll->work;
- unsigned seq;
- int left;
- int flushing;
-
- spin_lock_irq(&poll->dev->work_lock);
- seq = work->queue_seq;
- work->flushing++;
- spin_unlock_irq(&poll->dev->work_lock);
- wait_event(work->done, ({
- spin_lock_irq(&poll->dev->work_lock);
- left = seq - work->done_seq <= 0;
- spin_unlock_irq(&poll->dev->work_lock);
- l...
2010 Jul 29
1
[PATCH] vhost: locking/rcu cleanup
...@@ -99,40 +104,16 @@ void vhost_poll_stop(struct vhost_poll *poll)
* locks that are also used by the callback. */
void vhost_poll_flush(struct vhost_poll *poll)
{
- struct vhost_work *work = &poll->work;
- unsigned seq;
- int left;
- int flushing;
-
- spin_lock_irq(&poll->dev->work_lock);
- seq = work->queue_seq;
- work->flushing++;
- spin_unlock_irq(&poll->dev->work_lock);
- wait_event(work->done, ({
- spin_lock_irq(&poll->dev->work_lock);
- left = seq - work->done_seq <= 0;
- spin_unlock_irq(&poll->dev->work_lock);
- l...
2011 Nov 11
10
[RFC] [ver3 PATCH 0/6] Implement multiqueue virtio-net
This patch series resurrects the earlier multiple TX/RX queues
functionality for virtio_net, and addresses the issues pointed
out. It also includes an API to share irq's, f.e. amongst the
TX vqs.
I plan to run TCP/UDP STREAM and RR tests for local->host and
local->remote, and send the results in the next couple of days.
patch #1: Introduce VIRTIO_NET_F_MULTIQUEUE
patch #2: Move
2011 Nov 11
10
[RFC] [ver3 PATCH 0/6] Implement multiqueue virtio-net
This patch series resurrects the earlier multiple TX/RX queues
functionality for virtio_net, and addresses the issues pointed
out. It also includes an API to share irq's, f.e. amongst the
TX vqs.
I plan to run TCP/UDP STREAM and RR tests for local->host and
local->remote, and send the results in the next couple of days.
patch #1: Introduce VIRTIO_NET_F_MULTIQUEUE
patch #2: Move
2020 Jun 22
13
[RFC v5 00/10] drm/nouveau: Introduce CRC support for gf119+
Nvidia released some documentation on how CRC support works on their
GPUs, hooray!
So: this patch series implements said CRC support in nouveau, along with
adding some special debugfs interfaces for some relevant igt-gpu-tools
tests (already on the ML).
First - we add some new functionality to kthread_work in the kernel, and
then use this to add a new feature to DRM that Ville Syrjälä came up
2013 Jul 07
2
[PATCH v2 03/11] vhost: Make vhost a separate module
...st_work_flush(struct vhost_dev *dev, struct vhost_work *work)
> > > {
> > > unsigned seq;
> > > int flushing;
> > > @@ -138,6 +143,7 @@ static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
> > > spin_unlock_irq(&dev->work_lock);
> > > BUG_ON(flushing < 0);
> > > }
> > > +EXPORT_SYMBOL_GPL(vhost_work_flush);
> > >
> > > /* Flush any work that has been scheduled. When calling this, don't hold any
> > > * locks that are also used by the callback. */
> &...
2013 Jul 07
2
[PATCH v2 03/11] vhost: Make vhost a separate module
...st_work_flush(struct vhost_dev *dev, struct vhost_work *work)
> > > {
> > > unsigned seq;
> > > int flushing;
> > > @@ -138,6 +143,7 @@ static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
> > > spin_unlock_irq(&dev->work_lock);
> > > BUG_ON(flushing < 0);
> > > }
> > > +EXPORT_SYMBOL_GPL(vhost_work_flush);
> > >
> > > /* Flush any work that has been scheduled. When calling this, don't hold any
> > > * locks that are also used by the callback. */
> &...
2015 Dec 31
4
[PATCH RFC] vhost: basic device IOTLB support
...iova >> PAGE_SHIFT) & (VHOST_IOTLB_SIZE - 1);
+}
+
static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
poll_table *pt)
{
@@ -384,8 +389,14 @@ void vhost_dev_init(struct vhost_dev *dev,
dev->memory = NULL;
dev->mm = NULL;
spin_lock_init(&dev->work_lock);
+ spin_lock_init(&dev->iotlb_lock);
+ mutex_init(&dev->iotlb_req_mutex);
INIT_LIST_HEAD(&dev->work_list);
dev->worker = NULL;
+ dev->iotlb_request = NULL;
+ dev->iotlb_ctx = NULL;
+ dev->iotlb_file = NULL;
+ dev->pending_request.flags.type = VHOST_IOTLB_INV...
2015 Dec 31
4
[PATCH RFC] vhost: basic device IOTLB support
...iova >> PAGE_SHIFT) & (VHOST_IOTLB_SIZE - 1);
+}
+
static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
poll_table *pt)
{
@@ -384,8 +389,14 @@ void vhost_dev_init(struct vhost_dev *dev,
dev->memory = NULL;
dev->mm = NULL;
spin_lock_init(&dev->work_lock);
+ spin_lock_init(&dev->iotlb_lock);
+ mutex_init(&dev->iotlb_req_mutex);
INIT_LIST_HEAD(&dev->work_list);
dev->worker = NULL;
+ dev->iotlb_request = NULL;
+ dev->iotlb_ctx = NULL;
+ dev->iotlb_file = NULL;
+ dev->pending_request.flags.type = VHOST_IOTLB_INV...