Displaying 20 results from an estimated 185 matches for "qlock".
2018 Apr 03 · 3 · [PATCH] drm/virtio: fix vq wait_event condition
.../gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -293,7 +293,7 @@ static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
if (ret == -ENOSPC) {
spin_unlock(&vgdev->ctrlq.qlock);
- wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
+ wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
spin_lock(&vgdev->ctrlq.qlock);
goto retry;
} else {
@@ -368,7 +368,7 @@ static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,...
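The point of the fix: a request needs outcnt + incnt descriptors at once, so waking as soon as num_free is nonzero can fail virtqueue_add_sgs() again and livelock on the retry. A minimal userspace sketch of the same drop-lock/wait/retry idiom, with a pthread mutex and condvar standing in for qlock and wait_event (ring_submit and ring_complete are illustrative names, not from the patch):

#include <pthread.h>

#define RING_SIZE 8

static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t ack_queue = PTHREAD_COND_INITIALIZER;
static int num_free = RING_SIZE;

/* Queue a request that consumes `need` descriptors (outcnt + incnt). */
static void ring_submit(int need)
{
	pthread_mutex_lock(&qlock);
	/* wait_event(ack_queue, vq->num_free >= outcnt + incnt) */
	while (num_free < need)
		pthread_cond_wait(&ack_queue, &qlock);
	num_free -= need;
	pthread_mutex_unlock(&qlock);
}

/* Completion path: descriptors are recycled, wake any waiter. */
static void ring_complete(int freed)
{
	pthread_mutex_lock(&qlock);
	num_free += freed;
	pthread_cond_broadcast(&ack_queue);
	pthread_mutex_unlock(&qlock);
}

int main(void)
{
	ring_submit(3);		/* fits: 8 descriptors free */
	ring_complete(3);
	return 0;
}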
2014 Mar 12 · 0 · [PATCH v6 04/11] qspinlock: Optimized code path for 2 contending tasks
...ock value
+ * Return: 1 if lock acquired, 0 if failed
+ *
+ * This is an optimized contention path for 2 contending tasks. It
+ * should only be entered if no task is waiting in the queue.
+ */
+static inline int queue_spin_trylock_quick(struct qspinlock *lock, int qsval)
+{
+ union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
+
+ /*
+ * Fall into the quick spinning code path only if no task is waiting
+ * in the queue.
+ */
+ while (likely(!(qsval >> _QCODE_OFFSET))) {
+ if ((qsval & _QSPINLOCK_LWMASK) == _QSPINLOCK_LWMASK) {
+ /*
+ * Both the lock and wait bits are set...
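What the truncated quick path does, in outline: with nobody queued, a second contender sets a waiting bit, spins until the holder clears the lock bit, then converts waiting into locked with a plain store (safe only because no third task can steal the lock on this path). A compilable C11 sketch with an invented two-bit layout; the kernel's _QSPINLOCK_* encoding and qsval plumbing differ:

#include <stdatomic.h>
#include <stdbool.h>

#define LOCKED  0x01u
#define WAITING 0x02u

/* Take a free lock, or become the single spinning waiter. */
static bool quick_trylock(atomic_uint *lock)
{
	unsigned v = atomic_load_explicit(lock, memory_order_relaxed);

	if (v == 0)			/* lock free: grab it directly */
		return atomic_compare_exchange_strong(lock, &v, LOCKED);

	if (v != LOCKED)		/* a waiter already exists */
		return false;		/* caller falls back to the queue */

	/* Become the (only) waiter. */
	if (!atomic_compare_exchange_strong(lock, &v, LOCKED | WAITING))
		return false;

	/* Now wait until the lock bit is cleared. */
	while (atomic_load_explicit(lock, memory_order_acquire) & LOCKED)
		;			/* cpu_relax() in the kernel */

	/* Set the lock bit & clear the waiting bit simultaneously; a plain
	 * store is safe only because no lock stealing happens here. */
	atomic_store_explicit(lock, LOCKED, memory_order_relaxed);
	return true;
}

static void quick_unlock(atomic_uint *lock)
{
	/* Clear only the lock bit so a spinning waiter's bit survives. */
	atomic_fetch_and_explicit(lock, ~LOCKED, memory_order_release);
}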
2014 Feb 26 · 2 · [PATCH v5 3/8] qspinlock, x86: Add x86 specific optimization for 2 contending tasks
...'m going to have to make one; this is all getting a bit
unwieldy, and those xchg() + fixup things are hard to read.
On Wed, Feb 26, 2014 at 10:14:23AM -0500, Waiman Long wrote:
> +static inline int queue_spin_trylock_quick(struct qspinlock *lock, int qsval)
> +{
> + union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
> + u16 old;
> +
> + /*
> + * Fall into the quick spinning code path only if no one is waiting
> + * or the lock is available.
> + */
> + if (unlikely((qsval != _QSPINLOCK_LOCKED) &&
> + (qsval != _QSPINLOCK_WAITING)))
...
2014 Jun 11 · 3 · [PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
...ue_spin_trylock_unfair - try to acquire the queue spinlock unfairly
> + * @lock : Pointer to queue spinlock structure
> + * Return: 1 if lock acquired, 0 if failed
> + */
> +static __always_inline int queue_spin_trylock_unfair(struct qspinlock *lock)
> +{
> + union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
> +
> + if (!qlock->locked && (cmpxchg(&qlock->locked, 0, _Q_LOCKED_VAL) == 0))
> + return 1;
> + return 0;
> +}
> +
> +/**
> + * queue_spin_lock_unfair - acquire a queue spinlock unfairly
> + * @lock: Pointer to queue sp...
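A portable C11 rendering of the same byte trylock: test first, then a single cmpxchg, so contended CPUs mostly do shared reads instead of bouncing the cache line with failed atomics (trylock_unfair here is an illustrative name):

#include <stdatomic.h>
#include <stdbool.h>

static bool trylock_unfair(atomic_uchar *locked)
{
	unsigned char expect = 0;

	if (atomic_load_explicit(locked, memory_order_relaxed))
		return false;		/* lock byte already set */

	/* cmpxchg(&qlock->locked, 0, _Q_LOCKED_VAL): succeed only if free. */
	return atomic_compare_exchange_strong_explicit(
		locked, &expect, 1,
		memory_order_acquire, memory_order_relaxed);
}

It is unfair because an arriving CPU can grab the byte ahead of every queued waiter, which is the point in a guest: a queued waiter's vCPU may be preempted, and strict queueing would convoy behind it.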
2019 Aug 13 · 0 · [PATCH 1/2] drm/virtio: cleanup queue functions
...ck_queue);
}
-static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer *vbuf)
+static void virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
__releases(&vgdev->ctrlq.qlock)
__acquires(&vgdev->ctrlq.qlock)
{
@@ -263,7 +263,7 @@ static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
int ret;
if (!vgdev->vqs_ready)
- return -ENODEV;
+ return;
sg_init_one(&vcmd, vbuf->buf, vbuf->size);
sgs[outcnt + incnt] = &...
2019 Sep 03 · 0 · [PATCH v2 13/27] drm/dp_mst: Refactor drm_dp_mst_handle_down_rep()
...dp_sideband_msg_tx *txmsg;
- struct drm_dp_mst_branch *mstb;
- int slot = -1;
- mstb = drm_dp_get_mst_branch_device(mgr,
- mgr->down_rep_recv.initial_hdr.lct,
- mgr->down_rep_recv.initial_hdr.rad);
+ /* find the message */
+ slot = hdr->seqno;
+ mutex_lock(&mgr->qlock);
+ txmsg = mstb->tx_slots[slot];
+ /* remove from slots */
+ mutex_unlock(&mgr->qlock);
- if (!mstb) {
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
- memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_...
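The refactored lookup indexes tx_slots directly by the reply's seqno and claims the message under the short-lived qlock, instead of resolving the branch device first. A simplified sketch of that claim-under-lock shape; the struct layout and the slot bound are stand-ins:

#include <pthread.h>
#include <stddef.h>

#define TX_SLOTS 2	/* illustrative bound */

struct txmsg { int seqno; /* ...reply payload... */ };

struct mgr {
	pthread_mutex_t qlock;
	struct txmsg *tx_slots[TX_SLOTS];
};

static struct txmsg *claim_reply(struct mgr *mgr, int seqno)
{
	struct txmsg *txmsg;

	if (seqno < 0 || seqno >= TX_SLOTS)
		return NULL;

	pthread_mutex_lock(&mgr->qlock);
	txmsg = mgr->tx_slots[seqno];	/* find the message */
	mgr->tx_slots[seqno] = NULL;	/* remove from slots */
	pthread_mutex_unlock(&mgr->qlock);

	/* The reply is parsed outside the lock. */
	return txmsg;
}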
2019 Aug 13 · 0 · [PATCH 2/2] drm/virtio: notify virtqueues without holding spinlock
...uct *work)
wake_up(&vgdev->cursorq.ack_queue);
}
-static void virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
+static bool virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf)
__releases(&vgdev->ctrlq.qlock)
__acquires(&vgdev->ctrlq.qlock)
@@ -260,10 +260,11 @@ static void virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
struct virtqueue *vq = vgdev->ctrlq.vq;
struct scatterlist *sgs[3], vcmd, vout, vresp;
int outcnt = 0, incnt = 0;
+ bool notify = false;
int r...
2014 Jun 12 · 2 · [PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
...e spinlock unfairly
> >>+ * @lock : Pointer to queue spinlock structure
> >>+ * Return: 1 if lock acquired, 0 if failed
> >>+ */
> >>+static __always_inline int queue_spin_trylock_unfair(struct qspinlock *lock)
> >>+{
> >>+ union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
> >>+
> >>+ if (!qlock->locked && (cmpxchg(&qlock->locked, 0, _Q_LOCKED_VAL) == 0))
> >>+ return 1;
> >>+ return 0;
> >>+}
> >>+
> >>+/**
> >>+ * queue_spin_lock_unfair - acqui...
2014 Mar 13 · 1 · [PATCH RFC v6 09/11] pvqspinlock, x86: Add qspinlock para-virtualization support
...ll take out the lock holder kick portion from the patch. I will also
try to collect more test data.
>
> More important, I think a barrier is missing:
>
> Lock holder ---------------------------------------
>
> // queue_spin_unlock
> barrier();
> ACCESS_ONCE(qlock->lock) = 0;
> barrier();
>
This is not the unlock code that is used when PV spinlock is enabled.
The right unlock code is:

	if (static_key_false(&paravirt_spinlocks_enabled)) {
		/*
		 * Need to atomically clear the lock byte to avoid
		 * racing...
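Both unlock flavors under discussion, sketched in C11 with a byte-sized lock field: the native path is the plain release store quoted above (barrier()/ACCESS_ONCE is the compiler-barrier idiom), while the paravirt path must clear the byte atomically so it can observe a waiter that halted and kick it. paravirt_enabled, SLOWPATH and kick_halted_waiter are hypothetical stand-ins:

#include <stdatomic.h>
#include <stdbool.h>

#define SLOWPATH 0x80u	/* hypothetical "a waiter may be halted" flag */

static bool paravirt_enabled;			/* stand-in for the static key */
static void kick_halted_waiter(void) { /* hypercall in the real code */ }

static void queue_spin_unlock(atomic_uchar *lock)
{
	if (paravirt_enabled) {
		/* Atomically clear the byte: a plain store could race with
		 * a waiter setting SLOWPATH just before halting, losing the
		 * wakeup kick. */
		unsigned char old =
			atomic_exchange_explicit(lock, 0, memory_order_release);
		if (old & SLOWPATH)
			kick_halted_waiter();
		return;
	}
	/* Native case: a release store of 0 is all the unlock needs. */
	atomic_store_explicit(lock, 0, memory_order_release);
}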
2019 Jul 11 · 2 · [PATCH] drm/virtio: kick vq outside of the vq lock
...(struct virtio_gpu_ctrl_hdr *)vbuf->buf);
- virtqueue_kick(vq);
+ ret = virtqueue_kick_prepare(vq);
}
- if (!ret)
- ret = vq->num_free;
return ret;
}
@@ -307,6 +305,10 @@ static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
spin_lock(&vgdev->ctrlq.qlock);
rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
spin_unlock(&vgdev->ctrlq.qlock);
+
+ if (rc > 0)
+ virtqueue_notify(vgdev->ctrlq.vq);
+
return rc;
}
@@ -339,6 +341,10 @@ static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
virtio_gpu_fe...
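The shape of the change: decide whether the device needs a notification while holding qlock, but perform the notification, an MMIO write or hypercall and therefore the expensive part, after dropping the lock so other submitters are not serialized behind it. A userspace sketch with stand-in types; only the lock-scope split mirrors the patch:

#include <pthread.h>
#include <stdbool.h>

struct queue {
	pthread_mutex_t qlock;
	bool notify_needed;	/* stand-in for virtio's event-suppression state */
};

/* Add a buffer to the ring; report whether the device must be kicked.
 * Mirrors virtqueue_add_sgs() + virtqueue_kick_prepare(). */
static bool queue_add_locked(struct queue *q)
{
	/* ...ring bookkeeping elided... */
	return q->notify_needed;
}

/* Mirrors virtqueue_notify(): traps to the device, so keep it unlocked. */
static void queue_notify(struct queue *q)
{
	(void)q;
}

static void queue_add(struct queue *q)
{
	bool notify;

	pthread_mutex_lock(&q->qlock);
	notify = queue_add_locked(q);
	pthread_mutex_unlock(&q->qlock);

	if (notify)
		queue_notify(q);	/* no longer holds up other submitters */
}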
2014 Mar 12 · 2 · [PATCH v6 04/11] qspinlock: Optimized code path for 2 contending tasks
On 03/12/2014 02:54 PM, Waiman Long wrote:
> +
> + /*
> + * Now wait until the lock bit is cleared
> + */
> + while (smp_load_acquire(&qlock->qlcode) & _QSPINLOCK_LOCKED)
> + arch_mutex_cpu_relax();
> +
> + /*
> + * Set the lock bit & clear the waiting bit simultaneously
> + * It is assumed that there is no lock stealing with this
> + * quick path active.
> + *
> + * A direct memory store...
2017 Dec 28 · 3 · [PATCH] drm/virtio: Add window server support
...ueue_work);
+}
+
+void virtio_gpu_queue_winsrv_rx_in(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_winsrv_rx *cmd)
+{
+ struct virtqueue *vq = vgdev->winsrv_rxq.vq;
+ struct scatterlist sg[1];
+ int ret;
+
+ sg_init_one(sg, cmd, sizeof(*cmd));
+
+ spin_lock(&vgdev->winsrv_rxq.qlock);
+retry:
+ ret = virtqueue_add_inbuf(vq, sg, 1, cmd, GFP_KERNEL);
+ if (ret == -ENOSPC) {
+ spin_unlock(&vgdev->winsrv_rxq.qlock);
+ wait_event(vgdev->winsrv_rxq.ack_queue, vq->num_free);
+ spin_lock(&vgdev->winsrv_rxq.qlock);
+ goto retry;
+ }
+ virtqueue_kick(vq);
+ spin_...
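Here the wait condition really is just vq->num_free, and that is sufficient: virtqueue_add_inbuf() asks for exactly one descriptor, unlike the outcnt + incnt case fixed in the first result above. In the terms of the earlier userspace sketch:

#include <pthread.h>

static pthread_mutex_t rxq_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t rxq_ack = PTHREAD_COND_INITIALIZER;
static int rxq_free;	/* descriptors currently free; init to the ring size */

static void queue_rx_buffer(void)
{
	pthread_mutex_lock(&rxq_lock);
	while (rxq_free < 1)	/* wait_event(ack_queue, vq->num_free) */
		pthread_cond_wait(&rxq_ack, &rxq_lock);
	rxq_free--;		/* virtqueue_add_inbuf(vq, sg, 1, ...) */
	pthread_mutex_unlock(&rxq_lock);
	/* virtqueue_kick(vq) follows in the patch */
}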