Displaying 20 results from an estimated 81 matches for "assert_spin_locked".
2014 Dec 21
1
[PATCH 1/3] nouveau: Do not BUG_ON(!spin_is_locked()) on UP
On !SMP systems spinlocks do not exist. Thus checking whether they
are active will always fail.
Use
assert_spin_locked(lock);
instead of
BUG_ON(!spin_is_locked(lock));
to not BUG() on all UP systems.
Signed-off-by: Bruno Prémont <bonbons at linux-vserver.org>
---
See also fdo bug #87552
drivers/gpu/drm/nouveau/core/core/event.c | 4 ++--
drivers/gpu/drm/nouveau/core/core/notify.c | 2 +-
2 files changed...
2009 Feb 03
10
Convert mle list to a hash
These patches convert the mle list to a hash. The same patches apply on
ocfs2 1.4 too.
Currently, we use the same number of hash pages for mles and lockres'.
This will be addressed in a future patch that will make both of them
configurable.
Sunil
2009 Feb 26
13
o2dlm mle hash patches - round 2
The changes from the last drop are:
1. Patch 11 removes struct dlm_lock_name.
2. Patch 12 is an unrelated bugfix. Actually, it is related to a bugfix
that we are retracting in mainline currently. The patch may need more testing.
While I did hit the condition in my testing, Marcos hasn't. I am sending it
because it can be queued for 2.6.30. Give us more time to test.
3. Patch 13 will be useful
2007 May 17
1
[PATCH] ocfs: use list_for_each_entry where beneficial
...linux-2.6/fs/ocfs2/cluster/tcp.c 2007-05-17 15:00:14.000000000 +0200
@@ -261,14 +261,12 @@ out:
static void o2net_complete_nodes_nsw(struct o2net_node *nn)
{
- struct list_head *iter, *tmp;
+ struct o2net_status_wait *nsw, *tmp;
unsigned int num_kills = 0;
- struct o2net_status_wait *nsw;
assert_spin_locked(&nn->nn_lock);
- list_for_each_safe(iter, tmp, &nn->nn_status_list) {
- nsw = list_entry(iter, struct o2net_status_wait, ns_node_item);
+ list_for_each_entry_safe(nsw, tmp, &nn->nn_status_list, ns_node_item) {
o2net_complete_nsw_locked(nn, nsw, O2NET_ERR_DIED, 0);
num_...
2009 Apr 17
26
OCFS2 1.4: Patches backported from mainline
Please review the list of patches being applied to the ocfs2 1.4 tree.
All patches list the mainline commit hash.
Thanks
Sunil
2018 Nov 27
2
[PATCH v5 5/7] iommu: Add virtio-iommu driver
...sync_req(struct viommu_dev *viommu)
> >> +{
> >> + int ret = 0;
> >> + unsigned int len;
> >> + size_t write_len;
> >> + struct viommu_request *req;
> >> + struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
> >> +
> >> + assert_spin_locked(&viommu->request_lock);
> >> +
> >> + virtqueue_kick(vq);
> >> +
> >> + while (!list_empty(&viommu->requests)) {
> >> + len = 0;
> >> + req = virtqueue_get_buf(vq, &len);
> >> + if (!req)
> >> + continue;...
2018 Nov 27
2
[PATCH v5 5/7] iommu: Add virtio-iommu driver
...sync_req(struct viommu_dev *viommu)
> >> +{
> >> + int ret = 0;
> >> + unsigned int len;
> >> + size_t write_len;
> >> + struct viommu_request *req;
> >> + struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
> >> +
> >> + assert_spin_locked(&viommu->request_lock);
> >> +
> >> + virtqueue_kick(vq);
> >> +
> >> + while (!list_empty(&viommu->requests)) {
> >> + len = 0;
> >> + req = virtqueue_get_buf(vq, &len);
> >> + if (!req)
> >> + continue;...
2018 Nov 27
2
[PATCH v5 5/7] iommu: Add virtio-iommu driver
...; >>>> + int ret = 0;
> >>>> + unsigned int len;
> >>>> + size_t write_len;
> >>>> + struct viommu_request *req;
> >>>> + struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
> >>>> +
> >>>> + assert_spin_locked(&viommu->request_lock);
> >>>> +
> >>>> + virtqueue_kick(vq);
> >>>> +
> >>>> + while (!list_empty(&viommu->requests)) {
> >>>> + len = 0;
> >>>> + req = virtqueue_get_buf(vq, &len);
> &...
2018 Nov 27
2
[PATCH v5 5/7] iommu: Add virtio-iommu driver
...; >>>> + int ret = 0;
> >>>> + unsigned int len;
> >>>> + size_t write_len;
> >>>> + struct viommu_request *req;
> >>>> + struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
> >>>> +
> >>>> + assert_spin_locked(&viommu->request_lock);
> >>>> +
> >>>> + virtqueue_kick(vq);
> >>>> +
> >>>> + while (!list_empty(&viommu->requests)) {
> >>>> + len = 0;
> >>>> + req = virtqueue_get_buf(vq, &len);
> &...
2015 Oct 30
5
[PATCH] drm/nouveau: Fix pre-nv50 pageflip events
...e1f58666a6 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -972,7 +972,8 @@ static void send_vblank_event(struct drm_device *dev,
struct drm_pending_vblank_event *e,
unsigned long seq, struct timeval *now)
{
- WARN_ON_SMP(!spin_is_locked(&dev->event_lock));
+ assert_spin_locked(&dev->event_lock);
+
e->event.sequence = seq;
e->event.tv_sec = now->tv_sec;
e->event.tv_usec = now->tv_usec;
@@ -985,6 +986,59 @@ static void send_vblank_event(struct drm_device *dev,
}
/**
+ * drm_arm_vblank_event - arm vblanke event after pageflip
+ * @dev: DRM de...
2018 Nov 20
1
[virtio-dev] Re: [PATCH v4 5/7] iommu: Add virtio-iommu driver
..._viommu_sync_req(struct viommu_dev *viommu)
>>> +{
>>> + int ret = 0;
>>> + unsigned int len;
>>> + size_t write_len;
>>> + struct viommu_request *req;
>>> + struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
>>> +
>>> + assert_spin_locked(&viommu->request_lock);
>>> +
>>> + virtqueue_kick(vq);
>>> +
>>> + while (!list_empty(&viommu->requests)) {
>>> + len = 0;
>>> + req = virtqueue_get_buf(vq, &len);
>>> + if (!req)
>>> + continue;
>>...
2018 Dec 10
1
[PATCH v5 5/7] iommu: Add virtio-iommu driver
...gt;>>> + unsigned int len;
> >>>>>> + size_t write_len;
> >>>>>> + struct viommu_request *req;
> >>>>>> + struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
> >>>>>> +
> >>>>>> + assert_spin_locked(&viommu->request_lock);
> >>>>>> +
> >>>>>> + virtqueue_kick(vq);
> >>>>>> +
> >>>>>> + while (!list_empty(&viommu->requests)) {
> >>>>>> + len = 0;
> >>>>>>...
2015 Nov 06
0
[PATCH] drm/nouveau: Fix pre-nv50 pageflip events
.../drm/drm_irq.c
> +++ b/drivers/gpu/drm/drm_irq.c
> @@ -972,7 +972,8 @@ static void send_vblank_event(struct drm_device *dev,
> struct drm_pending_vblank_event *e,
> unsigned long seq, struct timeval *now)
> {
> - WARN_ON_SMP(!spin_is_locked(&dev->event_lock));
> + assert_spin_locked(&dev->event_lock);
> +
> e->event.sequence = seq;
> e->event.tv_sec = now->tv_sec;
> e->event.tv_usec = now->tv_usec;
> @@ -985,6 +986,59 @@ static void send_vblank_event(struct drm_device *dev,
> }
>
> /**
> + * drm_arm_vblank_event - arm...
2007 Jun 07
4
[PATCH RFC 0/3] Virtio draft II
Hi again all,
It turns out that networking really wants ordered requests, which the
previous patches didn't allow. This patch changes it to a callback
mechanism; kudos to Avi.
The downside is that locking is more complicated, and after a few dead
ends I implemented the simplest solution: the struct virtio_device
contains the spinlock to use, and it's held when your callbacks get
2007 Jun 07
4
[PATCH RFC 0/3] Virtio draft II
Hi again all,
It turns out that networking really wants ordered requests, which the
previous patches didn't allow. This patch changes it to a callback
mechanism; kudos to Avi.
The downside is that locking is more complicated, and after a few dead
ends I implemented the simplest solution: the struct virtio_device
contains the spinlock to use, and it's held when your callbacks get
2007 Jun 07
4
[PATCH RFC 0/3] Virtio draft II
Hi again all,
It turns out that networking really wants ordered requests, which the
previous patches didn't allow. This patch changes it to a callback
mechanism; kudos to Avi.
The downside is that locking is more complicated, and after a few dead
ends I implemented the simplest solution: the struct virtio_device
contains the spinlock to use, and it's held when your callbacks get
2018 Nov 16
0
[PATCH v4 5/7] iommu: Add virtio-iommu driver
...> + */
>> +static int __viommu_sync_req(struct viommu_dev *viommu)
>> +{
>> + int ret = 0;
>> + unsigned int len;
>> + size_t write_len;
>> + struct viommu_request *req;
>> + struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
>> +
>> + assert_spin_locked(&viommu->request_lock);
>> +
>> + virtqueue_kick(vq);
>> +
>> + while (!list_empty(&viommu->requests)) {
>> + len = 0;
>> + req = virtqueue_get_buf(vq, &len);
>> + if (!req)
>> + continue;
>> +
>> + if (!len)
>&g...
2018 Nov 16
2
[PATCH v4 5/7] iommu: Add virtio-iommu driver
...he time of the call have completed.
> + */
> +static int __viommu_sync_req(struct viommu_dev *viommu)
> +{
> + int ret = 0;
> + unsigned int len;
> + size_t write_len;
> + struct viommu_request *req;
> + struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
> +
> + assert_spin_locked(&viommu->request_lock);
> +
> + virtqueue_kick(vq);
> +
> + while (!list_empty(&viommu->requests)) {
> + len = 0;
> + req = virtqueue_get_buf(vq, &len);
> + if (!req)
> + continue;
> +
> + if (!len)
> + viommu_set_req_status(req->buf, re...
2018 Nov 16
2
[PATCH v4 5/7] iommu: Add virtio-iommu driver
...he time of the call have completed.
> + */
> +static int __viommu_sync_req(struct viommu_dev *viommu)
> +{
> + int ret = 0;
> + unsigned int len;
> + size_t write_len;
> + struct viommu_request *req;
> + struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
> +
> + assert_spin_locked(&viommu->request_lock);
> +
> + virtqueue_kick(vq);
> +
> + while (!list_empty(&viommu->requests)) {
> + len = 0;
> + req = virtqueue_get_buf(vq, &len);
> + if (!req)
> + continue;
> +
> + if (!len)
> + viommu_set_req_status(req->buf, re...
2018 Nov 27
0
[PATCH v5 5/7] iommu: Add virtio-iommu driver
...ev *viommu)
>>>> +{
>>>> + int ret = 0;
>>>> + unsigned int len;
>>>> + size_t write_len;
>>>> + struct viommu_request *req;
>>>> + struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
>>>> +
>>>> + assert_spin_locked(&viommu->request_lock);
>>>> +
>>>> + virtqueue_kick(vq);
>>>> +
>>>> + while (!list_empty(&viommu->requests)) {
>>>> + len = 0;
>>>> + req = virtqueue_get_buf(vq, &len);
>>>> + if (!req)
>&g...