Displaying 20 results from an estimated 1453 matches for "spin_unlock_irqrestore".
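Every hit below is a variation on the same idiom, so for reference, here is a minimal sketch of the spin_lock_irqsave()/spin_unlock_irqrestore() pairing itself (my_lock and my_counter are placeholder names, not taken from any of the patches):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);
static unsigned long my_counter;

/* Safe from any context: irqsave disables local interrupts and saves
 * their previous state in 'flags'; irqrestore puts exactly that state
 * back, so the pair nests correctly inside a region that already has
 * interrupts off (unlike spin_unlock_irq(), which re-enables them
 * unconditionally). */
static void my_counter_inc(void)
{
        unsigned long flags;

        spin_lock_irqsave(&my_lock, flags);
        my_counter++;
        spin_unlock_irqrestore(&my_lock, flags);
}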
2011 May 09
1
Bug#625438: [PATCH] xen: ioapic: avoid gcc 4.6 warnings about uninitialised variables
...0x11 + 2 * pin);
+ return eu.entry;
+}
+
+static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin, int raw)
+{
+ struct IO_APIC_route_entry entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioapic_lock, flags);
+ entry = __ioapic_read_entry(apic, pin, raw);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+ return entry;
+}
+
+static void
+__ioapic_write_entry(int apic, int pin, int raw, struct IO_APIC_route_entry e)
+{
+ void (*write)(unsigned int, unsigned int, unsigned int)
+ = raw ? __io_apic_write : io_apic_write;
+ union entry_union eu = {{0, 0}};
+
+...
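The interesting part of this fix is its shape rather than the IO-APIC details: the read is funnelled through a helper that assigns every field under the lock and returns the struct by value, so gcc 4.6 can no longer see a path on which the caller's local might be used uninitialised. A hedged sketch of the same pattern with invented names (dev_cfg and dev_read_cfg are illustrative, not from the patch):

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct dev_cfg {
        u32 lo;
        u32 hi;
};

static DEFINE_SPINLOCK(dev_lock);

/* Both fields are written on every path, so the returned value is
 * always fully initialised; filling a caller's local field-by-field
 * inside conditionals is what tripped gcc 4.6's analysis. */
static struct dev_cfg dev_read_cfg(void __iomem *regs)
{
        struct dev_cfg cfg;
        unsigned long flags;

        spin_lock_irqsave(&dev_lock, flags);
        cfg.lo = readl(regs);
        cfg.hi = readl(regs + 4);
        spin_unlock_irqrestore(&dev_lock, flags);
        return cfg;
}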
2010 Feb 07
3
[PATCH] drm/nouveau: don't hold spin lock while calling kzalloc with GFP_KERNEL
...chan_ret,
return ret;
}
- spin_lock_irqsave(&dev_priv->engine.lock, flags);
-
/* disable the fifo caches */
pfifo->reassign(dev, false);
@@ -225,8 +222,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
pfifo->reassign(dev, true);
- spin_unlock_irqrestore(&dev_priv->engine.lock, flags);
-
ret = nouveau_dma_init(chan);
if (!ret)
ret = nouveau_fence_init(chan);
@@ -290,7 +285,7 @@ nouveau_channel_free(struct nouveau_channel *chan)
if (pgraph->channel(dev) == chan)
nouveau_wait_for_idle(dev);
- spin_lock_irqsave(&dev_priv->...
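The subject line names the bug class exactly: kzalloc(..., GFP_KERNEL) may sleep to reclaim memory, and sleeping while holding a spinlock (here with interrupts off as well) can lock up the CPU, so the patch shrinks the locked region instead. A generic sketch of the safe shapes, with illustrative names (obj_lock and obj_create are not from the driver):

#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(obj_lock);

struct obj {
        int id;
};

static struct obj *obj_create(int id)
{
        struct obj *o;
        unsigned long flags;

        /* Preferred (and what the nouveau patch effectively does):
         * allocate with GFP_KERNEL *before* taking the lock. */
        o = kzalloc(sizeof(*o), GFP_KERNEL);
        if (!o)
                return NULL;
        o->id = id;

        spin_lock_irqsave(&obj_lock, flags);
        /* ... only the shared-state update happens under the lock ... */
        spin_unlock_irqrestore(&obj_lock, flags);

        /* The alternative, when allocating under the lock is truly
         * unavoidable, is GFP_ATOMIC: it never sleeps but fails more
         * readily under memory pressure. */
        return o;
}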
2011 Jan 26
8
[PATCH 1/8] staging: hv: Convert camel cased variables in connection.c to lower cases
...rdatalen)
memcpy(openMsg->userdata, userdata, userdatalen);
- spin_lock_irqsave(&gVmbusConnection.channelmsg_lock, flags);
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_add_tail(&openInfo->msglistentry,
- &gVmbusConnection.ChannelMsgList);
- spin_unlock_irqrestore(&gVmbusConnection.channelmsg_lock, flags);
+ &vmbus_connection.ChannelMsgList);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
DPRINT_DBG(VMBUS, "Sending channel open msg...");
@@ -289,9 +289,9 @@ int vmbus_open(struct vmbus_channel *newchanne...
2017 Dec 06
1
[PATCH RFC 1/4] crypto: engine - Permit to enqueue all async requests
...s;
> bool was_busy = false;
> - int ret, rtype;
> + int ret;
> + struct crypto_engine_reqctx *enginectx;
>
> spin_lock_irqsave(&engine->queue_lock, flags);
>
> @@ -94,7 +93,6 @@ static void crypto_pump_requests(struct crypto_engine *engine,
>
> spin_unlock_irqrestore(&engine->queue_lock, flags);
>
> - rtype = crypto_tfm_alg_type(engine->cur_req->tfm);
> /* Until here we get the request need to be encrypted successfully */
> if (!was_busy && engine->prepare_crypt_hardware) {
> ret = engine->prepare_crypt_har...
2014 Jun 22
2
[PATCH v1 2/2] block: virtio-blk: support multi virt queues per virtio-blk device
...mplete_request(vbr->req);
> req_done = true;
> }
> @@ -151,7 +157,7 @@ static void virtblk_done(struct virtqueue *vq)
> /* In case queue is stopped waiting for more buffers. */
> if (req_done)
> blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
> - spin_unlock_irqrestore(&vblk->vq_lock, flags);
> + spin_unlock_irqrestore(&vblk->vq_lock[qid], flags);
> }
>
> static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
> @@ -160,6 +166,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
>...
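The small-looking hunk carries the whole design: the single vblk->vq_lock that serialised all completions becomes one lock per virtqueue, indexed by qid, so completions on different vqs stop contending. A hedged sketch of that conversion (my_vblk is an abbreviation, not the driver's real layout):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_vblk {
        unsigned int num_vqs;
        spinlock_t *vq_lock;    /* was a single spinlock_t; now one per vq */
};

static int my_vblk_init_locks(struct my_vblk *vblk, unsigned int num_vqs)
{
        unsigned int i;

        vblk->num_vqs = num_vqs;
        vblk->vq_lock = kcalloc(num_vqs, sizeof(*vblk->vq_lock), GFP_KERNEL);
        if (!vblk->vq_lock)
                return -ENOMEM;
        for (i = 0; i < num_vqs; i++)
                spin_lock_init(&vblk->vq_lock[i]);
        return 0;
}

/* Completion path: take only the lock of the queue that interrupted. */
static void my_vblk_done(struct my_vblk *vblk, unsigned int qid)
{
        unsigned long flags;

        spin_lock_irqsave(&vblk->vq_lock[qid], flags);
        /* ... reap completed requests from vq 'qid' ... */
        spin_unlock_irqrestore(&vblk->vq_lock[qid], flags);
}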
2010 Aug 03
2
[PATCH 6/6] staging: hv: Gracefully handle SCSI resets
...static inline struct storvsc_device *GetStorDevice(struct hv_device *Device) {
struct storvsc_device *storDevice;
+ unsigned long flags;
storDevice = (struct storvsc_device *)Device->Extension;
+
+ spin_lock_irqsave(&storDevice->lock, flags);
+
+ if (storDevice->reset == 1) {
+ spin_unlock_irqrestore(&storDevice->lock, flags);
+ return NULL;
+ }
+
if (storDevice && atomic_read(&storDevice->RefCount) > 1)
atomic_inc(&storDevice->RefCount);
else
storDevice = NULL;
+ spin_unlock_irqrestore(&storDevice->lock, flags);
+
return storDevice;
}
@@...
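Note a latent bug visible even in this short excerpt: on the failure path storDevice is set to NULL, and the very next statement unlocks through &storDevice->lock, i.e. through the pointer that was just cleared. A corrected sketch of the intended shape (generic names; this is not the fix that eventually landed):

#include <linux/atomic.h>
#include <linux/spinlock.h>

struct stor_dev {
        spinlock_t lock;
        atomic_t refcount;
        int reset;
};

static struct stor_dev *stor_dev_get(struct stor_dev *dev)
{
        struct stor_dev *ret = dev;
        unsigned long flags;

        spin_lock_irqsave(&dev->lock, flags);
        if (dev->reset == 1 || atomic_read(&dev->refcount) <= 1)
                ret = NULL;                     /* clear the copy... */
        else
                atomic_inc(&dev->refcount);
        spin_unlock_irqrestore(&dev->lock, flags);      /* ...so 'dev' is
                                                         * still valid here */
        return ret;
}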
2008 Jul 04
1
[PATCH 0/2] dm-ioband: I/O bandwidth controller v1.2.0: Introduction
Hi everyone,
This is the dm-ioband version 1.2.0 release.
Dm-ioband is an I/O bandwidth controller implemented as a device-mapper
driver, which gives a specified bandwidth to each job running on the same
physical device.
- Can be applied to kernel 2.6.26-rc5-mm3.
- Changes from 1.1.0 (posted on June 2, 2008):
- Dynamic policy switching
A user can change the bandwidth control policy
2016 Jan 01
5
[PATCH 2/2] virtio_balloon: fix race between migration and ballooning
...und
> @@ -75,15 +76,14 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
> continue;
> }
> #endif
> - spin_lock_irqsave(&b_dev_info->pages_lock, flags);
> balloon_page_delete(page);
> __count_vm_event(BALLOON_DEFLATE);
> - spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
> unlock_page(page);
> dequeued_page = true;
> break;
> }
> }
> + spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
>
> if (!dequeued_page) {
> /*
I think this will cause deadlocks.
pages_lock...
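The reply is cut off, but the worry it raises is the classic one whenever a lock's scope is widened: everything that now runs under pages_lock must never wait, directly or transitively, on a CPU that takes the same locks in the opposite order. A schematic AB/BA illustration of that failure mode (lock_a and lock_b are placeholders, not the balloon driver's actual locks):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(lock_a); /* stands in for pages_lock */
static DEFINE_SPINLOCK(lock_b); /* stands in for the per-page lock */

/* CPU 0 takes A then B... */
static void cpu0_path(void)
{
        spin_lock(&lock_a);
        spin_lock(&lock_b);     /* spins if CPU 1 already holds B */
        spin_unlock(&lock_b);
        spin_unlock(&lock_a);
}

/* ...while CPU 1 takes B then A. Each side can end up holding one
 * lock and spinning forever on the other, which is what the reviewer
 * suspects the widened pages_lock region makes possible. */
static void cpu1_path(void)
{
        spin_lock(&lock_b);
        spin_lock(&lock_a);
        spin_unlock(&lock_a);
        spin_unlock(&lock_b);
}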
2012 Jan 12
9
Re: [PATCH] add netconsole support for xen-netfront
...>tx_lock);
> + spin_lock_irqsave(&np->tx_lock, flags);
>
> if (unlikely(!netif_carrier_ok(dev) ||
> (frags > 1 && !xennet_can_sg(dev)) ||
> netif_needs_gso(skb, netif_skb_features(skb)))) {
> - spin_unlock_irq(&np->tx_lock);
> + spin_unlock_irqrestore(&np->tx_lock, flags);
> goto drop;
> }
>
> @@ -574,7 +575,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
> if (!netfront_tx_slot_available(np))
> netif_stop_queue(dev);
>
> - spin_unlock_irq(&np->tx_lock);
> + sp...
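This conversion is the crux of the netconsole support: netpoll may call the driver's transmit path from a context where interrupts are already disabled, and spin_unlock_irq() would re-enable them unconditionally, corrupting the caller's IRQ state; the irqsave/irqrestore pair preserves whatever state the caller had. A minimal before/after sketch (tx_lock is kept from the patch; the rest is illustrative):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(tx_lock);

static void xmit_unsafe(void)
{
        spin_lock_irq(&tx_lock);
        /* ... queue the skb ... */
        spin_unlock_irq(&tx_lock);      /* always re-enables interrupts:
                                         * wrong if the caller (e.g.
                                         * netpoll) entered with them off */
}

static void xmit_safe(void)
{
        unsigned long flags;

        spin_lock_irqsave(&tx_lock, flags);
        /* ... queue the skb ... */
        spin_unlock_irqrestore(&tx_lock, flags);        /* restores the
                                                         * caller's state */
}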
2011 Dec 23
2
re: Btrfs: fix num_workers_starting bug and other bugs in async thread
Hi Josef,
Smatch complains that this change introduces a double unlock.
fs/btrfs/async-thread.c +608 find_worker(49) error: double unlock 'spin_lock:&workers->lock'
579 spin_unlock_irqrestore(&workers->lock, flags);
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
We unlock here.
580 /* we're below the limit, start another worker */
581 ret = __btrfs_start_workers(workers);
582...
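The report reads straight off the control flow: the lock is released at line 579, and an error path further down (around line 608, per Smatch) releases workers->lock again without re-taking it. A skeletal illustration of the pattern the checker flags (function body invented; only the names and line references come from the report):

#include <linux/errno.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(workers_lock);

static int find_worker_shape(void)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&workers_lock, flags);
        /* ... we're below the limit, start another worker ... */
        spin_unlock_irqrestore(&workers_lock, flags);   /* first unlock */

        ret = -ENOMEM;  /* stand-in for __btrfs_start_workers() failing */
        if (ret)
                goto fail;
        return 0;

fail:
        /* The bug Smatch reports: the lock is no longer held here, so
         * unlocking again is a double unlock; the error path must not
         * release the lock (or must re-take it first). */
        spin_unlock_irqrestore(&workers_lock, flags);
        return ret;
}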
2017 Dec 07
0
[PATCH RFC 1/4] crypto: engine - Permit to enqueue all async requests
...int ret, rtype;
> > + int ret;
> > + struct crypto_engine_reqctx *enginectx;
> >
> > spin_lock_irqsave(&engine->queue_lock, flags);
> >
> > @@ -94,7 +93,6 @@ static void crypto_pump_requests(struct crypto_engine *engine,
> >
> > spin_unlock_irqrestore(&engine->queue_lock, flags);
> >
> > - rtype = crypto_tfm_alg_type(engine->cur_req->tfm);
> > /* Until here we get the request need to be encrypted successfully */
> > if (!was_busy && engine->prepare_crypt_hardware) {
> > ret = eng...
2014 Jun 20
3
[PATCH v1 0/2] block: virtio-blk: support multi vq per virtio-blk
Hi,
These patches try to support multiple virtual queues (multi-vq) in one
virtio-blk device, and map each virtual queue (vq) to a blk-mq
hardware queue.
With this approach, both the scalability and the performance of a
virtio-blk device can be improved.
To verify the improvement, I implemented virtio-blk multi-vq over
qemu's dataplane feature, and both handling host notification
from each vq and
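The mapping described here goes through blk-mq's tag set: advertising one hardware queue per virtqueue lets the block layer spread its software queues across the vqs. A hedged sketch of the registration step (field values are illustrative, and the real driver initialises more of the set than this):

#include <linux/blk-mq.h>
#include <linux/numa.h>
#include <linux/string.h>

/* One blk-mq hardware queue per virtio virtqueue: requests submitted
 * on hw queue i go to vq i, and completions for vq i are reaped under
 * that queue's lock only. */
static int my_vblk_init_tags(struct blk_mq_tag_set *set,
                             const struct blk_mq_ops *ops,
                             unsigned int num_vqs)
{
        memset(set, 0, sizeof(*set));
        set->ops = ops;
        set->nr_hw_queues = num_vqs;    /* the multi-vq change in essence */
        set->queue_depth = 64;
        set->numa_node = NUMA_NO_NODE;
        return blk_mq_alloc_tag_set(set);
}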