Displaying 13 results from an estimated 783 matches for "atomic_read".
2020 Sep 26
1
[PATCH 1/2] ext4/xfs: add page refcount helper
On Fri, Sep 25, 2020 at 01:44:41PM -0700, Ralph Campbell wrote:
> error = ___wait_var_event(&page->_refcount,
> - atomic_read(&page->_refcount) == 1,
> + dax_layout_is_idle_page(page),
> TASK_INTERRUPTIBLE, 0, 0,
> ext4_wait_dax_page(ei));
> +++ b/fs/xfs/xfs_file.c
> @@ -750,7 +750,7 @@ xfs_break_dax_layouts(
>
> *retry = true;
> return ___wait_var_event(&page->_re...
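The helper being added wraps the open-coded refcount test shown in the hunk. A minimal sketch of what such a helper might look like, with the body assumed from the check it replaces (a DAX page is idle once the filesystem holds the only reference):

/* Sketch only: body assumed from the open-coded test above. */
static inline bool dax_layout_is_idle_page(struct page *page)
{
        return page_ref_count(page) == 1;
}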
2010 Aug 03
2
[PATCH 6/6] staging: hv: Gracefully handle SCSI resets
...reset = 0;
+ spin_lock_init(&storDevice->lock);
+
Device->Extension = storDevice;
return storDevice;
@@ -101,6 +106,7 @@ static inline struct storvsc_device *AllocStorDevice(struct hv_device *Device)
static inline void FreeStorDevice(struct storvsc_device *Device)
{
/* ASSERT(atomic_read(&Device->RefCount) == 0); */
+ /*kfree(Device->lock);*/
kfree(Device);
}
@@ -108,13 +114,24 @@ static inline void FreeStorDevice(struct storvsc_device *Device)
static inline struct storvsc_device *GetStorDevice(struct hv_device *Device)
{
struct storvsc_device *storDevice;
+ unsi...
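The thrust of the patch is to guard the device's reference count with the new spinlock, so a concurrent reset cannot free the device mid-lookup. A rough sketch of the resulting lookup; the locking shape is an assumption, not copied from the patch:

/* Sketch only: take the per-device lock around the reset check and the
 * reference bump so GetStorDevice() cannot race a reset that frees the
 * device. */
static inline struct storvsc_device *GetStorDevice(struct hv_device *Device)
{
        struct storvsc_device *storDevice = Device->Extension;
        unsigned long flags;

        spin_lock_irqsave(&storDevice->lock, flags);
        if (storDevice->reset) {
                /* device is being torn down; refuse new references */
                storDevice = NULL;
        } else {
                atomic_inc(&storDevice->RefCount);
        }
        spin_unlock_irqrestore(&storDevice->lock, flags);

        return storDevice;
}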
2009 Nov 07
1
Fwd: [PATCH (geoip)] use IO.pread from the io-extra lib if possible
...record = ""
- @mutex.synchronize {
- @file.seek(pos + (2*@record_length-1) * @databaseSegments[0])
- return nil unless record = @file.read(FULL_RECORD_LENGTH)
- }
+ off = pos + (2*@record_length-1) * @databaseSegments[0]
+ record = atomic_read(FULL_RECORD_LENGTH, off)
+ return nil unless record && record.size == FULL_RECORD_LENGTH
# The country code is the first byte:
code = record[0]
@@ -655,11 +658,8 @@ class GeoIP
throw "Invalid GeoIP database type, can''t look up Organizati...
2014 May 08
2
[PATCH v10 06/19] qspinlock: prolong the stay in the pending bit path
...lock and pending bits are set, we wait
> + * a while to see if either bit will be cleared.
> + * If there is no change, we return and get queued.
> + */
> + if (!retry)
> + return 0;
> + retry--;
> + cpu_relax();
> + cpu_relax();
> + *pval = val = atomic_read(&lock->val);
> + continue;
> + } else if (val == _Q_PENDING_VAL) {
> + /*
> + * Pending bit is set, but not the lock bit.
> + * Assuming that the pending bit holder is going to
> + * set the lock bit and clear the pending bit soon,
> + * it is better to...
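Stripped of the qspinlock machinery, the quoted logic is a bounded spin on the lock word: re-read a few times while both bits stay set, then give up and queue. A runnable userspace analogue using C11 atomics; the names and retry budget are illustrative:

#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

#define LOCKED  0x01
#define PENDING 0x02

/* Spin a bounded number of times while both bits are set, re-reading
 * the word each pass; return 0 (caller should queue) if nothing clears. */
static int wait_pending(atomic_int *lock, int retries)
{
        int val = atomic_load(lock);

        while ((val & (LOCKED | PENDING)) == (LOCKED | PENDING)) {
                if (retries-- == 0)
                        return 0;
                sched_yield();          /* stand-in for cpu_relax() */
                val = atomic_load(lock);
        }
        return 1;
}

int main(void)
{
        atomic_int lock = LOCKED | PENDING;
        printf("%d\n", wait_pending(&lock, 4));   /* prints 0 */
        return 0;
}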
2014 Jun 16
4
[PATCH 10/11] qspinlock: Paravirt support
...int val, old, new;
> +
> + for (;;) {
> + count = SPIN_THRESHOLD;
> +
> + do {
> + val = smp_load_acquire(&lock->val.counter);
> + if (!(val & _Q_LOCKED_PENDING_MASK))
> + return;
> + } while (--count);
> +
> + do {
> + pn = pv_decode_tail(atomic_read(&lock->val));
> +
> + while (pn->head == INVALID_HEAD)
> + cpu_relax();
> +
> + pn->head = smp_processor_id();
> +
> + } while (pn != pv_decode_tail(atomic_read(&lock->val)));
> +
> + /*
> + * Set _Q_LOCKED_SLOW; bail when the lock is fre...
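The structure of the quoted paravirt slow path is spin-then-sleep: burn a fixed budget of acquire-loads hoping the lock frees quickly, then arrange to be woken instead of spinning. The patch does this with a hypercall; the sketch below substitutes a Linux futex, so it is an analogue, not the patch's code:

#include <linux/futex.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <unistd.h>

#define SPIN_BUDGET 1024
#define WORD_LOCKED 1

/* Spin up to SPIN_BUDGET iterations, then sleep in the kernel until
 * the holder wakes us, mirroring the spin-then-halt shape above. */
static void wait_for_unlock(atomic_int *lock)
{
        for (;;) {
                for (int i = 0; i < SPIN_BUDGET; i++) {
                        if (atomic_load_explicit(lock, memory_order_acquire) == 0)
                                return;
                }
                /* Sleeps only while *lock still equals WORD_LOCKED. */
                syscall(SYS_futex, (int *)lock, FUTEX_WAIT, WORD_LOCKED,
                        NULL, NULL, 0);
        }
}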
2019 Nov 11
2
[PATCH net-next 12/14] vsock/vmci: register vmci_transport only when VMCI guest/host are active
...ch is great). Shouldn't vhost behave similarly, so that any module
that registers a h2g transport only does so if it is in active use?
> --- a/drivers/misc/vmw_vmci/vmci_host.c
> +++ b/drivers/misc/vmw_vmci/vmci_host.c
> @@ -108,6 +108,11 @@ bool vmci_host_code_active(void)
> atomic_read(&vmci_host_active_users) > 0);
> }
>
> +int vmci_host_users(void)
> +{
> + return atomic_read(&vmci_host_active_users);
> +}
> +
> /*
> * Called on open of /dev/vmci.
> */
> @@ -338,6 +343,8 @@ static int vmci_host_do_init_context(struct
> vmci_...
2013 Sep 12
6
[PATCH] RFC xen: suppress Coverity warnings about atomic_read and atomic_set.
...eletions(-)
diff --git a/xen/include/asm-x86/atomic.h b/xen/include/asm-x86/atomic.h
index e476ab5..cfa3f66 100644
--- a/xen/include/asm-x86/atomic.h
+++ b/xen/include/asm-x86/atomic.h
@@ -70,7 +70,11 @@ typedef struct { int counter; } atomic_t;
* Atomically reads the value of @v.
*/
#define _atomic_read(v) ((v).counter)
-#define atomic_read(v) read_atomic(&((v)->counter))
+static inline int atomic_read(atomic_t *v)
+{
+ /* coverity[incompatible_cast : FALSE] */
+ return read_atomic(&v->counter);
+}
/**
* atomic_set - set atomic variable
@@ -80,7 +84,11 @@ typedef struct...
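The reason the macro-to-inline conversion helps: a macro expands at every call site, so the Coverity comment would have to be repeated at each expansion, while a static inline gives the annotation exactly one definition to attach to. The same pattern in miniature (illustrative, not the Xen code; the checker name is hypothetical):

struct counter { volatile int value; };

/* Macro form: an analyzer annotation would need to sit at every use. */
#define counter_read_macro(c) ((c)->value)

/* Inline form: the suppression comment lives once, at the definition. */
static inline int counter_read(struct counter *c)
{
        /* coverity[some_checker : FALSE] */
        return c->value;
}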
2016 Apr 05
1
[PATCH v3 04/16] mm/balloon: use general movable page feature into balloon
...E_MOVABLE_MAPCOUNT_VALUE (-255)
> +#define PAGE_MOVABLE_MAPCOUNT_VALUE (-256)
> +#define PAGE_BALLOON_MAPCOUNT_VALUE PAGE_MOVABLE_MAPCOUNT_VALUE
>
> static inline int PageMovable(struct page *page)
> {
> - return ((test_bit(PG_movable, &(page)->flags) &&
> - atomic_read(&page->_mapcount) == PAGE_MOVABLE_MAPCOUNT_VALUE)
> - || PageBalloon(page));
> + return (test_bit(PG_movable, &(page)->flags) &&
> + atomic_read(&page->_mapcount) == PAGE_MOVABLE_MAPCOUNT_VALUE);
> }
>
> /* Caller should hold a PG_lock */
> @@...
2010 Aug 04
6
[PATCH -v2 0/3] jbd2 scalability patches
This version fixes three bugs in the 2nd patch of this series that
caused a kernel BUG when the system was under load. We weren't
accounting for t_outstanding_credits correctly, and there were race
conditions because I had overlooked that __jbd2_log_wait_for_space()
and jbd2_get_transaction() require j_state_lock to be write locked.
Theodore Ts'o (3):
jbd2: Use
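On the locking rule mentioned above: both functions mutate journal-wide state, so j_state_lock (an rwlock in this series) must be held for writing across the call. A minimal sketch of the required caller shape, assumed rather than taken from the patches:

write_lock(&journal->j_state_lock);
/* installs new_transaction as the journal's running transaction */
jbd2_get_transaction(journal, new_transaction);
write_unlock(&journal->j_state_lock);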
2007 Dec 06
0
[PATCH] linux/x86: Use cpu_relax() rather than barrier() in smp_call_function()
...c
===================================================================
--- head-2007-11-30.orig/arch/i386/kernel/smp-xen.c 2007-12-06 14:56:17.000000000 +0100
+++ head-2007-11-30/arch/i386/kernel/smp-xen.c 2007-12-04 09:36:41.000000000 +0100
@@ -525,11 +525,11 @@
/* Wait for response */
while (atomic_read(&data.started) != cpus)
- barrier();
+ cpu_relax();
if (wait)
while (atomic_read(&data.finished) != cpus)
- barrier();
+ cpu_relax();
spin_unlock(&call_lock);
return 0;
Index: head-2007-11-30/arch/x86_64/kernel/smp-xen.c
===============================================...
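The rationale: barrier() only prevents the compiler from caching the value across iterations, while cpu_relax() additionally hints the CPU that this is a busy-wait (PAUSE on x86), reducing pipeline and sibling-hyperthread contention, and it still implies a compiler barrier. The resulting idiom, as in the hunk above:

/* Spin-wait: re-read the counter each pass and hint the CPU. */
while (atomic_read(&data.started) != cpus)
        cpu_relax();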
2014 Apr 17
2
[PATCH v9 06/19] qspinlock: prolong the stay in the pending bit path
...lock and pending bits are set, we wait
> + * a while to see if either bit will be cleared.
> + * If there is no change, we return and get queued.
> + */
> + if (!retry)
> + return 0;
> + retry--;
> + cpu_relax();
> + cpu_relax();
> + *pval = val = atomic_read(&lock->val);
> + continue;
Since you gave up optimizing the _Q_PENDING_BITS != 8 case, why bother
with this? The switch from _Q_PENDING_VAL to _Q_LOCKED_VAL is atomic by
virtue of your (endian-challenged) clear_pending_set_locked().
> + } else if ((val & _Q_LOCKED_PENDING_MASK)...
2014 Jun 12
2
[PATCH v11 06/16] qspinlock: prolong the stay in the pending bit path
...not the lock bit.
> >>+ * Assuming that the pending bit holder is going to
> >>+ * set the lock bit and clear the pending bit soon,
> >>+ * it is better to wait than to exit at this point.
> >>+ */
> >>+ cpu_relax();
> >>+ val = atomic_read(&lock->val);
> >>+ continue;
> >>+ }
> >>+
> >> new = _Q_LOCKED_VAL;
> >> if (val == new)
> >> new |= _Q_PENDING_VAL;
> >Wouldn't something like:
> >
> > while (atomic_read(&lock->val) == _Q_PEND...
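The reviewer's alternative is cut off above; one plausible completion, offered purely as an assumption, is a tight wait that spins only while the word is exactly "pending set, lock clear":

/* Assumed completion of the truncated suggestion, not a quote. */
while (atomic_read(&lock->val) == _Q_PENDING_VAL)
        cpu_relax();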
2012 Aug 01
7
[PATCH] Btrfs: barrier before waitqueue_active
...s/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -513,9 +513,11 @@ static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
rb_erase(&delayed_item->rb_node, root);
delayed_item->delayed_node->count--;
atomic_dec(&delayed_root->items);
- if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND &&
- waitqueue_active(&delayed_root->wait))
- wake_up(&delayed_root->wait);
+ if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) {
+ smp_mb();
+ if (waitqueue_active(&delayed_root->...
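The smp_mb() matters because waitqueue_active() is an unlocked peek: without a full barrier, the waker's decrement and its read of the wait queue can be reordered against the waiter's enqueue, and the wakeup is lost. A sketch of the waiter side this pairs with (shape assumed, not from the patch):

DEFINE_WAIT(wait);

/* prepare_to_wait() enqueues us and implies a barrier via
 * set_current_state(), so either we observe the low item count or the
 * waker observes us on the queue. */
prepare_to_wait(&delayed_root->wait, &wait, TASK_UNINTERRUPTIBLE);
if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_BACKGROUND)
        schedule();
finish_wait(&delayed_root->wait, &wait);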