Displaying 8 results from an estimated 8 matches for "try_set_locked".
2014 May 08
2
[PATCH v10 09/19] qspinlock: Prepare for unfair lock support
...)
> +static __always_inline int get_qlock(struct qspinlock *lock)
> {
> struct __qspinlock *l = (void *)lock;
>
> barrier();
> ACCESS_ONCE(l->locked) = _Q_LOCKED_VAL;
> barrier();
> + return 1;
> }
and here you make a horribly named function more horrible;
try_set_locked() is what it is now.
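For reference, the rename suggested here is what shows up in v11; reconstructed from the hunks quoted in the later messages, the fair-path function plausibly reads:

static __always_inline int try_set_locked(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;

	/*
	 * Only the queue head can reach this point, so a plain byte
	 * store is enough; the barrier() pair constrains the compiler
	 * only, not the CPU.
	 */
	barrier();
	ACCESS_ONCE(l->locked) = _Q_LOCKED_VAL;
	barrier();
	return 1;	/* always succeeds in the fair, queued case */
}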
2014 May 30
19
[PATCH v11 00/16] qspinlock: a 4-byte queue spinlock with PV support
v10->v11:
- Use a simple test-and-set unfair lock to simplify the code,
  but performance may suffer a bit for large guests with many CPUs
  (see the sketch below).
- Take out Raghavendra KT's test results as the unfair lock changes
may render some of his results invalid.
- Add PV support without increasing the size of the core queue node
structure.
- Other minor changes to address some of the
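A minimal sketch of the test-and-set unfair lock described in the first bullet, assuming kernel-style cmpxchg() and the __qspinlock layout from the series; the function name is illustrative, not taken from the patches:

static inline void unfair_lock(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;

	/*
	 * No queueing: every contender races for the locked byte
	 * directly, so a late arrival can steal the lock from an
	 * earlier waiter; that is what makes it unfair.
	 */
	while (cmpxchg(&l->locked, 0, _Q_LOCKED_VAL) != 0)
		cpu_relax();
}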
2014 May 10
0
[PATCH v10 09/19] qspinlock: Prepare for unfair lock support
...struct qspinlock *lock)
>> {
>> struct __qspinlock *l = (void *)lock;
>>
>> barrier();
>> ACCESS_ONCE(l->locked) = _Q_LOCKED_VAL;
>> barrier();
>> + return 1;
>> }
> and here you make a horribly named function more horrible;
> try_set_locked() is what it is now.
Will do.
-Longman
2014 Jun 11
3
[PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
...paths for this case? Are you worried about the upper 24 bits?
> diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
> index ae1b19d..3723c83 100644
> --- a/kernel/locking/qspinlock.c
> +++ b/kernel/locking/qspinlock.c
> @@ -217,6 +217,14 @@ static __always_inline int try_set_locked(struct qspinlock *lock)
> {
> struct __qspinlock *l = (void *)lock;
>
> +#ifdef CONFIG_VIRT_UNFAIR_LOCKS
> + /*
> + * Need to use atomic operation to grab the lock when lock stealing
> + * can happen.
> + */
> + if (static_key_false(&virt_unfairlocks_enabled))...
2014 Jun 12
0
[PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
...h and paravirt spinlock in the slowpath.
>> diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
>> index ae1b19d..3723c83 100644
>> --- a/kernel/locking/qspinlock.c
>> +++ b/kernel/locking/qspinlock.c
>> @@ -217,6 +217,14 @@ static __always_inline int try_set_locked(struct qspinlock *lock)
>> {
>> struct __qspinlock *l = (void *)lock;
>>
>> +#ifdef CONFIG_VIRT_UNFAIR_LOCKS
>> + /*
>> + * Need to use atomic operation to grab the lock when lock stealing
>> + * can happen.
>> + */
>> + if (static_ke...
2014 May 30
0
[PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
...enabled\n");
+
+ return 0;
+}
+early_initcall(unfair_locks_init_jump);
+
+#endif
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index ae1b19d..3723c83 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -217,6 +217,14 @@ static __always_inline int try_set_locked(struct qspinlock *lock)
{
struct __qspinlock *l = (void *)lock;

+#ifdef CONFIG_VIRT_UNFAIR_LOCKS
+ /*
+ * Need to use atomic operation to grab the lock when lock stealing
+ * can happen.
+ */
+ if (static_key_false(&virt_unfairlocks_enabled))
+ return cmpxchg(&l->locked, 0, _Q_L...
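Reassembling the truncated hunk, the complete function in this patch plausibly becomes the following; the "== 0" on the cmpxchg() return is inferred from the visible fragment rather than copied verbatim:

static __always_inline int try_set_locked(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;

#ifdef CONFIG_VIRT_UNFAIR_LOCKS
	/*
	 * Need to use atomic operation to grab the lock when lock stealing
	 * can happen.
	 */
	if (static_key_false(&virt_unfairlocks_enabled))
		return cmpxchg(&l->locked, 0, _Q_LOCKED_VAL) == 0;
#endif
	barrier();
	ACCESS_ONCE(l->locked) = _Q_LOCKED_VAL;
	barrier();
	return 1;
}

cmpxchg() returns the old value, so the comparison with 0 yields 1 exactly when this CPU won the race for the locked byte.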
2014 May 30
0
[PATCH v11 14/16] pvqspinlock: Add qspinlock para-virtualization support
...avirt_spinlocks_enabled)
+# define MAX_NODES 8
+# define pv_qspinlock_enabled() static_key_false(&paravirt_spinlocks_enabled)
#else
-#define pv_qspinlock_enabled() false
+# define MAX_NODES 4
+# define pv_qspinlock_enabled() false
#endif
/*
@@ -243,6 +248,22 @@ static __always_inline int try_set_locked(struct qspinlock *lock)
return 1;
}
+/*
+ * Para-virtualization (PV) queue spinlock support
+ */
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#include <asm/pvqspinlock.h>
+#else
+static inline void pv_init_vars(struct mcs_spinlock *mcs, int cpu) {}
+static inline void pv_head_spin_check(struct mcs...
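The pv_qspinlock_enabled() gate above costs nothing when paravirtualization is compiled out: it expands to the constant false and the compiler deletes the branch, while with CONFIG_PARAVIRT_SPINLOCKS it is a static key, i.e. a runtime-patched branch. A hypothetical call site (pv_wait_head() is an illustrative name, not necessarily one used by the series):

	if (pv_qspinlock_enabled())
		pv_wait_head(lock, node);	/* PV hook; vanishes when disabled */

The empty static-inline stubs at the end of the hunk serve the same purpose for unconditional call sites: when PV is off they compile to nothing.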
2014 May 07
32
[PATCH v10 00/19] qspinlock: a 4-byte queue spinlock with PV support
v9->v10:
- Make some minor changes to qspinlock.c to accommodate review feedback.
- Change author to PeterZ for 2 of the patches.
- Include Raghavendra KT's test results in patch 18.
v8->v9:
- Integrate PeterZ's version of the queue spinlock patch with some
modification:
http://lkml.kernel.org/r/20140310154236.038181843@infradead.org
- Break the more complex