Displaying 20 results from an estimated 23 matches for "static_branch_likely".
2017 Sep 06 · 2 · [PATCH v2 1/2] paravirt/locks: use new static key for controlling call of virt_spin_lock()
...nlock *lock)
> #endif
>
> #ifdef CONFIG_PARAVIRT
> +DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
> +
> +void native_pv_lock_init(void) __init;
> +
> #define virt_spin_lock virt_spin_lock
> static inline bool virt_spin_lock(struct qspinlock *lock)
> {
> + if (!static_branch_likely(&virt_spin_lock_key))
> + return false;
> if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
> return false;
>
> @@ -65,6 +72,10 @@ static inline bool virt_spin_lock(struct qspinlock *lock)
>
> return true;
> }
> +#else
> +static inline void native_pv_lock_i...
2017 Sep 06 · 1 · [PATCH v2 1/2] paravirt/locks: use new static key for controlling call of virt_spin_lock()
On 09/06/2017 12:04 PM, Peter Zijlstra wrote:
> On Wed, Sep 06, 2017 at 11:49:49AM -0400, Waiman Long wrote:
>>> #define virt_spin_lock virt_spin_lock
>>> static inline bool virt_spin_lock(struct qspinlock *lock)
>>> {
>>> + if (!static_branch_likely(&virt_spin_lock_key))
>>> + return false;
>>> if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
>>> return false;
>>>
> Now native has two NOPs instead of one. Can't we merge these two static
> branches?
I guess we can remove the static_cpu_has...
2017 Sep 06 · 4 · [PATCH v2 0/2] guard virt_spin_lock() with a static key
With virt_spin_lock() being guarded by a static key the bare metal case
can be optimized by patching the call away completely. In case the kernel
is running as a guest it can decide whether to use paravirtualized
spinlocks, the current fallback to the unfair test-and-set scheme, or
to mimic the bare metal behavior.
V2:
- use static key instead of making virt_spin_lock() a pvops function
Juergen Gross
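For context, a minimal sketch of how the declarations quoted in the results above probably fit together. The body of native_pv_lock_init() is not part of the quoted snippets, so treat it as an assumption derived from the cover letter ("patching the call away completely" on bare metal):

#include <linux/init.h>
#include <linux/jump_label.h>
#include <asm/cpufeature.h>

DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);

/* Assumed init path: bare metal switches the key off once at boot, so the
 * static_branch_likely() test in virt_spin_lock() is patched to a NOP and
 * the test-and-set fallback is never entered. */
void __init native_pv_lock_init(void)
{
        if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
                static_branch_disable(&virt_spin_lock_key);
}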
2017 Sep 06 · 0 · [PATCH v2 1/2] paravirt/locks: use new static key for controlling call of virt_spin_lock()
On Wed, Sep 06, 2017 at 11:49:49AM -0400, Waiman Long wrote:
> > #define virt_spin_lock virt_spin_lock
> > static inline bool virt_spin_lock(struct qspinlock *lock)
> > {
> > + if (!static_branch_likely(&virt_spin_lock_key))
> > + return false;
> > if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
> > return false;
> >
Now native has two NOPs instead of one. Can't we merge these two static
branches?
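The merge asked for here is what the later v3 of the series ends up doing (see the v3 cover letter further down): the hypervisor test moves out of the fast path and into the one-time key setup, leaving a single patched branch. A rough sketch of the resulting check, not the exact v3 hunk:

static inline bool virt_spin_lock(struct qspinlock *lock)
{
        /* One static branch; native_pv_lock_init() disables the key on
         * bare metal, so this collapses to a single NOP there. */
        if (!static_branch_likely(&virt_spin_lock_key))
                return false;

        /* ... unfair test-and-set fallback, as in the hunk quoted above ... */

        return true;
}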
2017 Sep 06 · 0 · [PATCH v2 1/2] paravirt/locks: use new static key for controlling call of virt_spin_lock()
...15 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
#endif
#ifdef CONFIG_PARAVIRT
+DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
+
+void native_pv_lock_init(void) __init;
+
#define virt_spin_lock virt_spin_lock
static inline bool virt_spin_lock(struct qspinlock *lock)
{
+ if (!static_branch_likely(&virt_spin_lock_key))
+ return false;
if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
return false;
@@ -65,6 +72,10 @@ static inline bool virt_spin_lock(struct qspinlock *lock)
return true;
}
+#else
+static inline void native_pv_lock_init(void)
+{
+}
#endif /* CONFIG_PARAVIRT */
#...
2019 Jun 13 · 4 · [PATCH 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...o, 1);
+ }
+}
+
+void native_flush_tlb_others(const struct cpumask *cpumask,
+ const struct flush_tlb_info *info)
+{
+ native_flush_tlb_multi(cpumask, info);
}
/*
@@ -774,10 +816,15 @@ static void flush_tlb_on_cpus(const cpumask_t *cpumask,
{
int this_cpu = smp_processor_id();
+ if (static_branch_likely(&flush_tlb_multi_enabled)) {
+ flush_tlb_multi(cpumask, info);
+ return;
+ }
+
if (cpumask_test_cpu(this_cpu, cpumask)) {
lockdep_assert_irqs_enabled();
local_irq_disable();
- flush_tlb_func_local(info, TLB_LOCAL_MM_SHOOTDOWN);
+ flush_tlb_func_local((__force void *)info);
local_...
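flush_tlb_multi_enabled plays the same role as virt_spin_lock_key above: a one-time decision baked into the fast path via code patching. The quoted hunk does not show how the key is defined or toggled, so the sketch below is an assumption made only to illustrate the mechanism; the helper names are hypothetical:

#include <linux/init.h>
#include <linux/jump_label.h>

/* Assumed setup, not taken from the posted series: default to the new
 * concurrent flush path and let a backend that cannot flush local and
 * remote TLBs concurrently turn the key off. */
DEFINE_STATIC_KEY_TRUE(flush_tlb_multi_enabled);

static bool __init pv_has_flush_tlb_multi(void)   /* hypothetical predicate */
{
        return true;   /* placeholder; a real check would query the backend */
}

void __init example_tlb_flush_setup(void)         /* hypothetical hook */
{
        if (!pv_has_flush_tlb_multi())
                static_branch_disable(&flush_tlb_multi_enabled);
}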
2017 Sep 06 · 5 · [PATCH v3 0/2] guard virt_spin_lock() with a static key
With virt_spin_lock() being guarded by a static key the bare metal case
can be optimized by patching the call away completely. In case the kernel
is running as a guest it can decide whether to use paravirtualized
spinlocks, the current fallback to the unfair test-and-set scheme, or
to mimic the bare metal behavior.
V3:
- remove test for hypervisor environment from virt_spin_lock() as
suggested by
2019 Jun 25 · 0 · [PATCH 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...uct cpumask *cpumask,
> + const struct flush_tlb_info *info)
> +{
> + native_flush_tlb_multi(cpumask, info);
> }
>
> /*
> @@ -774,10 +816,15 @@ static void flush_tlb_on_cpus(const cpumask_t *cpumask,
> {
> int this_cpu = smp_processor_id();
>
> + if (static_branch_likely(&flush_tlb_multi_enabled)) {
> + flush_tlb_multi(cpumask, info);
> + return;
> + }
Probably needs a comment for posterity above the if()^^:
/* Use the optimized flush_tlb_multi() where we can. */
> --- a/arch/x86/xen/mmu_pv.c
> +++ b/arch/x86/xen/mmu_pv.c
> @@ -2474,6 +2...
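For reference, with the suggested comment folded in (acked in the follow-up below) the hunk would presumably read:

        /* Use the optimized flush_tlb_multi() where we can. */
        if (static_branch_likely(&flush_tlb_multi_enabled)) {
                flush_tlb_multi(cpumask, info);
                return;
        }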
2019 Jun 26 · 2 · [PATCH 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...const struct flush_tlb_info *info)
>> +{
>> + native_flush_tlb_multi(cpumask, info);
>> }
>>
>> /*
>> @@ -774,10 +816,15 @@ static void flush_tlb_on_cpus(const cpumask_t *cpumask,
>> {
>> int this_cpu = smp_processor_id();
>>
>> + if (static_branch_likely(&flush_tlb_multi_enabled)) {
>> + flush_tlb_multi(cpumask, info);
>> + return;
>> + }
>
> Probably needs a comment for posterity above the if()^^:
>
> /* Use the optimized flush_tlb_multi() where we can. */
Right.
>
>> --- a/arch/x86/xen/mmu_pv.c
&...
2019 May 31 · 2 · [RFC PATCH v2 04/12] x86/mm/tlb: Flush remote and local TLBs concurrently
...o, 1);
+ }
+}
+
+void native_flush_tlb_others(const struct cpumask *cpumask,
+ const struct flush_tlb_info *info)
+{
+ native_flush_tlb_multi(cpumask, info);
}
/*
@@ -773,10 +824,15 @@ static void flush_tlb_on_cpus(const cpumask_t *cpumask,
{
int this_cpu = smp_processor_id();
+ if (static_branch_likely(&flush_tlb_multi_enabled)) {
+ flush_tlb_multi(cpumask, info);
+ return;
+ }
+
if (cpumask_test_cpu(this_cpu, cpumask)) {
lockdep_assert_irqs_enabled();
local_irq_disable();
- flush_tlb_func_local(info, TLB_LOCAL_MM_SHOOTDOWN);
+ flush_tlb_func_local((__force void *)info);
local_...
2019 May 25 · 3 · [RFC PATCH 5/6] x86/mm/tlb: Flush remote and local TLBs concurrently
...o, 1);
+ }
+}
+
+void native_flush_tlb_others(const struct cpumask *cpumask,
+ const struct flush_tlb_info *info)
+{
+ native_flush_tlb_multi(cpumask, info);
}
/*
@@ -773,10 +824,15 @@ static void flush_tlb_on_cpus(const cpumask_t *cpumask,
{
int this_cpu = smp_processor_id();
+ if (static_branch_likely(&flush_tlb_multi_enabled)) {
+ flush_tlb_multi(cpumask, info);
+ return;
+ }
+
if (cpumask_test_cpu(this_cpu, cpumask)) {
lockdep_assert_irqs_enabled();
local_irq_disable();
- flush_tlb_func_local(info, TLB_LOCAL_MM_SHOOTDOWN);
+ flush_tlb_func_local((__force void *)info);
local_...
2016 Dec 06 · 6 · [PATCH v9 0/6] Implement qspinlock/pv-qspinlock on ppc
Hi All,
this is the fairlock patchset. You can apply the patches and build successfully.
Patches are based on linux-next.
qspinlock avoids the waiter-starvation issue. It has about the same speed in
the single-threaded case and can be much faster in high-contention situations,
especially when the spinlock is embedded within the data structure to be
protected.
v8 -> v9:
mv qspinlock config entry to