Displaying 14 results from an estimated 14 matches for "native_virt_spin_lock".
2017 Sep 05
2
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
...dex 48a706f641f2..fbd98896385c 100644
> --- a/arch/x86/include/asm/qspinlock.h
> +++ b/arch/x86/include/asm/qspinlock.h
> @@ -17,6 +17,25 @@ static inline void native_queued_spin_unlock(struct qspinlock *lock)
> smp_store_release((u8 *)lock, 0);
> }
>
> +static inline bool native_virt_spin_lock(struct qspinlock *lock)
> +{
> +        if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
> +                return false;
> +
> +        /*
> +         * On hypervisors without PARAVIRT_SPINLOCKS support we fall
> +         * back to a Test-and-Set spinlock, because fair locks have
> +         * horrible lock 'holder'...
2017 Sep 05
7
[PATCH 0/4] make virt_spin_lock() a pvops function
With virt_spin_lock() being a pvops function the bare metal case can be
optimized by patching the call away completely. A kernel running
as a guest can decide whether to use paravirtualized spinlocks, the
current fallback to the unfair test-and-set scheme, or to mimic the
bare metal behavior.
Juergen Gross (4):
paravirt: add generic _paravirt_false() function
paravirt: switch
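A minimal sketch of the indirection the cover letter describes, using a plain C function pointer in place of the real pvops plumbing; the names spin_lock_false and virt_spin_lock_hook are illustrative, not identifiers from the patches:

#include <stdbool.h>

struct qspinlock;                               /* opaque for this sketch */

/* Bare metal: report "not virtualized" so the regular fair qspinlock
 * path is taken; paravirt patching can turn this into a constant. */
static bool spin_lock_false(struct qspinlock *lock)
{
        return false;
}

/* Stand-in for the pvops slot the series adds. */
static bool (*virt_spin_lock_hook)(struct qspinlock *lock) = spin_lock_false;

static inline bool virt_spin_lock(struct qspinlock *lock)
{
        /* A guest kernel may repoint the hook at a test-and-set
         * fallback or at a fully paravirtualized lock; bare metal
         * keeps the constant-false behavior. */
        return virt_spin_lock_hook(lock);
}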
2017 Sep 05
1
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
...h/x86/include/asm/qspinlock.h
> +++ b/arch/x86/include/asm/qspinlock.h
> @@ -17,6 +17,25 @@ static inline void native_queued_spin_unlock(struct qspinlock *lock)
> smp_store_release((u8 *)lock, 0);
> }
>
Should this not have:
#ifdef CONFIG_PARAVIRT
?
> +static inline bool native_virt_spin_lock(struct qspinlock *lock)
> +{
> +        if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
> +                return false;
> +
> +        /*
> +         * On hypervisors without PARAVIRT_SPINLOCKS support we fall
> +         * back to a Test-and-Set spinlock, because fair locks have
> +         * horrible lock 'holder'...
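The guard the reviewer asks about would look roughly like the sketch below; whether patch 3/4 needs it, and where it would sit in qspinlock.h, is exactly the open question (the placement here is an assumption, not the patch text):

#ifdef CONFIG_PARAVIRT
/* Only a paravirt-enabled build needs the native fallback helper; a
 * !CONFIG_PARAVIRT kernel keeps the generic qspinlock code unmodified. */
static inline bool native_virt_spin_lock(struct qspinlock *lock)
{
        /* ... test-and-set fallback body as in the hunk above ... */
        return true;
}
#endif /* CONFIG_PARAVIRT */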
2017 Sep 05
2
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
...dex 48a706f641f2..fbd98896385c 100644
> --- a/arch/x86/include/asm/qspinlock.h
> +++ b/arch/x86/include/asm/qspinlock.h
> @@ -17,6 +17,25 @@ static inline void native_queued_spin_unlock(struct qspinlock *lock)
> smp_store_release((u8 *)lock, 0);
> }
>
> +static inline bool native_virt_spin_lock(struct qspinlock *lock)
> +{
> +        if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
> +                return false;
> +
I think you can take the above if statement out, as you have done the test in
native_pv_lock_init(). So the test will also be false here.
As this patch series is x86 specific, you should pro...
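The suggestion rendered as code, with hypothetical wiring (the pv_lock_ops field is assumed; _paravirt_false is the helper patch 1/4 introduces): do the hypervisor check once during boot and select the implementation there, so the lock fast path never repeats it.

/* Sketch only: choose the virt_spin_lock implementation at init time. */
void __init native_pv_lock_init(void)
{
        if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
                /* Bare metal: the hook collapses to "return false". */
                pv_lock_ops.virt_spin_lock = _paravirt_false;
        else
                /* Guest without a better option: test-and-set fallback. */
                pv_lock_ops.virt_spin_lock = native_virt_spin_lock;
}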
2017 Sep 05
0
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
...5c 100644
>> --- a/arch/x86/include/asm/qspinlock.h
>> +++ b/arch/x86/include/asm/qspinlock.h
>> @@ -17,6 +17,25 @@ static inline void native_queued_spin_unlock(struct qspinlock *lock)
>> smp_store_release((u8 *)lock, 0);
>> }
>>
>> +static inline bool native_virt_spin_lock(struct qspinlock *lock)
>> +{
>> +        if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
>> +                return false;
>> +
>> +        /*
>> +         * On hypervisors without PARAVIRT_SPINLOCKS support we fall
>> +         * back to a Test-and-Set spinlock, because fair locks have
>> +         *...
2017 Sep 05
0
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
...arch/x86/include/asm/qspinlock.h
index 48a706f641f2..fbd98896385c 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -17,6 +17,25 @@ static inline void native_queued_spin_unlock(struct qspinlock *lock)
smp_store_release((u8 *)lock, 0);
}
+static inline bool native_virt_spin_lock(struct qspinlock *lock)
+{
+        if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
+                return false;
+
+        /*
+         * On hypervisors without PARAVIRT_SPINLOCKS support we fall
+         * back to a Test-and-Set spinlock, because fair locks have
+         * horrible lock 'holder' preemption issues.
+         */
+
+        do {
+                whi...
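The hunk is cut off inside the loop; the test-and-set fallback it introduces follows the shape of the pre-existing upstream virt_spin_lock() body, roughly (reconstructed as a sketch, not quoted from the patch):

        do {
                /* Wait until the lock word reads free... */
                while (atomic_read(&lock->val) != 0)
                        cpu_relax();
                /* ...then try to take it; retry if another CPU won the race. */
        } while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

        return true;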
2017 Sep 05
3
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
.../arch/x86/include/asm/qspinlock.h
>>> +++ b/arch/x86/include/asm/qspinlock.h
>>> @@ -17,6 +17,25 @@ static inline void native_queued_spin_unlock(struct qspinlock *lock)
>>> smp_store_release((u8 *)lock, 0);
>>> }
>>>
>>> +static inline bool native_virt_spin_lock(struct qspinlock *lock)
>>> +{
>>> +        if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
>>> +                return false;
>>> +
>>> +        /*
>>> +         * On hypervisors without PARAVIRT_SPINLOCKS support we fall
>>> +         * back to a Test-and-Set spinlock, because f...
2017 Sep 05
0
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
On Tue, Sep 05, 2017 at 10:02:57AM -0400, Waiman Long wrote:
> On 09/05/2017 09:24 AM, Juergen Gross wrote:
> > +static inline bool native_virt_spin_lock(struct qspinlock *lock)
> > +{
> > +        if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
> > +                return false;
> > +
>
> I think you can take the above if statement out, as you have done the test in
> native_pv_lock_init(). So the test will also be false here.
That does mean we...
2017 Sep 05
0
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
...pinlock.h
>>>> +++ b/arch/x86/include/asm/qspinlock.h
>>>> @@ -17,6 +17,25 @@ static inline void native_queued_spin_unlock(struct qspinlock *lock)
>>>> smp_store_release((u8 *)lock, 0);
>>>> }
>>>>
>>>> +static inline bool native_virt_spin_lock(struct qspinlock *lock)
>>>> +{
>>>> +        if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
>>>> +                return false;
>>>> +
>>>> +        /*
>>>> +         * On hypervisors without PARAVIRT_SPINLOCKS support we fall
>>>> +         * back to a Test...