Search results for "pv_virt_spin_lock"
2017 Sep 05
2
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
...df 100644
> --- a/arch/x86/include/asm/paravirt.h
> +++ b/arch/x86/include/asm/paravirt.h
> @@ -725,6 +725,11 @@ static __always_inline bool pv_vcpu_is_preempted(long cpu)
> return PVOP_CALLEE1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
> }
>
> +static __always_inline bool pv_virt_spin_lock(struct qspinlock *lock)
> +{
> + return PVOP_CALLEE1(bool, pv_lock_ops.virt_spin_lock, lock);
> +}
> +
> #endif /* SMP && PARAVIRT_SPINLOCKS */
>
> #ifdef CONFIG_X86_32
> diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
&...
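The paravirt_types.h hunk is cut off in this excerpt. Given the PVOP_CALLEE1 call above, the corresponding change is presumably a paravirt_callee_save slot added to struct pv_lock_ops. The following is a reconstructed sketch under that assumption, with simplified stand-in types, not the actual hunk:

/* Reconstructed sketch (assumption, not quoted from the patch): the
 * member that PVOP_CALLEE1(bool, pv_lock_ops.virt_spin_lock, lock)
 * presumably dispatches through. Types are simplified stand-ins. */
struct paravirt_callee_save { void *func; };

struct pv_lock_ops_sketch {
	struct paravirt_callee_save vcpu_is_preempted;
	struct paravirt_callee_save virt_spin_lock;	/* presumed new slot */
};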
2017 Sep 05
7
[PATCH 0/4] make virt_spin_lock() a pvops function
With virt_spin_lock() being a pvops function, the bare metal case can be
optimized by patching the call away completely. In case the kernel is
running as a guest it can decide whether to use paravirtualized spinlocks,
the current fallback to the unfair test-and-set scheme, or to mimic the
bare metal behavior.
Juergen Gross (4):
paravirt: add generic _paravirt_false() function
paravirt: switch
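To illustrate the mechanism the cover letter describes, here is a minimal user-space model of the pvops dispatch pattern: a per-operation function pointer that boot code fills in, so bare metal keeps a trivial always-false handler (the call the paravirt patching machinery can remove entirely) while a guest installs its own. All *_model names are hypothetical; this is a sketch of the pattern, not the patch's code.

/* Minimal user-space model of the pvops dispatch pattern; all names
 * here (pv_lock_ops_model, qspinlock_model, ...) are hypothetical. */
#include <stdbool.h>
#include <stdio.h>

struct qspinlock_model { int val; };

/* One patchable slot per operation, like pv_lock_ops.virt_spin_lock. */
struct pv_lock_ops_model {
	bool (*virt_spin_lock)(struct qspinlock_model *lock);
};

static bool model_paravirt_false(struct qspinlock_model *lock)
{
	(void)lock;
	return false;	/* bare metal: fall through to the native qspinlock path */
}

static bool model_guest_tas_lock(struct qspinlock_model *lock)
{
	lock->val = 1;	/* stand-in for a guest-friendly locking scheme */
	return true;
}

static struct pv_lock_ops_model pv_lock_ops_model = {
	.virt_spin_lock = model_paravirt_false,	/* bare metal default */
};

int main(void)
{
	struct qspinlock_model lock = { 0 };

	printf("bare metal handler: %d\n", pv_lock_ops_model.virt_spin_lock(&lock));

	/* A guest would install its own handler during early boot instead. */
	pv_lock_ops_model.virt_spin_lock = model_guest_tas_lock;
	printf("guest handler:      %d\n", pv_lock_ops_model.virt_spin_lock(&lock));
	return 0;
}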
2017 Sep 05
3
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
...rt.h
>>> +++ b/arch/x86/include/asm/paravirt.h
>>> @@ -725,6 +725,11 @@ static __always_inline bool pv_vcpu_is_preempted(long cpu)
>>> return PVOP_CALLEE1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
>>> }
>>>
>>> +static __always_inline bool pv_virt_spin_lock(struct qspinlock *lock)
>>> +{
>>> + return PVOP_CALLEE1(bool, pv_lock_ops.virt_spin_lock, lock);
>>> +}
>>> +
>>> #endif /* SMP && PARAVIRT_SPINLOCKS */
>>>
>>> #ifdef CONFIG_X86_32
>>> diff --git a/arch/x86/includ...
2017 Sep 05
0
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
...h/x86/include/asm/paravirt.h
>> +++ b/arch/x86/include/asm/paravirt.h
>> @@ -725,6 +725,11 @@ static __always_inline bool pv_vcpu_is_preempted(long cpu)
>> return PVOP_CALLEE1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
>> }
>>
>> +static __always_inline bool pv_virt_spin_lock(struct qspinlock *lock)
>> +{
>> + return PVOP_CALLEE1(bool, pv_lock_ops.virt_spin_lock, lock);
>> +}
>> +
>> #endif /* SMP && PARAVIRT_SPINLOCKS */
>>
>> #ifdef CONFIG_X86_32
>> diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x8...
2017 Sep 05
0
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
...rt.h
index c25dd22f7c70..d9e954fb37df 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -725,6 +725,11 @@ static __always_inline bool pv_vcpu_is_preempted(long cpu)
return PVOP_CALLEE1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
}

+static __always_inline bool pv_virt_spin_lock(struct qspinlock *lock)
+{
+ return PVOP_CALLEE1(bool, pv_lock_ops.virt_spin_lock, lock);
+}
+
#endif /* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 19efefc0e27e..928f5e7953a7 100644
-...
2017 Sep 05
1
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
...d_spin_lock_slowpath(struct qspinlock *lock, u32 val);
> extern void __pv_init_lock_hash(void);
> #ifdef CONFIG_PARAVIRT
> #define virt_spin_lock virt_spin_lock
> +#ifdef CONFIG_PARAVIRT_SPINLOCKS
> static inline bool virt_spin_lock(struct qspinlock *lock)
> {
> + return pv_virt_spin_lock(lock);
> +}
> +#else
> +static inline bool virt_spin_lock(struct qspinlock *lock)
> +{
> + return native_virt_spin_lock(lock);
> }
> +#endif /* CONFIG_PARAVIRT_SPINLOCKS */
> #endif /* CONFIG_PARAVIRT */
Because I think the above only ever uses native_virt_spin_lock() whe...
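The truncated comment above refers to native_virt_spin_lock(), i.e. the unfair test-and-set fallback the cover letter mentions. Below is a user-space model of that scheme, with C11 atomics standing in for the kernel's and hypothetical *_model names; treat the details as an approximation of the technique, not the kernel function.

#include <stdatomic.h>
#include <stdbool.h>

struct qspinlock_model { atomic_int val; };

/* Unfair test-and-set: every waiter races on the whole lock word
 * instead of queueing, which sidesteps lock-holder preemption in a
 * guest at the cost of fairness. */
static bool model_tas_spin_lock(struct qspinlock_model *lock)
{
	int expected;

	do {
		while (atomic_load(&lock->val) != 0)
			;	/* spin; the kernel would use cpu_relax() here */
		expected = 0;
	} while (!atomic_compare_exchange_strong(&lock->val, &expected, 1));

	return true;
}

int main(void)
{
	struct qspinlock_model lock = { 0 };

	model_tas_spin_lock(&lock);	/* uncontended: acquires immediately */
	atomic_store(&lock.val, 0);	/* unlock */
	return 0;
}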
2017 Sep 05
0
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
...+ b/arch/x86/include/asm/paravirt.h
>>>> @@ -725,6 +725,11 @@ static __always_inline bool pv_vcpu_is_preempted(long cpu)
>>>> return PVOP_CALLEE1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
>>>> }
>>>>
>>>> +static __always_inline bool pv_virt_spin_lock(struct qspinlock *lock)
>>>> +{
>>>> + return PVOP_CALLEE1(bool, pv_lock_ops.virt_spin_lock, lock);
>>>> +}
>>>> +
>>>> #endif /* SMP && PARAVIRT_SPINLOCKS */
>>>>
>>>> #ifdef CONFIG_X86_32
>>>&...
&...