search for: native_pv_lock_init

Displaying 20 results from an estimated 39 matches for "native_pv_lock_init".

2017 Sep 06
4
[PATCH v2 0/2] guard virt_spin_lock() with a static key
With virt_spin_lock() being guarded by a static key the bare metal case can be optimized by patching the call away completely. When the kernel is running as a guest it can decide whether to use paravirtualized spinlocks, the current fallback to the unfair test-and-set scheme, or to mimic the bare metal behavior.

V2:
- use static key instead of making virt_spin_lock() a pvops function

Juergen Gross
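For orientation, a minimal sketch of the mechanism this cover letter describes, pieced together from the hunks quoted in the results below. The identifiers (virt_spin_lock_key, native_pv_lock_init) are taken from those hunks, but the file placement and call site are assumptions, not a verbatim copy of the merged patch.

/*
 * Minimal sketch, not the merged patch: a jump-label key that defaults
 * to "might be a guest" and is switched off once we know we are on
 * bare metal, so the virt_spin_lock() path costs (almost) nothing there.
 */
#include <linux/init.h>
#include <linux/jump_label.h>
#include <asm/cpufeature.h>

DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);

void __init native_pv_lock_init(void)
{
	/* Bare metal: disable the key so virt_spin_lock() bails out early. */
	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
		static_branch_disable(&virt_spin_lock_key);
}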
2017 Sep 06
2
[PATCH v2 1/2] paravirt/locks: use new static key for controlling call of virt_spin_lock()
...>
> #include <asm-generic/qspinlock_types.h>
> #include <asm/paravirt.h>
> @@ -46,9 +47,15 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
> #endif
>
> #ifdef CONFIG_PARAVIRT
> +DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
> +
> +void native_pv_lock_init(void) __init;
> +
> #define virt_spin_lock virt_spin_lock
> static inline bool virt_spin_lock(struct qspinlock *lock)
> {
> +	if (!static_branch_likely(&virt_spin_lock_key))
> +		return false;
> 	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
> 		return false;
>...
2017 Sep 06
0
[PATCH v2 1/2] paravirt/locks: use new static key for controlling call of virt_spin_lock()
...mp_label.h>
 #include <asm/cpufeature.h>
 #include <asm-generic/qspinlock_types.h>
 #include <asm/paravirt.h>
@@ -46,9 +47,15 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
 #endif

 #ifdef CONFIG_PARAVIRT
+DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
+
+void native_pv_lock_init(void) __init;
+
 #define virt_spin_lock virt_spin_lock
 static inline bool virt_spin_lock(struct qspinlock *lock)
 {
+	if (!static_branch_likely(&virt_spin_lock_key))
+		return false;
 	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
 		return false;
@@ -65,6 +72,10 @@ static inline bool virt_sp...
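Un-diffed, the guard added by this patch reads roughly as follows. The test-and-set fallback body is an assumption reconstructed from the "!= 0); ... return true;" tail visible in the PATCH 3/4 excerpts further down; treat this as a sketch rather than the exact merged code.

#ifdef CONFIG_PARAVIRT
DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);

void native_pv_lock_init(void) __init;

#define virt_spin_lock virt_spin_lock
static inline bool virt_spin_lock(struct qspinlock *lock)
{
	/* Disabled on bare metal by native_pv_lock_init(): bail out early. */
	if (!static_branch_likely(&virt_spin_lock_key))
		return false;
	/* Extra hypervisor check, dropped again in v3 of the series. */
	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
		return false;

	/* Unfair test-and-set fallback for guests (assumed body, see below). */
	do {
		while (atomic_read(&lock->val) != 0)
			cpu_relax();
	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

	return true;
}
#endif /* CONFIG_PARAVIRT */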
2017 Sep 05
2
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
...ock *lock)
> 	smp_store_release((u8 *)lock, 0);
> }
>
> +static inline bool native_virt_spin_lock(struct qspinlock *lock)
> +{
> +	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
> +		return false;
> +

I think you can take the above if statement out, as you have already done that test in native_pv_lock_init(). So the test will also be false here.

As this patch series is x86 specific, you should probably add "x86/" in front of "paravirt" in the patch titles.

Cheers, Longman
2017 Sep 05
0
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
...!= 0);
+
+	return true;
+}
+
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
 extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
 extern void __pv_init_lock_hash(void);
@@ -38,33 +57,32 @@ static inline bool vcpu_is_preempted(long cpu)
 {
 	return pv_vcpu_is_preempted(cpu);
 }
+
+void native_pv_lock_init(void) __init;
 #else
 static inline void queued_spin_unlock(struct qspinlock *lock)
 {
 	native_queued_spin_unlock(lock);
 }
+
+static inline void native_pv_lock_init(void)
+{
+}
 #endif

 #ifdef CONFIG_PARAVIRT
 #define virt_spin_lock virt_spin_lock
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
 static inlin...
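Stripped of the diff markers, the declaration pattern in this hunk is the usual one: a real prototype when CONFIG_PARAVIRT_SPINLOCKS is enabled and an empty inline stub otherwise, so callers can invoke native_pv_lock_init() unconditionally without #ifdefs at the call site.

#ifdef CONFIG_PARAVIRT_SPINLOCKS
/* Real implementation lives in arch code. */
void native_pv_lock_init(void) __init;
#else
/* Empty stub: the call compiles away when paravirt spinlocks are off. */
static inline void native_pv_lock_init(void)
{
}
#endif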
2017 Sep 05
3
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
...pin_lock_slowpath(struct qspinlock *lock, u32 val);
>>> extern void __pv_init_lock_hash(void);
>>> @@ -38,33 +57,32 @@ static inline bool vcpu_is_preempted(long cpu)
>>> {
>>> 	return pv_vcpu_is_preempted(cpu);
>>> }
>>> +
>>> +void native_pv_lock_init(void) __init;
>>> #else
>>> static inline void queued_spin_unlock(struct qspinlock *lock)
>>> {
>>> 	native_queued_spin_unlock(lock);
>>> }
>>> +
>>> +static inline void native_pv_lock_init(void)
>>> +{
>>> +}
>>...
2017 Sep 05
2
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
...FIG_PARAVIRT_SPINLOCKS
> extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
> extern void __pv_init_lock_hash(void);
> @@ -38,33 +57,32 @@ static inline bool vcpu_is_preempted(long cpu)
> {
> 	return pv_vcpu_is_preempted(cpu);
> }
> +
> +void native_pv_lock_init(void) __init;
> #else
> static inline void queued_spin_unlock(struct qspinlock *lock)
> {
> 	native_queued_spin_unlock(lock);
> }
> +
> +static inline void native_pv_lock_init(void)
> +{
> +}
> #endif
>
> #ifdef CONFIG_PARAVIRT
> #define virt_spin_lo...
2017 Sep 05
7
[PATCH 0/4] make virt_spin_lock() a pvops function
With virt_spin_lock() being a pvops function the bare metal case can be optimized by patching the call away completely. When the kernel is running as a guest it can decide whether to use paravirtualized spinlocks, the current fallback to the unfair test-and-set scheme, or to mimic the bare metal behavior.

Juergen Gross (4):
  paravirt: add generic _paravirt_false() function
  paravirt: switch
2017 Sep 06
5
[PATCH v3 0/2] guard virt_spin_lock() with a static key
With virt_spin_lock() being guarded by a static key the bare metal case can be optimized by patching the call away completely. When the kernel is running as a guest it can decide whether to use paravirtualized spinlocks, the current fallback to the unfair test-and-set scheme, or to mimic the bare metal behavior.

V3:
- remove test for hypervisor environment from virt_spin_lock() as suggested by
2017 Sep 06
1
[PATCH v2 1/2] paravirt/locks: use new static key for controlling call of virt_spin_lock()
...urn false;
>>> 	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
>>> 		return false;
>>>
> Now native has two NOPs instead of one. Can't we merge these two static
> branches?

I guess we can remove the static_cpu_has() call. Just that any spin_lock calls before native_pv_lock_init() will use virt_spin_lock(). That is still OK as the init call is done before SMP starts.

Cheers, Longman
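A sketch of the v3 shape this exchange leads to, assuming the same key name as above: the X86_FEATURE_HYPERVISOR test moves entirely into native_pv_lock_init(), leaving a single static branch in virt_spin_lock(). Any spin_lock() taken before that init call still goes through the test-and-set fallback, which is harmless because the call happens before SMP is started.

static inline bool virt_spin_lock(struct qspinlock *lock)
{
	/* On bare metal the key is disabled: return false and use the
	 * native queued spinlock. */
	if (!static_branch_likely(&virt_spin_lock_key))
		return false;

	/* Unfair test-and-set fallback, as in the v2 sketch above. */
	do {
		while (atomic_read(&lock->val) != 0)
			cpu_relax();
	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

	return true;
}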
2017 Sep 05
0
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
...inlock *lock, u32 val);
>>>> extern void __pv_init_lock_hash(void);
>>>> @@ -38,33 +57,32 @@ static inline bool vcpu_is_preempted(long cpu)
>>>> {
>>>> 	return pv_vcpu_is_preempted(cpu);
>>>> }
>>>> +
>>>> +void native_pv_lock_init(void) __init;
>>>> #else
>>>> static inline void queued_spin_unlock(struct qspinlock *lock)
>>>> {
>>>> 	native_queued_spin_unlock(lock);
>>>> }
>>>> +
>>>> +static inline void native_pv_lock_init(void)
>>...
2017 Sep 05
0
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
...extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
>> extern void __pv_init_lock_hash(void);
>> @@ -38,33 +57,32 @@ static inline bool vcpu_is_preempted(long cpu)
>> {
>> 	return pv_vcpu_is_preempted(cpu);
>> }
>> +
>> +void native_pv_lock_init(void) __init;
>> #else
>> static inline void queued_spin_unlock(struct qspinlock *lock)
>> {
>> 	native_queued_spin_unlock(lock);
>> }
>> +
>> +static inline void native_pv_lock_init(void)
>> +{
>> +}
>> #endif
>>
>> #...