search for: queued_spin_unlock

Displaying 14 results from an estimated 120 matches for "queued_spin_unlock".

2016 Nov 15
2
[PATCH v7 06/11] x86, paravirt: Add interface to support kvm/xen vcpu preempted check
...oid (*kick)(int cpu);
-        bool (*vcpu_is_preempted)(int cpu);
+        struct paravirt_callee_save vcpu_is_preempted;
 };
 /* This contains all the paravirt structures: we get a convenient
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -32,6 +32,12 @@ static inline void queued_spin_unlock(st
 {
         pv_queued_spin_unlock(lock);
 }
+
+#define vcpu_is_preempted vcpu_is_preempted
+static inline bool vcpu_is_preempted(int cpu)
+{
+        return pv_vcpu_is_preempted(cpu);
+}
 #else
 static inline void queued_spin_unlock(struct qspinlock *lock)
 {
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x...
2016 Nov 16
0
[PATCH v7 06/11] x86, paravirt: Add interface to support kvm/xen vcpu preempted check
...vcpu_is_preempted)(int cpu);
> +        struct paravirt_callee_save vcpu_is_preempted;
> };
>
> /* This contains all the paravirt structures: we get a convenient
> --- a/arch/x86/include/asm/qspinlock.h
> +++ b/arch/x86/include/asm/qspinlock.h
> @@ -32,6 +32,12 @@ static inline void queued_spin_unlock(st
> {
>         pv_queued_spin_unlock(lock);
> }
> +
> +#define vcpu_is_preempted vcpu_is_preempted
> +static inline bool vcpu_is_preempted(int cpu)
> +{
> +        return pv_vcpu_is_preempted(cpu);
> +}
> #else
> static inline void queued_spin_unlock(struct qspinlock *lock...
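For context: the point of making vcpu_is_preempted() a patchable paravirt hook is that spin-wait loops can stop burning cycles when the lock holder's vCPU is not actually running. A minimal sketch of that usage pattern, with a hypothetical owner_running() helper standing in for the kernel's own checks (modeled on the spin-on-owner loops, not code from this patch):

/*
 * Sketch only: bail out of optimistic spinning when the owner's vCPU
 * has been preempted by the hypervisor, since it cannot make progress
 * (and so cannot release the lock) while it is not running.
 */
static bool spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
        while (owner_running(lock, owner)) {
                if (need_resched() || vcpu_is_preempted(task_cpu(owner)))
                        return false;
                cpu_relax();
        }
        return true;
}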
2018 Sep 10
2
[PATCH] x86/paravirt: Cleanup native_patch()
On Mon, Sep 10, 2018 at 08:54:12AM +0200, Juergen Gross wrote:
> > +        case PARAVIRT_PATCH(lock.queued_spin_unlock):
> > +                if (pv_is_native_spin_unlock())
> > +                        return paravirt_patch_insns(ibuf, len,
> > +                                        start_lock_queued_spin_unlock,
> > +                                        end_lock_queued_spin_unlock);
> > +                else
> > +                        return paravirt_patch_default(type, ibuf, addr, len);
> >...
2018 Sep 08
2
[PATCH] x86/paravirt: Cleanup native_patch()
...d_##ops##_##x)
+
         switch (type) {
 #ifdef CONFIG_PARAVIRT_XXL
         PATCH_SITE(irq, irq_disable);
@@ -54,32 +50,26 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len)
         PATCH_SITE(mmu, write_cr3);
 #endif
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
-        case PARAVIRT_PATCH(lock.queued_spin_unlock):
-                if (pv_is_native_spin_unlock()) {
-                        start = start_lock_queued_spin_unlock;
-                        end = end_lock_queued_spin_unlock;
-                        goto patch_site;
-                }
-                goto patch_default;
+        case PARAVIRT_PATCH(lock.queued_spin_unlock):
+                if (pv_is_native_spin_unlock())
+                        return paravirt_patch_insns(ibuf,...
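The return-based style above leans on paravirt_patch_insns(), which copies a native instruction sequence over the paravirt call site when it fits. A simplified sketch of its contract, condensed from the arch/x86/kernel/paravirt.c of that era:

/*
 * Sketch: copy the native instructions delimited by start/end over the
 * patch site buffer and report how many bytes were written. ibuf is
 * the call site, len the room available there; an alternative that
 * does not fit is a hard bug.
 */
unsigned paravirt_patch_insns(void *ibuf, unsigned len,
                              const char *start, const char *end)
{
        unsigned insn_len = end - start;

        BUG_ON(insn_len > len || start == NULL);
        memcpy(ibuf, start, insn_len);

        return insn_len;
}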
2020 Jul 05
1
[PATCH v2 5/6] powerpc/pseries: implement paravirt qspinlocks for SPLPAR
...l)
> +{
> +        if (!is_shared_processor())
> +                native_queued_spin_lock_slowpath(lock, val);
> +        else
> +                __pv_queued_spin_lock_slowpath(lock, val);
> +}

In a previous mail, I said that:

You may need to match the use of __pv_queued_spin_lock_slowpath() with
the corresponding __pv_queued_spin_unlock(), e.g.

#define queued_spin_unlock queued_spin_unlock
static inline void queued_spin_unlock(struct qspinlock *lock)
{
        if (!is_shared_processor())
                smp_store_release(&lock->locked, 0);
        else
                __pv_queued_spin_unlock(lock);
}

Otherwise, pv_kick() w...
2016 Dec 06
1
[PATCH v8 1/6] powerpc/qspinlock: powerpc support qspinlock
On Mon, Dec 05, 2016 at 10:19:21AM -0500, Pan Xinhui wrote:
> This patch adds the basic code to enable qspinlock on powerpc. qspinlock
> is a fair-lock implementation, and we have seen some performance
> improvement under some scenarios.
>
> queued_spin_unlock() releases the lock with a single write of 0 to the
> ::locked field, which sits at a different offset on big- and
> little-endian systems.
>
> We override some of the arch_spin_XXX functions because powerpc has
> io_sync logic that makes sure I/O operations are correctly ordered by
> the lock.
>
> Th...
2016 Jun 03
2
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...c83cd2
> --- /dev/null
> +++ b/arch/powerpc/include/asm/qspinlock.h
> @@ -0,0 +1,26 @@
> +#ifndef _ASM_POWERPC_QSPINLOCK_H
> +#define _ASM_POWERPC_QSPINLOCK_H
> +
> +#include <asm-generic/qspinlock_types.h>
> +
> +#define SPIN_THRESHOLD (1 << 15)
> +#define queued_spin_unlock queued_spin_unlock
> +
> +static inline void native_queued_spin_unlock(struct qspinlock *lock)
> +{
> +        u8 *locked = (u8 *)lock;
> +#ifdef __BIG_ENDIAN
> +        locked += 3;
> +#endif
> +        /* no load/store can be across the unlock() */
> +        smp_store_release(locked, 0);
> +}
> ...
2016 Dec 05
0
[PATCH v8 1/6] powerpc/qspinlock: powerpc support qspinlock
This patch adds the basic code to enable qspinlock on powerpc. qspinlock is a fair-lock implementation, and we have seen some performance improvement under some scenarios.

queued_spin_unlock() releases the lock with a single write of 0 to the ::locked field, which sits at a different offset on big- and little-endian systems.

We override some of the arch_spin_XXX functions because powerpc has io_sync logic that makes sure I/O operations are correctly ordered by the lock.

There is another special case, see co...
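The arch_spin_XXX override mentioned above does not appear in this excerpt. A hedged sketch of the idea, assuming the SYNC_IO barrier that powerpc's asm/spinlock.h carried at the time (an illustration, not the patch's literal code):

/*
 * Sketch: powerpc sets a per-CPU io_sync flag on MMIO accesses, and
 * SYNC_IO issues a full sync before the lock is released, so I/O done
 * inside the critical section cannot leak past the unlock.
 */
#define arch_spin_unlock arch_spin_unlock
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        SYNC_IO;
        queued_spin_unlock(lock);
}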
2016 Dec 14
1
[PATCH] arch: x86: kernel: fixed unused label issue
The patch_default label is only used from within case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock) and case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted), i.e. when #if defined(CONFIG_PARAVIRT_SPINLOCKS) is true. Therefore no code jumps to this label when CONFIG_PARAVIRT_SPINLOCKS is not defined, and the label should not exist in that case. Moving the #endif directive to just after that label fixes the...
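A simplified structural sketch of the described fix (not the literal kernel code): with the #endif moved below the label, patch_default only exists when the cases that jump to it exist, which silences -Wunused-label on CONFIG_PARAVIRT_SPINLOCKS=n builds:

        switch (type) {
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
        case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
                if (!pv_is_native_spin_unlock())
                        goto patch_default;
                /* ... patch in the native unlock ... */
                break;
patch_default:
#endif
        default:
                ret = paravirt_patch_default(type, ibuf, addr, len);
                break;
        }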
2017 Oct 25
0
[PATCH 03/13] x86/paravirt: Convert native patch assembly code strings to macros
...%cr3, " _REG_RET
> +#define NATIVE_WRITE_CR3        "mov " _REG_ARG1 ", %cr3"
> +#define NATIVE_FLUSH_TLB_SINGLE "invlpg (" _REG_ARG1 ")"
> +#define NATIVE_SWAPGS           "swapgs"
> +#define NATIVE_IRET             "iret"
> +#define NATIVE_QUEUED_SPIN_UNLOCK "movb $0, (" _REG_ARG1 ")"
> +
> /*
>  * Volatile isn't enough to prevent the compiler from reordering the
>  * read/write functions for the control registers and messing everything up.
> diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/pa...
2018 Sep 11
1
[PATCH v2] x86/paravirt: Cleanup native_patch()
...d_##ops##_##x)
+
         switch (type) {
 #ifdef CONFIG_PARAVIRT_XXL
         PATCH_SITE(irq, irq_disable);
@@ -54,32 +50,24 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len)
         PATCH_SITE(mmu, write_cr3);
 #endif
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
-        case PARAVIRT_PATCH(lock.queued_spin_unlock):
-                if (pv_is_native_spin_unlock()) {
-                        start = start_lock_queued_spin_unlock;
-                        end = end_lock_queued_spin_unlock;
-                        goto patch_site;
-                }
-                goto patch_default;
+        case PARAVIRT_PATCH(lock.queued_spin_unlock):
+                if (pv_is_native_spin_unlock())
+                        return paravirt_patch_insns(ibuf,...
2017 Oct 04
1
[PATCH 03/13] x86/paravirt: Convert native patch assembly code strings to macros
...TIVE_READ_CR3 "mov %cr3, " _REG_RET
+#define NATIVE_WRITE_CR3        "mov " _REG_ARG1 ", %cr3"
+#define NATIVE_FLUSH_TLB_SINGLE "invlpg (" _REG_ARG1 ")"
+#define NATIVE_SWAPGS           "swapgs"
+#define NATIVE_IRET             "iret"
+#define NATIVE_QUEUED_SPIN_UNLOCK "movb $0, (" _REG_ARG1 ")"
+
 /*
  * Volatile isn't enough to prevent the compiler from reordering the
  * read/write functions for the control registers and messing everything up.
diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
index 5...
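These strings feed the DEF_NATIVE() macro, which emits the native instruction bytes between start_/end_ symbols that native_patch() later copies over the call site. A sketch based on the paravirt_types.h of that era (simplified; the real macro also marks the labels global and visible):

/*
 * Sketch: DEF_NATIVE assembles `code` between two labels so the byte
 * range can be memcpy()d over a paravirt call site by
 * paravirt_patch_insns().
 */
#define DEF_NATIVE(ops, name, code)                                     \
        extern const char start_##ops##_##name[], end_##ops##_##name[];\
        asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")

DEF_NATIVE(pv_lock_ops, queued_spin_unlock, NATIVE_QUEUED_SPIN_UNLOCK);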
2020 Jul 06
0
[PATCH v3 5/6] powerpc/pseries: implement paravirt qspinlocks for SPLPAR
...clude <asm/paravirt.h>
 #define _Q_PENDING_LOOPS (1 << 9) /* not tuned */
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_unlock(struct qspinlock *lock);
+
+static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+        if (!is_shared_processor())
+                native_queued_spin_lock_slowpath(lock, val);
+        else
+                __pv_queued_spin_lock_slowpath(lock, val);
+}
+
+#define queued_spin_unlock queued_spin_unloc...
2016 Jun 02
0
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
.../qspinlock.h
new file mode 100644
index 0000000..fc83cd2
--- /dev/null
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -0,0 +1,26 @@
+#ifndef _ASM_POWERPC_QSPINLOCK_H
+#define _ASM_POWERPC_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+
+#define SPIN_THRESHOLD (1 << 15)
+#define queued_spin_unlock queued_spin_unlock
+
+static inline void native_queued_spin_unlock(struct qspinlock *lock)
+{
+        u8 *locked = (u8 *)lock;
+#ifdef __BIG_ENDIAN
+        locked += 3;
+#endif
+        /* no load/store can be across the unlock() */
+        smp_store_release(locked, 0);
+}
+
+static inline void queued_spin_unlock(struct qsp...