Displaying 20 results from an estimated 354 matches for "config_paravirt_spinlocks".
2014 Jun 12
2
[PATCH v11 14/16] pvqspinlock: Add qspinlock para-virtualization support
On Fri, May 30, 2014 at 11:44:00AM -0400, Waiman Long wrote:
> @@ -19,13 +19,46 @@ extern struct static_key virt_unfairlocks_enabled;
> * that the clearing of the lock bit is done ASAP without artificial delay
> * due to compiler optimization.
> */
> +#ifdef CONFIG_PARAVIRT_SPINLOCKS
> +static __always_inline void __queue_spin_unlock(struct qspinlock *lock)
> +#else
> static inline void queue_spin_unlock(struct qspinlock *lock)
> +#endif
> {
> 	barrier();
> 	ACCESS_ONCE(*(u8 *)lock) = 0;
> 	barrier();
> }
>
> +#ifdef CONFIG_PARAVIRT_SPIN...
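For context: the unlock shown above is nothing more than a byte store bracketed by compiler barriers, so the lock bit is cleared without compiler-induced delay. A minimal userspace sketch of the same idea, using a C11 release store in place of the kernel's barrier()/ACCESS_ONCE() pair (struct layout and names hypothetical):

#include <stdatomic.h>
#include <stdint.h>

struct qspinlock_sketch { _Atomic uint8_t locked; };	/* hypothetical layout */

/* A single byte store the compiler may neither delay nor reorder,
 * mirroring barrier(); ACCESS_ONCE(*(u8 *)lock) = 0; barrier(); */
static inline void sketch_spin_unlock(struct qspinlock_sketch *lock)
{
	atomic_store_explicit(&lock->locked, 0, memory_order_release);
}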
2014 Jun 15
0
[PATCH 10/11] qspinlock: Paravirt support
...x86/include/asm/paravirt.h
===================================================================
--- linux-2.6.orig/arch/x86/include/asm/paravirt.h
+++ linux-2.6/arch/x86/include/asm/paravirt.h
@@ -712,6 +712,44 @@ static inline void __set_fixmap(unsigned
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+#ifdef CONFIG_QUEUE_SPINLOCK
+
+static __always_inline void pv_init_node(struct mcs_spinlock *node)
+{
+	PVOP_VCALLEE1(pv_lock_ops.init_node, node);
+}
+
+static __always_inline void pv_link_and_wait_node(u32 old, struct mcs_spinlock *node)
+{
+	PVOP_VCALLEE2(pv_lock_ops.link_and_wait_node, old...
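The PVOP_VCALLEE* macros above compile down to patchable call sites routed through pv_lock_ops. Conceptually that is a table of hooks filled in at boot; a rough userspace analogue without the kernel's call-site patching (all names hypothetical):

struct mcs_node_sketch { int locked; };

/* Hypothetical analogue of pv_lock_ops: hooks a guest kernel can
 * override, defaulting to native behaviour. */
struct pv_lock_ops_sketch {
	void (*init_node)(struct mcs_node_sketch *node);
	void (*kick_node)(struct mcs_node_sketch *node);
};

static void native_init_node(struct mcs_node_sketch *node) { node->locked = 0; }
static void native_kick_node(struct mcs_node_sketch *node) { (void)node; }

static struct pv_lock_ops_sketch pv_lock_ops_sketch = {
	.init_node = native_init_node,
	.kick_node = native_kick_node,
};

/* In the kernel this is an optimizable PVOP_VCALLEE1 call site;
 * here it is a plain indirect call. */
static inline void pv_init_node_sketch(struct mcs_node_sketch *node)
{
	pv_lock_ops_sketch.init_node(node);
}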
2015 Mar 16
0
[PATCH 9/9] qspinlock, x86, kvm: Implement KVM support for paravirt qspinlock
...ng' of SPIN_UNLOCK functions
again.
We further optimize the unlock path by patching the direct call with a
"movb $0,%arg1" if we are indeed using the native unlock code. This
makes the unlock code almost as fast as the !PARAVIRT case.
This significantly lowers the overhead of having
CONFIG_PARAVIRT_SPINLOCKS enabled, even for native code.
Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
---
arch/x86/Kconfig | 2 -
arch/x86/include/asm/paravirt.h | 28 ++++++++++++++++++++-
arch/x86/include/asm/paravirt_types.h | 10 +++++++
arch/x86/include/asm/...
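The optimization described in this commit message replaces the indirect pv call at each unlock site with the single store "movb $0, (%rdi)" whenever the native unlock is in use, so native kernels pay essentially no pvops tax. A sketch of the two shapes a call site can take (the real kernel rewrites the instruction bytes in place; names hypothetical):

#include <stdint.h>

struct qlock_sketch { uint8_t locked; };

/* Hypothetical pv hook; a guest would point this at a
 * hypervisor-assisted unlock instead. */
static void pv_unlock_sketch(struct qlock_sketch *lock)
{
	__atomic_store_n(&lock->locked, 0, __ATOMIC_RELEASE);
}

static void (*pv_queued_spin_unlock_sketch)(struct qlock_sketch *) =
	pv_unlock_sketch;

/* Unpatched shape: every unlock pays an indirect call. */
static inline void unlock_via_pvop(struct qlock_sketch *lock)
{
	pv_queued_spin_unlock_sketch(lock);
}

/* Patched shape on bare metal: the call site becomes the
 * equivalent of "movb $0, (%rdi)". */
static inline void unlock_patched_native(struct qlock_sketch *lock)
{
	*(volatile uint8_t *)&lock->locked = 0;
}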
2017 Sep 05
7
[PATCH 0/4] make virt_spin_lock() a pvops function
With virt_spin_lock() being a pvops function, the bare metal case can be
optimized by patching the call away completely. A kernel running as a
guest can decide whether to use paravirtualized spinlocks, the
current fallback to the unfair test-and-set scheme, or to mimic the
bare metal behavior.
Juergen Gross (4):
paravirt: add generic _paravirt_false() function
paravirt: switch
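The cover letter above describes three possible behaviours for the virt_spin_lock() hook: bare metal (call patched away), paravirtualized spinlocks, or the unfair test-and-set fallback. A hedged sketch of that boot-time decision, with the detection predicates stubbed out (all names hypothetical):

#include <stdbool.h>

/* Hypothetical stand-ins for the kernel's hypervisor detection. */
static bool running_as_guest(void)        { return false; }
static bool hypervisor_has_pv_locks(void) { return false; }

enum lock_flavor { LOCK_NATIVE, LOCK_PV, LOCK_TAS };

/* Decide the slow-path behaviour once at init; the kernel then
 * patches the virt_spin_lock() pvops call site to match. */
static enum lock_flavor pick_lock_flavor(void)
{
	if (!running_as_guest())
		return LOCK_NATIVE;	/* call patched away entirely */
	if (hypervisor_has_pv_locks())
		return LOCK_PV;		/* wait/kick through the hypervisor */
	return LOCK_TAS;		/* unfair test-and-set fallback */
}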
2017 Sep 05
2
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
...have
> + * horrible lock 'holder' preemption issues.
> + */
> +
> +	do {
> +		while (atomic_read(&lock->val) != 0)
> +			cpu_relax();
> +	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);
> +
> + return true;
> +}
> +
> #ifdef CONFIG_PARAVIRT_SPINLOCKS
> extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
> extern void __pv_init_lock_hash(void);
> @@ -38,33 +57,32 @@ static inline bool vcpu_is_preempted(long cpu)
> {
> 	return pv_vcpu_is_preempted(cpu);
> }
> +
> +void native_pv_lock_init(vo...
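The quoted do/while loop is the unfair test-and-set fallback: read until the lock word looks free, then race to claim it with cmpxchg. A self-contained C11 rendering of the same loop (types simplified from the kernel's qspinlock):

#include <stdatomic.h>

#define Q_LOCKED_VAL 1

struct tas_lock { atomic_int val; };

/* Spin while the lock word is nonzero, then try to claim it with a
 * compare-and-swap, exactly as in the quoted loop. */
static void tas_spin_lock(struct tas_lock *lock)
{
	int expected;

	do {
		while (atomic_load(&lock->val) != 0)
			;	/* cpu_relax() in the kernel */
		expected = 0;
	} while (!atomic_compare_exchange_strong(&lock->val, &expected,
						 Q_LOCKED_VAL));
}

static void tas_spin_unlock(struct tas_lock *lock)
{
	atomic_store(&lock->val, 0);
}

The lock is deliberately unfair: whichever waiter wins the cmpxchg race takes it, which sidesteps the lock-holder preemption problem that fair queued locks suffer under virtualization.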
2016 Dec 14
1
[PATCH] arch: x86: kernel: fixed unused label issue
The patch_default label is only used from within
case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock)
and
case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted)
i.e., when #if defined(CONFIG_PARAVIRT_SPINLOCKS) is true.
Therefore no code jumps to this label when CONFIG_PARAVIRT_SPINLOCKS
is not defined, and the label should be removed in that case.
Moving the #endif directive to just after that label fixes the issue.
In addition, there are three errors reported by the checkpatch script
on this file. This commit fixes...
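The fix described here addresses a common pattern: a label defined unconditionally but only jumped to from conditionally compiled code triggers -Wunused-label when the option is off. A minimal illustration of the pattern (not the actual kernel hunk; CONFIG_PARAVIRT_SPINLOCKS_SKETCH stands in for the real option):

int patch_sketch(unsigned int type)
{
	int ret;

	switch (type) {
#ifdef CONFIG_PARAVIRT_SPINLOCKS_SKETCH
	case 1:	/* e.g. PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock) */
	case 2:	/* e.g. PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted)  */
		goto patch_default;
#endif
	default:
#ifdef CONFIG_PARAVIRT_SPINLOCKS_SKETCH
patch_default:	/* compiled out together with its only users */
#endif
		ret = 0;
		break;
	}
	return ret;
}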
2014 Oct 27
5
[PATCH v12 09/11] pvqspinlock, x86: Add para-virtualization support
...rch/x86/kernel/paravirt_patch_64.c
> +++ linux-2.6/arch/x86/kernel/paravirt_patch_64.c
> @@ -22,6 +22,10 @@ DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs")
> DEF_NATIVE(, mov32, "mov %edi, %eax");
> DEF_NATIVE(, mov64, "mov %rdi, %rax");
>
> +#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUE_SPINLOCK)
> +DEF_NATIVE(pv_lock_ops, queue_unlock, "movb $0, (%rdi)");
> +#endif
> +
> unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
> {
> 	return paravirt_patch_insns(insnbuf, len,
> @@ -61,6 +65,9 @@ unsig...
2016 Nov 15
2
[PATCH v7 06/11] x86, paravirt: Add interface to support kvm/xen vcpu preempted check
...e
static inline void queued_spin_unlock(struct qspinlock *lock)
{
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -26,14 +26,6 @@
extern struct static_key paravirt_ticketlocks_enabled;
static __always_inline bool static_key_false(struct static_key *key);
-#ifdef CONFIG_PARAVIRT_SPINLOCKS
-#define vcpu_is_preempted vcpu_is_preempted
-static inline bool vcpu_is_preempted(int cpu)
-{
-	return pv_lock_ops.vcpu_is_preempted(cpu);
-}
-#endif
-
#include <asm/qspinlock.h>
/*
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -415,15 +415,6 @@ void kvm_disable_steal_time(v...
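The interface being moved here, vcpu_is_preempted(), lets lock waiters stop burning cycles once the lock holder's vCPU has been scheduled out by the hypervisor. A sketch of how a spin loop consumes it (the predicate is stubbed; on x86 it is routed through pv_lock_ops to a KVM- or Xen-provided implementation):

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical stand-in for the paravirt hook. */
static bool vcpu_is_preempted_sketch(int cpu)
{
	(void)cpu;
	return false;
}

/* Spin on an owner field, but abandon the optimistic spin as soon
 * as the owner's vCPU is known to be preempted. */
static bool spin_on_owner_sketch(atomic_int *owner_cpu, int owner)
{
	while (atomic_load(owner_cpu) == owner) {
		if (vcpu_is_preempted_sketch(owner))
			return false;	/* stop spinning; queue or sleep */
	}
	return true;
}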
2017 Sep 05
3
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
...;>> +
>>> +	do {
>>> +		while (atomic_read(&lock->val) != 0)
>>> +			cpu_relax();
>>> +	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);
>>> +
>>> + return true;
>>> +}
>>> +
>>> #ifdef CONFIG_PARAVIRT_SPINLOCKS
>>> extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
>>> extern void __pv_init_lock_hash(void);
>>> @@ -38,33 +57,32 @@ static inline bool vcpu_is_preempted(long cpu)
>>> {
>>> 	return pv_vcpu_is_preempted(cpu);
>>...
2014 Jun 28
2
[RFC PATCH v2] Implement Batched (group) ticket lock
...d | TICKET_LOCK_LOCK_INC;
+				if (cmpxchg(&lock->tickets.head, inc.head,
+					    new.head) == inc.head)
+					break;
+			}
+			cpu_relax();
+			inc.head = ACCESS_ONCE(lock->tickets.head);
+		}
+	} else {
+		add_smp(&lock->tickets.head, TICKET_LOCK_UNLOCK_INC);
+	}
+}
+
#else /* !CONFIG_PARAVIRT_SPINLOCKS */
+static inline void __ticket_lock_batch_spin(arch_spinlock_t *lock,
+					    __ticket_t ticket)
+{
+}
+
static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock,
						   __ticket_t ticket)
{
@@ -59,6 +95,10 @@ static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
{
}...
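For reference, the batched scheme above is built on the plain ticket lock, where each locker takes a ticket from the tail and spins until head reaches it; batching relaxes strict FIFO order within a group. A minimal C11 ticket lock for comparison (increments simplified to 1 rather than TICKET_LOCK_INC):

#include <stdatomic.h>

struct ticket_lock_sketch {
	atomic_ushort head;	/* now serving */
	atomic_ushort tail;	/* next free ticket */
};

static void ticket_lock(struct ticket_lock_sketch *lock)
{
	/* Take a ticket, then wait until it is being served. */
	unsigned short me = atomic_fetch_add(&lock->tail, 1);

	while (atomic_load(&lock->head) != me)
		;	/* cpu_relax() in the kernel */
}

static void ticket_unlock(struct ticket_lock_sketch *lock)
{
	/* Analogous to add_smp(&lock->tickets.head, ...). */
	atomic_fetch_add(&lock->head, 1);
}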
2014 Oct 27
2
[PATCH v12 09/11] pvqspinlock, x86: Add para-virtualization support
...smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
@@ -271,19 +272,37 @@ void queue_spin_unlock_slowpath(struct qspinlock *lock)
}
EXPORT_SYMBOL(queue_spin_unlock_slowpath);
-#else
+static void pv_queue_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+
+#else /* CONFIG_PARAVIRT_SPINLOCKS */
+
+static inline void pv_queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+	{ }
-static inline void pv_init_node(struct mcs_spinlock *node) { }
-static inline void pv_wait_check(struct qspinlock *lock,
-				 struct mcs_spinlock *node,
-...
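The diff above also shows the standard kernel idiom for optional hooks: a real declaration under CONFIG_PARAVIRT_SPINLOCKS paired with an empty inline stub in the #else branch, so call sites need no #ifdefs of their own. The shape, reduced to a sketch (CONFIG_PARAVIRT_SPINLOCKS_SKETCH is a stand-in):

struct qsl_sketch { int val; };

#ifdef CONFIG_PARAVIRT_SPINLOCKS_SKETCH
/* Real implementation, only built when the feature is enabled. */
void pv_queue_spin_lock_slowpath_sketch(struct qsl_sketch *lock,
					unsigned int val);
#else
/* Empty stub: the compiler deletes the call entirely in
 * !PARAVIRT builds. */
static inline void pv_queue_spin_lock_slowpath_sketch(struct qsl_sketch *lock,
						      unsigned int val) { }
#endif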