Displaying 14 results from an estimated 87 matches for "config_queue_spinlock".
2015 Mar 16
0
[PATCH 9/9] qspinlock, x86, kvm: Implement KVM support for paravirt qspinlock
...replace the
spinlock implementation with something virtualization-friendly
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -712,6 +712,30 @@ static inline void __set_fixmap(unsigned
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+#ifdef CONFIG_QUEUE_SPINLOCK
+
+static __always_inline void pv_queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+ PVOP_VCALL2(pv_lock_ops.queue_spin_lock_slowpath, lock, val);
+}
+
+static __always_inline void pv_queue_spin_unlock(struct qspinlock *lock)
+{
+ PVOP_VCALLEE1(pv_lock_ops.queue_spin_unlock, lock);
+}
+...
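The two wrappers above route the queued-spinlock slow path and unlock through pv_lock_ops, so a hypervisor backend can override them while bare metal keeps the native code. A minimal user-space sketch of that ops-table indirection (struct and function names here are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdio.h>

struct qspinlock { atomic_uint val; };           /* bit 0 models the locked byte */

/* stand-in for pv_lock_ops: a backend overrides these hooks */
struct lock_ops {
	void (*lock_slowpath)(struct qspinlock *lock, unsigned int val);
	void (*unlock)(struct qspinlock *lock);
};

static void native_lock_slowpath(struct qspinlock *lock, unsigned int val)
{
	unsigned int expected = 0;

	(void)val;
	/* native behaviour: keep spinning until the word goes 0 -> 1 */
	while (!atomic_compare_exchange_weak(&lock->val, &expected, 1))
		expected = 0;
}

static void native_unlock(struct qspinlock *lock)
{
	atomic_store_explicit(&lock->val, 0, memory_order_release);
}

static struct lock_ops ops = { native_lock_slowpath, native_unlock };

static void spin_lock(struct qspinlock *lock)
{
	unsigned int expected = 0;

	/* fast path; contention goes through the pluggable slow path */
	if (!atomic_compare_exchange_strong(&lock->val, &expected, 1))
		ops.lock_slowpath(lock, expected);
}

int main(void)
{
	struct qspinlock lock = { 0 };

	spin_lock(&lock);
	ops.unlock(&lock);
	puts("lock cycled through the ops table");
	return 0;
}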
2014 Apr 02
0
[PATCH v8 10/10] pvqspinlock, x86: Enable qspinlock PV support for XEN
...64..6bbe798 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -17,6 +17,12 @@
#include "xen-ops.h"
#include "debugfs.h"
+static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
+static DEFINE_PER_CPU(char *, irq_name);
+static bool xen_pvspin = true;
+
+#ifndef CONFIG_QUEUE_SPINLOCK
+
enum xen_contention_stat {
TAKEN_SLOW,
TAKEN_SLOW_PICKUP,
@@ -100,12 +106,9 @@ struct xen_lock_waiting {
__ticket_t want;
};
-static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
-static DEFINE_PER_CPU(char *, irq_name);
static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
stati...
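The hunk above hoists the per-CPU lock_kicker_irq and irq_name definitions above the #ifndef CONFIG_QUEUE_SPINLOCK block, so the same kick-IRQ state is shared by whichever slow-path backend is compiled in. A toy illustration of that compile-time selection over shared per-CPU-style state (names and the NR_CPUS array are illustrative):

#include <stdio.h>

#define NR_CPUS 4
/* shared by both backends, like the hoisted per-CPU lock_kicker_irq */
static int lock_kicker_irq[NR_CPUS] = { -1, -1, -1, -1 };

#define CONFIG_QUEUE_SPINLOCK 1          /* set to 0 to build the ticket backend */

#if CONFIG_QUEUE_SPINLOCK
static void slowpath_wait(int cpu)
{
	printf("cpu%d: queued backend halts on irq %d\n", cpu, lock_kicker_irq[cpu]);
}
#else
static void slowpath_wait(int cpu)
{
	printf("cpu%d: ticket backend halts on irq %d\n", cpu, lock_kicker_irq[cpu]);
}
#endif

int main(void)
{
	lock_kicker_irq[0] = 42;         /* stands in for the event channel bound at CPU bring-up */
	slowpath_wait(0);
	return 0;
}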
2015 Jan 20
0
[PATCH v14 11/11] pvqspinlock, x86: Enable PV qspinlock for XEN
...e0..60e444c 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -17,6 +17,12 @@
#include "xen-ops.h"
#include "debugfs.h"
+static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
+static DEFINE_PER_CPU(char *, irq_name);
+static bool xen_pvspin = true;
+
+#ifndef CONFIG_QUEUE_SPINLOCK
+
enum xen_contention_stat {
TAKEN_SLOW,
TAKEN_SLOW_PICKUP,
@@ -100,12 +106,9 @@ struct xen_lock_waiting {
__ticket_t want;
};
-static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
-static DEFINE_PER_CPU(char *, irq_name);
static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
stati...
2015 Apr 07
0
[PATCH v15 12/15] pvqspinlock, x86: Enable PV qspinlock for Xen
...74c..728b45b 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -17,6 +17,55 @@
#include "xen-ops.h"
#include "debugfs.h"
+static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
+static DEFINE_PER_CPU(char *, irq_name);
+static bool xen_pvspin = true;
+
+#ifdef CONFIG_QUEUE_SPINLOCK
+
+#include <asm/qspinlock.h>
+
+static void xen_qlock_kick(int cpu)
+{
+ xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
+}
+
+/*
+ * Halt the current CPU & release it back to the host
+ */
+static void xen_qlock_wait(u8 *byte, u8 val)
+{
+ int irq = __this_cpu_read(lock_kicker_irq);
+
+...
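xen_qlock_kick() sends an IPI to the halted vCPU; the (truncated) xen_qlock_wait() reads the per-CPU kicker IRQ and is expected to halt only while the lock byte still holds the value it saw. Below is a user-space model of that wait/kick handshake using a condition variable in place of the event-channel IRQ; the names and machinery are illustrative, and the elided Xen code is not reproduced:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t halt_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t halt_cv = PTHREAD_COND_INITIALIZER;
static int kicked;                               /* models a pending wake-up IPI */

static void qlock_wait(_Atomic unsigned char *byte, unsigned char val)
{
	pthread_mutex_lock(&halt_mtx);
	/* re-check before halting: if the byte changed, the lock moved on already */
	while (!kicked && atomic_load(byte) == val)
		pthread_cond_wait(&halt_cv, &halt_mtx);
	kicked = 0;
	pthread_mutex_unlock(&halt_mtx);
}

static void qlock_kick(void)
{
	pthread_mutex_lock(&halt_mtx);
	kicked = 1;                              /* stand-in for xen_send_IPI_one() */
	pthread_cond_signal(&halt_cv);
	pthread_mutex_unlock(&halt_mtx);
}

static _Atomic unsigned char lock_byte = 1;      /* "locked", as seen by the waiter */

static void *waiter(void *arg)
{
	(void)arg;
	qlock_wait(&lock_byte, 1);
	puts("waiter resumed");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);
	atomic_store(&lock_byte, 0);             /* release the lock byte ... */
	qlock_kick();                            /* ... then kick the halted waiter */
	pthread_join(t, NULL);
	return 0;
}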
2014 Oct 27
5
[PATCH v12 09/11] pvqspinlock, x86: Add para-virtualization support
...nux-2.6/arch/x86/kernel/paravirt_patch_64.c
> @@ -22,6 +22,10 @@ DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs")
> DEF_NATIVE(, mov32, "mov %edi, %eax");
> DEF_NATIVE(, mov64, "mov %rdi, %rax");
>
> +#if defined(CONFIG_PARAVIRT_SPINLOCKS)&& defined(CONFIG_QUEUE_SPINLOCK)
> +DEF_NATIVE(pv_lock_ops, queue_unlock, "movb $0, (%rdi)");
> +#endif
> +
> unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
> {
> return paravirt_patch_insns(insnbuf, len,
> @@ -61,6 +65,9 @@ unsigned native_patch(u8 type, u16 clobb
>...
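DEF_NATIVE() above lets the paravirt patcher replace the queue_unlock call site with a single "movb $0,(%rdi)" on bare metal (%rdi carries the lock pointer in the x86-64 calling convention). The C equivalent of that instruction is a release store of zero into the locked byte; the field layout below is a sketch of the qspinlock word, not the kernel's definition:

#include <stdatomic.h>
#include <stdio.h>

struct qspinlock {
	_Atomic unsigned char locked;    /* low byte of the 32-bit lock word */
	unsigned char pending;
	unsigned short tail;
};

/* what the patched instruction amounts to: release-store 0 into the locked byte */
static inline void native_queue_unlock(struct qspinlock *lock)
{
	atomic_store_explicit(&lock->locked, 0, memory_order_release);
}

int main(void)
{
	struct qspinlock lock = { .locked = 1 };

	native_queue_unlock(&lock);
	printf("locked byte is now %u\n", (unsigned)atomic_load(&lock.locked));
	return 0;
}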
2014 Jun 15
0
[PATCH 10/11] qspinlock: Paravirt support
...=========================================================
--- linux-2.6.orig/arch/x86/include/asm/paravirt.h
+++ linux-2.6/arch/x86/include/asm/paravirt.h
@@ -712,6 +712,44 @@ static inline void __set_fixmap(unsigned
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+#ifdef CONFIG_QUEUE_SPINLOCK
+
+static __always_inline void pv_init_node(struct mcs_spinlock *node)
+{
+ PVOP_VCALLEE1(pv_lock_ops.init_node, node);
+}
+
+static __always_inline void pv_link_and_wait_node(u32 old, struct mcs_spinlock *node)
+{
+ PVOP_VCALLEE2(pv_lock_ops.link_and_wait_node, old, node);
+}
+
+static __always_in...
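pv_init_node() and pv_link_and_wait_node() hook the MCS-node stages of the queued slow path so a PV backend can halt a waiting vCPU instead of letting it spin. For reference, a minimal user-space MCS queue lock showing the node structure and the two stages those hooks wrap (illustrative only; the kernel's mcs_spinlock and slow path differ in detail):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct mcs_spinlock {
	struct mcs_spinlock *_Atomic next;
	_Atomic bool locked;             /* set by the predecessor on hand-over */
};

static struct mcs_spinlock *_Atomic tail;        /* NULL means the queue is empty */

static void mcs_lock(struct mcs_spinlock *node)
{
	/* the "init node" stage */
	atomic_store(&node->next, NULL);
	atomic_store(&node->locked, false);

	/* the "link and wait" stage: publish ourselves as the new tail */
	struct mcs_spinlock *prev = atomic_exchange(&tail, node);
	if (prev) {
		atomic_store(&prev->next, node);
		while (!atomic_load(&node->locked))
			;                /* a PV backend would halt the vCPU here */
	}
}

static void mcs_unlock(struct mcs_spinlock *node)
{
	struct mcs_spinlock *next = atomic_load(&node->next);

	if (!next) {
		/* no visible successor: try to swing the tail back to empty */
		struct mcs_spinlock *expected = node;

		if (atomic_compare_exchange_strong(&tail, &expected, NULL))
			return;
		while (!(next = atomic_load(&node->next)))
			;                /* successor is mid-enqueue; wait for the link */
	}
	atomic_store(&next->locked, true);
}

int main(void)
{
	struct mcs_spinlock me;

	mcs_lock(&me);
	mcs_unlock(&me);
	return 0;
}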
2014 Feb 26
0
[PATCH RFC v5 8/8] pvqspinlock, x86: Enable KVM to use qspinlock's PV support
...d, 55 insertions(+), 1 deletions(-)
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index f318e78..3ddc436 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -568,6 +568,7 @@ static void kvm_kick_cpu(int cpu)
kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}
+#ifndef CONFIG_QUEUE_SPINLOCK
enum kvm_contention_stat {
TAKEN_SLOW,
TAKEN_SLOW_PICKUP,
@@ -795,6 +796,55 @@ static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
}
}
}
+#else /* !CONFIG_QUEUE_SPINLOCK */
+
+#ifdef CONFIG_KVM_DEBUG_FS
+static struct dentry *d_spin_debug;
+static struct dentry *d_...
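The #else branch (truncated here) adds CONFIG_KVM_DEBUG_FS counters for the PV qspinlock path, mirroring the kvm_contention_stat counters of the ticket code above it. A user-space stand-in for that kind of per-event statistics table; the event names follow the enum in the snippet, while the counter and dump machinery are illustrative:

#include <stdatomic.h>
#include <stdio.h>

enum contention_stat { TAKEN_SLOW, TAKEN_SLOW_PICKUP, NR_CONTENTION_STATS };

static _Atomic unsigned long stats[NR_CONTENTION_STATS];

static void add_stat(enum contention_stat event)
{
	atomic_fetch_add(&stats[event], 1);
}

static void dump_stats(void)     /* the kernel exposes these as debugfs files instead */
{
	printf("taken_slow        %lu\n", atomic_load(&stats[TAKEN_SLOW]));
	printf("taken_slow_pickup %lu\n", atomic_load(&stats[TAKEN_SLOW_PICKUP]));
}

int main(void)
{
	add_stat(TAKEN_SLOW);
	add_stat(TAKEN_SLOW);
	add_stat(TAKEN_SLOW_PICKUP);
	dump_stats();
	return 0;
}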
2014 Feb 26
0
[PATCH v5 2/8] qspinlock, x86: Enable x86-64 to use queue spinlock
...k.h b/arch/x86/include/asm/spinlock.h
index bf156de..6e6de1f 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -43,6 +43,10 @@
extern struct static_key paravirt_ticketlocks_enabled;
static __always_inline bool static_key_false(struct static_key *key);
+#ifdef CONFIG_QUEUE_SPINLOCK
+#include <asm/qspinlock.h>
+#else
+
#ifdef CONFIG_PARAVIRT_SPINLOCKS
static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
@@ -181,6 +185,7 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
{
arch_spin_lock(lock);
}
+#endif /* CONFIG_QUEUE_SPINLOC...
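The hunk above pulls in <asm/qspinlock.h> when CONFIG_QUEUE_SPINLOCK is set and otherwise keeps the existing ticket-lock code under the #else. For contrast with the queued lock sketched earlier, a minimal user-space ticket lock (illustrative, not the kernel's arch_spinlock_t):

#include <stdatomic.h>
#include <sched.h>

struct ticket_lock {
	_Atomic unsigned int next_ticket;        /* ticket dispenser */
	_Atomic unsigned int now_serving;        /* ticket currently allowed in */
};

static void ticket_lock(struct ticket_lock *lock)
{
	unsigned int ticket = atomic_fetch_add(&lock->next_ticket, 1);

	while (atomic_load(&lock->now_serving) != ticket)
		sched_yield();           /* the PV ticket path halts the vCPU here instead */
}

static void ticket_unlock(struct ticket_lock *lock)
{
	atomic_fetch_add(&lock->now_serving, 1);
}

int main(void)
{
	struct ticket_lock lock = { 0, 0 };

	ticket_lock(&lock);
	ticket_unlock(&lock);
	return 0;
}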
2014 Feb 27
0
[PATCH v5 2/8] qspinlock, x86: Enable x86-64 to use queue spinlock
...k.h b/arch/x86/include/asm/spinlock.h
index bf156de..6e6de1f 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -43,6 +43,10 @@
extern struct static_key paravirt_ticketlocks_enabled;
static __always_inline bool static_key_false(struct static_key *key);
+#ifdef CONFIG_QUEUE_SPINLOCK
+#include <asm/qspinlock.h>
+#else
+
#ifdef CONFIG_PARAVIRT_SPINLOCKS
static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
@@ -181,6 +185,7 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
{
arch_spin_lock(lock);
}
+#endif /* CONFIG_QUEUE_SPINLOC...
2014 Mar 19
0
[PATCH v7 02/11] qspinlock, x86: Enable x86-64 to use queue spinlock
...k.h b/arch/x86/include/asm/spinlock.h
index 0f62f54..958d20f 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -42,6 +42,10 @@
extern struct static_key paravirt_ticketlocks_enabled;
static __always_inline bool static_key_false(struct static_key *key);
+#ifdef CONFIG_QUEUE_SPINLOCK
+#include <asm/qspinlock.h>
+#else
+
#ifdef CONFIG_PARAVIRT_SPINLOCKS
static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
@@ -180,6 +184,7 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
{
arch_spin_lock(lock);
}
+#endif /* CONFIG_QUEUE_SPINLOC...
2014 Mar 12
0
[PATCH RFC v6 10/11] pvqspinlock, x86: Enable qspinlock PV support for KVM
...d, 88 insertions(+), 1 deletions(-)
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index f318e78..aaf704e 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -568,6 +568,7 @@ static void kvm_kick_cpu(int cpu)
kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}
+#ifndef CONFIG_QUEUE_SPINLOCK
enum kvm_contention_stat {
TAKEN_SLOW,
TAKEN_SLOW_PICKUP,
@@ -795,6 +796,87 @@ static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
}
}
}
+#else /* !CONFIG_QUEUE_SPINLOCK */
+
+#ifdef CONFIG_KVM_DEBUG_FS
+static struct dentry *d_spin_debug;
+static struct dentry *d_...
2014 Feb 27
1
[PATCH RFC v5 8/8] pvqspinlock, x86: Enable KVM to use qspinlock's PV support
...f --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
> index f318e78..3ddc436 100644
> --- a/arch/x86/kernel/kvm.c
> +++ b/arch/x86/kernel/kvm.c
> @@ -568,6 +568,7 @@ static void kvm_kick_cpu(int cpu)
> kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
> }
>
> +#ifndef CONFIG_QUEUE_SPINLOCK
> enum kvm_contention_stat {
> TAKEN_SLOW,
> TAKEN_SLOW_PICKUP,
> @@ -795,6 +796,55 @@ static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
> }
> }
> }
> +#else /* !CONFIG_QUEUE_SPINLOCK */
> +
> +#ifdef CONFIG_KVM_DEBUG_FS
> +static...
2014 Oct 27
2
[PATCH v12 09/11] pvqspinlock, x86: Add para-virtualization support
....c
>>> @@ -22,6 +22,10 @@ DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs")
>>> DEF_NATIVE(, mov32, "mov %edi, %eax");
>>> DEF_NATIVE(, mov64, "mov %rdi, %rax");
>>>
>>> +#if defined(CONFIG_PARAVIRT_SPINLOCKS)&& defined(CONFIG_QUEUE_SPINLOCK)
>>> +DEF_NATIVE(pv_lock_ops, queue_unlock, "movb $0, (%rdi)");
>>> +#endif
>>> +
>>> unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
>>> {
>>> return paravirt_patch_insns(insnbuf, len,
>>> @@ -61,6...
2014 Mar 19
1
[PATCH v7 02/11] qspinlock, x86: Enable x86-64 to use queue spinlock
...t; index 0f62f54..958d20f 100644
> --- a/arch/x86/include/asm/spinlock.h
> +++ b/arch/x86/include/asm/spinlock.h
> @@ -42,6 +42,10 @@
> extern struct static_key paravirt_ticketlocks_enabled;
> static __always_inline bool static_key_false(struct static_key *key);
>
> +#ifdef CONFIG_QUEUE_SPINLOCK
> +#include <asm/qspinlock.h>
> +#else
> +
> #ifdef CONFIG_PARAVIRT_SPINLOCKS
>
> static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
> @@ -180,6 +184,7 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
> {
> arch_spin_l...