Results matching "xen_pvspin" (from an estimated 71 matches in the archive).
2017 Nov 01
3
[PATCH-tip v2 0/2] x86/paravirt: Enable users to choose PV lock type
v1->v2:
- Make pv_spinlock_type a bit mask for easier checking.
- Add patch 2 to deprecate xen_nopvspin
v1 - https://lkml.org/lkml/2017/11/1/381
Patch 1 adds a new pvlock_type parameter that administrators can use to
specify the type of lock to be used in a paravirtualized kernel.
Patch 2 deprecates Xen's xen_nopvspin parameter as it is no longer
needed.
Waiman Long (2):
x86/paravirt:
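Only the cover letter is quoted here; as a rough illustration of the pvlock_type parameter it describes, a parser over a locktype_* bit mask could look like the sketch below. The handler name setup_pvlock_type and the accepted strings other than "queued" are assumptions; only the bit-mask design and the locktype_auto/locktype_queued/locktype_unfair names come from excerpts elsewhere in this thread.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>

/* Illustrative values only: the cover letter says pv_spinlock_type is a
 * bit mask, but the real encoding is not shown in these excerpts. */
enum pv_locktype {
	locktype_auto   = 0,
	locktype_queued = 0x01,
	locktype_unfair = 0x02,
};

enum pv_locktype pv_spinlock_type = locktype_auto;

static int __init setup_pvlock_type(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!strcmp(arg, "queued"))
		pv_spinlock_type = locktype_queued;
	else if (!strcmp(arg, "unfair"))
		pv_spinlock_type = locktype_unfair;
	else if (!strcmp(arg, "auto"))
		pv_spinlock_type = locktype_auto;
	else
		pr_warn("pvlock_type: unknown value '%s'\n", arg);

	return 0;
}
early_param("pvlock_type", setup_pvlock_type);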
2017 Nov 01
0
[PATCH-tip v2 2/2] x86/xen: Deprecate xen_nopvspin
...with no PV drivers.
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index d5f79ac..19e2e75 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -20,7 +20,6 @@
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(char *, irq_name);
-static bool xen_pvspin = true;
#include <asm/qspinlock.h>
@@ -81,12 +80,8 @@ void xen_init_lock_cpu(int cpu)
int irq;
char *name;
- if (!xen_pvspin ||
- (pv_spinlock_type & (locktype_queued|locktype_unfair))) {
- if ((cpu == 0) && !pv_spinlock_type)
- static_branch_disable(&virt_spi...
2017 Jan 12
1
[PATCH v2] x86, locking/spinlocks: Remove paravirt_ticketlocks_enabled
...bel init code needs to happend _after_ the jump labels are
- * enabled and before SMP is started. Hence we use pre-SMP initcall level
- * init. We cannot do it in xen_init_spinlocks as that is done before
- * jump labels are activated.
- */
-static __init int xen_init_spinlocks_jump(void)
-{
- if (!xen_pvspin)
- return 0;
-
- if (!xen_domain())
- return 0;
-
- static_key_slow_inc(&paravirt_ticketlocks_enabled);
- return 0;
-}
-early_initcall(xen_init_spinlocks_jump);
-
static __init int xen_parse_nopvspin(char *arg)
{
xen_pvspin = false;
--
1.8.3.1
2017 Nov 01
2
[PATCH-tip v2 2/2] x86/xen: Deprecate xen_nopvspin
On 11/01/2017 04:58 PM, Waiman Long wrote:
> +/* TODO: To be removed in a future kernel version */
> static __init int xen_parse_nopvspin(char *arg)
> {
> - xen_pvspin = false;
> + pr_warn("xen_nopvspin is deprecated, replace it with \"pvlock_type=queued\"!\n");
> + if (!pv_spinlock_type)
> + pv_spinlock_type = locktype_queued;
Since we currently end up using unfair locks and because you are
deprecating xen_nopvspin I wonder whether...
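For reference, the two boot-time spellings being weighed in this exchange are:

xen_nopvspin          (existing Xen-specific switch, deprecated by the patch above)
pvlock_type=queued    (the proposed generic replacement named in the pr_warn() message)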
2017 Jan 12
0
[PATCH v2] x86, locking/spinlocks: Remove paravirt_ticketlocks_enabled
...er_ the jump labels are
> - * enabled and before SMP is started. Hence we use pre-SMP initcall level
> - * init. We cannot do it in xen_init_spinlocks as that is done before
> - * jump labels are activated.
> - */
> -static __init int xen_init_spinlocks_jump(void)
> -{
> - if (!xen_pvspin)
> - return 0;
> -
> - if (!xen_domain())
> - return 0;
> -
> - static_key_slow_inc(&paravirt_ticketlocks_enabled);
> - return 0;
> -}
> -early_initcall(xen_init_spinlocks_jump);
> -
> static __init int xen_parse_nopvspin(char *arg)
> {
> xen_pvspin...
2015 Mar 19
0
[Xen-devel] [PATCH 0/9] qspinlock stuff -v15
...74c..b019b2a 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -95,17 +95,43 @@ static inline void spin_time_accum_blocked(u64 start)
}
#endif /* CONFIG_XEN_DEBUG_FS */
+static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
+static DEFINE_PER_CPU(char *, irq_name);
+static bool xen_pvspin = true;
+
+#ifdef CONFIG_QUEUE_SPINLOCK
+
+#include <asm/qspinlock.h>
+
+PV_CALLEE_SAVE_REGS_THUNK(__pv_queue_spin_unlock);
+
+static void xen_qlock_wait(u8 *ptr, u8 val)
+{
+ int irq = __this_cpu_read(lock_kicker_irq);
+
+ xen_clear_irq_pending(irq);
+
+ barrier();
+
+ if (READ_ONCE(*ptr) ==...
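The excerpt is cut off mid-function. A minimal sketch of the wait/kick pair this hunk introduces, following the same pattern, is shown below; the early irq == -1 bail-out and the comments are reconstructions rather than a verbatim copy of the patch, and the helpers used (xen_clear_irq_pending(), xen_poll_irq(), xen_send_IPI_one()) are the event-channel/IPI primitives already used by arch/x86/xen/spinlock.c.

/* Wait: park the vCPU until the lock holder kicks us (or the lock byte
 * changes), instead of burning cycles in the qspinlock slow path. */
static void xen_qlock_wait(u8 *byte, u8 val)
{
	int irq = __this_cpu_read(lock_kicker_irq);

	/* If the per-CPU kicker IRQ is not set up yet, just spin. */
	if (irq == -1)
		return;

	/* Clear any pending kick before re-checking the lock byte. */
	xen_clear_irq_pending(irq);
	barrier();

	/* Only block if the lock byte still holds the expected value. */
	if (READ_ONCE(*byte) != val)
		return;

	/* Halt until the holder sends XEN_SPIN_UNLOCK_VECTOR to this CPU. */
	xen_poll_irq(irq);
}

/* Kick: wake the vCPU that is next in the queue. */
static void xen_qlock_kick(int cpu)
{
	xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
}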
2015 Apr 07
0
[PATCH v15 12/15] pvqspinlock, x86: Enable PV qspinlock for Xen
...x86/xen/spinlock.c
index 956374c..728b45b 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -17,6 +17,55 @@
#include "xen-ops.h"
#include "debugfs.h"
+static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
+static DEFINE_PER_CPU(char *, irq_name);
+static bool xen_pvspin = true;
+
+#ifdef CONFIG_QUEUE_SPINLOCK
+
+#include <asm/qspinlock.h>
+
+static void xen_qlock_kick(int cpu)
+{
+ xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
+}
+
+/*
+ * Halt the current CPU & release it back to the host
+ */
+static void xen_qlock_wait(u8 *byte, u8 val)
+{
+ int irq...
2014 Apr 02
0
[PATCH v8 10/10] pvqspinlock, x86: Enable qspinlock PV support for XEN
...x86/xen/spinlock.c
index 06f4a64..6bbe798 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -17,6 +17,12 @@
#include "xen-ops.h"
#include "debugfs.h"
+static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
+static DEFINE_PER_CPU(char *, irq_name);
+static bool xen_pvspin = true;
+
+#ifndef CONFIG_QUEUE_SPINLOCK
+
enum xen_contention_stat {
TAKEN_SLOW,
TAKEN_SLOW_PICKUP,
@@ -100,12 +106,9 @@ struct xen_lock_waiting {
__ticket_t want;
};
-static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
-static DEFINE_PER_CPU(char *, irq_name);
static DEFINE_PER_CPU(struc...
2015 Jan 20
0
[PATCH v14 11/11] pvqspinlock, x86: Enable PV qspinlock for XEN
...x86/xen/spinlock.c
index d332ae0..60e444c 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -17,6 +17,12 @@
#include "xen-ops.h"
#include "debugfs.h"
+static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
+static DEFINE_PER_CPU(char *, irq_name);
+static bool xen_pvspin = true;
+
+#ifndef CONFIG_QUEUE_SPINLOCK
+
enum xen_contention_stat {
TAKEN_SLOW,
TAKEN_SLOW_PICKUP,
@@ -100,12 +106,9 @@ struct xen_lock_waiting {
__ticket_t want;
};
-static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
-static DEFINE_PER_CPU(char *, irq_name);
static DEFINE_PER_CPU(struc...
2017 Nov 01
2
[PATCH] x86/paravirt: Add kernel parameter to choose paravirt lock type
...o);
EXPORT_SYMBOL (pv_irq_ops);
+EXPORT_SYMBOL (pv_spinlock_type);
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 1e1462d..9fc8eab 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -82,7 +82,7 @@ void xen_init_lock_cpu(int cpu)
char *name;
if (!xen_pvspin) {
- if (cpu == 0)
+ if ((cpu == 0) && (pv_spinlock_type == locktype_auto))
static_branch_disable(&virt_spin_lock_key);
return;
}
@@ -130,8 +130,8 @@ void xen_uninit_lock_cpu(int cpu)
*/
void __init xen_init_spinlocks(void)
{
-
- if (!xen_pvspin) {
+ if (!xen_pvspin || (p...
2017 Sep 06
4
[PATCH v2 0/2] guard virt_spin_lock() with a static key
With virt_spin_lock() being guarded by a static key, the bare-metal case
can be optimized by patching the call away completely. When the kernel is
running as a guest, it can decide whether to use paravirtualized
spinlocks, the current fallback to the unfair test-and-set scheme, or
to mimic the bare-metal behavior.
V2:
- use static key instead of making virt_spin_lock() a pvops function
Juergen Gross
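As a hedged sketch of what this cover letter describes, the guarded fallback can be pictured as below; the shape follows the generic/x86 qspinlock code (struct qspinlock, _Q_LOCKED_VAL), so treat it as illustrative rather than the literal patch contents.

#include <linux/atomic.h>
#include <linux/jump_label.h>
#include <asm/qspinlock.h>	/* struct qspinlock, _Q_LOCKED_VAL */

DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);

static inline bool virt_spin_lock(struct qspinlock *lock)
{
	/* On bare metal the key is disabled and this branch is patched away. */
	if (!static_branch_likely(&virt_spin_lock_key))
		return false;

	/*
	 * Guest without PV spinlock support: fall back to a simple (unfair)
	 * test-and-set lock instead of queueing.
	 */
	do {
		while (atomic_read(&lock->val) != 0)
			cpu_relax();
	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

	return true;
}

With this in place, the Xen side (see the 2/2 excerpt further down) only has to call static_branch_disable(&virt_spin_lock_key) when xen_nopvspin is given, so such a guest falls through to the native queued code instead of the unfair test-and-set fallback.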
2017 Sep 06
0
[PATCH v2 2/2] paravirt,xen: correct xen_nopvspin case
...xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <asm/paravirt.h>
+#include <asm/qspinlock.h>
#include <xen/interface/xen.h>
#include <xen/events.h>
@@ -129,6 +130,7 @@ void __init xen_init_spinlocks(void)
if (!xen_pvspin) {
printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
+ static_branch_disable(&virt_spin_lock_key);
return;
}
printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
--
2.12.3
2017 Nov 02
0
[PATCH-tip v2 2/2] x86/xen: Deprecate xen_nopvspin
On 11/01/2017 06:01 PM, Boris Ostrovsky wrote:
> On 11/01/2017 04:58 PM, Waiman Long wrote:
>> +/* TODO: To be removed in a future kernel version */
>> static __init int xen_parse_nopvspin(char *arg)
>> {
>> - xen_pvspin = false;
>> + pr_warn("xen_nopvspin is deprecated, replace it with \"pvlock_type=queued\"!\n");
>> + if (!pv_spinlock_type)
>> + pv_spinlock_type = locktype_queued;
> Since we currently end up using unfair locks and because you are
> deprecating xen_nopv...