Displaying 20 results from an estimated 75 matches for "xen_lock_spinning".
2015 Feb 16
1
[Xen-devel] [PATCH V5] x86 spinlock: Fix memory corruption on completing completions
...void check_zero(void)
> {
> u8 ret;
> - u8 old = ACCESS_ONCE(zero_stats);
> + u8 old = READ_ONCE(zero_stats);
> if (unlikely(old)) {
> ret = cmpxchg(&zero_stats, old, 0);
> /* This ensures only one fellow resets the stat */
> @@ -112,6 +112,7 @@ __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
> struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting);
> int cpu = smp_processor_id();
> u64 start;
> + __ticket_t head;
> unsigned long flags;
>
> /* If kicker interrupts not initialized yet, just spin */
> @...
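The hunk only shows context around the fix, but the check_zero() pattern it touches is worth spelling out: read the reset flag once, then let at most one CPU win the cmpxchg that clears the statistics. Below is a minimal userspace sketch of that idea, using C11 atomics in place of the kernel's READ_ONCE()/cmpxchg(); the flag and stats names are illustrative, not the kernel's.

#include <stdatomic.h>
#include <string.h>

/* Illustrative stand-ins for the kernel's zero_stats flag and stats struct. */
struct spin_stats { unsigned long taken_slow, released_slow; };
static struct spin_stats spinlock_stats;
static _Atomic unsigned char zero_stats;   /* "please reset the stats" flag */

static void check_zero(void)
{
	/* Read the flag once into a local (the kernel uses READ_ONCE() here). */
	unsigned char old = atomic_load_explicit(&zero_stats, memory_order_relaxed);

	if (old) {
		/* Only the caller whose compare-and-swap wins clears the flag,
		 * so exactly one caller resets the statistics. */
		if (atomic_compare_exchange_strong(&zero_stats, &old, 0))
			memset(&spinlock_stats, 0, sizeof(spinlock_stats));
	}
}

The compare-exchange is what guarantees that two CPUs observing a non-zero flag at the same time do not both proceed as if they had performed the reset.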
2015 Mar 19
0
[Xen-devel] [PATCH 0/9] qspinlock stuff -v15
...ting {
struct arch_spinlock *lock;
__ticket_t want;
};
-static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
-static DEFINE_PER_CPU(char *, irq_name);
static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
static cpumask_t waiting_cpus;
-static bool xen_pvspin = true;
__visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
int irq = __this_cpu_read(lock_kicker_irq);
@@ -217,6 +243,7 @@ static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
}
}
}
+#endif /* !QUEUE_SPINLOCK */
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
@@ -280,...
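These hunks fence the ticket-lock helpers off once queued spinlocks take over, but the two functions they name are really one handshake: xen_lock_spinning() publishes which lock and ticket the CPU is waiting for in a per-CPU slot and then halts, and xen_unlock_kick() scans those slots and wakes exactly the CPU whose ticket just came up. A rough userspace analogue is sketched below; pthreads, a fixed slot array, and a condition variable stand in for per-CPU data and the event-channel IRQ, and every name in it is illustrative rather than the kernel's.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define MAX_WAITERS 64

struct lock_waiting {
	pthread_mutex_t mtx;
	pthread_cond_t  cv;
	void           *lock;    /* which lock this thread is blocked on */
	uint16_t        want;    /* which ticket it is waiting to own    */
	bool            kicked;
};

/* GNU-style range initializer, much as kernel code sets up per-CPU data. */
static struct lock_waiting waiting[MAX_WAITERS] = {
	[0 ... MAX_WAITERS - 1] = {
		.mtx = PTHREAD_MUTEX_INITIALIZER,
		.cv  = PTHREAD_COND_INITIALIZER,
	},
};

/* Called by thread `me` instead of burning cycles on a contended ticket. */
static void lock_spinning(int me, void *lock, uint16_t want)
{
	struct lock_waiting *w = &waiting[me];

	pthread_mutex_lock(&w->mtx);
	w->lock   = lock;        /* publish what we are waiting for */
	w->want   = want;
	w->kicked = false;
	while (!w->kicked)       /* the real code blocks in xen_poll_irq() here */
		pthread_cond_wait(&w->cv, &w->mtx);
	w->lock = NULL;          /* slot is free again */
	pthread_mutex_unlock(&w->mtx);
}

/* Called by the unlocker after handing the lock to ticket `next`. */
static void unlock_kick(void *lock, uint16_t next)
{
	for (int cpu = 0; cpu < MAX_WAITERS; cpu++) {
		struct lock_waiting *w = &waiting[cpu];

		pthread_mutex_lock(&w->mtx);
		if (w->lock == lock && w->want == next) {
			w->kicked = true;            /* the "IPI" */
			pthread_cond_signal(&w->cv);
			pthread_mutex_unlock(&w->mtx);
			return;
		}
		pthread_mutex_unlock(&w->mtx);
	}
}

The point of the design is that the unlocker never touches a sleeping waiter's lock word; it only matches on the (lock, want) pair the waiter published, and the waiter clears its own slot once it has been kicked.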
2015 Apr 07
0
[PATCH v15 12/15] pvqspinlock, x86: Enable PV qspinlock for Xen
...+149,9 @@ struct xen_lock_waiting {
__ticket_t want;
};
-static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
-static DEFINE_PER_CPU(char *, irq_name);
static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
static cpumask_t waiting_cpus;
-static bool xen_pvspin = true;
__visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
int irq = __this_cpu_read(lock_kicker_irq);
@@ -217,6 +263,7 @@ static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
}
}
}
+#endif /* CONFIG_QUEUE_SPINLOCK */
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
@@...
2012 Mar 21
15
[PATCH RFC V6 0/11] Paravirtualized ticketlocks
From: Jeremy Fitzhardinge <jeremy.fitzhardinge at citrix.com>
Changes since last posting: (Raghavendra K T)
[
- Rebased to linux-3.3-rc6.
- used function+enum in place of macro (better type checking)
- use cmpxchg while resetting zero status, to close a possible race
[suggested by Dave Hansen for the KVM patches]
]
This series replaces the existing paravirtualized spinlock mechanism
with a
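The "function+enum in place of macro" item refers to the debugfs statistics helpers: an enum-indexed inline function gets ordinary type checking, which a token-pasting macro cannot give. A hedged sketch of that shape follows; the counter names and the helper are illustrative, written to match the changelog's intent rather than copied from the patch.

/* Illustrative: an enum-indexed helper instead of a token-pasting macro,
 * so the compiler checks that only known statistics are updated. */
enum contention_stat {
	TAKEN_SLOW,
	TAKEN_SLOW_PICKUP,
	RELEASED_SLOW,
	RELEASED_SLOW_KICKED,
	NR_CONTENTION_STATS,
};

static unsigned int contention_stats[NR_CONTENTION_STATS];

static inline void add_stats(enum contention_stat var, unsigned int val)
{
	contention_stats[var] += val;
}

/* add_stats(TAKEN_SLOW, 1) is an ordinary, type-checked call; a macro that
 * pastes the statistic's name together only fails from inside its expansion. */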
2014 Apr 02
0
[PATCH v8 10/10] pvqspinlock, x86: Enable qspinlock PV support for XEN
...+106,9 @@ struct xen_lock_waiting {
__ticket_t want;
};
-static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
-static DEFINE_PER_CPU(char *, irq_name);
static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
static cpumask_t waiting_cpus;
-static bool xen_pvspin = true;
__visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
int irq = __this_cpu_read(lock_kicker_irq);
@@ -213,6 +216,94 @@ static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
}
}
+#else /* CONFIG_QUEUE_SPINLOCK */
+
+#ifdef CONFIG_XEN_DEBUG_FS
+static u32 kick_stats; /* CPU kick...
2012 Apr 19
13
[PATCH RFC V7 0/12] Paravirtualized ticketlocks
From: Jeremy Fitzhardinge <jeremy.fitzhardinge at citrix.com>
This series replaces the existing paravirtualized spinlock mechanism
with a paravirtualized ticketlock mechanism. (targeted for 3.5 window)
Changes in V7:
- Rebased patches to 3.4-rc3
- Added jumplabel split patch (originally from Andrew Jones, rebased to
3.4-rc3)
- jumplabel changes from Ingo and Jason taken and now using
2015 Jan 20
0
[PATCH v14 11/11] pvqspinlock, x86: Enable PV qspinlock for XEN
...+106,9 @@ struct xen_lock_waiting {
__ticket_t want;
};
-static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
-static DEFINE_PER_CPU(char *, irq_name);
static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
static cpumask_t waiting_cpus;
-static bool xen_pvspin = true;
__visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
int irq = __this_cpu_read(lock_kicker_irq);
@@ -213,6 +216,118 @@ static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
}
}
+#else /* CONFIG_QUEUE_SPINLOCK */
+
+#ifdef CONFIG_XEN_DEBUG_FS
+static u32 kick_nohlt_stats; /* Ki...
2013 Aug 06
16
[PATCH V12 0/14] Paravirtualized ticket spinlocks
This series replaces the existing paravirtualized spinlock mechanism
with a paravirtualized ticketlock mechanism. The series provides
implementation for both Xen and KVM.
The current set of patches is for the Xen/x86 spinlock/KVM guest side, to be included
against -tip.
I'll be sending a separate patchset for the KVM host based on the kvm tree.
Please note I have added the below performance result
2015 Feb 15
7
[PATCH V5] x86 spinlock: Fix memory corruption on completing completions
...@@ static u8 zero_stats;
static inline void check_zero(void)
{
u8 ret;
- u8 old = ACCESS_ONCE(zero_stats);
+ u8 old = READ_ONCE(zero_stats);
if (unlikely(old)) {
ret = cmpxchg(&zero_stats, old, 0);
/* This ensures only one fellow resets the stat */
@@ -112,6 +112,7 @@ __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting);
int cpu = smp_processor_id();
u64 start;
+ __ticket_t head;
unsigned long flags;
/* If kicker interrupts not initialized yet, just spin */
@@ -163,7 +164,8 @@ __visible void xen_lo...
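The new __ticket_t head local in this hunk carries the substance of the fix: the waiter snapshots the head ticket once and makes its later decisions against that snapshot, instead of re-reading a lock word that may already belong to memory the new owner has freed (the "completing completions" of the subject line). The same rule, reduced to a generic userspace sketch with illustrative names:

/* The rule behind the fix: copy everything you still need out of a shared
 * object *before* the store that allows another thread to free it. */
#include <stdatomic.h>
#include <stdlib.h>

struct completion_node {
	atomic_int done;        /* waiter frees the node once this becomes 1 */
	int        waiter_id;   /* filled in by the waiter beforehand        */
};

/* Completer side. */
static int complete(struct completion_node *node)
{
	/* Snapshot before the release store: once done is set, the waiter
	 * may wake, free the node, and the memory may be reused. */
	int who = node->waiter_id;

	atomic_store_explicit(&node->done, 1, memory_order_release);
	/* Touching *node past this point is the use-after-free the patch
	 * closes in the ticket-lock slow path. */
	return who;
}

/* Waiter side: owns the (heap-allocated) node once done is observed. */
static void wait_then_free(struct completion_node *node)
{
	while (!atomic_load_explicit(&node->done, memory_order_acquire))
		;       /* spin, or sleep, until completed */
	free(node);
}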
2010 Nov 03
25
[PATCH 00/20] x86: ticket lock rewrite and paravirtualization
From: Jeremy Fitzhardinge <jeremy.fitzhardinge at citrix.com>
Hi all,
This series does two major things:
1. It converts the bulk of the implementation to C, and makes the
"small ticket" and "large ticket" code common. Only the actual
size-dependent asm instructions are specific to the ticket size.
The resulting generated asm is very similar to the current
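Since the cover letter describes the rewrite only in words, here is a hedged sketch of what the ticket algorithm looks like once it lives in C; C11 atomics stand in for the kernel's size-dependent asm helpers, and the width of the head/tail fields is where the "small ticket" versus "large ticket" distinction the series unifies would show up. None of this is the series' code.

#include <stdatomic.h>
#include <stdint.h>

/* A minimal ticket lock in plain C11. */
typedef struct {
	_Atomic uint16_t head;  /* ticket currently being served */
	_Atomic uint16_t tail;  /* next ticket to hand out       */
} ticket_lock_t;

static inline void ticket_lock(ticket_lock_t *lock)
{
	/* Take a ticket; this fetch_add is the only step that must be atomic. */
	uint16_t me = atomic_fetch_add_explicit(&lock->tail, 1,
						memory_order_relaxed);

	/* Wait until the ticket being served reaches ours. */
	while (atomic_load_explicit(&lock->head, memory_order_acquire) != me)
		;       /* PV variants replace this spin with lock_spinning() */
}

static inline void ticket_unlock(ticket_lock_t *lock)
{
	/* Only the owner writes head, so a plain increment is safe. */
	uint16_t next = atomic_load_explicit(&lock->head,
					     memory_order_relaxed) + 1;
	atomic_store_explicit(&lock->head, next, memory_order_release);
	/* PV variants follow this with unlock_kick() when a waiter slept. */
}

The paravirtualized variants in the rest of the series hook roughly the two marked spots: the spin loop drops into a lock_spinning()-style slow path after a threshold, and the head update is followed by an unlock kick.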