Displaying 20 results from an estimated 155 matches for "spin_threshold".
2016 Jul 11
2
[PATCH v2 0/4] implement vcpu preempted check
...sq_lock, rwsem_spin_on_owner and mutex_spin_on_owner.
>> These spin_on_owner variants also cause RCU stalls before we apply this patch set
>>
> Paolo, could you help out with an (x86) KVM interface for this?
>
> Waiman, could you see if you can utilize this to get rid of the
> SPIN_THRESHOLD in qspinlock_paravirt?
That API is certainly useful for making the paravirt spinlock perform
better. However, I am not sure we can completely get rid of the
SPIN_THRESHOLD at this point. It is not just KVM; the Xen code needs
to be modified as well.
Cheers,
Longman
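A minimal sketch of the idea under discussion, assuming the vcpu_is_preempted() interface this series proposes; the function and the holder_cpu parameter below are illustrative names, not the actual qspinlock_paravirt.h code:

/*
 * Illustrative only: spin up to SPIN_THRESHOLD times, but give up early
 * and halt when the lock holder's vCPU is known to be preempted, since
 * further spinning then only burns cycles.
 */
static void pv_spin_or_halt(struct __qspinlock *l, int holder_cpu)
{
	int loop;

	for (loop = SPIN_THRESHOLD; loop; loop--) {
		if (!READ_ONCE(l->locked))
			return;				/* lock was released while spinning */
		if (vcpu_is_preempted(holder_cpu))
			break;				/* holder not running, spinning is wasted */
		cpu_relax();
	}
	pv_wait(&l->locked, _Q_LOCKED_VAL);		/* halt until kicked by the unlocker */
}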
2016 Jul 12
0
[PATCH v2 0/4] implement vcpu preempted check
...x_spin_on_owner.
>>> These spin_on_owner variants also cause RCU stalls before we apply this
>>> patch set
>>>
>> Paolo, could you help out with an (x86) KVM interface for this?
>>
>> Waiman, could you see if you can utilize this to get rid of the
>> SPIN_THRESHOLD in qspinlock_paravirt?
>
> That API is certainly useful for making the paravirt spinlock perform
> better. However, I am not sure we can completely get rid of the
> SPIN_THRESHOLD at this point. It is not just KVM; the Xen code needs
> to be modified as well.
This should be rath...
2015 Apr 24
0
[PATCH v16 13/14] pvqspinlock: Improve slowpath performance by avoiding cmpxchg
...x 9b4ac3d..41ee033 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -19,7 +19,8 @@
* native_queue_spin_unlock().
*/
-#define _Q_SLOW_VAL (3U << _Q_LOCKED_OFFSET)
+#define _Q_SLOW_VAL (3U << _Q_LOCKED_OFFSET)
+#define MAYHALT_THRESHOLD (SPIN_THRESHOLD >> 4)
/*
* The vcpu_hashed is a special state that is set by the new lock holder on
@@ -39,6 +40,7 @@ struct pv_node {
int cpu;
u8 state;
+ u8 mayhalt;
};
/*
@@ -198,6 +200,7 @@ static void pv_init_node(struct mcs_spinlock *node)
pn->cpu = smp_processor_id();
pn-...
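Only the new MAYHALT_THRESHOLD define and the mayhalt field survive the truncation above. A plausible reading, sketched below with hypothetical placement inside the pv_wait_node() spin loop (the real v16 hunk that uses mayhalt is elided), is that a waiter advertises it is about to halt once its remaining spin budget drops to MAYHALT_THRESHOLD, so the unlock side knows a slow-path wakeup may be needed:

	/* Sketch only, assuming the pv_wait_node() context quoted elsewhere. */
	for (loop = SPIN_THRESHOLD; loop; loop--) {
		if (READ_ONCE(node->locked))
			return;
		if (loop == MAYHALT_THRESHOLD)
			WRITE_ONCE(pn->mayhalt, true);	/* this vCPU may halt soon */
		cpu_relax();
	}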
2013 Jun 01
11
[PATCH RFC V9 0/19] Paravirtualized ticket spinlocks
This series replaces the existing paravirtualized spinlock mechanism
with a paravirtualized ticketlock mechanism. The series provides
implementation for both Xen and KVM.
Changes in V9:
- Changed spin_threshold to 32k to avoid excess halt exits that are
causing undercommit degradation (after PLE handler improvement).
- Added kvm_irq_delivery_to_apic (suggested by Gleb)
- Optimized halt exit path to use PLE handler
V8 of PVspinlock was posted last year. After Avi's suggestions to look
at PLE handl...
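The 32k figure in this changelog is the usual power-of-two spelling of the spin budget; the powerpc result further down in this listing defines the same constant explicitly, though whether the V9 ticketlock series used this exact macro is not shown here:

#define SPIN_THRESHOLD	(1 << 15)	/* 32768 spins before halting the vCPU */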
2016 Jun 28
11
[PATCH v2 0/4] implement vcpu preempted check
change from v1:
a simpler definition of default vcpu_is_preempted
skip machine type check on ppc, and add config. remove dedicated macro.
add one patch to drop overload of rwsem_spin_on_owner and mutex_spin_on_owner.
add more comments
thanks to Boqun's and Peter's suggestions.
This patch set aims to fix lock holder preemption issues.
test-case:
perf record -a perf bench sched messaging -g
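The "simpler definition of default vcpu_is_preempted" is not quoted in this excerpt; a minimal sketch of what such a fallback typically looks like (the false default below is an assumption, not the quoted patch):

/* Architectures without a preemption hint simply report "not preempted". */
#ifndef vcpu_is_preempted
#define vcpu_is_preempted(cpu)	false
#endif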
2015 Apr 13
1
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...>>+{
> >>+ struct __qspinlock *l = (void *)lock;
> >>+ struct qspinlock **lp = NULL;
> >>+ struct pv_node *pn = (struct pv_node *)node;
> >>+ int slow_set = false;
> >>+ int loop;
> >>+
> >>+ for (;;) {
> >>+ for (loop = SPIN_THRESHOLD; loop; loop--) {
> >>+ if (!READ_ONCE(l->locked))
> >>+ return;
> >>+
> >>+ cpu_relax();
> >>+ }
> >>+
> >>+ WRITE_ONCE(pn->state, vcpu_halted);
> >>+ if (!lp)
> >>+ lp = pv_hash(lock, pn);
> >&...
2015 Apr 24
0
[PATCH v16 08/14] pvqspinlock: Implement simple paravirt support for the qspinlock
...u_running;
+}
+
+/*
+ * Wait for node->locked to become true, halt the vcpu after a short spin.
+ * pv_kick_node() is used to wake the vcpu again.
+ */
+static void pv_wait_node(struct mcs_spinlock *node)
+{
+ struct pv_node *pn = (struct pv_node *)node;
+ int loop;
+
+ for (;;) {
+ for (loop = SPIN_THRESHOLD; loop; loop--) {
+ if (READ_ONCE(node->locked))
+ return;
+ cpu_relax();
+ }
+
+ /*
+ * Order pn->state vs pn->locked thusly:
+ *
+ * [S] pn->state = vcpu_halted [S] next->locked = 1
+ * MB MB
+ * [L] pn->locked [RmW] pn->state = vcpu_running
+...
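The quote stops right after the ordering comment; its point is that the store of vcpu_halted and the re-check of ->locked must not be reordered, or the waiter could halt after missing the wake-up. A sketch of the step the elided lines presumably perform (the smp_store_mb()/pv_wait() pairing is an assumption here, inferred from the comment):

		/* Publish vcpu_halted with a full barrier, then re-check the
		 * lock word before actually sleeping, so a concurrent release
		 * cannot slip in between the check and the halt. */
		smp_store_mb(pn->state, vcpu_halted);
		if (!READ_ONCE(node->locked))
			pv_wait(&pn->state, vcpu_halted);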
2013 Aug 09
1
[PATCH V13 00/14] Paravirtualized ticket spinlocks
...- Added break in patch 5 since now we know exact cpu to wakeup
- Dropped patch 12 and Konrad needs to revert two patches to enable xen on hvm
70dd4998, f10cd522c
- Removed TIMEOUT and corrected spacing in patch 15
- Fixed spelling and corrected spacing in patches 17, 18
Changes in V9:
- Changed spin_threshold to 32k to avoid excess halt exits that are
causing undercommit degradation (after PLE handler improvement).
- Added kvm_irq_delivery_to_apic (suggested by Gleb)
- Optimized halt exit path to use PLE handler
V8 of PVspinlock was posted last year. After Avi's suggestions to look
at PLE...
2016 Jun 03
2
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...t; new file mode 100644
> index 0000000..fc83cd2
> --- /dev/null
> +++ b/arch/powerpc/include/asm/qspinlock.h
> @@ -0,0 +1,26 @@
> +#ifndef _ASM_POWERPC_QSPINLOCK_H
> +#define _ASM_POWERPC_QSPINLOCK_H
> +
> +#include <asm-generic/qspinlock_types.h>
> +
> +#define SPIN_THRESHOLD (1 << 15)
> +#define queued_spin_unlock queued_spin_unlock
> +
> +static inline void native_queued_spin_unlock(struct qspinlock *lock)
> +{
> + u8 *locked = (u8 *)lock;
> +#ifdef __BIG_ENDIAN
> + locked += 3;
> +#endif
> + /* no load/store can be across the unlock()...
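The locked += 3 adjustment reflects that the locked flag is the least significant byte of the 32-bit lock word, which on a big-endian layout sits at byte offset 3 rather than 0. A small illustrative helper (not from the patch) showing the same byte selection:

static inline u8 *locked_byte(struct qspinlock *lock)
{
	u8 *p = (u8 *)lock;
#ifdef __BIG_ENDIAN
	p += 3;		/* LSB of the 32-bit word sits at the highest address */
#endif
	return p;
}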
2015 Mar 19
0
[PATCH 8/9] qspinlock: Generic paravirt support
...truct mcs_spinlock *node)
+static void pv_wait_node(u32 old, struct mcs_spinlock *node)
{
+ struct pv_node *ppn = pv_decode_tail(old);
struct pv_node *pn = (struct pv_node *)node;
int loop;
+ pv_propagate_head(pn, ppn);
+
for (;;) {
for (loop = SPIN_THRESHOLD; loop; loop--) {
if (READ_ONCE(node->locked))
@@ -107,13 +145,33 @@ static void pv_kick_node(struct mcs_spin
pv_kick(pn->cpu);
}
-static DEFINE_PER_CPU(struct qspinlock *, __pv_lock_wait);
+static void pv_set_head(struct qspinlock *lock, struct pv_no...
2015 Mar 16
0
[PATCH 8/9] qspinlock: Generic paravirt support
...u_running;
+}
+
+/*
+ * Wait for node->locked to become true, halt the vcpu after a short spin.
+ * pv_kick_node() is used to wake the vcpu again.
+ */
+static void pv_wait_node(struct mcs_spinlock *node)
+{
+ struct pv_node *pn = (struct pv_node *)node;
+ int loop;
+
+ for (;;) {
+ for (loop = SPIN_THRESHOLD; loop; loop--) {
+ if (READ_ONCE(node->locked))
+ goto done;
+
+ cpu_relax();
+ }
+
+ /*
+ * Order pn->state vs pn->locked thusly:
+ *
+ * [S] pn->state = vcpu_halted [S] next->locked = 1
+ * MB MB
+ * [L] pn->locked [RmW] pn->state = vcpu_runn...