Displaying 20 results from an estimated 37 matches for "__native_queued_spin_unlock".
2016 Nov 15
2
[PATCH v7 06/11] x86, paravirt: Add interface to support kvm/xen vcpu preempted check
...E(__kvm_vcpu_is_preempted);
+ }
}
static __init int kvm_spinlock_init_jump(void)
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -12,7 +12,6 @@ __visible void __native_queued_spin_unlo
{
native_queued_spin_unlock(lock);
}
-
PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
bool pv_is_native_spin_unlock(void)
@@ -21,9 +20,16 @@ bool pv_is_native_spin_unlock(void)
__raw_callee_save___native_queued_spin_unlock;
}
-static bool native_vcpu_is_preempted(int cpu)
+__visible bool __native_vcpu_is_preempted(int cpu)
{
- return 0;
+ return false;
+}
+PV_CALLEE_SAVE...
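For readers skimming the excerpt: the interface this patch adds follows the usual paravirt pattern, a table of lock callbacks whose entries default to a native implementation (here __native_vcpu_is_preempted(), which simply returns false) and are replaced when the kernel detects it is running as a guest. Below is a minimal user-space sketch of that dispatch idea; the names lock_ops and kvm_vcpu_is_preempted are illustrative stand-ins, not the kernel's real pv_lock_ops machinery.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for the kernel's pv_lock_ops table. */
struct lock_ops {
	bool (*vcpu_is_preempted)(int cpu);
};

/* Native default: on bare metal a CPU is never preempted by a host. */
static bool native_vcpu_is_preempted(int cpu)
{
	(void)cpu;
	return false;
}

/* Illustrative guest-side override, roughly what a KVM guest installs. */
static bool kvm_vcpu_is_preempted(int cpu)
{
	/* A real guest would read per-vCPU state shared with the host. */
	return cpu == 1;	/* pretend vCPU 1 is preempted right now */
}

static struct lock_ops lock_ops = {
	.vcpu_is_preempted = native_vcpu_is_preempted,
};

int main(void)
{
	printf("native: cpu1 preempted? %d\n", lock_ops.vcpu_is_preempted(1));

	/* Boot-time switch once a hypervisor is detected. */
	lock_ops.vcpu_is_preempted = kvm_vcpu_is_preempted;
	printf("guest:  cpu1 preempted? %d\n", lock_ops.vcpu_is_preempted(1));
	return 0;
}

Lock waiters can consult this hook to stop spinning on a lock whose holder's vCPU is not currently running, which is the point of the series.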
2016 May 17
0
[PATCH v2 4/6] pv-qspinlock: powerpc support pv-qspinlock
...ram is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/spinlock.h>
+
+static void __native_queued_spin_unlock(struct qspinlock *lock)
+{
+ native_queued_spin_unlock(lock);
+}
+
+static void __pv_wait(u8 *ptr, u8 val, int cpu)
+{
+ HMT_low();
+ __spin_yield_cpu(cpu);
+ HMT_medium();
+}
+
+static void __pv_kick(int cpu)
+{
+ __spin_wake_cpu(cpu);
+}
+
+struct pv_lock_ops pv_lock_op = {
+ .lock = native_queue...
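The __pv_wait()/__pv_kick() pair above is the waiting side of pv-qspinlock: as the v3 changelog later in these results notes, pv_wait(ptr, val) is supposed to block only while *ptr still equals val, and pv_kick(cpu) wakes the waiter back up. The powerpc code does this by yielding the vCPU (__spin_yield_cpu/__spin_wake_cpu). A loosely analogous, purely illustrative user-space sketch of the same wait-if-still-equal/wake contract, using the Linux futex syscall rather than anything from the patch:

#include <linux/futex.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static uint32_t word;	/* the shared word a waiter watches */

/* Block only while *ptr == val, like pv_wait(); may wake spuriously. */
static void wait_on(uint32_t *ptr, uint32_t val)
{
	syscall(SYS_futex, ptr, FUTEX_WAIT, val, NULL, NULL, 0);
}

/* Wake one waiter, like pv_kick(). */
static void kick(uint32_t *ptr)
{
	syscall(SYS_futex, ptr, FUTEX_WAKE, 1, NULL, NULL, 0);
}

int main(void)
{
	word = 1;
	kick(&word);		/* nobody is waiting; returns immediately */
	wait_on(&word, 0);	/* *ptr != val, so this returns without blocking */
	printf("no block: word is %u, not 0\n", word);
	return 0;
}

The analogy only covers the contract; in the guest the "sleep" is a hypervisor yield, not a kernel futex.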
2016 Nov 16
0
[PATCH v7 06/11] x86, paravirt: Add interface to support kvm/xen vcpu preempted check
...atic __init int kvm_spinlock_init_jump(void)
> --- a/arch/x86/kernel/paravirt-spinlocks.c
> +++ b/arch/x86/kernel/paravirt-spinlocks.c
> @@ -12,7 +12,6 @@ __visible void __native_queued_spin_unlo
> {
> native_queued_spin_unlock(lock);
> }
> -
> PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
>
> bool pv_is_native_spin_unlock(void)
> @@ -21,9 +20,16 @@ bool pv_is_native_spin_unlock(void)
> __raw_callee_save___native_queued_spin_unlock;
> }
>
> -static bool native_vcpu_is_preempted(int cpu)
> +__visible bool __native_vcpu_is_preempted(int cpu)
> {
>...
2016 Apr 28
0
[PATCH] powerpc: enable qspinlock and its virtualization support
...ram is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/spinlock.h>
+
+static void __native_queued_spin_unlock(struct qspinlock *lock)
+{
+ native_queued_spin_unlock(lock);
+}
+
+static void __native_wait(u8 *ptr, u8 val, int cpu)
+{
+}
+
+static void __native_kick(int cpu)
+{
+}
+
+static void __pv_wait(u8 *ptr, u8 val, int cpu)
+{
+ HMT_low();
+ __spin_yield_cpu(cpu);
+ HMT_medium();
+}
+
+static void __p...
2016 Nov 02
0
[PATCH v7 06/11] x86, paravirt: Add interface to support kvm/xen vcpu preempted check
...nlock.h>
/*
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 2c55a00..2f204dd 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -21,12 +21,18 @@ bool pv_is_native_spin_unlock(void)
__raw_callee_save___native_queued_spin_unlock;
}
+static bool native_vcpu_is_preempted(int cpu)
+{
+ return 0;
+}
+
struct pv_lock_ops pv_lock_ops = {
#ifdef CONFIG_SMP
.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
.wait = paravirt_nop,
.kick = par...
2016 Apr 28
2
[PATCH resend] powerpc: enable qspinlock and its virtualization support
...ram is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/spinlock.h>
+
+static void __native_queued_spin_unlock(struct qspinlock *lock)
+{
+ native_queued_spin_unlock(lock);
+}
+
+static void __native_wait(u8 *ptr, u8 val, int cpu)
+{
+}
+
+static void __native_kick(int cpu)
+{
+}
+
+static void __pv_wait(u8 *ptr, u8 val, int cpu)
+{
+ HMT_low();
+ __spin_yield_cpu(cpu);
+ HMT_medium();
+}
+
+static void __p...
2017 Feb 10
3
[PATCH v2] x86/paravirt: Don't make vcpu_is_preempted() a callee-save function
...- return pv_lock_ops.vcpu_is_preempted.func ==
- __raw_callee_save___native_vcpu_is_preempted;
+ return pv_lock_ops.vcpu_is_preempted == __native_vcpu_is_preempted;
}
struct pv_lock_ops pv_lock_ops = {
@@ -38,7 +36,7 @@ struct pv_lock_ops pv_lock_ops = {
.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
.wait = paravirt_nop,
.kick = paravirt_nop,
- .vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted),
+ .vcpu_is_preempted = __native_vcpu_is_preempted,
#endif /* SMP */
};
EXPORT_SYMBOL(pv_lock_ops);
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 25a7c43..c8...
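The hunk above also shows how the is-native check is decided once the callee-save thunk is gone: simply compare the installed pointer against the native implementation. A tiny stand-alone sketch of that pointer-comparison check, with invented names:

#include <stdbool.h>
#include <stdio.h>

struct ops {
	bool (*vcpu_is_preempted)(long cpu);
};

static bool native_vcpu_is_preempted(long cpu)
{
	(void)cpu;
	return false;
}

static struct ops ops = { .vcpu_is_preempted = native_vcpu_is_preempted };

/* True if the hook was never overridden, so callers may patch it out. */
static bool is_native_vcpu_is_preempted(void)
{
	return ops.vcpu_is_preempted == native_vcpu_is_preempted;
}

int main(void)
{
	printf("still native? %d\n", is_native_vcpu_is_preempted());
	return 0;
}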
2018 Aug 10
0
[PATCH 04/10] x86/paravirt: use a single ops structure
...+ pv_ops.pv_mmu_ops.set_fixmap(idx, phys, flags);
}
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
@@ -694,6 +694,9 @@ static __always_inline bool pv_vcpu_is_preempted(long cpu)
return PVOP_CALLEE1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
}
+void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
+bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
+
#endif /* SMP && PARAVIRT_SPINLOCKS */
#ifdef CONFIG_X86_32
@@ -862,7 +865,7 @@ extern void default_banner(void);
COND_POP(set, CLBR_RCX, rcx); \
COND_POP(set, CLBR_RAX, rax)
-#define PARA_PA...
2016 May 17
6
[PATCH v3 0/6] powerpc use pv-qspinlock instead of spinlock
change from v1:
separate into 6 patches from one patch
some minor code changes.
benchmark test results are below.
ran 3 tests on a pSeries IBM,8408-E8E with 32 cpus and 64GB memory
perf bench futex hash
perf bench futex lock-pi
perf record -advRT || perf bench sched messaging -g 1000 || perf report
summary:
_____test________________spinlock______________pv-qspinlock_____
|futex hash | 556370 ops |
2016 May 25
10
[PATCH v3 0/6] powerpc use pv-qspinlock as the default spinlock implementation
change from v2:
__spin_yield_cpu() will yield time slices to the lpar if the target cpu is running.
remove unnecessary rmb() in __spin_yield/wake_cpu.
__pv_wait() will check that *ptr == val.
some commit message changes
change from v1:
separate into 6 patches from one patch
some minor code changes.
I ran several tests on a pSeries IBM,8408-E8E with 32 cpus and 64GB memory.
benchmark test results are below.
2
2018 Nov 22
0
[PATCH] x86: fix -Wmissing-prototypes warning
...arning: no previous prototype for ‘crash_load_segments’ [-Wmissing-prototypes]
> arch/x86/kernel/machine_kexec_64.c:372:7: warning: no previous prototype for ‘arch_kexec_kernel_image_load’ [-Wmissing-prototypes]
> arch/x86/kernel/paravirt-spinlocks.c:12:16: warning: no previous prototype for ‘__native_queued_spin_unlock’ [-Wmissing-prototypes]
> arch/x86/kernel/paravirt-spinlocks.c:18:6: warning: no previous prototype for ‘pv_is_native_spin_unlock’ [-Wmissing-prototypes]
> arch/x86/kernel/paravirt-spinlocks.c:24:16: warning: no previous prototype for ‘__native_vcpu_is_preempted’ [-Wmissing-prototypes]
> a...
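-Wmissing-prototypes fires whenever a non-static function is defined with no declaration previously in scope, which is exactly what the report above lists for __native_queued_spin_unlock() and friends. A minimal reproduction and the two usual fixes, in plain user-space C (file and function names are made up):

/* proto.c -- build with: gcc -Wmissing-prototypes -c proto.c */

/* Warns: "no previous prototype for 'helper'". */
int helper(int x)
{
	return x * 2;
}

/* Fix 1: provide a declaration first, normally via a shared header. */
int fixed_helper(int x);

int fixed_helper(int x)
{
	return x + 1;
}

/* Fix 2: make the function static when it is only used in this file. */
static int local_helper(int x)
{
	return x - 1;
}

int main(void)
{
	/* Use the helpers so the example links and nothing looks unused. */
	return helper(1) + fixed_helper(2) + local_helper(3);
}

The same idea of the first fix appears in the 2018 "single ops structure" entry above, which adds explicit prototypes for the __raw_callee_save___* thunks.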
2017 Feb 08
4
[PATCH 1/2] x86/paravirt: Don't make vcpu_is_preempted() a callee-save function
...- return pv_lock_ops.vcpu_is_preempted.func ==
- __raw_callee_save___native_vcpu_is_preempted;
+ return pv_lock_ops.vcpu_is_preempted == __native_vcpu_is_preempted;
}
struct pv_lock_ops pv_lock_ops = {
@@ -38,7 +36,7 @@ struct pv_lock_ops pv_lock_ops = {
.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
.wait = paravirt_nop,
.kick = paravirt_nop,
- .vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted),
+ .vcpu_is_preempted = __native_vcpu_is_preempted,
#endif /* SMP */
};
EXPORT_SYMBOL(pv_lock_ops);
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 25a7c43..c8...
2016 Jun 02
9
[PATCH v5 0/6] powerpc/pSeries use pv-qspinlock as the default spinlock implementation
change from v4:
BUG FIX. Thanks to Boqun for reporting this issue.
struct __qspinlock has a different layout on big-endian machines.
native_queued_spin_unlock() may write the value to the wrong address. Now fixed.
Sorry for not even testing on a big-endian machine before!
change from v3:
a big change in [PATCH v4 4/6] pv-qspinlock: powerpc support pv-qspinlock
no other patch changed.
and the patch
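The bug described in that changelog is easy to state in isolation: the "locked" byte lives at a different offset inside the 32-bit lock word depending on endianness, so a store through a hard-coded byte pointer hits the wrong byte on big-endian. A stand-alone sketch of the hazard (a toy union, not the kernel's struct __qspinlock itself):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* A toy 32-bit lock word whose lowest 8 bits of the value are the "locked" flag. */
union lockword {
	uint32_t val;
	uint8_t  bytes[4];
};

int main(void)
{
	union lockword lock;
	lock.val = 1;	/* locked flag set in the low byte of the value */

	/* On little-endian the low byte is bytes[0]; on big-endian it is
	 * bytes[3]. Clearing bytes[0] unconditionally only "unlocks" on
	 * little-endian -- the kind of mistake the v5 changelog describes. */
	size_t locked_off = (lock.bytes[0] == 1) ? 0 : 3;
	printf("locked byte is at offset %zu on this machine\n", locked_off);

	lock.bytes[locked_off] = 0;	/* endian-aware "unlock" */
	printf("lock word is now %u\n", (unsigned)lock.val);
	return 0;
}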