Displaying 20 results from an estimated 271 matches for "pv_lock_op".
2016 Nov 15 · 2 · [PATCH v7 06/11] x86, paravirt: Add interface to support kvm/xen vcpu preempted check
...at 05:08:33AM -0400, Pan Xinhui wrote:
> diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
> index 0f400c0..38c3bb7 100644
> --- a/arch/x86/include/asm/paravirt_types.h
> +++ b/arch/x86/include/asm/paravirt_types.h
> @@ -310,6 +310,8 @@ struct pv_lock_ops {
>
> void (*wait)(u8 *ptr, u8 val);
> void (*kick)(int cpu);
> +
> + bool (*vcpu_is_preempted)(int cpu);
> };
So that ends up with a full function call in the native case. I did
something like the below on top; completely untested, hasn't been near a
compiler, etc.
It does...
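For orientation, since Peter's follow-up patch is truncated above: the objection is that on bare metal the new hook still goes through pv_lock_ops, i.e. a real call just to return false. Below is a minimal illustrative sketch of the native side, not the posted patch (the names merely follow kernel conventions):

#include <stdbool.h>

/*
 * Bare metal: a physical CPU is never preempted by a hypervisor, so the
 * native implementation can only ever return false.  Routing even this
 * through pv_lock_ops.vcpu_is_preempted costs a full function call
 * unless the pvops patching machinery can inline it away, which is what
 * the follow-up discussion is about.
 */
static inline bool native_vcpu_is_preempted(int cpu)
{
	return false;
}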
2016 Nov 16 · 0 · [PATCH v7 06/11] x86, paravirt: Add interface to support kvm/xen vcpu preempted check
...Pan Xinhui wrote:
>> diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
>> index 0f400c0..38c3bb7 100644
>> --- a/arch/x86/include/asm/paravirt_types.h
>> +++ b/arch/x86/include/asm/paravirt_types.h
>> @@ -310,6 +310,8 @@ struct pv_lock_ops {
>>
>> void (*wait)(u8 *ptr, u8 val);
>> void (*kick)(int cpu);
>> +
>> + bool (*vcpu_is_preempted)(int cpu);
>> };
>
> So that ends up with a full function call in the native case. I did
> something like the below on top, completely untested, not...
2017 Feb 10 · 3 · [PATCH v2] x86/paravirt: Don't make vcpu_is_preempted() a callee-save function
....h b/arch/x86/include/asm/paravirt.h
index 864f57b..2515885 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -676,7 +676,7 @@ static __always_inline void pv_kick(int cpu)
static __always_inline bool pv_vcpu_is_preempted(int cpu)
{
- return PVOP_CALLEE1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
+ return PVOP_CALL1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
}
#endif /* SMP && PARAVIRT_SPINLOCKS */
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index bb2de45..88dc852 100644
--- a/arch/x86/include/asm/paravirt_...
2015 Mar 16 · 0 · [PATCH 9/9] qspinlock, x86, kvm: Implement KVM support for paravirt qspinlock
.../include/asm/paravirt.h
@@ -712,6 +712,30 @@ static inline void __set_fixmap(unsigned
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+#ifdef CONFIG_QUEUE_SPINLOCK
+
+static __always_inline void pv_queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+ PVOP_VCALL2(pv_lock_ops.queue_spin_lock_slowpath, lock, val);
+}
+
+static __always_inline void pv_queue_spin_unlock(struct qspinlock *lock)
+{
+ PVOP_VCALLEE1(pv_lock_ops.queue_spin_unlock, lock);
+}
+
+static __always_inline void pv_wait(u8 *ptr, u8 val)
+{
+ PVOP_VCALL2(pv_lock_ops.wait, ptr, val);
+}
+
+static __alwa...
2017 Feb 08 · 4 · [PATCH 1/2] x86/paravirt: Don't make vcpu_is_preempted() a callee-save function
....h b/arch/x86/include/asm/paravirt.h
index 864f57b..2515885 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -676,7 +676,7 @@ static __always_inline void pv_kick(int cpu)
static __always_inline bool pv_vcpu_is_preempted(int cpu)
{
- return PVOP_CALLEE1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
+ return PVOP_CALL1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
}
#endif /* SMP && PARAVIRT_SPINLOCKS */
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index bb2de45..88dc852 100644
--- a/arch/x86/include/asm/paravirt_...
2016 May 17 · 0 · [PATCH v2 4/6] pv-qspinlock: powerpc support pv-qspinlock
As a pv-qspinlock kernel needs to run on any environment, including ones
without PowerVM, we should choose which qspinlock version to use at
runtime. The default pv-qspinlock uses the native version. pv_lock
initialization should be done at boot stage with irqs disabled, and if
possible the pv_lock_ops callbacks are then switched to the pv version.
Signed-off-by: Pan Xinhui <xinhui.pan at linux.vnet.ibm.com>
---
arch/powerpc/include/asm/qspinlock.h | 17 +++++++++
arch/powerpc/include/asm/qspinlock_paravirt.h | 38 +++++++++++++++++++
.../powerpc/include/asm/qspinlock_paravirt_types.h | 13...
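A rough illustration of the runtime selection the cover letter above describes; the structure and names are illustrative only, not taken from the patch:

#include <stdbool.h>
#include <stdint.h>

struct pv_lock_ops_sketch {
	void (*wait)(uint8_t *ptr, uint8_t val);
	void (*kick)(int cpu);
};

static void native_wait(uint8_t *ptr, uint8_t val) { /* just spin, no hypervisor call */ }
static void native_kick(int cpu) { /* nothing to do on bare metal */ }
static void pv_wait_sketch(uint8_t *ptr, uint8_t val) { /* cede this vcpu to the hypervisor */ }
static void pv_kick_sketch(int cpu) { /* prod the target vcpu awake */ }

/* Default to the native callbacks... */
static struct pv_lock_ops_sketch ops = {
	.wait = native_wait,
	.kick = native_kick,
};

/*
 * ...and switch to the pv versions early in boot, before secondary CPUs
 * can contend on a lock, which is why the cover letter wants this done
 * at boot stage with interrupts disabled.
 */
static void pv_lock_init_sketch(bool running_shared_processor)
{
	if (!running_shared_processor)
		return;
	ops.wait = pv_wait_sketch;
	ops.kick = pv_kick_sketch;
}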
2018 Aug 10 · 0 · [PATCH 04/10] x86/paravirt: use a single ops structure
...hys, pgprot_t flags)
{
- pv_mmu_ops.set_fixmap(idx, phys, flags);
+ pv_ops.pv_mmu_ops.set_fixmap(idx, phys, flags);
}
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
@@ -694,6 +694,9 @@ static __always_inline bool pv_vcpu_is_preempted(long cpu)
return PVOP_CALLEE1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
}
+void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
+bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
+
#endif /* SMP && PARAVIRT_SPINLOCKS */
#ifdef CONFIG_X86_32
@@ -862,7 +865,7 @@ extern void default_banner(void);...
2017 Sep 05 · 7 · [PATCH 0/4] make virt_spin_lock() a pvops function
With virt_spin_lock() being a pvops function, the bare metal case can be
optimized by patching the call away completely. A kernel running as a
guest can then decide whether to use paravirtualized spinlocks, the
current fallback to the unfair test-and-set scheme, or to mimic the
bare metal behavior.
Juergen Gross (4):
paravirt: add generic _paravirt_false() function
paravirt: switch
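A sketch of the unfair test-and-set fallback the cover letter mentions, written with C11 atomics in place of the kernel's primitives; in the series this sits behind a pvops call so that bare metal can patch it down to a plain "return false", and the names here are illustrative:

#include <stdatomic.h>
#include <stdbool.h>

struct sketch_qspinlock {
	atomic_int val;
};

/*
 * In the kernel, the native/patched case would return false, meaning
 * "not handled here, take the regular fair qspinlock slowpath".  This
 * sketch shows only the guest-side unfair behaviour: spin until val can
 * be flipped from 0 to 1, then report the lock as taken.
 */
static bool virt_spin_lock_sketch(struct sketch_qspinlock *lock)
{
	int expected;

	do {
		while (atomic_load(&lock->val) != 0)
			;	/* cpu_relax() in the kernel */
		expected = 0;
	} while (!atomic_compare_exchange_strong(&lock->val, &expected, 1));

	return true;
}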
2014 Jun 15 · 0 · [PATCH 11/11] qspinlock, kvm: Add paravirt support
...r irq enabled case to avoid hang when lock info is overwritten
+ * in irq spinlock slowpath and no spurious interrupt occur to save us.
+ */
+ if (arch_irqs_disabled_flags(flags))
+ halt();
+ else
+ safe_halt();
+
+out:
+ local_irq_restore(flags);
+}
+#endif /* QUEUE_SPINLOCK */
/*
* Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
@@ -808,8 +854,20 @@ void __init kvm_spinlock_init(void)
if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
return;
+#ifdef CONFIG_QUEUE_SPINLOCK
+ pv_lock_ops.init_node = PV_CALLEE_SAVE(__pv_init_node);
+ pv_lock_ops.link_and_wait_node = PV_CALLE...
2015 Apr 30 · 0 · [PATCH 3/6] x86: introduce new pvops function clear_slowpath
...)
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 8957810..318f077 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -724,6 +724,13 @@ static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
}
+static __always_inline void __ticket_clear_slowpath(arch_spinlock_t *lock,
+ __ticket_t head)
+{
+ PVOP_VCALL2(pv_lock_ops.clear_slowpath, lock, head);
+}
+
+void pv_lock_activate(void);
#endif
#ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/pa...
2016 Apr 28 · 0 · [PATCH] powerpc: enable qspinlock and its virtualization support
...wpath(struct qspinlock *lock, u32 val);
+extern void __pv_init_lock_hash(void);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_unlock(struct qspinlock *lock);
+
+static inline void pv_queued_spin_lock(struct qspinlock *lock, u32 val)
+{
+ pv_lock_op.lock(lock, val);
+}
+
+static inline void pv_queued_spin_unlock(struct qspinlock *lock)
+{
+ pv_lock_op.unlock(lock);
+}
+
+static inline void pv_wait(u8 *ptr, u8 val)
+{
+ pv_lock_op.wait(ptr, val, -1);
+}
+
+static inline void pv_kick(int cpu)
+{
+ pv_lock_op.kick(cpu);
+}
+
+#endif
diff --git a/...
2014 Jun 15 · 0 · [PATCH 10/11] qspinlock: Paravirt support
...linux-2.6/arch/x86/include/asm/paravirt.h
@@ -712,6 +712,44 @@ static inline void __set_fixmap(unsigned
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+#ifdef CONFIG_QUEUE_SPINLOCK
+
+static __always_inline void pv_init_node(struct mcs_spinlock *node)
+{
+ PVOP_VCALLEE1(pv_lock_ops.init_node, node);
+}
+
+static __always_inline void pv_link_and_wait_node(u32 old, struct mcs_spinlock *node)
+{
+ PVOP_VCALLEE2(pv_lock_ops.link_and_wait_node, old, node);
+}
+
+static __always_inline void pv_kick_node(struct mcs_spinlock *node)
+{
+ PVOP_VCALLEE1(pv_lock_ops.kick_node, node);
+}...
2014 Mar 13 · 2 · [PATCH RFC v6 10/11] pvqspinlock, x86: Enable qspinlock PV support for KVM
On 12/03/2014 19:54, Waiman Long wrote:
> @@ -807,8 +889,13 @@ void __init kvm_spinlock_init(void)
> if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
> return;
>
> +#ifdef CONFIG_QUEUE_SPINLOCK
> + pv_lock_ops.kick_cpu = kvm_kick_cpu_type;
> + pv_lock_ops.hibernate = kvm_hibernate;
> +#else
> pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
> pv_lock_ops.unlock_kick = kvm_unlock_kick;
> +#endif
This should also disable the unfair path.
Paolo