Displaying 20 results from an estimated 128 matches for "pv_callee_sav".
2016 Nov 15
2
[PATCH v7 06/11] x86, paravirt: Add interface to support kvm/xen vcpu preempted check
...#endif
 }
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
@@ -604,6 +592,14 @@ static void kvm_wait(u8 *ptr, u8 val)
        local_irq_restore(flags);
 }

+static bool __kvm_vcpu_is_preempted(int cpu)
+{
+       struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
+
+       return !!src->preempted;
+}
+PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);
+
 /*
  * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
  */
@@ -620,6 +616,12 @@ void __init kvm_spinlock_init(void)
        pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
        pv_lock_ops.wait = kvm_wait;
        pv_lock_ops.kick...
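Aside: every match on this page turns on the same macro pair, so a simplified sketch may help when reading the snippets. Reconstructed from the arch/x86 paravirt headers, not quoted from any message here: PV_CALLEE_SAVE_REGS_THUNK(func) emits an asm thunk, __raw_callee_save_##func, that saves and restores every register the C ABI lets a callee clobber before calling func, and PV_CALLEE_SAVE(func) wraps that thunk for installation in a pvops slot.

struct paravirt_callee_save {
        void *func;     /* the register-preserving asm thunk, not the C body */
};

/* sketch; the real macros live in arch/x86/include/asm/paravirt_types.h */
#define PV_CALLEE_SAVE(func)                                            \
        ((struct paravirt_callee_save) { __raw_callee_save_##func })

This is why a hook like __kvm_vcpu_is_preempted() can be called from locking fast paths with almost no register spills at the call site.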
2018 Aug 10
0
[PATCH 04/10] x86/paravirt: use a single ops structure
...if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                apic_set_eoi_write(kvm_guest_apic_eoi_write);
@@ -749,13 +749,15 @@ void __init kvm_spinlock_init(void)
                return;
        __pv_init_lock_hash();
-       pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
-       pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
-       pv_lock_ops.wait = kvm_wait;
-       pv_lock_ops.kick = kvm_kick_cpu;
+       pv_ops.pv_lock_ops.queued_spin_lock_slowpath =
+               __pv_queued_spin_lock_slowpath;
+       pv_ops.pv_lock_ops.queued_spin_unlock =
+               PV_CALLEE_SAVE(__pv_queued_spin_unlock);
+       pv_ops.pv_lock_ops.wait = kvm_wait...
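For orientation, the structural change in that series, sketched under assumptions (the field names are taken from the snippet above; the full template carries many more op groups):

/* before: one global per op group, e.g. */
extern struct pv_lock_ops pv_lock_ops;

/* after: a single top-level structure (sketch) */
struct paravirt_patch_template {
        /* ... cpu, irq, mmu, ... op groups elided ... */
        struct pv_lock_ops pv_lock_ops;
};
extern struct paravirt_patch_template pv_ops;

Hence the mechanical rewrite from pv_lock_ops.wait to pv_ops.pv_lock_ops.wait in the hunk above.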
2014 Jun 15
0
[PATCH 11/11] qspinlock, kvm: Add paravirt support
...)
        kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
 }
+#ifndef CONFIG_QUEUE_SPINLOCK
 enum kvm_contention_stat {
        TAKEN_SLOW,
        TAKEN_SLOW_PICKUP,
@@ -796,6 +797,51 @@ static void kvm_unlock_kick(struct arch_
                }
        }
 }
+#else /* QUEUE_SPINLOCK */
+
+#include <asm-generic/qspinlock.h>
+
+PV_CALLEE_SAVE_REGS_THUNK(__pv_init_node);
+PV_CALLEE_SAVE_REGS_THUNK(__pv_link_and_wait_node);
+PV_CALLEE_SAVE_REGS_THUNK(__pv_kick_node);
+
+PV_CALLEE_SAVE_REGS_THUNK(__pv_wait_head);
+PV_CALLEE_SAVE_REGS_THUNK(__pv_queue_unlock);
+
+void kvm_wait(int *ptr, int val)
+{
+       unsigned long flags;
+
+       if (in_nmi())...
2016 Nov 16
0
[PATCH v7 06/11] x86, paravirt: Add interface to support kvm/xen vcpu preempted check
...V_EOI))
> @@ -604,6 +592,14 @@ static void kvm_wait(u8 *ptr, u8 val)
>        local_irq_restore(flags);
> }
>
> +static bool __kvm_vcpu_is_preempted(int cpu)
> +{
> +       struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
> +
> +       return !!src->preempted;
> +}
> +PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);
> +
> /*
>  * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
>  */
> @@ -620,6 +616,12 @@ void __init kvm_spinlock_init(void)
>        pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
>        pv_lock_ops.w...
2017 Feb 10
3
[PATCH v2] x86/paravirt: Don't make vcpu_is_preempted() a callee-save function
...s all the paravirt structures: we get a convenient
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 099fcba..eb3753d 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -595,7 +595,6 @@ __visible bool __kvm_vcpu_is_preempted(int cpu)
        return !!src->preempted;
 }
-PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

 /*
  * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
@@ -614,10 +613,8 @@ void __init kvm_spinlock_init(void)
        pv_lock_ops.wait = kvm_wait;
        pv_lock_ops.kick = kvm_kick_cpu;
-       if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
-               pv_loc...
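The replacement lines are truncated above; as a hedged reading of the '-' lines, dropping the thunk means the hook is installed as an ordinary function pointer, so call sites must assume normal C-ABI register clobbers:

/* assumed net effect, not the patch text */
pv_lock_ops.vcpu_is_preempted = __kvm_vcpu_is_preempted;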
2017 Feb 08
4
[PATCH 1/2] x86/paravirt: Don't make vcpu_is_preempted() a callee-save function
...s all the paravirt structures: we get a convenient
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 099fcba..eb3753d 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -595,7 +595,6 @@ __visible bool __kvm_vcpu_is_preempted(int cpu)
        return !!src->preempted;
 }
-PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

 /*
  * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
@@ -614,10 +613,8 @@ void __init kvm_spinlock_init(void)
        pv_lock_ops.wait = kvm_wait;
        pv_lock_ops.kick = kvm_kick_cpu;
-       if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
-               pv_loc...
2014 Oct 29
0
[PATCH v13 10/11] pvqspinlock, x86: Enable PV qspinlock for KVM
...apicid. Used to wake up a halted vcpu */
-static void kvm_kick_cpu(int cpu)
+void kvm_kick_cpu(int cpu)
 {
        int apicid;
        unsigned long flags = 0;
@@ -576,7 +576,9 @@ static void kvm_kick_cpu(int cpu)
        apicid = per_cpu(x86_cpu_to_apicid, cpu);
        kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
 }
+PV_CALLEE_SAVE_REGS_THUNK(kvm_kick_cpu);
+#ifndef CONFIG_QUEUE_SPINLOCK
 enum kvm_contention_stat {
        TAKEN_SLOW,
        TAKEN_SLOW_PICKUP,
@@ -804,6 +806,132 @@ static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
                }
        }
 }
+#else /* !CONFIG_QUEUE_SPINLOCK */
+
+#ifdef CONFIG_KVM_DEBUG_FS
+...
2015 Jan 20
0
[PATCH v14 11/11] pvqspinlock, x86: Enable PV qspinlock for XEN
...T)
+               add_smp(&kick_nohlt_stats, 1);
+       if (stat_types & PV_LOCKSTAT_HALT_QHEAD)
+               add_smp(&halt_qhead_stats, 1);
+       if (stat_types & PV_LOCKSTAT_HALT_QNODE)
+               add_smp(&halt_qnode_stats, 1);
+       if (stat_types & PV_LOCKSTAT_HALT_ABORT)
+               add_smp(&halt_abort_stats, 1);
+}
+PV_CALLEE_SAVE_REGS_THUNK(xen_lock_stats);
+
+static inline u64 spin_time_start(void)
+{
+       return sched_clock();
+}
+
+static inline void spin_time_accum_blocked(u64 start)
+{
+       u64 delta;
+
+       delta = sched_clock() - start;
+       add_smp(&time_blocked, delta);
+}
+#else /* CONFIG_XEN_DEBUG_FS */
+static inline v...
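A usage sketch for the two timing helpers in that snippet; the caller below is hypothetical, illustrating the intended pairing rather than quoting the patch:

static void wait_path_example(void)
{
        u64 start = spin_time_start();          /* sched_clock() timestamp */

        /* ... block until kicked by the lock holder ... */

        spin_time_accum_blocked(start);         /* add the delta to time_blocked */
}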
2018 Aug 10
13
[PATCH 00/10] x86/paravirt: several cleanups
This series removes some no longer needed stuff from paravirt
infrastructure and puts large quantities of paravirt ops under a new
config option PARAVIRT_XXL which is selected by XEN_PV only.
A pvops kernel without XEN_PV being configured is about 2.5% smaller
with this series applied.
tip commit 5800dc5c19f34e6e03b5adab1282535cb102fafd ("x86/paravirt:
Fix spectre-v2 mitigations for
2014 Mar 13
2
[PATCH RFC v6 10/11] pvqspinlock, x86: Enable qspinlock PV support for KVM
...89,13 @@ void __init kvm_spinlock_init(void)
>        if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
>                return;
>
> +#ifdef CONFIG_QUEUE_SPINLOCK
> +       pv_lock_ops.kick_cpu = kvm_kick_cpu_type;
> +       pv_lock_ops.hibernate = kvm_hibernate;
> +#else
>        pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
>        pv_lock_ops.unlock_kick = kvm_unlock_kick;
> +#endif
This should also disable the unfair path.
Paolo
2015 Mar 19
0
[Xen-devel] [PATCH 0/9] qspinlock stuff -v15
...+95,43 @@ static inline void spin_time_accum_blocked(u64 start)
 }
 #endif /* CONFIG_XEN_DEBUG_FS */

+static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
+static DEFINE_PER_CPU(char *, irq_name);
+static bool xen_pvspin = true;
+
+#ifdef CONFIG_QUEUE_SPINLOCK
+
+#include <asm/qspinlock.h>
+
+PV_CALLEE_SAVE_REGS_THUNK(__pv_queue_spin_unlock);
+
+static void xen_qlock_wait(u8 *ptr, u8 val)
+{
+       int irq = __this_cpu_read(lock_kicker_irq);
+
+       xen_clear_irq_pending(irq);
+
+       barrier();
+
+       if (READ_ONCE(*ptr) == val)
+               xen_poll_irq(irq);
+}
+
+static void xen_qlock_kick(int cpu)
+{
+       xen_send_IPI_one(c...
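The kick side is cut off above. The waiter's sequence (clear the pending per-cpu lock_kicker_irq, re-check the lock byte after barrier(), then poll the irq) closes the race where a kick lands between the check and the poll. A sketch of the assumed kick counterpart; the body is an assumption, not the truncated patch text:

static void xen_qlock_kick_sketch(int cpu)
{
        /* assumed: wake the waiter by raising its lock_kicker_irq as an IPI */
        xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
}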
2015 Apr 07
0
[PATCH v15 12/15] pvqspinlock, x86: Enable PV qspinlock for Xen
...{
@@ -280,8 +327,16 @@ void __init xen_init_spinlocks(void)
                return;
        }
        printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
+#ifdef CONFIG_QUEUE_SPINLOCK
+       __pv_init_lock_hash();
+       pv_lock_ops.queue_spin_lock_slowpath = __pv_queue_spin_lock_slowpath;
+       pv_lock_ops.queue_spin_unlock = PV_CALLEE_SAVE(__pv_queue_spin_unlock);
+       pv_lock_ops.wait = xen_qlock_wait;
+       pv_lock_ops.kick = xen_qlock_kick;
+#else
        pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
        pv_lock_ops.unlock_kick = xen_unlock_kick;
+#endif
 }

 /*
@@ -310,7 +365,7 @@ static __init int xen_parse_nopvspin(char *ar...
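__pv_init_lock_hash() shows up in both the KVM and Xen enablement hunks on this page. A sketch of the assumed contract, modeled on kernel/locking/qspinlock_paravirt.h rather than on this patch: the PV slow path parks the queue head in a hash keyed by lock address so the unlocker can find the vCPU to kick, and the table must be sized (from nr_cpu_ids) before the slow paths are installed; hence the ordering above.

/* assumed shape of one entry in that hash */
struct pv_hash_entry {
        struct qspinlock *lock;         /* key: the contended lock */
        struct pv_node   *node;         /* parked queue head to kick */
};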
2015 Mar 16
0
[PATCH 9/9] qspinlock, x86, kvm: Implement KVM support for paravirt qspinlock
...tatic inline bool virt_queue_spin_lock(struct qspinlock *lock)
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -584,6 +584,41 @@ static void kvm_kick_cpu(int cpu)
        kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
 }
+
+#ifdef CONFIG_QUEUE_SPINLOCK
+
+#include <asm/qspinlock.h>
+
+PV_CALLEE_SAVE_REGS_THUNK(__pv_queue_spin_unlock);
+
+static void kvm_wait(u8 *ptr, u8 val)
+{
+       unsigned long flags;
+
+       if (in_nmi())
+               return;
+
+       local_irq_save(flags);
+
+       if (READ_ONCE(*ptr) != val)
+               goto out;
+
+       /*
+        * halt until it's our turn and kicked. Note that we do safe halt
+        * for irq en...
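The snippet ends mid-comment. A minimal sketch of how such a halt-based wait typically concludes, assumed from the visible fragment rather than taken from the rest of the patch:

static void kvm_wait_sketch(u8 *ptr, u8 val)
{
        unsigned long flags;

        if (in_nmi())
                return;                 /* never halt in NMI context */

        local_irq_save(flags);          /* close the check-vs-kick race */
        if (READ_ONCE(*ptr) == val)
                safe_halt();            /* halt with irqs on; the kick IPI resumes us */
        local_irq_restore(flags);
}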