Displaying 20 results from an estimated 25 matches for "__native_vcpu_is_preempted".
2017 Feb 10
3
[PATCH v2] x86/paravirt: Don't make vcpu_is_preempted() a callee-save function
..._is_preempted;
}
#endif /* CONFIG_PARAVIRT_SPINLOCKS */
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 6259327..da050bc 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -24,12 +24,10 @@ __visible bool __native_vcpu_is_preempted(int cpu)
{
return false;
}
-PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);
bool pv_is_native_vcpu_is_preempted(void)
{
- return pv_lock_ops.vcpu_is_preempted.func ==
- __raw_callee_save___native_vcpu_is_preempted;
+ return pv_lock_ops.vcpu_is_preempted == __native_vcpu_is_preempted...
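The hunk above drops the callee-save thunk and compares the plain function pointer directly, which implies the pv_lock_ops member itself changes from a struct paravirt_callee_save wrapper to a bare function pointer. A minimal sketch of the resulting shape (other members elided, layout assumed from this diff):

/* arch/x86/include/asm/paravirt_types.h (sketch) */
struct pv_lock_ops {
	/* ... queued spinlock ops, wait, kick ... */
	bool (*vcpu_is_preempted)(int cpu);	/* was: struct paravirt_callee_save */
};

/* arch/x86/kernel/paravirt-spinlocks.c: bare metal is never preempted */
__visible bool __native_vcpu_is_preempted(int cpu)
{
	return false;
}

bool pv_is_native_vcpu_is_preempted(void)
{
	return pv_lock_ops.vcpu_is_preempted == __native_vcpu_is_preempted;
}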
2016 Nov 15
2
[PATCH v7 06/11] x86, paravirt: Add interface to support kvm/xen vcpu preempted check
...s to exploit KVM_FEATURE_PV_UNHALT if present.
*/
@@ -620,6 +616,12 @@ void __init kvm_spinlock_init(void)
pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
pv_lock_ops.wait = kvm_wait;
pv_lock_ops.kick = kvm_kick_cpu;
+ pv_lock_ops.vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted);
+
+ if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+ pv_lock_ops.vcpu_is_preempted =
+ PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
+ }
}
static __init int kvm_spinlock_init_jump(void)
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -12,7 +12,6 @@...
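For context, the KVM guest-side helper that this hunk registers when KVM_FEATURE_STEAL_TIME is present simply reads the preempted flag the host writes into the per-cpu steal-time area. A sketch of that helper, assumed from the arch/x86/kernel/kvm.c code of this era rather than quoted from the excerpt:

/* arch/x86/kernel/kvm.c (sketch) */
__visible bool __kvm_vcpu_is_preempted(int cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	/* the host sets ->preempted when it schedules this vcpu out */
	return !!src->preempted;
}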
2017 Feb 08
4
[PATCH 1/2] x86/paravirt: Don't make vcpu_is_preempted() a callee-save function
..._is_preempted;
}
#endif /* CONFIG_PARAVIRT_SPINLOCKS */
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 6259327..da050bc 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -24,12 +24,10 @@ __visible bool __native_vcpu_is_preempted(int cpu)
{
return false;
}
-PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);
bool pv_is_native_vcpu_is_preempted(void)
{
- return pv_lock_ops.vcpu_is_preempted.func ==
- __raw_callee_save___native_vcpu_is_preempted;
+ return pv_lock_ops.vcpu_is_preempted == __native_vcpu_is_preempted...
2016 Nov 16
0
[PATCH v7 06/11] x86, paravirt: Add interface to support kvm/xen vcpu preempted check
...HALT if present.
> */
> @@ -620,6 +616,12 @@ void __init kvm_spinlock_init(void)
> pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
> pv_lock_ops.wait = kvm_wait;
> pv_lock_ops.kick = kvm_kick_cpu;
> + pv_lock_ops.vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted);
> +
> + if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
> + pv_lock_ops.vcpu_is_preempted =
> + PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
> + }
> }
>
> static __init int kvm_spinlock_init_jump(void)
> --- a/arch/x86/kernel/paravirt-spinlocks.c
> +++ b/arch/x8...
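The Xen side of the same interface is not visible in the truncated quote; as an assumption about what the elided hunks wire up, it registers a helper that checks the runstate area shared with the hypervisor, roughly:

/* drivers/xen/time.c (sketch): a vcpu that is runnable but not running
 * right now has been preempted by the hypervisor */
bool xen_vcpu_stolen(int vcpu)
{
	return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
}

/* xen_init_spinlocks() would then register it:
 *	pv_lock_ops.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
 */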
2019 Dec 26
0
[PATCH v2 5/6] KVM: arm64: Add interface to support VCPU preempted check
...ol (*vcpu_is_preempted)(int cpu);
+};
+
struct paravirt_patch_template {
struct pv_time_ops time;
+ struct pv_lock_ops lock;
};
extern struct paravirt_patch_template pv_ops;
@@ -24,6 +29,13 @@ static inline u64 paravirt_steal_clock(int cpu)
int __init pv_time_init(void);
+__visible bool __native_vcpu_is_preempted(int cpu);
+
+static inline bool pv_vcpu_is_preempted(int cpu)
+{
+ return pv_ops.lock.vcpu_is_preempted(cpu);
+}
+
#else
#define pv_time_init() do {} while (0)
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index b093b287babf..45ff1b2949a6 100644
--- a/arch/a...
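The arch/arm64/include/asm/spinlock.h hunk is cut off above; presumably it overrides the generic vcpu_is_preempted() stub with the pvops wrapper defined just before. A sketch of that wiring, assumed from the surrounding diff:

/* arch/arm64/include/asm/spinlock.h (sketch) */
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	return pv_vcpu_is_preempted(cpu);
}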
2017 Feb 15
0
[PATCH v4 1/2] x86/paravirt: Change vcp_is_preempted() arg type to long
...aravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 6259327..8f2d1c9 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -20,7 +20,7 @@ bool pv_is_native_spin_unlock(void)
__raw_callee_save___native_queued_spin_unlock;
}
-__visible bool __native_vcpu_is_preempted(int cpu)
+__visible bool __native_vcpu_is_preempted(long cpu)
{
return false;
}
--
1.8.3.1
2017 Feb 15
4
[PATCH v4 0/2] x86/kvm: Reduce vcpu_is_preempted() overhead
v3->v4:
- Fix x86-32 build error.
v2->v3:
- Provide an optimized __raw_callee_save___kvm_vcpu_is_preempted()
in assembly as suggested by PeterZ.
- Add a new patch to change vcpu_is_preempted() argument type to long
to ease the writing of the assembly code.
v1->v2:
- Rerun the fio test on a different system on both bare-metal and a
KVM guest. Both sockets were
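The optimized callee-save thunk mentioned in v2->v3 does the per-cpu steal-time test directly in assembly instead of saving and restoring all caller-clobbered registers around a C call; switching the argument to long helps because the cpu number then arrives fully defined in %rdi and can index __per_cpu_offset without an extra zero-extension. A sketch of what that hand-written x86-64 thunk looks like (assumed, not taken from this cover letter):

/* arch/x86/kernel/kvm.c (sketch) */
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq	__per_cpu_offset(,%rdi,8), %rax;"	/* per-cpu base for 'cpu' */
"cmpb	$0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne	%al;"					/* return steal_time.preempted != 0 */
"ret;"
".popsection");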
2017 Feb 15
3
[PATCH v3 0/2] x86/kvm: Reduce vcpu_is_preempted() overhead
v2->v3:
- Provide an optimized __raw_callee_save___kvm_vcpu_is_preempted()
in assembly as suggested by PeterZ.
- Add a new patch to change vcpu_is_preempted() argument type to long
to ease the writing of the assembly code.
v1->v2:
- Rerun the fio test on a different system on both bare-metal and a
KVM guest. Both sockets were utilized in this test.
- The commit log was
2019 Dec 17
10
[PATCH 0/5] KVM: arm64: vcpu preempted check support
From: Zengruan Ye <yezengruan at huawei.com>
This patch set aims to support the vcpu_is_preempted() functionality
under KVM/arm64, allowing the guest to determine whether a vcpu is
currently running or not. This will enhance lock performance on
overcommitted hosts (more runnable vcpus than physical cpus in the
system) as doing busy waits for preempted vcpus will hurt system
performance far
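The consumer side of this interface is the kernel's optimistic-spinning code: a waiter stops busy-waiting as soon as the vcpu it is waiting on is known to be preempted, instead of burning cycles until the host reschedules it. A generic sketch in the style of the existing x86 users (not part of this arm64 series):

/* kernel/locking/osq_lock.c style spin loop (sketch) */
while (!READ_ONCE(node->locked)) {
	/* bail out if we should reschedule, or if the previous waiter's
	 * vcpu has been preempted by the hypervisor */
	if (need_resched() || vcpu_is_preempted(node_cpu(node->prev)))
		goto unqueue;

	cpu_relax();
}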
2017 Sep 05
7
[PATCH 0/4] make virt_spin_lock() a pvops function
With virt_spin_lock() being a pvops function, the bare metal case can be
optimized by patching the call away completely. A kernel running as a
guest can decide whether to use paravirtualized spinlocks, the current
fallback to the unfair test-and-set scheme, or to mimic the bare metal
behavior.
Juergen Gross (4):
paravirt: add generic _paravirt_false() function
paravirt: switch
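The generic helper named in the first patch is presumably just a pvops-callable function returning false, so the bare metal case can patch the virt_spin_lock() call site away entirely; something along these lines (a sketch of the proposal, details assumed):

/* arch/x86/kernel/paravirt.c (sketch) */
__visible bool _paravirt_false(void)
{
	return false;
}

/* Bare metal keeps the always-false variant (and patches the call away);
 * a guest can instead install the unfair test-and-set fallback, or leave
 * it false when paravirtualized spinlocks are in use. */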
2019 Dec 26
7
[PATCH v2 0/6] KVM: arm64: VCPU preempted check support
This patch set aims to support the vcpu_is_preempted() functionality
under KVM/arm64, allowing the guest to determine whether a VCPU is
currently running or not. This will enhance lock performance on
overcommitted hosts (more runnable VCPUs than physical CPUs in the
system) as doing busy waits for preempted VCPUs will hurt system
performance far worse than early yielding.
We have observed some
2018 Aug 10
0
[PATCH 04/10] x86/paravirt: use a single ops structure
...MP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
@@ -694,6 +694,9 @@ static __always_inline bool pv_vcpu_is_preempted(long cpu)
return PVOP_CALLEE1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
}
+void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
+bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
+
#endif /* SMP && PARAVIRT_SPINLOCKS */
#ifdef CONFIG_X86_32
@@ -862,7 +865,7 @@ extern void default_banner(void);
COND_POP(set, CLBR_RCX, rcx); \
COND_POP(set, CLBR_RAX, rax)
-#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8)
+#define PAR...
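After the consolidation into a single ops structure, the lock ops become a sub-struct of pv_ops and call sites go through that one instance; the inline wrapper from the hunk above ends up looking roughly like this (sketch of the end result, member path assumed):

/* arch/x86/include/asm/paravirt.h (sketch, after the single-struct change) */
static __always_inline bool pv_vcpu_is_preempted(long cpu)
{
	return PVOP_CALLEE1(bool, lock.vcpu_is_preempted, cpu);
}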
2016 Nov 02
13
[PATCH v7 00/11] implement vcpu preempted check
change from v6:
fix typos and remove unnecessary comments.
change from v5:
split x86/kvm patch into guest/host parts.
introduce kvm_write_guest_offset_cached.
fix some typos.
rebase patch onto 4.9.2
change from v4:
split x86 kvm vcpu preempted check into two patches.
add documentation patch.
add x86 vcpu preempted check patch under xen
add s390 vcpu preempted check patch
change from v3: