Displaying 9 results from an estimated 9 matches for "kvm_vcpu_preempt".
2019 May 27
3
[RFC PATCH 5/6] x86/mm/tlb: Flush remote and local TLBs concurrently
...;
@@ -594,6 +594,9 @@ static void kvm_flush_tlb_others(const s
* queue flush_on_enter for pre-empted vCPUs
*/
for_each_cpu(cpu, flushmask) {
+ if (cpu == smp_processor_id())
+ continue;
+
src = &per_cpu(steal_time, cpu);
state = READ_ONCE(src->preempted);
if ((state & KVM_VCPU_PREEMPTED)) {
@@ -603,7 +606,7 @@ static void kvm_flush_tlb_others(const s
}
}
- native_flush_tlb_others(flushmask, info);
+ native_flush_tlb_multi(flushmask, info);
}
static void __init kvm_guest_init(void)
@@ -628,9 +631,8 @@ static void __init kvm_guest_init(void)
if (kvm_para_has_feature(K...
2019 May 27
0
[RFC PATCH 5/6] x86/mm/tlb: Flush remote and local TLBs concurrently
...p_processor_id())
> + continue;
> +
Even this would be just an optimization; the vCPU you're running on
cannot be preempted. You can just change others to multi.
Paolo
> src = &per_cpu(steal_time, cpu);
> state = READ_ONCE(src->preempted);
> if ((state & KVM_VCPU_PREEMPTED)) {
> @@ -603,7 +606,7 @@ static void kvm_flush_tlb_others(const s
> }
> }
>
> - native_flush_tlb_others(flushmask, info);
> + native_flush_tlb_multi(flushmask, info);
> }
>
> static void __init kvm_guest_init(void)
> @@ -628,9 +631,8 @@ static void __init...
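For context on Paolo's remark (background, not part of the quoted thread): the preempted flags live in the steal-time structure shared between host and guest, roughly:

  /* arch/x86/include/uapi/asm/kvm_para.h */
  #define KVM_VCPU_PREEMPTED    (1 << 0)
  #define KVM_VCPU_FLUSH_TLB    (1 << 1)

The host sets KVM_VCPU_PREEMPTED in a vCPU's steal_time.preempted only while that vCPU is scheduled out, and clears it before the vCPU runs again. The CPU executing this loop can therefore never observe its own bit set and will never clear itself from flushmask, which is why the explicit smp_processor_id() test is only an optimization; the later revisions drop it in favor of a comment (see the v2/v3 excerpts below).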
2019 Jul 19
0
[PATCH v3 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...*/
for_each_cpu(cpu, flushmask) {
+ /*
+ * The local vCPU is never preempted, so we do not explicitly
+ * skip check for local vCPU - it will never be cleared from
+ * flushmask.
+ */
src = &per_cpu(steal_time, cpu);
state = READ_ONCE(src->preempted);
if ((state & KVM_VCPU_PREEMPTED)) {
@@ -618,7 +623,7 @@ static void kvm_flush_tlb_others(const struct cpumask *cpumask,
}
}
- native_flush_tlb_others(flushmask, info);
+ native_flush_tlb_multi(flushmask, info);
}
static void __init kvm_guest_init(void)
@@ -643,7 +648,7 @@ static void __init kvm_guest_init(void)
if...
2019 Jul 02
0
[PATCH v2 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...*/
for_each_cpu(cpu, flushmask) {
+ /*
+ * The local vCPU is never preempted, so we do not explicitly
+ * skip check for local vCPU - it will never be cleared from
+ * flushmask.
+ */
src = &per_cpu(steal_time, cpu);
state = READ_ONCE(src->preempted);
if ((state & KVM_VCPU_PREEMPTED)) {
@@ -603,7 +608,7 @@ static void kvm_flush_tlb_others(const struct cpumask *cpumask,
}
}
- native_flush_tlb_others(flushmask, info);
+ native_flush_tlb_multi(flushmask, info);
}
static void __init kvm_guest_init(void)
@@ -628,7 +633,7 @@ static void __init kvm_guest_init(void)
if...
2019 May 25
3
[RFC PATCH 5/6] x86/mm/tlb: Flush remote and local TLBs concurrently
To improve TLB shootdown performance, flush the remote and local TLBs
concurrently. Introduce flush_tlb_multi(), which does so. The current
flush_tlb_others() interface is kept, since paravirtual interfaces need
to be adapted before it can be removed; this is left for future work.
In such PV environments, TLB flushes are, for now, not performed
concurrently.
Add a static key to tell
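To make the interface change concrete: both functions take the same arguments, and the difference lies in which CPUs the mask may contain and where the local flush happens. The prototypes below are paraphrased for illustration and may differ slightly from the series:

  void flush_tlb_others(const struct cpumask *cpumask,
                        const struct flush_tlb_info *info);
  /* cpumask never contains the calling CPU; the caller flushes its own
   * TLB separately, after the remote CPUs have finished. */

  void flush_tlb_multi(const struct cpumask *cpumask,
                       const struct flush_tlb_info *info);
  /* cpumask may contain the calling CPU; the local flush runs while the
   * remote CPUs handle theirs, so the two overlap. */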
2019 Jul 02
2
[PATCH v2 0/9] x86: Concurrent TLB flushes
Currently, local and remote TLB flushes are not performed concurrently,
which introduces unnecessary overhead - each INVLPG can take 100s of
cycles. This patch-set allows TLB flushes to be run concurrently: first
request the remote CPUs to initiate the flush, then run it locally, and
finally wait for the remote CPUs to finish their work.
In addition, there are various small optimizations to avoid
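The ordering described in this cover letter (ask the remote CPUs to start, do the local flush while they work, then wait) can be illustrated with a small user-space model. This is only a sketch of the scheduling idea, not kernel code; every name in it is invented for the example:

  #include <pthread.h>
  #include <stdatomic.h>
  #include <stdio.h>

  static atomic_int flush_requested, flush_done;

  /* Stand-in for one remote CPU: wait for the request, "flush", then ack. */
  static void *remote_cpu(void *arg)
  {
          (void)arg;
          while (!atomic_load(&flush_requested))
                  ;                               /* wait for the "IPI" */
          /* ... the remote TLB flush would happen here ... */
          atomic_store(&flush_done, 1);           /* acknowledge completion */
          return NULL;
  }

  int main(void)
  {
          pthread_t t;

          pthread_create(&t, NULL, remote_cpu, NULL);

          atomic_store(&flush_requested, 1);      /* step 1: request the remote flush */
          /* step 2: the local TLB flush runs here, overlapping the remote work */
          while (!atomic_load(&flush_done))
                  ;                               /* step 3: wait for the remote CPU */

          pthread_join(t, NULL);
          puts("local flush overlapped with the remote one");
          return 0;
  }

In the serialized scheme the local flush would only start after flush_done is set; running it between the request and the wait is what takes it off the critical path.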
2019 Jul 19
5
[PATCH v3 0/9] x86: Concurrent TLB flushes
[ Cover-letter is identical to v2, including benchmark results,
excluding the change log. ]
Currently, local and remote TLB flushes are not performed concurrently,
which introduces unnecessary overhead - each INVLPG can take 100s of
cycles. This patch-set allows TLB flushes to be run concurrently: first
request the remote CPUs to initiate the flush, then run it locally, and
finally wait for