search for: flush_tlb_func_local

Displaying 20 results from an estimated 20 matches for "flush_tlb_func_local".

2019 Jul 02
0
[PATCH v2 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...e_hyperv_mmu_flush_tlb_others(cpus, info); + trace_hyperv_mmu_flush_tlb_multi(cpus, info); if (!hv_hypercall_pg) goto do_native; @@ -69,6 +69,9 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus, local_irq_save(flags); + if (cpumask_test_cpu(smp_processor_id(), cpus)) + flush_tlb_func_local(info); + flush_pcpu = (struct hv_tlb_flush **) this_cpu_ptr(hyperv_pcpu_input_arg); @@ -156,7 +159,7 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus, if (!(status & HV_HYPERCALL_RESULT_MASK)) return; do_native: - native_flush_tlb_others(cpus, info); + native_...
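The Hyper-V hunk above follows a pattern that recurs throughout this series: if the current CPU is in the target mask, flush it directly with interrupts disabled, then issue a single hypercall covering only the remote CPUs. Below is a minimal, self-contained C sketch of that flow; the boolean mask and the flush_local()/flush_remote() helpers are userspace stand-ins for the kernel's struct cpumask, flush_tlb_func_local(), and the Hyper-V hypercall, not the real API.

/* Minimal userspace sketch of the local-then-remote flush pattern.
 * All helpers here are stubs standing in for kernel primitives. */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct flush_info { unsigned long start, end; };

/* Stand-in for flush_tlb_func_local(): flush this CPU's TLB directly. */
static void flush_local(const struct flush_info *info)
{
    printf("local flush [%lx, %lx)\n", info->start, info->end);
}

/* Stand-in for the hypercall that covers the remaining CPUs. */
static void flush_remote(const bool *mask, const struct flush_info *info)
{
    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        if (mask[cpu])
            printf("remote flush on cpu %d\n", cpu);
}

static void hyperv_style_flush(int this_cpu, bool *mask,
                               const struct flush_info *info)
{
    /* If we are in the target mask, flush locally first... */
    if (mask[this_cpu]) {
        flush_local(info);
        mask[this_cpu] = false; /* ...and drop ourselves from the mask. */
    }
    flush_remote(mask, info);   /* One call covers all remote CPUs. */
}

int main(void)
{
    bool mask[NR_CPUS] = { true, true, false, true };
    struct flush_info info = { 0x1000, 0x2000 };
    hyperv_style_flush(0, mask, &info);
    return 0;
}

The gain over the old hyperv_flush_tlb_others() is that the local flush is no longer funneled through a separate native fallback step; it happens inline, and the hypercall only ever names remote CPUs.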
2019 Jul 02
2
[PATCH v2 0/9] x86: Concurrent TLB flushes
...linux-hyperv at vger.kernel.org Cc: linux-kernel at vger.kernel.org Cc: virtualization at lists.linux-foundation.org Cc: x86 at kernel.org Cc: xen-devel at lists.xenproject.org Nadav Amit (9): smp: Run functions concurrently in smp_call_function_many() x86/mm/tlb: Remove reason as argument for flush_tlb_func_local() x86/mm/tlb: Open-code on_each_cpu_cond_mask() for tlb_is_not_lazy() x86/mm/tlb: Flush remote and local TLBs concurrently x86/mm/tlb: Privatize cpu_tlbstate x86/mm/tlb: Do not make is_lazy dirty for no reason cpumask: Mark functions as pure x86/mm/tlb: Remove UV special case x86/mm/t...
2019 Jun 13
4
[PATCH 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...native_flush_tlb_multi skipping * IPIs to lazy TLB mode CPUs. */ switch_mm_irqs_off(NULL, &init_mm, NULL); @@ -635,9 +635,12 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f, this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen); } -static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason) +static void flush_tlb_func_local(void *info) { const struct flush_tlb_info *f = info; + enum tlb_flush_reason reason; + + reason = (f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN : TLB_LOCAL_MM_SHOOTDOWN; flush_tlb_func_common(f, true, reason); } @@...
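The diff above drops the reason parameter so that flush_tlb_func_local() matches the generic void (*)(void *) callback signature used by the SMP cross-call code, recomputing the reason from f->mm inside the function instead. Here is a compilable sketch of that change, using simplified stand-in types for struct flush_tlb_info and flush_tlb_func_common():

/* Sketch of the signature change shown in the diff: the reason argument
 * is dropped and rederived from f->mm, so the function fits the generic
 * SMP-call callback type. Types are simplified stand-ins. */
#include <stddef.h>
#include <stdio.h>

enum tlb_flush_reason { TLB_LOCAL_SHOOTDOWN, TLB_LOCAL_MM_SHOOTDOWN };

struct flush_tlb_info { void *mm; /* NULL means a full shootdown */ };

static void flush_tlb_func_common(const struct flush_tlb_info *f,
                                  int local, enum tlb_flush_reason reason)
{
    printf("flush: local=%d reason=%d\n", local, reason);
}

/* New form: a single void * parameter, reason derived from the info. */
static void flush_tlb_func_local(void *info)
{
    const struct flush_tlb_info *f = info;
    enum tlb_flush_reason reason =
        (f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN : TLB_LOCAL_MM_SHOOTDOWN;

    flush_tlb_func_common(f, 1, reason);
}

int main(void)
{
    struct flush_tlb_info f = { .mm = NULL };
    flush_tlb_func_local(&f);
    return 0;
}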
2019 Jul 19
0
[PATCH v3 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...'ll do pointless flushes on future context switches. * - * Rather than hooking native_flush_tlb_others() here, I think + * Rather than hooking native_flush_tlb_multi() here, I think * that UV should be updated so that smp_call_function_many(), * etc, are optimal on UV. */ + flush_tlb_func_local((void *)info); + cpumask = uv_flush_tlb_others(cpumask, info); if (cpumask) smp_call_function_many(cpumask, flush_tlb_func_remote, @@ -709,8 +716,9 @@ void native_flush_tlb_others(const struct cpumask *cpumask, * doing a speculative memory access. */ if (info->freed_tables) { -...
2019 May 31
2
[RFC PATCH v2 04/12] x86/mm/tlb: Flush remote and local TLBs concurrently
...native_flush_tlb_multi skipping * IPIs to lazy TLB mode CPUs. */ switch_mm_irqs_off(NULL, &init_mm, NULL); @@ -634,9 +634,12 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f, this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen); } -static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason) +static void flush_tlb_func_local(void *info) { const struct flush_tlb_info *f = info; + enum tlb_flush_reason reason; + + reason = (f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN : TLB_LOCAL_MM_SHOOTDOWN; flush_tlb_func_common(f, true, reason); } @@...
2019 May 25
3
[RFC PATCH 5/6] x86/mm/tlb: Flush remote and local TLBs concurrently
...native_flush_tlb_multi skipping * IPIs to lazy TLB mode CPUs. */ switch_mm_irqs_off(NULL, &init_mm, NULL); @@ -634,9 +634,12 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f, this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen); } -static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason) +static void flush_tlb_func_local(void *info) { const struct flush_tlb_info *f = info; + enum tlb_flush_reason reason; + + reason = (f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN : TLB_LOCAL_MM_SHOOTDOWN; flush_tlb_func_common(f, true, reason); } @@...
2019 Jul 22
2
[PATCH v3 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...rs(const struct cpumask *cpumask, > * doing a speculative memory access. > */ > if (info->freed_tables) { > - smp_call_function_many(cpumask, flush_tlb_func_remote, > - (void *)info, 1); > + __smp_call_function_many(cpumask, flush_tlb_func_remote, > + flush_tlb_func_local, > + (void *)info, 1); > } else { > /* > * Although we could have used on_each_cpu_cond_mask(), > @@ -737,7 +745,8 @@ void native_flush_tlb_others(const struct cpumask *cpumask, > if (tlb_is_not_lazy(cpu)) > __cpumask_set_cpu(cpu, cond_cpumask); > }...
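The hunk above replaces smp_call_function_many() with a variant that also takes a local callback, so the calling CPU flushes its own TLB while the IPIs to the remote CPUs are still in flight. The following is a rough userspace model of that shape; send_ipi() and the sequential loop are stand-ins, since the real primitive queues work on the remote CPUs and can wait for their completion.

/* Rough model of a cross-call primitive that takes both a remote and a
 * local callback. Everything here runs sequentially; in the kernel the
 * local callback overlaps with the in-flight IPIs. */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

typedef void (*smp_call_func_t)(void *info);

static void send_ipi(int cpu, smp_call_func_t func, void *info)
{
    /* In the kernel this queues func on the remote CPU; here we run it. */
    printf("ipi -> cpu %d: ", cpu);
    func(info);
}

/* Hypothetical shape of the primitive from the patch: fire IPIs to every
 * remote CPU in the mask, then run local_func on the calling CPU. */
static void call_function_many(const bool *mask, int this_cpu,
                               smp_call_func_t remote_func,
                               smp_call_func_t local_func, void *info)
{
    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        if (mask[cpu] && cpu != this_cpu)
            send_ipi(cpu, remote_func, info);

    if (mask[this_cpu])
        local_func(info); /* in the kernel, concurrent with the IPIs */

    /* (the real code then waits for remote completion if wait == 1) */
}

static void flush_remote(void *info) { (void)info; printf("remote flush\n"); }
static void flush_local(void *info)  { (void)info; printf("local flush\n"); }

int main(void)
{
    bool mask[NR_CPUS] = { true, true, true, false };
    call_function_many(mask, 0, flush_remote, flush_local, NULL);
    return 0;
}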
2019 Jul 19
5
[PATCH v3 0/9] x86: Concurrent TLB flushes
...linux-hyperv at vger.kernel.org Cc: linux-kernel at vger.kernel.org Cc: virtualization at lists.linux-foundation.org Cc: x86 at kernel.org Cc: xen-devel at lists.xenproject.org Nadav Amit (9): smp: Run functions concurrently in smp_call_function_many() x86/mm/tlb: Remove reason as argument for flush_tlb_func_local() x86/mm/tlb: Open-code on_each_cpu_cond_mask() for tlb_is_not_lazy() x86/mm/tlb: Flush remote and local TLBs concurrently x86/mm/tlb: Privatize cpu_tlbstate x86/mm/tlb: Do not make is_lazy dirty for no reason cpumask: Mark functions as pure x86/mm/tlb: Remove UV special case x86/mm/t...
2019 Jun 25
0
[PATCH 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...tlb_multi()" since it is a function. > switch_mm_irqs_off(NULL, &init_mm, NULL); > @@ -635,9 +635,12 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f, > this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen); > } > > -static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason) > +static void flush_tlb_func_local(void *info) > { > const struct flush_tlb_info *f = info; > + enum tlb_flush_reason reason; > + > + reason = (f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN : TLB_LOCAL_MM_SHOOTDOWN; Should we just add...
2019 Jun 26
2
[PATCH 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...function. Sure. > >> switch_mm_irqs_off(NULL, &init_mm, NULL); >> @@ -635,9 +635,12 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f, >> this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen); >> } >> >> -static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason) >> +static void flush_tlb_func_local(void *info) >> { >> const struct flush_tlb_info *f = info; >> + enum tlb_flush_reason reason; >> + >> + reason = (f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN : TLB_LOCAL_MM_SHOOTDOW...
2019 Jul 03
2
[PATCH v2 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...args = mcs.args; > args->op.arg2.vcpumask = to_cpumask(args->mask); > > - /* Remove us, and any offline CPUS. */ > + /* Flush locally if needed and remove us */ > + if (cpumask_test_cpu(smp_processor_id(), to_cpumask(args->mask))) { > + local_irq_disable(); > + flush_tlb_func_local(info); I think this isn't the correct function for PV guests. In fact it should be much easier: just don't clear the own cpu from the mask, that's all what's needed. The hypervisor is just fine having the current cpu in the mask and it will do the right thing. Juergen
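Juergen's point for the Xen PV path is that no guest-side local flush is needed at all: leaving the current vCPU set in the hypercall mask lets the hypervisor flush it along with the remote ones. A toy illustration of that simplification follows; xen_flush_hypercall() and the boolean mask are hypothetical stand-ins for the real multicall setup.

/* Toy sketch of the suggested Xen PV approach: keep the current vCPU in
 * the mask and let one hypercall cover local and remote vCPUs alike. */
#include <stdbool.h>
#include <stdio.h>

#define NR_VCPUS 4

static void xen_flush_hypercall(const bool *vcpumask)
{
    for (int v = 0; v < NR_VCPUS; v++)
        if (vcpumask[v])
            printf("hypervisor flushes vcpu %d\n", v);
}

static void xen_pv_flush(int this_vcpu, bool *vcpumask)
{
    /* Old approach: clear this_vcpu from the mask and flush locally in
     * the guest. Suggested approach: do nothing special here -- the
     * hypervisor does the right thing for the current vCPU too. */
    (void)this_vcpu;
    xen_flush_hypercall(vcpumask);
}

int main(void)
{
    bool mask[NR_VCPUS] = { true, true, false, true };
    xen_pv_flush(0, mask);
    return 0;
}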
2019 Jul 22
0
[PATCH v3 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...cpumask, >> * doing a speculative memory access. >> */ >> if (info->freed_tables) { >> - smp_call_function_many(cpumask, flush_tlb_func_remote, >> - (void *)info, 1); >> + __smp_call_function_many(cpumask, flush_tlb_func_remote, >> + flush_tlb_func_local, >> + (void *)info, 1); >> } else { >> /* >> * Although we could have used on_each_cpu_cond_mask(), >> @@ -737,7 +745,8 @@ void native_flush_tlb_others(const struct cpumask *cpumask, >> if (tlb_is_not_lazy(cpu)) >> __cpumask_set_cpu(cpu, c...
2019 Jul 03
0
[PATCH v2 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...>> args->op.arg2.vcpumask = to_cpumask(args->mask); >> - /* Remove us, and any offline CPUS. */ >> + /* Flush locally if needed and remove us */ >> + if (cpumask_test_cpu(smp_processor_id(), to_cpumask(args->mask))) { >> + local_irq_disable(); >> + flush_tlb_func_local(info); > > I think this isn't the correct function for PV guests. > > In fact it should be much easier: just don't clear the own cpu from the > mask, that's all what's needed. The hypervisor is just fine having the > current cpu in the mask and it will do the rig...
2019 Jul 03
1
[Xen-devel] [PATCH v2 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...;op.arg2.vcpumask = to_cpumask(args->mask); >>> - /* Remove us, and any offline CPUS. */ >>> + /* Flush locally if needed and remove us */ >>> + if (cpumask_test_cpu(smp_processor_id(), to_cpumask(args->mask))) { >>> + local_irq_disable(); >>> + flush_tlb_func_local(info); >> I think this isn't the correct function for PV guests. >> >> In fact it should be much easier: just don't clear the own cpu from the >> mask, that's all what's needed. The hypervisor is just fine having the >> current cpu in the mask and it wi...