
Displaying 9 results from an estimated 14 matches for "is_lazy".

2019 Jul 22
2
[PATCH v3 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...tat(irq_tlb_count); + + if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm)) + return; + + count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); + } + + flush_tlb_func_common(f, local, reason); +} + static bool tlb_is_not_lazy(int cpu) { return !per_cpu(cpu_tlbstate_shared.is_lazy, cpu);
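The truncated hunk above shows the receive side of the concurrent flush: the handler returns early when the request targets an mm other than the one this CPU has loaded, and tlb_is_not_lazy() reads a per-CPU lazy flag. A minimal userspace model of that logic follows; it is a sketch, not the kernel code, and the struct layout, NR_CPUS, and integer mm ids are stand-ins.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

/* Stand-in for the kernel's per-CPU TLB state. */
struct tlb_state {
    int loaded_mm;   /* id of the mm currently loaded on this CPU */
    bool is_lazy;    /* CPU is in lazy-TLB mode */
};

static struct tlb_state cpu_tlbstate[NR_CPUS];

struct flush_tlb_info {
    int mm;          /* target mm; 0 means "flush everything" */
};

/*
 * Receive side of a remote flush: if the request targets an mm other
 * than the one this CPU currently has loaded, skip the flush.
 */
static void flush_tlb_func(int cpu, const struct flush_tlb_info *f)
{
    if (f->mm && f->mm != cpu_tlbstate[cpu].loaded_mm)
        return;
    printf("cpu %d: flushing TLB for mm %d\n", cpu, f->mm);
}

static bool tlb_is_not_lazy(int cpu)
{
    return !cpu_tlbstate[cpu].is_lazy;
}

int main(void)
{
    struct flush_tlb_info f = { .mm = 1 };

    cpu_tlbstate[0] = (struct tlb_state){ .loaded_mm = 1, .is_lazy = false };
    cpu_tlbstate[1] = (struct tlb_state){ .loaded_mm = 2, .is_lazy = true };

    for (int cpu = 0; cpu < 2; cpu++)
        if (tlb_is_not_lazy(cpu))
            flush_tlb_func(cpu, &f);
    return 0;
}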
2019 Jul 22
0
[PATCH v3 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...>mm != this_cpu_read(cpu_tlbstate.loaded_mm)) > + return; > + > + count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); > + } > + > + flush_tlb_func_common(f, local, reason); > +} > + > static bool tlb_is_not_lazy(int cpu) > { > return !per_cpu(cpu_tlbstate_shared.is_lazy, cpu); Nice! I will add it on top, if you don't mind (instead of squashing it). The original decision to have local/remote functions was mostly to provide generality. I would change the last argument of __smp_call_function_many() from 'wait' to 'flags' that would indicate whether to run the fun...
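The reply above proposes changing the last argument of __smp_call_function_many() from 'wait' to 'flags'. A rough sketch of what a flags-based signature could look like is below; the SCF_* bit names and the reduced parameter list are illustrative assumptions, not the kernel's actual API.

#include <stdio.h>

/* Hypothetical flag bits replacing the boolean 'wait' parameter. */
#define SCF_WAIT      (1u << 0)  /* block until all handlers complete */
#define SCF_RUN_LOCAL (1u << 1)  /* also run the function on this CPU */

typedef void (*smp_call_func_t)(void *info);

/* Sketch only: the cpumask parameter is omitted for brevity. */
static void __smp_call_function_many(smp_call_func_t func, void *info,
                                     unsigned int flags)
{
    if (flags & SCF_RUN_LOCAL)
        func(info);
    /* remote IPIs would be sent here; SCF_WAIT would spin until done */
}

static void hello(void *info)
{
    printf("ran with arg \"%s\"\n", (const char *)info);
}

int main(void)
{
    __smp_call_function_many(hello, "local", SCF_WAIT | SCF_RUN_LOCAL);
    return 0;
}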
2019 Jul 19
5
[PATCH v3 0/9] x86: Concurrent TLB flushes
...ons concurrently in smp_call_function_many() x86/mm/tlb: Remove reason as argument for flush_tlb_func_local() x86/mm/tlb: Open-code on_each_cpu_cond_mask() for tlb_is_not_lazy() x86/mm/tlb: Flush remote and local TLBs concurrently x86/mm/tlb: Privatize cpu_tlbstate x86/mm/tlb: Do not make is_lazy dirty for no reason cpumask: Mark functions as pure x86/mm/tlb: Remove UV special case x86/mm/tlb: Remove unnecessary uses of the inline keyword arch/x86/hyperv/mmu.c | 10 +- arch/x86/include/asm/paravirt.h | 6 +- arch/x86/include/asm/paravirt_types.h | 4 +- ar...
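One patch in the series above, "x86/mm/tlb: Do not make is_lazy dirty for no reason", names a general technique: check a frequently-read flag before storing to it, so a redundant write does not dirty the cacheline that other CPUs keep reading. A minimal standalone illustration, with a plain bool standing in for the per-CPU variable:

#include <stdbool.h>

static bool is_lazy;

/*
 * Storing the same value still dirties the cacheline and forces other
 * CPUs to re-fetch it; reading first turns a redundant store into a
 * cheap shared read.
 */
static void set_lazy(bool lazy)
{
    if (is_lazy != lazy)
        is_lazy = lazy;
}

int main(void)
{
    set_lazy(true);
    set_lazy(true);   /* second call is now a read, not a write */
    return 0;
}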
2019 Jun 13
4
[PATCH 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...ush_tlb_func_common(f, true, reason); } @@ -655,14 +658,21 @@ static void flush_tlb_func_remote(void *info) flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN); } -static bool tlb_is_not_lazy(int cpu, void *data) +static inline bool tlb_is_not_lazy(int cpu) { return !per_cpu(cpu_tlbstate.is_lazy, cpu); } -void native_flush_tlb_others(const struct cpumask *cpumask, - const struct flush_tlb_info *info) +static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask); + +void native_flush_tlb_multi(const struct cpumask *cpumask, + const struct flush_tlb_info *info) { + /* + * Do accounting...
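The hunk above adds a per-CPU scratch cpumask (DEFINE_PER_CPU(cpumask_t, flush_tlb_mask)) so native_flush_tlb_multi() can drop lazy CPUs from the caller's mask before sending flush IPIs to local and remote CPUs in one pass. A compact userspace model of that send side follows; the bitmask stands in for cpumask_t, printf stands in for the IPI, and all names and sizes are assumptions.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

typedef unsigned long cpumask_t;   /* one bit per CPU */

static bool per_cpu_is_lazy[NR_CPUS];

/*
 * Model of the send side: copy the requested mask into a scratch mask,
 * filter out CPUs in lazy-TLB mode, then "send" the flush to everything
 * that remains.
 */
static void flush_tlb_multi(cpumask_t requested)
{
    cpumask_t mask = 0;

    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        if ((requested & (1UL << cpu)) && !per_cpu_is_lazy[cpu])
            mask |= 1UL << cpu;

    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        if (mask & (1UL << cpu))
            printf("flush IPI -> cpu %d\n", cpu);
}

int main(void)
{
    per_cpu_is_lazy[2] = true;                      /* cpu 2 is skipped */
    flush_tlb_multi((1UL << 1) | (1UL << 2) | (1UL << 3));
    return 0;
}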
2019 Jun 25
0
[PATCH 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...> @@ -655,14 +658,21 @@ static void flush_tlb_func_remote(void *info) > flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN); > } > > -static bool tlb_is_not_lazy(int cpu, void *data) > +static inline bool tlb_is_not_lazy(int cpu) > { > return !per_cpu(cpu_tlbstate.is_lazy, cpu); > } Nit: the compiler will probably inline this sucker anyway. So, for these kinds of patches, I'd resist the urge to do these kinds of tweaks, especially since it starts to hide the important change on the line. > -void native_flush_tlb_others(const struct cpumask *cpumask, &g...
2019 Jun 26
2
[PATCH 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...1 @@ static void flush_tlb_func_remote(void *info) >> flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN); >> } >> >> -static bool tlb_is_not_lazy(int cpu, void *data) >> +static inline bool tlb_is_not_lazy(int cpu) >> { >> return !per_cpu(cpu_tlbstate.is_lazy, cpu); >> } > > Nit: the compiler will probably inline this sucker anyway. So, for > these kinds of patches, I'd resist the urge to do these kinds of tweaks, > especially since it starts to hide the important change on the line. Of course. > >> -void native_flush...
2019 May 31
2
[RFC PATCH v2 04/12] x86/mm/tlb: Flush remote and local TLBs concurrently
...ush_tlb_func_common(f, true, reason); } @@ -654,14 +657,30 @@ static void flush_tlb_func_remote(void *info) flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN); } -static bool tlb_is_not_lazy(int cpu, void *data) +static inline bool tlb_is_not_lazy(int cpu) { return !per_cpu(cpu_tlbstate.is_lazy, cpu); } -void native_flush_tlb_others(const struct cpumask *cpumask, - const struct flush_tlb_info *info) +static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask); + +void native_flush_tlb_multi(const struct cpumask *cpumask, + const struct flush_tlb_info *info) { + /* + * native_flush_...
2019 May 25
3
[RFC PATCH 5/6] x86/mm/tlb: Flush remote and local TLBs concurrently
...ush_tlb_func_common(f, true, reason); } @@ -654,14 +657,30 @@ static void flush_tlb_func_remote(void *info) flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN); } -static bool tlb_is_not_lazy(int cpu, void *data) +static inline bool tlb_is_not_lazy(int cpu) { return !per_cpu(cpu_tlbstate.is_lazy, cpu); } -void native_flush_tlb_others(const struct cpumask *cpumask, - const struct flush_tlb_info *info) +static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask); + +void native_flush_tlb_multi(const struct cpumask *cpumask, + const struct flush_tlb_info *info) { + /* + * native_flush_...
2019 Jul 02
2
[PATCH v2 0/9] x86: Concurrent TLB flushes
...ons concurrently in smp_call_function_many() x86/mm/tlb: Remove reason as argument for flush_tlb_func_local() x86/mm/tlb: Open-code on_each_cpu_cond_mask() for tlb_is_not_lazy() x86/mm/tlb: Flush remote and local TLBs concurrently x86/mm/tlb: Privatize cpu_tlbstate x86/mm/tlb: Do not make is_lazy dirty for no reason cpumask: Mark functions as pure x86/mm/tlb: Remove UV special case x86/mm/tlb: Remove unnecessary uses of the inline keyword arch/x86/hyperv/mmu.c | 13 ++- arch/x86/include/asm/paravirt.h | 6 +- arch/x86/include/asm/paravirt_types.h | 4 +- a...