search for: tlb_gen

Displaying 9 unique results from an estimated 13 matches for "tlb_gen".

2019 Jun 13
4
[PATCH 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...kipping
+	 * This should be rare, with native_flush_tlb_multi skipping
 	 * IPIs to lazy TLB mode CPUs.
 	 */
 	switch_mm_irqs_off(NULL, &init_mm, NULL);
@@ -635,9 +635,12 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
 	this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
 }

-static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason)
+static void flush_tlb_func_local(void *info)
 {
 	const struct flush_tlb_info *f = info;
+	enum tlb_flush_reason reason;
+
+	reason = (f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN : TLB_LOCAL_MM_SHOOTDO...
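For context on why the hunk above changes the signature: callbacks dispatched through the kernel's SMP cross-call machinery must match the single-argument smp_call_func_t type, so the flush reason can no longer travel as a second parameter and is instead derived from the request itself. A minimal sketch of the resulting shape (the typedef is the one in include/linux/smp.h; the body paraphrases the hunk, with the truncated TLB_LOCAL_MM_SHOOTDOWN enumerator spelled out):

typedef void (*smp_call_func_t)(void *info);	/* include/linux/smp.h */

static void flush_tlb_func_local(void *info)
{
	const struct flush_tlb_info *f = info;

	/* A NULL mm means a global shootdown; otherwise a per-mm one. */
	enum tlb_flush_reason reason =
		(f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN : TLB_LOCAL_MM_SHOOTDOWN;

	flush_tlb_func_common(f, true, reason);
}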
2019 Jun 25
0
[PATCH 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...re messing with this, it can now be "native_flush_tlb_multi()" since it is a function.

> 	switch_mm_irqs_off(NULL, &init_mm, NULL);
> @@ -635,9 +635,12 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
> 	this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
> }
>
> -static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason)
> +static void flush_tlb_func_local(void *info)
> {
> 	const struct flush_tlb_info *f = info;
> +	enum tlb_flush_reason reason;
> +
> +	reason = (f->mm == NULL)...
2019 Jun 26
2
[PATCH 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...an now be
> "native_flush_tlb_multi()" since it is a function.

Sure.

>
>> 	switch_mm_irqs_off(NULL, &init_mm, NULL);
>> @@ -635,9 +635,12 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
>> 	this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
>> }
>>
>> -static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason)
>> +static void flush_tlb_func_local(void *info)
>> {
>> 	const struct flush_tlb_info *f = info;
>> +	enum tlb_flush_reason reason;
>> +
>>...
2019 May 31
2
[RFC PATCH v2 04/12] x86/mm/tlb: Flush remote and local TLBs concurrently
...kipping
+	 * This should be rare, with native_flush_tlb_multi skipping
 	 * IPIs to lazy TLB mode CPUs.
 	 */
 	switch_mm_irqs_off(NULL, &init_mm, NULL);
@@ -634,9 +634,12 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
 	this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
 }

-static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason)
+static void flush_tlb_func_local(void *info)
 {
 	const struct flush_tlb_info *f = info;
+	enum tlb_flush_reason reason;
+
+	reason = (f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN : TLB_LOCAL_MM_SHOOTDO...
2019 Jul 02
0
[PATCH v2 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...*vma, unsigned long a)
 {
 	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
 }

-void native_flush_tlb_others(const struct cpumask *cpumask,
+void native_flush_tlb_multi(const struct cpumask *cpumask,
 			    const struct flush_tlb_info *info);

 static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
@@ -593,8 +594,8 @@ static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
 extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);

 #ifndef CONFIG_PARAVIRT
-#define flush_tlb_others(mask, info)		\
-	native_flush_tlb_others(mask, in...
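The hunk is cut off mid-macro; judging from the removed lines, the non-paravirt case presumably becomes a direct alias for the renamed native helper (a reconstruction, not a verbatim quote of the patch):

#ifndef CONFIG_PARAVIRT
/* Without CONFIG_PARAVIRT, flush_tlb_multi() dispatches straight to
 * the native implementation; under paravirt it would instead go
 * through pv_ops so a hypervisor can intercept remote flushes. */
#define flush_tlb_multi(mask, info)	\
	native_flush_tlb_multi(mask, info)
#endif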
2019 May 25
3
[RFC PATCH 5/6] x86/mm/tlb: Flush remote and local TLBs concurrently
...kipping
+	 * This should be rare, with native_flush_tlb_multi skipping
 	 * IPIs to lazy TLB mode CPUs.
 	 */
 	switch_mm_irqs_off(NULL, &init_mm, NULL);
@@ -634,9 +634,12 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
 	this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
 }

-static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason)
+static void flush_tlb_func_local(void *info)
 {
 	const struct flush_tlb_info *f = info;
+	enum tlb_flush_reason reason;
+
+	reason = (f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN : TLB_LOCAL_MM_SHOOTDO...
2019 Jul 02
2
[PATCH v2 0/9] x86: Concurrent TLB flushes
Currently, local and remote TLB flushes are not performed concurrently, which introduces unnecessary overhead - each INVLPG can take 100s of cycles. This patch-set allows TLB flushes to be run concurrently: first request the remote CPUs to initiate the flush, then run it locally, and finally wait for the remote CPUs to finish their work. In addition, there are various small optimizations to avoid
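The three steps described in the cover letter map onto a simple send/local/wait structure. A hypothetical sketch of that pattern (the IPI helpers send_flush_ipis_async() and wait_for_flush_acks() are illustrative placeholders, not the series' actual API):

static void flush_tlb_multi_sketch(const struct cpumask *cpumask,
				   const struct flush_tlb_info *info)
{
	/* 1. Kick the remote CPUs without waiting for completion. */
	send_flush_ipis_async(cpumask, flush_tlb_func_remote, (void *)info);

	/* 2. Flush locally while the remote flushes are in flight. */
	if (cpumask_test_cpu(smp_processor_id(), cpumask))
		flush_tlb_func_local((void *)info);

	/* 3. Only now wait for every remote CPU to acknowledge. */
	wait_for_flush_acks(cpumask);
}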
2019 Jul 19
0
[PATCH v3 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...uct *vma, unsigned long a)
 {
 	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
 }

-void native_flush_tlb_others(const struct cpumask *cpumask,
+void native_flush_tlb_multi(const struct cpumask *cpumask,
 			    const struct flush_tlb_info *info);

 static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
@@ -593,8 +593,8 @@ static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
 extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);

 #ifndef CONFIG_PARAVIRT
-#define flush_tlb_others(mask, info)		\
-	native_flush_tlb_others(mask, in...
2019 Jul 19
5
[PATCH v3 0/9] x86: Concurrent TLB flushes
[ Cover-letter is identical to v2, including benchmark results, excluding the change log. ] Currently, local and remote TLB flushes are not performed concurrently, which introduces unnecessary overhead - each INVLPG can take 100s of cycles. This patch-set allows TLB flushes to be run concurrently: first request the remote CPUs to initiate the flush, then run it locally, and finally wait for