Displaying 8 results from an estimated 8 matches for "tlb_remote_shootdown".
2019 Jul 22 · 2 · [PATCH v3 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...local_flush_tlb() __flush_tlb()
Index: linux-2.6/arch/x86/mm/tlb.c
===================================================================
--- linux-2.6.orig/arch/x86/mm/tlb.c
+++ linux-2.6/arch/x86/mm/tlb.c
@@ -659,6 +659,27 @@ static void flush_tlb_func_remote(void *
flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
}
+static void flush_tlb_func(void *info)
+{
+ const struct flush_tlb_info *f = info;
+ enum tlb_flush_reason reason = TLB_REMOTE_SHOOTDOWN;
+ bool local = false;
+
+ if (f->cpu == smp_processor_id()) {
+ local = true;
+ reason = (f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN : TLB_LOCAL_MM_SHO...
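The snippet is cut off mid-statement. Reading it together with the
(f, local/remote, reason) calling convention of flush_tlb_func_common()
shown above, a plausible completion of the unified callback is the
following; this is inferred from the visible lines, not the verbatim
patch:

    static void flush_tlb_func(void *info)
    {
            const struct flush_tlb_info *f = info;
            enum tlb_flush_reason reason = TLB_REMOTE_SHOOTDOWN;
            bool local = false;

            /* The initiating CPU runs the same callback as the remote
             * CPUs, so detect the local case and pick the matching
             * accounting reason. */
            if (f->cpu == smp_processor_id()) {
                    local = true;
                    reason = (f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN
                                             : TLB_LOCAL_MM_SHOOTDOWN;
            }

            flush_tlb_func_common(f, local, reason);
    }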
2019 Jul 22 · 0 · [PATCH v3 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...()
> Index: linux-2.6/arch/x86/mm/tlb.c
> ===================================================================
> --- linux-2.6.orig/arch/x86/mm/tlb.c
> +++ linux-2.6/arch/x86/mm/tlb.c
> @@ -659,6 +659,27 @@ static void flush_tlb_func_remote(void *
> flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
> }
>
> +static void flush_tlb_func(void *info)
> +{
> + const struct flush_tlb_info *f = info;
> + enum tlb_flush_reason reason = TLB_REMOTE_SHOOTDOWN;
> + bool local = false;
> +
> + if (f->cpu == smp_processor_id()) {
> + local = true;
> + reason = (f->...
2019 Jun 13 · 4 · [PATCH 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...const struct flush_tlb_info *f = info;
+ enum tlb_flush_reason reason;
+
+ reason = (f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN : TLB_LOCAL_MM_SHOOTDOWN;
flush_tlb_func_common(f, true, reason);
}
@@ -655,14 +658,21 @@ static void flush_tlb_func_remote(void *info)
flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
}
-static bool tlb_is_not_lazy(int cpu, void *data)
+static inline bool tlb_is_not_lazy(int cpu)
{
return !per_cpu(cpu_tlbstate.is_lazy, cpu);
}
-void native_flush_tlb_others(const struct cpumask *cpumask,
- const struct flush_tlb_info *info)
+static DEFINE_PER_CPU(cpumask_t, flus...
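The hunk above drops the unused void *data argument from
tlb_is_not_lazy() and begins a DEFINE_PER_CPU(cpumask_t, ...) declaration
before the snippet is truncated, which suggests the set of CPUs to flush
is assembled directly into a preallocated per-CPU cpumask rather than
through an on_each_cpu_cond()-style callback. A minimal sketch of that
pattern (build_flush_mask() is a hypothetical name, not taken from the
series):

    static void build_flush_mask(const struct cpumask *cpumask,
                                 struct cpumask *mask)
    {
            int cpu;

            /* Collect only the CPUs that are not in lazy-TLB mode;
             * lazy CPUs can defer the flush to their next switch back
             * to the mm. */
            cpumask_clear(mask);
            for_each_cpu(cpu, cpumask)
                    if (tlb_is_not_lazy(cpu))
                            cpumask_set_cpu(cpu, mask);
    }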
2019 Jul 19 · 5 · [PATCH v3 0/9] x86: Concurrent TLB flushes
[ Cover-letter is identical to v2, including benchmark results,
excluding the change log. ]
Currently, local and remote TLB flushes are not performed concurrently,
which introduces unnecessary overhead: each INVLPG can take hundreds of
cycles. This patch-set allows TLB flushes to be run concurrently: first
request the remote CPUs to initiate the flush, then run it locally, and
finally wait for
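In outline, the ordering the cover letter describes looks like this;
send_flush_ipis() and wait_for_flush_done() are hypothetical placeholders
for the series' actual SMP plumbing:

    static void flush_tlb_concurrent(const struct cpumask *cpumask,
                                     const struct flush_tlb_info *info)
    {
            /* 1. Ask the remote CPUs to start flushing, without waiting. */
            send_flush_ipis(cpumask, flush_tlb_func, (void *)info);

            /* 2. Flush locally while the remote flushes run in parallel. */
            flush_tlb_func((void *)info);

            /* 3. Wait only now, so local and remote work overlaps. */
            wait_for_flush_done(cpumask);
    }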
2019 Jun 25 · 0 · [PATCH 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...y
it like this, but seems like it would be nicer and easier to track down
the origins of these things if we did this at the caller.
> flush_tlb_func_common(f, true, reason);
> }
> @@ -655,14 +658,21 @@ static void flush_tlb_func_remote(void *info)
> flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
> }
>
> -static bool tlb_is_not_lazy(int cpu, void *data)
> +static inline bool tlb_is_not_lazy(int cpu)
> {
> return !per_cpu(cpu_tlbstate.is_lazy, cpu);
> }
Nit: the compiler will probably inline this sucker anyway. So, for
these kinds of patches, I'd resist th...
2019 Jun 26 · 2 · [PATCH 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...e same
cacheline that holds call_function_data. Increasing the size of
flush_tlb_info for no good reason will not help?
>> flush_tlb_func_common(f, true, reason);
>> }
>> @@ -655,14 +658,21 @@ static void flush_tlb_func_remote(void *info)
>> flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
>> }
>>
>> -static bool tlb_is_not_lazy(int cpu, void *data)
>> +static inline bool tlb_is_not_lazy(int cpu)
>> {
>> return !per_cpu(cpu_tlbstate.is_lazy, cpu);
>> }
>
> Nit: the compiler will probably inline this sucker anyway. So, for
> the...
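The objection above is about keeping struct flush_tlb_info small enough
to share a cacheline sensibly. One way to stop such growth from creeping
in unnoticed (a hypothetical guard, not something posted in the thread)
is a compile-time size check:

    static void __init check_flush_tlb_info(void)
    {
            /* Fail the build if flush_tlb_info outgrows one cacheline. */
            BUILD_BUG_ON(sizeof(struct flush_tlb_info) > SMP_CACHE_BYTES);
    }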
2019 May 31 · 2 · [RFC PATCH v2 04/12] x86/mm/tlb: Flush remote and local TLBs concurrently
...const struct flush_tlb_info *f = info;
+ enum tlb_flush_reason reason;
+
+ reason = (f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN : TLB_LOCAL_MM_SHOOTDOWN;
flush_tlb_func_common(f, true, reason);
}
@@ -654,14 +657,30 @@ static void flush_tlb_func_remote(void *info)
flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
}
-static bool tlb_is_not_lazy(int cpu, void *data)
+static inline bool tlb_is_not_lazy(int cpu)
{
return !per_cpu(cpu_tlbstate.is_lazy, cpu);
}
-void native_flush_tlb_others(const struct cpumask *cpumask,
- const struct flush_tlb_info *info)
+static DEFINE_PER_CPU(cpumask_t, flus...
2019 May 25 · 3 · [RFC PATCH 5/6] x86/mm/tlb: Flush remote and local TLBs concurrently
...const struct flush_tlb_info *f = info;
+ enum tlb_flush_reason reason;
+
+ reason = (f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN : TLB_LOCAL_MM_SHOOTDOWN;
flush_tlb_func_common(f, true, reason);
}
@@ -654,14 +657,30 @@ static void flush_tlb_func_remote(void *info)
flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
}
-static bool tlb_is_not_lazy(int cpu, void *data)
+static inline bool tlb_is_not_lazy(int cpu)
{
return !per_cpu(cpu_tlbstate.is_lazy, cpu);
}
-void native_flush_tlb_others(const struct cpumask *cpumask,
- const struct flush_tlb_info *info)
+static DEFINE_PER_CPU(cpumask_t, flus...