Displaying 20 results from an estimated 57 matches for "cpu_tlbstate".
2009 Aug 10
1
[PATCH 1/2] export cpu_tlbstate to modules
The vhost net module wants to copy to/from user space from a kernel thread,
which needs switch_mm (like what fs/aio does).
Export cpu_tlbstate to make this possible.
Signed-off-by: Michael S. Tsirkin <mst at redhat.com>
---
arch/x86/mm/tlb.c | 1 +
1 files changed, 1 insertions(+), 0 deletions(-)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 821e970..e33a5f0 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -...
2009 Aug 10
1
[PATCH 1/2] export cpu_tlbstate to modules
The vhost net module wants to copy to/from user space from a kernel thread,
which needs switch_mm (like what fs/aio does).
Export cpu_tlbstate to make this possible.
Signed-off-by: Michael S. Tsirkin <mst at redhat.com>
---
arch/x86/mm/tlb.c | 1 +
1 files changed, 1 insertions(+), 0 deletions(-)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 821e970..e33a5f0 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -...
2018 Jan 04
2
possible issue with nvidia and new patches?
Twitter user stintel, in this thread:
https://twitter.com/stintel/status/948499157282623488
mentions a possible problem with the new patches and the
NVIDIA driver:
"As if the @Intel bug isn't bad enough, #KPTI renders @nvidia driver
incompatible due to GPL-only symbol 'cpu_tlbstate'. #epicfail"
Also:
https://twitter.com/tomasz_gwozdz/status/948590364679655429
https://twitter.com/BitsAndChipsEng/status/948578609761054721
Cheers,
Zube
2007 Apr 18
4
paravirt repo rebased to 2.6.21-rc6-mm1
Seems to work OK for native and Xen. I had to play a bit with the
paravirt-sched-clock patch to deal with the VMI changes. Zach, can you
check that it still works?
Thanks,
J
2007 Apr 18
4
paravirt repo rebased to 2.6.21-rc6-mm1
Seems to work OK for native and Xen. I had to play a bit with the
paravirt-sched-clock patch to deal with the VMI changes. Zach, can you
check that it still works?
Thanks,
J
2019 Jun 13
4
[PATCH 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...re, with native_flush_tlb_others skipping
+ * This should be rare, with native_flush_tlb_multi skipping
* IPIs to lazy TLB mode CPUs.
*/
switch_mm_irqs_off(NULL, &init_mm, NULL);
@@ -635,9 +635,12 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
}
-static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason)
+static void flush_tlb_func_local(void *info)
{
const struct flush_tlb_info *f = info;
+ enum tlb_flush_reason reason;
+
+ reason = (f->mm == NULL) ? TLB_LOCAL_SHO...
2019 Jun 13
4
[PATCH 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...re, with native_flush_tlb_others skipping
+ * This should be rare, with native_flush_tlb_multi skipping
* IPIs to lazy TLB mode CPUs.
*/
switch_mm_irqs_off(NULL, &init_mm, NULL);
@@ -635,9 +635,12 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
}
-static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason)
+static void flush_tlb_func_local(void *info)
{
const struct flush_tlb_info *f = info;
+ enum tlb_flush_reason reason;
+
+ reason = (f->mm == NULL) ? TLB_LOCAL_SHO...
2019 Jul 22
2
[PATCH v3 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...sh_reason reason = TLB_REMOTE_SHOOTDOWN;
+ bool local = false;
+
+ if (f->cpu == smp_processor_id()) {
+ local = true;
+ reason = (f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN : TLB_LOCAL_MM_SHOOTDOWN;
+ } else {
+ inc_irq_stat(irq_tlb_count);
+
+ if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
+ return;
+
+ count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+ }
+
+ flush_tlb_func_common(f, local, reason);
+}
+
static bool tlb_is_not_lazy(int cpu)
{
return !per_cpu(cpu_tlbstate_shared.is_lazy, cpu);
2019 Jul 22
2
[PATCH v3 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...sh_reason reason = TLB_REMOTE_SHOOTDOWN;
+ bool local = false;
+
+ if (f->cpu == smp_processor_id()) {
+ local = true;
+ reason = (f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN : TLB_LOCAL_MM_SHOOTDOWN;
+ } else {
+ inc_irq_stat(irq_tlb_count);
+
+ if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
+ return;
+
+ count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+ }
+
+ flush_tlb_func_common(f, local, reason);
+}
+
static bool tlb_is_not_lazy(int cpu)
{
return !per_cpu(cpu_tlbstate_shared.is_lazy, cpu);
2019 Jul 02
0
[PATCH v2 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...e, with native_flush_tlb_others skipping
+ * This should be rare, with native_flush_tlb_multi() skipping
* IPIs to lazy TLB mode CPUs.
*/
switch_mm_irqs_off(NULL, &init_mm, NULL);
@@ -635,7 +635,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
}
-static void flush_tlb_func_local(void *info)
+static void __flush_tlb_func_local(void *info)
{
const struct flush_tlb_info *f = info;
enum tlb_flush_reason reason;
@@ -645,6 +645,11 @@ static void flush_tlb_func_local(void *info)
flush_tlb_fun...
2019 Jul 02
2
[PATCH v2 0/9] x86: Concurrent TLB flushes
....org
Nadav Amit (9):
smp: Run functions concurrently in smp_call_function_many()
x86/mm/tlb: Remove reason as argument for flush_tlb_func_local()
x86/mm/tlb: Open-code on_each_cpu_cond_mask() for tlb_is_not_lazy()
x86/mm/tlb: Flush remote and local TLBs concurrently
x86/mm/tlb: Privatize cpu_tlbstate
x86/mm/tlb: Do not make is_lazy dirty for no reason
cpumask: Mark functions as pure
x86/mm/tlb: Remove UV special case
x86/mm/tlb: Remove unnecessary uses of the inline keyword
arch/x86/hyperv/mmu.c | 13 ++-
arch/x86/include/asm/paravirt.h | 6 +-
arch/x86/includ...
2019 Jul 19
5
[PATCH v3 0/9] x86: Concurrent TLB flushes
....org
Nadav Amit (9):
smp: Run functions concurrently in smp_call_function_many()
x86/mm/tlb: Remove reason as argument for flush_tlb_func_local()
x86/mm/tlb: Open-code on_each_cpu_cond_mask() for tlb_is_not_lazy()
x86/mm/tlb: Flush remote and local TLBs concurrently
x86/mm/tlb: Privatize cpu_tlbstate
x86/mm/tlb: Do not make is_lazy dirty for no reason
cpumask: Mark functions as pure
x86/mm/tlb: Remove UV special case
x86/mm/tlb: Remove unnecessary uses of the inline keyword
arch/x86/hyperv/mmu.c | 10 +-
arch/x86/include/asm/paravirt.h | 6 +-
arch/x86/include...
2019 Jun 25
0
[PATCH 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...s.
> */
Nit, since we're messing with this, it can now be
"native_flush_tlb_multi()" since it is a function.
> switch_mm_irqs_off(NULL, &init_mm, NULL);
> @@ -635,9 +635,12 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
> this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
> }
>
> -static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason)
> +static void flush_tlb_func_local(void *info)
> {
> const struct flush_tlb_info *f = info;
> + enum tlb_flush_reason reason;
> +
>...
2018 Jan 04
0
possible issue with nvidia and new patches?
...n this thread:
>
> https://twitter.com/stintel/status/948499157282623488
>
> mentions a possible problem with the new patches and the
> nvidia driver:
>
> "As if the @Intel bug isn't bad enough, #KPTI renders @nvidia driver
> incompatible due to GPL-only symbol 'cpu_tlbstate'. #epicfail"
>
> Also:
>
> https://twitter.com/tomasz_gwozdz/status/948590364679655429
>
> https://twitter.com/BitsAndChipsEng/status/948578609761054721
I've seen no obvious problems with kernel-3.10.0-693.11.6.el7.x86_64 and
nvidia-x11-drv-384.98-2.el7.elrepo.x86_64...
2009 Aug 10
0
[PATCH 0/2] vhost: a kernel-level virtio server
...ome more detailed description attached to the patch itself.
The patches are against 2.6.31-rc4. I'd like them to go into linux-next
and down the road 2.6.32 if possible. Please comment.
Userspace bits using this driver will be posted to kvm at vger shortly.
Michael S. Tsirkin (2):
export cpu_tlbstate to modules
vhost_net: a kernel-level virtio server
MAINTAINERS | 10 +
arch/x86/kvm/Kconfig | 1 +
arch/x86/mm/tlb.c | 1 +
drivers/Makefile | 1 +
drivers/block/virtio_blk.c | 3 +
drivers/vhost/Kconfig | 11 +
drivers/vhost/Makefile...
2009 Aug 10
0
[PATCH 0/2] vhost: a kernel-level virtio server
...ome more detailed description attached to the patch itself.
The patches are against 2.6.31-rc4. I'd like them to go into linux-next
and down the road 2.6.32 if possible. Please comment.
Userspace bits using this driver will be posted to kvm at vger shortly.
Michael S. Tsirkin (2):
export cpu_tlbstate to modules
vhost_net: a kernel-level virtio server
MAINTAINERS | 10 +
arch/x86/kvm/Kconfig | 1 +
arch/x86/mm/tlb.c | 1 +
drivers/Makefile | 1 +
drivers/block/virtio_blk.c | 3 +
drivers/vhost/Kconfig | 11 +
drivers/vhost/Makefile...
2019 Jul 22
0
[PATCH v3 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...+ bool local = false;
> +
> + if (f->cpu == smp_processor_id()) {
> + local = true;
> + reason = (f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN : TLB_LOCAL_MM_SHOOTDOWN;
> + } else {
> + inc_irq_stat(irq_tlb_count);
> +
> + if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
> + return;
> +
> + count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
> + }
> +
> + flush_tlb_func_common(f, local, reason);
> +}
> +
> static bool tlb_is_not_lazy(int cpu)
> {
> return !per_cpu(cpu_tlbstate_shared.is_lazy, cpu);
Nice! I will add it...
2019 Jun 26
2
[PATCH 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...we're messing with this, it can now be
> "native_flush_tlb_multi()" since it is a function.
Sure.
>
>> switch_mm_irqs_off(NULL, &init_mm, NULL);
>> @@ -635,9 +635,12 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
>> this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
>> }
>>
>> -static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason)
>> +static void flush_tlb_func_local(void *info)
>> {
>> const struct flush_tlb_info *f = info;
>> + enum tlb_flush_rea...
2019 Jun 26
2
[PATCH 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...we're messing with this, it can now be
> "native_flush_tlb_multi()" since it is a function.
Sure.
>
>> switch_mm_irqs_off(NULL, &init_mm, NULL);
>> @@ -635,9 +635,12 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
>> this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
>> }
>>
>> -static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason)
>> +static void flush_tlb_func_local(void *info)
>> {
>> const struct flush_tlb_info *f = info;
>> + enum tlb_flush_rea...
2019 Jul 19
0
[PATCH v3 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...mask, flush_tlb_func_remote,
+ flush_tlb_func_local,
(void *)info, 1);
}
}
@@ -818,16 +827,20 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
info = get_flush_tlb_info(mm, start, end, stride_shift, freed_tables,
new_tlb_gen);
- if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
+ /*
+ * flush_tlb_multi() is not optimized for the common case in which only
+ * a local TLB flush is needed. Optimize this use-case by calling
+ * flush_tlb_func_local() directly in this case.
+ */
+ if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) {
+ flush_tlb_multi(...