search for: native_flush_tlb_others

Displaying 14 results from an estimated 62 matches for "native_flush_tlb_others".

2019 Jun 13
4
[PATCH 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...bflush.h
@@ -569,6 +569,9 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
 	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
 }
 
+void native_flush_tlb_multi(const struct cpumask *cpumask,
+			    const struct flush_tlb_info *info);
+
 void native_flush_tlb_others(const struct cpumask *cpumask,
 			     const struct flush_tlb_info *info);
 
@@ -593,6 +596,9 @@ static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
 extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
 
 #ifndef CONFIG_PARAVIRT
+#define flush_tlb...
2019 Jul 19
0
[PATCH v3 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...
-	trace_hyperv_mmu_flush_tlb_others(cpus, info);
+	trace_hyperv_mmu_flush_tlb_multi(cpus, info);
 
 	if (!hv_hypercall_pg)
 		goto do_native;
@@ -156,7 +156,7 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
 	if (!(status & HV_HYPERCALL_RESULT_MASK))
 		return;
 
 do_native:
-	native_flush_tlb_others(cpus, info);
+	native_flush_tlb_multi(cpus, info);
 }
 
 static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
@@ -231,6 +231,6 @@ void hyperv_setup_mmu_ops(void)
 		return;
 
 	pr_info("Using hypercall for remote TLB flush\n");
-	pv_ops.mmu.flush_tlb_others = hyperv_flush_tlb...
2019 Jun 25
0
[PATCH 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
> 	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
> }
>
> +void native_flush_tlb_multi(const struct cpumask *cpumask,
> +			    const struct flush_tlb_info *info);
> +
> void native_flush_tlb_others(const struct cpumask *cpumask,
> 			     const struct flush_tlb_info *info);
>
> @@ -593,6 +596,9 @@ static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
> extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
>
> #ifndef CONF...
2019 Jul 02
0
[PATCH v2 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...processor_id(), cpus))
+		flush_tlb_func_local(info);
+
 	flush_pcpu = (struct hv_tlb_flush **)
 		     this_cpu_ptr(hyperv_pcpu_input_arg);
@@ -156,7 +159,7 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
 	if (!(status & HV_HYPERCALL_RESULT_MASK))
 		return;
 
 do_native:
-	native_flush_tlb_others(cpus, info);
+	native_flush_tlb_multi(cpus, info);
 }
 
 static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
@@ -231,6 +234,6 @@ void hyperv_setup_mmu_ops(void)
 		return;
 
 	pr_info("Using hypercall for remote TLB flush\n");
-	pv_ops.mmu.flush_tlb_others = hyperv_flush_tlb...
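The Hyper-V hunk above follows the fallback shape used throughout this series: try the hypervisor-assisted flush first, and fall back to the native path (now native_flush_tlb_multi) when the hypercall machinery is unavailable. Below is a minimal user-space sketch of that shape; hv_flush, try_hypercall and the stub structs are made-up stand-ins, not the kernel's code.

/* Sketch of the hypercall-with-native-fallback pattern; all names and
 * types here are illustrative assumptions, not kernel definitions. */
#include <stdbool.h>
#include <stdio.h>

struct cpumask { unsigned long bits; };
struct flush_tlb_info { unsigned long start, end; };

static void native_flush_tlb_multi(const struct cpumask *cpus,
				   const struct flush_tlb_info *info)
{
	printf("native flush: IPI the CPUs in the mask\n");
}

/* Pretend hypercall; returns false when the hypervisor cannot help
 * (e.g. the hypercall page was never set up). */
static bool try_hypercall(const struct cpumask *cpus,
			  const struct flush_tlb_info *info)
{
	return false;
}

static void hv_flush(const struct cpumask *cpus,
		     const struct flush_tlb_info *info)
{
	if (try_hypercall(cpus, info))
		return;		/* hypervisor flushed on our behalf */
	/* do_native: fall back to the ordinary IPI-based flush */
	native_flush_tlb_multi(cpus, info);
}

int main(void)
{
	struct cpumask cpus = { 0xf };
	struct flush_tlb_info info = { 0, ~0UL };

	hv_flush(&cpus, &info);
	return 0;
}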
2019 Jun 26
2
[PATCH 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...age(struct vm_area_struct *vma, unsigned long a)
>> 	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
>> }
>>
>> +void native_flush_tlb_multi(const struct cpumask *cpumask,
>> +			    const struct flush_tlb_info *info);
>> +
>> void native_flush_tlb_others(const struct cpumask *cpumask,
>> 			     const struct flush_tlb_info *info);
>>
>> @@ -593,6 +596,9 @@ static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
>> extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
>> ...
2019 May 25
3
[RFC PATCH 5/6] x86/mm/tlb: Flush remote and local TLBs concurrently
...bflush.h
@@ -569,6 +569,9 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
 	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
 }
 
+void native_flush_tlb_multi(const struct cpumask *cpumask,
+			    const struct flush_tlb_info *info);
+
 void native_flush_tlb_others(const struct cpumask *cpumask,
 			     const struct flush_tlb_info *info);
 
@@ -593,6 +596,9 @@ static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
 extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
 
 #ifndef CONFIG_PARAVIRT
+#define flush_tlb...
2019 May 31
2
[RFC PATCH v2 04/12] x86/mm/tlb: Flush remote and local TLBs concurrently
...bflush.h
@@ -569,6 +569,9 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
 	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
 }
 
+void native_flush_tlb_multi(const struct cpumask *cpumask,
+			    const struct flush_tlb_info *info);
+
 void native_flush_tlb_others(const struct cpumask *cpumask,
 			     const struct flush_tlb_info *info);
 
@@ -593,6 +596,9 @@ static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
 extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
 
 #ifndef CONFIG_PARAVIRT
+#define flush_tlb...
2019 Jul 19
5
[PATCH v3 0/9] x86: Concurrent TLB flushes
[ Cover-letter is identical to v2, including benchmark results, excluding the change log. ] Currently, local and remote TLB flushes are not performed concurrently, which introduces unnecessary overhead - each INVLPG can take 100s of cycles. This patch-set allows TLB flushes to be run concurrently: first request the remote CPUs to initiate the flush, then run it locally, and finally wait for the remote CPUs to finish their work.
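The ordering described in this cover letter is the core of the series: send the flush requests to the remote CPUs without waiting, do the local flush while those are in flight, and only then wait. A user-space sketch of that ordering follows, with pthreads standing in for IPIs; remote_flush and NR_REMOTE are illustrative names, not kernel symbols.

/* Sketch of the concurrent-flush ordering; an illustration under the
 * stated assumptions, not the kernel implementation. Build with -pthread. */
#include <pthread.h>
#include <stdio.h>

#define NR_REMOTE 3

/* Stand-in for the flush function a remote CPU runs on receiving the IPI. */
static void *remote_flush(void *arg)
{
	printf("remote cpu %ld: flushing TLB\n", (long)arg);
	return NULL;
}

int main(void)
{
	pthread_t remote[NR_REMOTE];

	/* 1. Ask the remote CPUs to start flushing, but do not wait yet. */
	for (long i = 0; i < NR_REMOTE; i++)
		pthread_create(&remote[i], NULL, remote_flush, (void *)i);

	/* 2. Flush locally while the remote flushes are in flight. */
	printf("local cpu: flushing TLB\n");

	/* 3. Only now wait for the remote CPUs to finish their work. */
	for (int i = 0; i < NR_REMOTE; i++)
		pthread_join(remote[i], NULL);

	return 0;
}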
2019 Jul 02
2
[PATCH v2 0/9] x86: Concurrent TLB flushes
Currently, local and remote TLB flushes are not performed concurrently, which introduces unnecessary overhead - each INVLPG can take 100s of cycles. This patch-set allows TLB flushes to be run concurrently: first request the remote CPUs to initiate the flush, then run it locally, and finally wait for the remote CPUs to finish their work. In addition, there are various small optimizations to avoid...
2019 Jul 22
2
[PATCH v3 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
On Thu, Jul 18, 2019 at 05:58:32PM -0700, Nadav Amit wrote:
> @@ -709,8 +716,9 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
> 	 * doing a speculative memory access.
> 	 */
> 	if (info->freed_tables) {
> -		smp_call_function_many(cpumask, flush_tlb_func_remote,
> -				       (void *)info, 1);
> +		__smp_call_function_many(cpumask, flush_tlb_func_remote,
> +					 fl...
2019 May 27
3
[RFC PATCH 5/6] x86/mm/tlb: Flush remote and local TLBs concurrently
...empted vCPUs */
 	for_each_cpu(cpu, flushmask) {
+		if (cpu == smp_processor_id())
+			continue;
+
 		src = &per_cpu(steal_time, cpu);
 		state = READ_ONCE(src->preempted);
 		if ((state & KVM_VCPU_PREEMPTED)) {
@@ -603,7 +606,7 @@ static void kvm_flush_tlb_others(const s
 	}
 
-	native_flush_tlb_others(flushmask, info);
+	native_flush_tlb_multi(flushmask, info);
 }
 
 static void __init kvm_guest_init(void)
@@ -628,9 +631,8 @@ static void __init kvm_guest_init(void)
 	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
 	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
 	    kvm_pa...
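The KVM hunk above adjusts the flush mask before handing it to native_flush_tlb_multi: the local CPU is no longer checked against the steal-time state (the concurrent path flushes it directly), and preempted vCPUs are dropped from the mask so their flush can be deferred until they run again. Below is a sketch of that mask pruning only, using a made-up bitmask and flag layout rather than the kernel's steal_time ABI.

/* Sketch of the mask pruning shown in the KVM hunk; NR_CPUS, the
 * preempted[] array and the flag value are stand-ins, not kernel ABI. */
#include <stdio.h>

#define NR_CPUS            8
#define KVM_VCPU_PREEMPTED 1

static unsigned char preempted[NR_CPUS];	/* per-vCPU preempted byte */

static void prune_flushmask(unsigned long *flushmask, int self)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!(*flushmask & (1UL << cpu)))
			continue;
		if (cpu == self)
			continue;	/* never check the local CPU; it flushes itself */
		if (preempted[cpu] & KVM_VCPU_PREEMPTED) {
			/* vCPU not running: drop it from the mask; its flush
			 * is deferred until the hypervisor reschedules it. */
			*flushmask &= ~(1UL << cpu);
		}
	}
}

int main(void)
{
	unsigned long mask = 0xff;

	preempted[3] = KVM_VCPU_PREEMPTED;
	prune_flushmask(&mask, 0);
	printf("mask after pruning: %#lx\n", mask);	/* bit 3 cleared */
	return 0;
}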
2019 Jul 22
0
[PATCH v3 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
> On Jul 22, 2019, at 12:14 PM, Peter Zijlstra <peterz at infradead.org> wrote:
>
> On Thu, Jul 18, 2019 at 05:58:32PM -0700, Nadav Amit wrote:
>> @@ -709,8 +716,9 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
>> 	 * doing a speculative memory access.
>> 	 */
>> 	if (info->freed_tables) {
>> -		smp_call_function_many(cpumask, flush_tlb_func_remote,
>> -				       (void *)info, 1);
>> +		__smp_call_function_many(cpumask, flush_tlb_func_r...
2008 Jul 26
5
BUG: soft lockup - CPU#1 stuck for 61s!
...0068
Jul 25 02:15:02 vega2008 kernel: CR0: 8005003b CR2: b7ebf978 CR3: 32996000 CR4: 000006f0
Jul 25 02:15:02 vega2008 kernel: DR0: 00000000 DR1: 00000000 DR2: 00000000 DR3: 00000000
Jul 25 02:15:02 vega2008 kernel: DR6: ffff0ff0 DR7: 00000400
Jul 25 02:15:02 vega2008 kernel: [<c04119c7>] ? native_flush_tlb_others+0x49/0x9b
Jul 25 02:15:02 vega2008 kernel: [<c0411e65>] ? flush_tlb_mm+0x51/0x54
Jul 25 02:15:02 vega2008 kernel: [<c045bc58>] ? exit_mmap+0x93/0xc9
Jul 25 02:15:02 vega2008 kernel: [<c04214c2>] ? mmput+0x25/0x68
Jul 25 02:15:02 vega2008 kernel: [<c046e9c9>] ? flush_old_...
2007 Aug 10
9
[PATCH 0/25 -v2] paravirt_ops for x86_64, second round
Here is a slightly updated version of the paravirt_ops patch. If your comments and criticism were welcome before, now they are even more so! There are some issues that are _not_ addressed in this revision, and here are the causes:

* split debugreg into multiple functions, suggested by Andi:
  - jsfg and I agree that introducing more pvops (especially 14!) is not worthwhile. So, although we do...