Displaying 20 results from an estimated 72 matches for "switch_mm".
2009 Aug 10
1
[PATCH 1/2] export cpu_tlbstate to modules
vhost net module wants to do copy to/from user from a kernel thread,
which needs switch_mm (like what fs/aio has).
export cpu_tlbstate to make this possible
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
arch/x86/mm/tlb.c | 1 +
1 files changed, 1 insertions(+), 0 deletions(-)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 821e970..e33a5f0 100644
--- a/ar...
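For orientation, here is a minimal sketch of the pattern this cover text describes (an illustration, not code from the patch: the function name is invented, and use_mm()/unuse_mm() are the fs/aio.c helpers referenced above, which later postings in this thread export):

#include <linux/uaccess.h>
#include <linux/mmu_context.h>   /* use_mm()/unuse_mm() once exported */

static int worker_copy_in(struct mm_struct *mm, void *dst,
                          const void __user *src, size_t len)
{
        int ret = 0;

        use_mm(mm);                         /* switch_mm() onto the user's mm */
        if (copy_from_user(dst, src, len))  /* now resolves against that mm   */
                ret = -EFAULT;
        unuse_mm(mm);                       /* drop back to a lazy/kernel mm  */
        return ret;
}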
2020 Apr 04
0
[PATCH 4/6] kernel: move use_mm/unuse_mm to kthread.c
...543a9e1cc..c51a84132d7c 100644
--- a/include/linux/mmu_context.h
+++ b/include/linux/mmu_context.h
@@ -4,11 +4,6 @@
#include <asm/mmu_context.h>
-struct mm_struct;
-
-void use_mm(struct mm_struct *mm);
-void unuse_mm(struct mm_struct *mm);
-
/* Architectures that care about IRQ state in switch_mm can override this. */
#ifndef switch_mm_irqs_off
# define switch_mm_irqs_off switch_mm
diff --git a/kernel/kthread.c b/kernel/kthread.c
index bfbfa481be3a..ce4610316377 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -1,13 +1,17 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Kernel thre...
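To show how the switch_mm_irqs_off fallback left in the header is consumed (a sketch; the arch-side override below is hypothetical), generic code that already runs with interrupts disabled calls switch_mm_irqs_off(), while an architecture that can skip its own IRQ save/restore provides a definition before the fallback kicks in:

/* arch/<arch>/include/asm/mmu_context.h -- hypothetical override */
void arch_switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk);
#define switch_mm_irqs_off arch_switch_mm_irqs_off

/* generic caller (e.g. the scheduler's context_switch()), IRQs already off */
switch_mm_irqs_off(prev->active_mm, next->mm, next);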
2007 Apr 18
2
[PATCH 21/21] i386 Ldt context inline
I was also able to get the LDT switching functionality out of the
critical path in switch_mm, which reduces the number of function calls,
potential TLB misses and code size.
Signed-off-by: Zachary Amsden <zach@vmware.com>
Index: linux-2.6.14-zach-work/include/asm-i386/desc.h
===================================================================
--- linux-2.6.14-zach-work.orig/include/a...
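The fast path being discussed here usually boils down to a single compare, along the lines of the x86_64 hunk quoted further down this page (a sketch; the i386 variant of that era also passed a cpu argument):

/* common case: neither task has a private LDT, so nothing is called */
if (unlikely(prev->context.ldt != next->context.ldt))
        load_LDT_nolock(&next->context);   /* slow path: reload the LDT */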
2009 Aug 13
0
[PATCHv3 1/2] mm: export use_mm/unuse_mm to modules
...called only
- * from a kernel thread context)
- */
-static void use_mm(struct mm_struct *mm)
-{
- struct mm_struct *active_mm;
- struct task_struct *tsk = current;
-
- task_lock(tsk);
- active_mm = tsk->active_mm;
- atomic_inc(&mm->mm_count);
- tsk->mm = mm;
- tsk->active_mm = mm;
- switch_mm(active_mm, mm, tsk);
- task_unlock(tsk);
-
- mmdrop(active_mm);
-}
-
-/*
- * unuse_mm
- * Reverses the effect of use_mm, i.e. releases the
- * specified mm context which was earlier taken on
- * by the calling kernel thread
- * (Note: this routine is intended to be called only
- * from a kernel thr...
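The hunk is truncated before unuse_mm()'s body; as a rough reconstruction from its comment (an approximation of the fs/aio.c code of that era, not the exact hunk), the reverse operation looks like:

static void unuse_mm(struct mm_struct *mm)
{
        struct task_struct *tsk = current;

        task_lock(tsk);
        tsk->mm = NULL;              /* no longer runs with a user mm ...        */
        enter_lazy_tlb(mm, tsk);     /* ... but keep active_mm until rescheduled */
        task_unlock(tsk);
}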
2009 Aug 19
0
[PATCHv4 1/2] mm: export use_mm/unuse_mm to modules
...called only
- * from a kernel thread context)
- */
-static void use_mm(struct mm_struct *mm)
-{
- struct mm_struct *active_mm;
- struct task_struct *tsk = current;
-
- task_lock(tsk);
- active_mm = tsk->active_mm;
- atomic_inc(&mm->mm_count);
- tsk->mm = mm;
- tsk->active_mm = mm;
- switch_mm(active_mm, mm, tsk);
- task_unlock(tsk);
-
- mmdrop(active_mm);
-}
-
-/*
- * unuse_mm
- * Reverses the effect of use_mm, i.e. releases the
- * specified mm context which was earlier taken on
- * by the calling kernel thread
- * (Note: this routine is intended to be called only
- * from a kernel thr...
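What exporting the pair buys a module, sketched as a vhost-style worker loop (the context structure and the request handler are invented for illustration):

static int worker_thread(void *data)
{
        struct worker_ctx *ctx = data;        /* hypothetical driver context   */

        use_mm(ctx->owner_mm);                /* adopt the owning process's mm */
        while (!kthread_should_stop())
                handle_one_request(ctx);      /* may copy_to_user()/copy_from_user() */
        unuse_mm(ctx->owner_mm);
        return 0;
}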
2009 Sep 17
0
[PATCHv3 1/2] mm: move use_mm/unuse_mm from aio.c to mm/
...called only
- * from a kernel thread context)
- */
-static void use_mm(struct mm_struct *mm)
-{
- struct mm_struct *active_mm;
- struct task_struct *tsk = current;
-
- task_lock(tsk);
- active_mm = tsk->active_mm;
- atomic_inc(&mm->mm_count);
- tsk->mm = mm;
- tsk->active_mm = mm;
- switch_mm(active_mm, mm, tsk);
- task_unlock(tsk);
-
- mmdrop(active_mm);
-}
-
-/*
- * unuse_mm
- * Reverses the effect of use_mm, i.e. releases the
- * specified mm context which was earlier taken on
- * by the calling kernel thread
- * (Note: this routine is intended to be called only
- * from a kernel thr...
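One detail worth calling out in the body above: use_mm() only takes an mm_count reference (atomic_inc(&mm->mm_count)) and mmdrop()s the old active_mm; that keeps the mm_struct itself alive but not the address space, so a caller is expected to hold its own mm_users reference for as long as it copies to/from that mm. Caller side, illustratively:

struct mm_struct *mm = get_task_mm(owner);    /* takes an mm_users reference */

if (mm) {
        use_mm(mm);
        /* ... copy_to_user()/copy_from_user() against 'mm' ... */
        unuse_mm(mm);
        mmput(mm);                            /* drops mm_users */
}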
2009 Aug 11
1
[PATCHv2 1/2] mm: export use_mm/unuse_mm to modules
...called only
- * from a kernel thread context)
- */
-static void use_mm(struct mm_struct *mm)
-{
- struct mm_struct *active_mm;
- struct task_struct *tsk = current;
-
- task_lock(tsk);
- active_mm = tsk->active_mm;
- atomic_inc(&mm->mm_count);
- tsk->mm = mm;
- tsk->active_mm = mm;
- switch_mm(active_mm, mm, tsk);
- task_unlock(tsk);
-
- mmdrop(active_mm);
-}
-
-/*
- * unuse_mm
- * Reverses the effect of use_mm, i.e. releases the
- * specified mm context which was earlier taken on
- * by the calling kernel thread
- * (Note: this routine is intended to be called only
- * from a kernel thr...
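The quoted hunk only shows the static copies being removed from fs/aio.c; the export half of a patch with this subject presumably lands the functions under mm/ and adds export markers, roughly as below (an assumption -- that hunk is not shown here):

void use_mm(struct mm_struct *mm)
{
        /* body as removed from fs/aio.c above */
}
EXPORT_SYMBOL_GPL(use_mm);

void unuse_mm(struct mm_struct *mm)
{
        /* ... */
}
EXPORT_SYMBOL_GPL(unuse_mm);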
2009 Aug 27
1
[PATCHv5 1/3] mm: export use_mm/unuse_mm to modules
...called only
- * from a kernel thread context)
- */
-static void use_mm(struct mm_struct *mm)
-{
- struct mm_struct *active_mm;
- struct task_struct *tsk = current;
-
- task_lock(tsk);
- active_mm = tsk->active_mm;
- atomic_inc(&mm->mm_count);
- tsk->mm = mm;
- tsk->active_mm = mm;
- switch_mm(active_mm, mm, tsk);
- task_unlock(tsk);
-
- mmdrop(active_mm);
-}
-
-/*
- * unuse_mm
- * Reverses the effect of use_mm, i.e. releases the
- * specified mm context which was earlier taken on
- * by the calling kernel thread
- * (Note: this routine is intended to be called only
- * from a kernel thr...
2007 Apr 18
0
[RFC/PATCH PV_OPS X86_64 08/17] paravirt_ops - memory management
...vel4_pgt));
}
__flush_tlb_all();
}
Index: clean-start/include/asm-x86_64/mmu_context.h
===================================================================
--- clean-start.orig/include/asm-x86_64/mmu_context.h
+++ clean-start/include/asm-x86_64/mmu_context.h
@@ -42,7 +42,7 @@ static inline void switch_mm(struct mm_s
load_cr3(next->pgd);
if (unlikely(next->context.ldt != prev->context.ldt))
- load_LDT_nolock(&next->context, cpu);
+ load_LDT_nolock(&next->context);
}
#ifdef CONFIG_SMP
else {
@@ -55,7 +55,7 @@ static inline void switch_mm(struct mm_s
* to...
2006 Mar 28
7
context switch
In debugging the sles9 port on 64-bit MP machines, I am seeing a problem
where the hypervisor takes a fault in loading fs in the context switch
code (load_segments()). The selector is one of the TLS selectors. It
appears that the cpu in question has updated this selector with a value
of 0 just prior to the problem I am seeing. Looking at the Linux context
switch code, we first update the TLS
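A rough sketch of the guest-side ordering the report points at (an interpretation, not the actual 2.6.x __switch_to): the GDT entries backing the TLS selectors are rewritten for the incoming task before %fs is reloaded, so reloading a selector whose backing entry was just cleared faults -- and on x86_64 Xen that segment reload happens in the hypervisor's load_segments():

load_TLS(next, cpu);              /* rewrite the per-CPU GDT TLS slots first  */
loadsegment(fs, next->fsindex);   /* then reload %fs -- faults if the selector
                                     now names a cleared descriptor           */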
2020 Apr 04
14
improve use_mm / unuse_mm
Hi all,
this series improves the use_mm / unuse_mm interface by better
documenting the assumptions, and by taking the set_fs manipulations
spread over the callers into the core API.
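A sketch of what folding the set_fs handling into the core API means for callers (the call sites and the copy helper here are assumptions based on this cover letter, not quotes from the series):

/* before: every caller brackets its user copies itself */
mm_segment_t oldfs = get_fs();

use_mm(mm);
set_fs(USER_DS);
ret = copy_from_user(buf, uaddr, len);
set_fs(oldfs);
unuse_mm(mm);

/* after: the set_fs(USER_DS)/restore pair lives inside use_mm()/unuse_mm() */
use_mm(mm);
ret = copy_from_user(buf, uaddr, len);
unuse_mm(mm);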