search for: __flush_tlb_one

Displaying results from an estimated 74 matches for "__flush_tlb_one".

2007 Apr 18
0
[PATCH 5/9] 00mm6 kpte flush.patch
.../highmem.c @@ -44,22 +44,19 @@ void *kmap_atomic(struct page *page, enu idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); -#ifdef CONFIG_DEBUG_HIGHMEM if (!pte_none(*(kmap_pte-idx))) BUG(); -#endif set_pte(kmap_pte-idx, mk_pte(page, kmap_prot)); - __flush_tlb_one(vaddr); return (void*) vaddr; } void kunmap_atomic(void *kvaddr, enum km_type type) { -#ifdef CONFIG_DEBUG_HIGHMEM unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); +#ifdef CONFIG_DEBUG_HIGHMEM if (vaddr &...
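The point of this hunk is that kmap_atomic() no longer issues a per-mapping __flush_tlb_one(); the flush moves to kunmap_atomic() when the slot is torn down. A minimal sketch of the post-patch function, reconstructed from the diff above (not the verbatim upstream i386 code; kmap_pte, FIX_KMAP_BEGIN and KM_TYPE_NR are the era's highmem fixmap layout):

        /*
         * Sketch of i386 kmap_atomic() after the 00mm6 kpte-flush change.
         * The BUG check is now unconditional and the per-mapping
         * __flush_tlb_one() is gone: the slot was already flushed when it
         * was last torn down, so installing the new pte is sufficient.
         */
        void *kmap_atomic(struct page *page, enum km_type type)
        {
                enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();
                unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

                if (!pte_none(*(kmap_pte - idx)))       /* slot must already be clear */
                        BUG();
                set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
                /* no __flush_tlb_one(vaddr) here -- deferred to kunmap_atomic() */

                return (void *)vaddr;
        }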
2007 Apr 18
2
pte_offset_map + lazy mmu
...ink its probably an excess pv_op for a relatively minor corner case. It seems to me that it would be better to define kpte_clear_flush as: #define kpte_clear_flush(ptep, vaddr) \ do { \ arch_enter_lazy_mmu_mode(); \ pte_clear(&init_mm, vaddr, ptep); \ __flush_tlb_one(vaddr); \ arch_leave_lazy_mmu_mode(); \ } while (0) and take advantage of mmu batching to make this operation efficient. But I'm not sure if this is safe. (Also, kmap_atomic could use set_pte_at rather than set_pte.) What do you think? J
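Laid out readably, the kpte_clear_flush() definition proposed in that message (the macro body is quoted from the excerpt; the lazy-MMU calls are the era's existing paravirt batching hooks):

        /*
         * Proposed kpte_clear_flush(): inside a lazy-MMU section the
         * pte_clear() and the single-page flush can be batched into one
         * hypercall instead of two.
         */
        #define kpte_clear_flush(ptep, vaddr)           \
        do {                                            \
                arch_enter_lazy_mmu_mode();             \
                pte_clear(&init_mm, vaddr, ptep);       \
                __flush_tlb_one(vaddr);                 \
                arch_leave_lazy_mmu_mode();             \
        } while (0)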
2007 Apr 18
0
[RFC/PATCH PV_OPS X86_64 16/17] paravirt_ops - touch ups
...================================================================= --- clean-start.orig/arch/x86_64/mm/pageattr.c +++ clean-start/arch/x86_64/mm/pageattr.c @@ -81,7 +81,7 @@ static void flush_kernel_map(void *arg) void *adr = page_address(pg); if (cpu_has_clflush) cache_flush_page(adr); - __flush_tlb_one(adr); + __flush_tlb_one((u64)adr); } } Index: clean-start/include/linux/irqflags.h =================================================================== --- clean-start.orig/include/linux/irqflags.h +++ clean-start/include/linux/irqflags.h @@ -74,11 +74,11 @@ #endif /* CONFIG_TRACE_IRQFLAGS_SU...
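The touch-up only adds a cast: page_address() returns a void *, while the paravirt-routed x86_64 __flush_tlb_one() takes an integer address. A sketch of the affected function, with the surrounding list walk assumed since only the inner statements appear in the hunk:

        /*
         * Sketch of flush_kernel_map() after the touch-up (x86_64
         * pageattr.c of that era; the iteration over deferred pages is an
         * assumption, only the loop body comes from the diff above).
         */
        static void flush_kernel_map(void *arg)
        {
                struct list_head *l = arg;
                struct page *pg;

                list_for_each_entry(pg, l, lru) {       /* assumed iteration */
                        void *adr = page_address(pg);

                        if (cpu_has_clflush)
                                cache_flush_page(adr);
                        __flush_tlb_one((u64)adr);      /* cast added by this patch */
                }
        }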
2016 Mar 03
1
RFC: [PATCH] x86/kmmio: fix mmiotrace for hugepages
...s(f->addr, &level); if (!pte) { - pr_err("no pte for page 0x%08lx\n", f->page); + pr_err("no pte for addr 0x%08lx\n", f->addr); return -1; } @@ -156,7 +167,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear) return -1; } - __flush_tlb_one(f->page); + __flush_tlb_one(f->addr); return 0; } @@ -176,12 +187,12 @@ static int arm_kmmio_fault_page(struct kmmio_fault_page *f) int ret; WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n")); if (f->armed) { - pr_warning("double-arm: page 0x%...
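The fix keys the kmmio fault page on the exact faulting address (f->addr) instead of a 4 KiB page value, so lookup_address() and the single-entry flush agree on the mapping even when it is a hugepage. A sketch of the patched tail of clear_page_presence() in arch/x86/mm/kmmio.c, with the present-bit handling elided:

        static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
        {
                unsigned int level;
                pte_t *pte = lookup_address(f->addr, &level);

                if (!pte) {
                        pr_err("no pte for addr 0x%08lx\n", f->addr);
                        return -1;
                }

                /* ... present-bit manipulation elided (depends on page-table level) ... */

                __flush_tlb_one(f->addr);       /* flush the faulting address itself */
                return 0;
        }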
2016 Feb 09
0
mmiotrace fix
...= lookup_address(f->page, &level); + pte_t *pte = lookup_address(f->addr, &level); if (!pte) { - pr_err("no pte for page 0x%08lx\n", f->page); + pr_err("no pte for addr 0x%08lx\n", f->addr); return -1; } @@ -156,7 +192,7 @@ return -1; } - __flush_tlb_one(f->page); + __flush_tlb_one(f->addr); return 0; } @@ -176,12 +212,12 @@ int ret; WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n")); if (f->armed) { - pr_warning("double-arm: page 0x%08lx, ref %d, old %d\n", - f->page, f->count...
2016 May 03
0
[PATCH 4.5 160/200] x86/mm/kmmio: Fix mmiotrace for hugepages
...; + pte_t *pte = lookup_address(f->addr, &level); if (!pte) { - pr_err("no pte for page 0x%08lx\n", f->page); + pr_err("no pte for addr 0x%08lx\n", f->addr); return -1; } @@ -156,7 +167,7 @@ static int clear_page_presence(struct km return -1; } - __flush_tlb_one(f->page); + __flush_tlb_one(f->addr); return 0; } @@ -176,12 +187,12 @@ static int arm_kmmio_fault_page(struct k int ret; WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n")); if (f->armed) { - pr_warning("double-arm: page 0x%08lx, ref %d, old %...
2016 May 03
0
[PATCH 4.4 137/163] x86/mm/kmmio: Fix mmiotrace for hugepages
...; + pte_t *pte = lookup_address(f->addr, &level); if (!pte) { - pr_err("no pte for page 0x%08lx\n", f->page); + pr_err("no pte for addr 0x%08lx\n", f->addr); return -1; } @@ -156,7 +167,7 @@ static int clear_page_presence(struct km return -1; } - __flush_tlb_one(f->page); + __flush_tlb_one(f->addr); return 0; } @@ -176,12 +187,12 @@ static int arm_kmmio_fault_page(struct k int ret; WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n")); if (f->armed) { - pr_warning("double-arm: page 0x%08lx, ref %d, old %...
2016 Jul 12
0
[added to the 4.1 stable tree] x86/mm/kmmio: Fix mmiotrace for hugepages
...s(f->addr, &level); if (!pte) { - pr_err("no pte for page 0x%08lx\n", f->page); + pr_err("no pte for addr 0x%08lx\n", f->addr); return -1; } @@ -156,7 +167,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear) return -1; } - __flush_tlb_one(f->page); + __flush_tlb_one(f->addr); return 0; } @@ -176,12 +187,12 @@ static int arm_kmmio_fault_page(struct kmmio_fault_page *f) int ret; WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n")); if (f->armed) { - pr_warning("double-arm: page 0x%...
2016 Jul 12
0
[added to the 3.18 stable tree] x86/mm/kmmio: Fix mmiotrace for hugepages
...s(f->addr, &level); if (!pte) { - pr_err("no pte for page 0x%08lx\n", f->page); + pr_err("no pte for addr 0x%08lx\n", f->addr); return -1; } @@ -156,7 +167,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear) return -1; } - __flush_tlb_one(f->page); + __flush_tlb_one(f->addr); return 0; } @@ -176,12 +187,12 @@ static int arm_kmmio_fault_page(struct kmmio_fault_page *f) int ret; WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n")); if (f->armed) { - pr_warning("double-arm: page 0x%...
2007 Apr 18
0
[RFC/PATCH PV_OPS X86_64 08/17] paravirt_ops - memory managment
..."r" (cr4) : "memory"); } -static inline void __flush_tlb_all(void) +static inline void __native_flush_tlb_all(void) { unsigned long cr4 = get_cr4(); set_cr4(cr4 & ~X86_CR4_PGE); /* clear PGE */ set_cr4(cr4); /* write old PGE again and flush TLBs */ } -#define __flush_tlb_one(addr) \ +#define __native_flush_tlb_one(addr) \ __asm__ __volatile__("invlpg (%0)" :: "r" (addr) : "memory") +#ifdef CONFIG_PARAVIRT +#include <asm/paravirt.h> +#else +#define __flush_tlb __native_flush_tlb +#define __flush_tlb_one __native_flush_tlb_one +#d...
2007 Apr 18
0
[RFC, PATCH 18/24] i386 Vmi tlbflush header
...extern unsigned long pgkern_mask; @@ -49,9 +18,6 @@ extern unsigned long pgkern_mask; #define cpu_has_invlpg (boot_cpu_data.x86 > 3) -#define __flush_tlb_single(addr) \ - __asm__ __volatile__("invlpg %0": :"m" (*(char *) addr)) - #ifdef CONFIG_X86_INVLPG # define __flush_tlb_one(addr) __flush_tlb_single(addr) #else Index: linux-2.6.16-rc5/include/asm-i386/mach-vmi/mach_tlbflush.h =================================================================== --- linux-2.6.16-rc5.orig/include/asm-i386/mach-vmi/mach_tlbflush.h 2006-03-10 13:03:38.000000000 -0800 +++ linux-2.6.16-rc5/in...
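For context, the wrapper this hunk leaves in place: with CONFIG_X86_INVLPG the single-page flush maps straight onto __flush_tlb_single(), otherwise it falls back to a full flush on CPUs without INVLPG. The #else branch is truncated in the excerpt, so this is a hedged reconstruction of the era's asm-i386/tlbflush.h, not a verbatim quote:

        #ifdef CONFIG_X86_INVLPG
        # define __flush_tlb_one(addr) __flush_tlb_single(addr)
        #else
        # define __flush_tlb_one(addr)                          \
                do {                                            \
                        if (cpu_has_invlpg)                     \
                                __flush_tlb_single(addr);       \
                        else                                    \
                                __flush_tlb();                  \
                } while (0)
        #endif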
2007 Apr 18
2
[PATCH] exec-shield style vdso move.
...:49.000000000 +1000 @@ -13,6 +13,7 @@ #include <linux/slab.h> #include <linux/pagemap.h> #include <linux/spinlock.h> +#include <linux/module.h> #include <asm/system.h> #include <asm/pgtable.h> @@ -138,6 +139,10 @@ void set_pmd_pfn(unsigned long vaddr, un __flush_tlb_one(vaddr); } +static int nr_fixmaps = 0; +unsigned long __FIXADDR_TOP = 0xfffff000; +EXPORT_SYMBOL(__FIXADDR_TOP); + void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags) { unsigned long address = __fix_to_virt(idx); @@ -147,6 +152,13 @@ void __set_fixmap (enum fixed_...
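The pgtable.c code touched here follows the usual fixmap pattern: write the new kernel pte, then invalidate just that one virtual address rather than the whole TLB. An illustrative-only helper (not part of the patch; the name install_fixmap_pte is made up) showing that pattern:

        /*
         * Illustrative-only: the sequence set_pmd_pfn()/__set_fixmap() use
         * for each fixmap update -- install the mapping, then drop the one
         * stale TLB entry with a single-address flush.
         */
        static void install_fixmap_pte(pte_t *ptep, unsigned long vaddr,
                                       unsigned long pfn, pgprot_t prot)
        {
                set_pte(ptep, pfn_pte(pfn, prot));      /* write the new mapping */
                __flush_tlb_one(vaddr);                 /* invalidate only vaddr */
        }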
2007 Aug 10
9
[PATCH 0/25 -v2] paravirt_ops for x86_64, second round
Here is a slightly updated version of the paravirt_ops patch. If your comments and criticism were welcome before, they are even more welcome now! There are some issues that are _not_ addressed in this revision, and here are the reasons: * split debugreg into multiple functions, suggested by Andi: - jsfg and I agree that introducing more pvops (especially 14!) is not worthwhile. So, although we do
2007 Aug 15
13
[PATCH 0/25][V3] pvops_64 last round (hopefully)
This is hopefully the last iteration of the pvops64 patch. From the last version, we have only one change, which is include/asm-x86_64/processor.h: there was still one survivor in raw asm. Also, git screwed me up for some reason, and the 25th patch was missing the new files, paravirt.{c,h} (although I do remember having git-add'ed them, but who knows...). Andrew, could you please push it