search for: fix_kmap_begin

Displaying 20 results from an estimated 27 matches for "fix_kmap_begin".

2020 Nov 03
0
[patch V3 13/37] mips/mm/highmem: Switch to generic kmap atomic
...+17,7 @@ #include <spaces.h> #ifdef CONFIG_HIGHMEM #include <linux/threads.h> -#include <asm/kmap_types.h> +#include <asm/kmap_size.h> #endif /* @@ -52,7 +52,7 @@ enum fixed_addresses { #ifdef CONFIG_HIGHMEM /* reserved pte's for temporary kernel mappings */ FIX_KMAP_BEGIN = FIX_CMAP_END + 1, - FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, + FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1, #endif __end_of_fixed_addresses }; --- a/arch/mips/include/asm/highmem.h +++ b/arch/mips/include/asm/highmem.h @@ -24,7 +24,7 @@ #include <linux/interrup...
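This hunk (and the matching ARM, powerpc and nds32 conversions below) replaces KM_TYPE_NR with KM_MAX_IDX when sizing the block of fixmap ptes reserved for temporary kernel mappings. A minimal sketch of that sizing, with illustrative values only (the real KM_MAX_IDX comes from asm/kmap_size.h and NR_CPUS from the kernel config):

/*
 * Illustrative fixmap sizing; KM_MAX_IDX and NR_CPUS values are made up.
 */
#define KM_MAX_IDX	16	/* per-CPU nesting depth for local kmaps */
#define NR_CPUS		4	/* example SMP configuration */

enum fixed_addresses_example {
	FIX_KMAP_BEGIN_EXAMPLE,
	/* one reserved fixmap pte per CPU per nesting level */
	FIX_KMAP_END_EXAMPLE = FIX_KMAP_BEGIN_EXAMPLE +
			       (KM_MAX_IDX * NR_CPUS) - 1,
};

With these values 64 ptes are reserved; nesting level n on CPU c uses slot n + KM_MAX_IDX * c, which is what the generic code in patch 06/37 computes.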
2020 Nov 03
0
[patch V3 10/37] ARM: highmem: Switch to generic kmap atomic
...+++ b/arch/arm/include/asm/fixmap.h @@ -7,14 +7,14 @@ #define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE) #include <linux/pgtable.h> -#include <asm/kmap_types.h> +#include <asm/kmap_size.h> enum fixed_addresses { FIX_EARLYCON_MEM_BASE, __end_of_permanent_fixed_addresses, FIX_KMAP_BEGIN = __end_of_permanent_fixed_addresses, - FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1, + FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1, /* Support writing RO kernel text via kprobes, jump labels, etc. */ FIX_TEXT_POKE0, --- a/arch/arm/include/asm/highmem.h +++ b/arch...
2020 Nov 03
0
[patch V3 15/37] powerpc/mm/highmem: Switch to generic kmap atomic
...ONFIG_HIGHMEM #include <linux/threads.h> -#include <asm/kmap_types.h> +#include <asm/kmap_size.h> #endif #ifdef CONFIG_KASAN @@ -55,7 +55,7 @@ enum fixed_addresses { FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1, #ifdef CONFIG_HIGHMEM FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ - FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, + FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1, #endif #ifdef CONFIG_PPC_8xx /* For IMMR we need an aligned 512K area */ --- a/arch/powerpc/include/asm/highmem.h +++ b/a...
2020 Nov 03
0
[patch V3 14/37] nds32/mm/highmem: Switch to generic kmap atomic
...nclude/asm/fixmap.h +++ b/arch/nds32/include/asm/fixmap.h @@ -6,7 +6,7 @@ #ifdef CONFIG_HIGHMEM #include <linux/threads.h> -#include <asm/kmap_types.h> +#include <asm/kmap_size.h> #endif enum fixed_addresses { @@ -14,7 +14,7 @@ enum fixed_addresses { FIX_KMAP_RESERVED, FIX_KMAP_BEGIN, #ifdef CONFIG_HIGHMEM - FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS), + FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1, #endif FIX_EARLYCON_MEM_BASE, __end_of_fixed_addresses --- a/arch/nds32/include/asm/highmem.h +++ b/arch/nds32/include/asm/highmem.h @@ -5,7 +5,6 @@...
2020 Nov 03
45
[patch V3 00/37] mm/highmem: Preemptible variant of kmap_atomic & friends
Following up to the discussion in: https://lore.kernel.org/r/20200914204209.256266093@linutronix.de and the second version of this: https://lore.kernel.org/r/20201029221806.189523375@linutronix.de this series provides a preemptible variant of kmap_atomic & related interfaces. This is achieved by: - Removing the RT dependency from migrate_disable/enable() - Consolidating all
2007 Apr 18
0
[PATCH 5/9] 00mm6 kpte flush.patch
...Zachary Amsden <zach@vmware.com> =================================================================== --- a/arch/i386/mm/highmem.c +++ b/arch/i386/mm/highmem.c @@ -44,22 +44,19 @@ void *kmap_atomic(struct page *page, enu idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); -#ifdef CONFIG_DEBUG_HIGHMEM if (!pte_none(*(kmap_pte-idx))) BUG(); -#endif set_pte(kmap_pte-idx, mk_pte(page, kmap_prot)); - __flush_tlb_one(vaddr); return (void*) vaddr; } void kunmap_atomic(void *kvaddr, enum km_type type) { -#ifdef CONFIG_DEBUG_HIGHMEM unsigned long va...
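For contrast, this 2007 hunk still uses the pre-generic interface in which the caller selects a fixed per-CPU slot via enum km_type. A hedged illustration of a caller under that old API; the helper and its body are made up, only kmap_atomic(page, type), kunmap_atomic(kvaddr, type) and the KM_USER0 slot name are historical:

#include <linux/highmem.h>
#include <linux/string.h>

/* Old-style atomic kmap: the caller names the slot and preemption is
 * disabled for the whole mapped section. */
static void copy_from_highpage_example(struct page *page, void *dst, size_t len)
{
	void *src = kmap_atomic(page, KM_USER0);

	memcpy(dst, src, len);
	kunmap_atomic(src, KM_USER0);
}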
2020 Nov 03
0
[patch V3 06/37] highmem: Provide generic variant of kmap_atomic*
...map_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)])); +#endif +} + +static inline int kmap_local_calc_idx(int idx) +{ + return idx + KM_MAX_IDX * smp_processor_id(); +} + +static pte_t *__kmap_pte; + +static pte_t *kmap_get_pte(void) +{ + if (!__kmap_pte) + __kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN)); + return __kmap_pte; +} + +void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot) +{ + pte_t pteval, *kmap_pte = kmap_get_pte(); + unsigned long vaddr; + int idx; + + preempt_disable(); + idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + i...
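The hunk above is the core of the generic implementation: kmap_local_idx_push() hands out a per-task nesting level, kmap_local_calc_idx() scales it into a per-CPU fixmap slot, and __fix_to_virt(FIX_KMAP_BEGIN + idx) gives the virtual address whose pte is then populated. A standalone sketch of that address calculation with illustrative constants (the real FIXADDR_TOP, FIX_KMAP_BEGIN and KM_MAX_IDX are architecture and config specific):

#include <stdio.h>

#define KM_MAX_IDX	16		/* illustrative; see asm/kmap_size.h */
#define PAGE_SHIFT	12
#define FIXADDR_TOP	0xfffff000UL	/* made-up top of the fixmap area */
#define FIX_KMAP_BEGIN	8		/* made-up offset into the fixmap enum */

/* Fixmap slots grow downward from FIXADDR_TOP, one page per slot. */
static unsigned long fix_to_virt(unsigned long idx)
{
	return FIXADDR_TOP - (idx << PAGE_SHIFT);
}

/* Mirrors kmap_local_calc_idx() from the excerpt: each CPU owns a
 * contiguous block of KM_MAX_IDX slots. */
static int kmap_local_calc_idx(int depth, int cpu)
{
	return depth + KM_MAX_IDX * cpu;
}

int main(void)
{
	int cpu = 2, depth = 3;	/* third nested local kmap on CPU 2 */
	int idx = kmap_local_calc_idx(depth, cpu);

	printf("slot %d maps at %#lx\n", idx,
	       fix_to_virt(FIX_KMAP_BEGIN + idx));
	return 0;
}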
2020 Nov 03
0
[patch V3 04/37] sh/highmem: Remove all traces of unused cruft
...age.h> -#ifdef CONFIG_HIGHMEM -#include <asm/kmap_types.h> -#endif /* * Here we define all the compile-time 'special' virtual @@ -53,11 +50,6 @@ enum fixed_addresses { FIX_CMAP_BEGIN, FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS) - 1, -#ifdef CONFIG_HIGHMEM - FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ - FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1, -#endif - #ifdef CONFIG_IOREMAP_FIXED /* * FIX_IOREMAP entries are useful for mapping physical address --- a/arch/sh/include/asm/kmap_types.h +++ /dev/null @@ -1,15 +0,0 @@ -/...
2020 Nov 03
0
[patch V3 09/37] arc/mm/highmem: Use generic kmap atomic implementation
...MAP_BASE (PAGE_OFFSET - FIXMAP_SIZE - PKMAP_SIZE) -#define FIXMAP_SIZE PGDIR_SIZE /* only 1 PGD worth */ -#define KM_TYPE_NR ((FIXMAP_SIZE >> PAGE_SHIFT)/NR_CPUS) -#define FIXMAP_ADDR(nr) (FIXMAP_BASE + ((nr) << PAGE_SHIFT)) + +#define FIX_KMAP_SLOTS (KM_MAX_IDX * NR_CPUS) +#define FIX_KMAP_BEGIN (0UL) +#define FIX_KMAP_END ((FIX_KMAP_BEGIN + FIX_KMAP_SLOTS) - 1) + +#define FIXADDR_TOP (FIXMAP_BASE + (FIX_KMAP_END << PAGE_SHIFT)) + +/* + * This should be converted to the asm-generic version, but of course this + * is needlessly different from all other architectures. Sigh - tglx +...
2020 Nov 03
0
[patch V3 24/37] sched: highmem: Store local kmaps in task struct
...x. Uses the PFN encoded into the pteval + * and the map index calculation because the actual mapped + * virtual address is not stored in task::kmap_ctrl. + * For any sane architecture this is optimized out. + */ + idx = arch_kmap_local_map_idx(i, pte_pfn(pteval)); + + addr = __fix_to_virt(FIX_KMAP_BEGIN + idx); + arch_kmap_local_pre_unmap(addr); + pte_clear(&init_mm, addr, kmap_pte - idx); + arch_kmap_local_post_unmap(addr); + } +} + +void __kmap_local_sched_in(void) +{ + struct task_struct *tsk = current; + pte_t *kmap_pte = kmap_get_pte(); + int i; + + /* Restore kmaps */ + for (i = 0; i...
2019 Mar 12
0
[RFC PATCH V2 0/5] vhost: accelerate metadata access through vmap()
...); page_link seems to point to kernel memory. I found an apparent solution like parisc on arm 32bit: void __kunmap_atomic(void *kvaddr) { unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; int idx, type; if (kvaddr >= (void *)FIXADDR_START) { type = kmap_atomic_idx(); idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); if (cache_is_vivt()) __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); However on arm 64bit kunmap_atomic is not implemented at all and other 32bit implementations don't do it, for example sparc seems to do the cache flush too if the kernel is...
2020 Nov 03
0
[patch V3 25/37] mm/highmem: Provide kmap_local*
...id *__kmap_local_pfn_prot(unsigned lon unsigned long vaddr; int idx; + /* + * Disable migration so resulting virtual address is stable + * accross preemption. + */ + migrate_disable(); preempt_disable(); idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); @@ -505,6 +510,7 @@ void kunmap_local_indexed(void *vaddr) current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0); kmap_local_idx_pop(); preempt_enable(); + migrate_enable(); } EXPORT_SYMBOL(kunmap_local_indexed);
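The hunk adds migrate_disable()/migrate_enable() so the task stays on the CPU that owns the fixmap slot and the returned address remains valid across preemption. A hedged caller-side sketch of the kmap_local interface this patch exposes (the helper below is made up; only kmap_local_page() and kunmap_local() are the interface names):

#include <linux/highmem.h>
#include <linux/string.h>

/* Unlike kmap_atomic(), the section between map and unmap may be
 * preempted; only CPU migration is disabled. */
static void zero_highpage_example(struct page *page)
{
	void *addr = kmap_local_page(page);

	memset(addr, 0, PAGE_SIZE);
	kunmap_local(addr);
}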
2007 Apr 18
1
[PATCH, experimental] i386 Allow the fixmap to be relocated at boot time
This most curious patch allows the fixmap on i386 to be unfixed. The result is that we can create a dynamically sizable hole at the top of kernel linear address space. I know at least some virtualization developers are interested in being able to achieve this to achieve run-time sizing of a hole in which a hypervisor can live, or at least to test out the performance
2011 Jul 18
2
[PATCH tip/x86/mm] x86_32: calculate additional memory needed by the fixmap
...ght need up + * to two additional pte pages to replace the page declared by + * head_32.S and the one allocated by early_ioremap_init, if they + * are even partially used for the kmap. + */ + kmap_begin_pmd_idx = __fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT; + kmap_end_pmd_idx = __fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT; + if (kmap_begin_pmd_idx != kmap_end_pmd_idx) { + if (kmap_end_pmd_idx == fixmap_end_pmd_idx) + size++; + if (btmap_begin_pmd_idx >= kmap_begin_pmd_idx && + btmap_begin_pmd_idx <= kmap_end_pmd_idx) + size++; + } +#endif + + ptes += (size * PMD_SIZE + PAGE_...
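This hunk decides whether the kmap part of the fixmap needs extra pte pages by comparing the pmd indices of the two ends of the range; note that __fix_to_virt(FIX_KMAP_END) is the lower address because fixmap slots grow downward. A small worked sketch of that straddle check, using made-up addresses and the non-PAE PMD_SHIFT of 22 (4 MiB covered per pte page):

#include <stdio.h>

#define PMD_SHIFT	22	/* 32-bit non-PAE: one pte page covers 4 MiB */

int main(void)
{
	/* hypothetical virtual addresses of the kmap range ends */
	unsigned long kmap_begin_va = 0xffbf8000UL;	/* __fix_to_virt(FIX_KMAP_END)   */
	unsigned long kmap_end_va   = 0xffc04000UL;	/* __fix_to_virt(FIX_KMAP_BEGIN) */

	/* Different pmd indices mean the range straddles a 4 MiB boundary
	 * and an additional pte page may be needed. */
	if ((kmap_begin_va >> PMD_SHIFT) != (kmap_end_va >> PMD_SHIFT))
		printf("kmap fixmap range straddles a pmd boundary\n");
	return 0;
}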
2019 Mar 12
9
[RFC PATCH V2 0/5] vhost: accelerate metadata access through vmap()
On Tue, Mar 12, 2019 at 10:59:09AM +0800, Jason Wang wrote: > > On 2019/3/12 ??2:14, David Miller wrote: > > From: "Michael S. Tsirkin" <mst at redhat.com> > > Date: Mon, 11 Mar 2019 09:59:28 -0400 > > > > > On Mon, Mar 11, 2019 at 03:13:17PM +0800, Jason Wang wrote: > > > > On 2019/3/8 ??10:12, Christoph Hellwig wrote: > > >
2006 Mar 14
12
[RFC] VMI for Xen?
I'm sure everyone has seen the drop of VMI patches for Linux at this point, but just in case, the link is included below. I've read this version of the VMI spec and have made my way through most of the patches. While I wasn't really that impressed with the first spec wrt Xen, the second version seems to be much more palatable. Specifically, the code inlining and