search for: page_kernel

Displaying 20 results from an estimated 228 matches for "page_kernel".

2012 Nov 16
1
[PATCH v4] x86/xen: Use __pa_symbol instead of __pa on C visible symbols
...om @@ -2011,7 +2012,7 @@ static void __init xen_write_cr3_init(unsigned long cr3) pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn); pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, - PFN_DOWN(__pa(initial_page_table))); + PFN_DOWN(__pa_symbol(initial_page_table))); set_page_prot(initial_page_table, PAGE_KERNEL); set_page_prot(initial_kernel_pmd, PAGE_KERNEL); @@ -2036,7 +2037,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) copy_page(initial_page_table, pgd); initial_page_table[KERNEL_PGD_BOUNDARY] = - __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT); + __pgd(__pa...
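
The change swaps the physical-address helper used on symbols that live in the kernel image. A minimal sketch of the distinction, assuming the usual x86 <asm/page.h> helpers (the wrapper function below is illustrative, not from the patch):

    /*
     * __pa() expects an address inside the kernel's linear mapping, while
     * __pa_symbol() is the helper meant for C-visible symbols located in
     * the kernel image, such as initial_page_table.  PFN_DOWN() then turns
     * the physical address into a frame number, as in the hunk above.
     */
    #include <linux/pfn.h>
    #include <asm/page.h>
    #include <asm/pgtable.h>

    extern pgd_t initial_page_table[];   /* provided by the x86-32 boot code */

    static unsigned long initial_page_table_pfn(void)
    {
            return PFN_DOWN(__pa_symbol(initial_page_table));
    }
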
2012 Nov 16
1
[PATCH v4] x86/xen: Use __pa_symbol instead of __pa on C visible symbols
...om @@ -2011,7 +2012,7 @@ static void __init xen_write_cr3_init(unsigned long cr3) pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn); pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, - PFN_DOWN(__pa(initial_page_table))); + PFN_DOWN(__pa_symbol(initial_page_table))); set_page_prot(initial_page_table, PAGE_KERNEL); set_page_prot(initial_kernel_pmd, PAGE_KERNEL); @@ -2036,7 +2037,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) copy_page(initial_page_table, pgd); initial_page_table[KERNEL_PGD_BOUNDARY] = - __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT); + __pgd(__pa...
2020 Sep 15
0
[PATCH 17/18] dma-iommu: implement ->alloc_noncoherent
...in *domain = iommu_get_dma_domain(dev); struct iommu_dma_cookie *cookie = domain->iova_cookie; struct iova_domain *iovad = &cookie->iovad; bool coherent = dev_is_dma_coherent(dev); int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs); - pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs); unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap; struct page **pages; struct sg_table sgt; @@ -1030,8 +1031,10 @@ static void *iommu_dma_alloc(struct device *dev, size_t size, gfp |= __GFP_ZERO; if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blo...
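
For orientation, the driver-facing entry point this ->alloc_noncoherent op backs is dma_alloc_noncoherent(); the CPU side ends up as a normal cacheable PAGE_KERNEL mapping and the caller owns coherency. A usage sketch (illustrative driver code, not part of the patch; my_alloc_buffer is a made-up name):

    #include <linux/dma-mapping.h>

    static void *my_alloc_buffer(struct device *dev, size_t size,
                                 dma_addr_t *dma_handle)
    {
            /*
             * Non-coherent allocation: the CPU mapping uses PAGE_KERNEL, and
             * the caller must bracket device access with
             * dma_sync_single_for_device()/dma_sync_single_for_cpu().
             */
            return dma_alloc_noncoherent(dev, size, dma_handle,
                                         DMA_BIDIRECTIONAL, GFP_KERNEL);
    }
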
2020 Sep 25
2
[PATCH 17/18] dma-iommu: implement ->alloc_noncoherent
..._blocking(gfp)) { > + struct page *page; > + > + page = dma_common_alloc_pages(dev, size, handle, dir, gfp); > + if (!page) > + return NULL; > + return page_address(page); > + } > + > + return iommu_dma_alloc_remap(dev, size, handle, gfp | __GFP_ZERO, > + PAGE_KERNEL, 0); iommu_dma_alloc_remap() makes use of the DMA_ATTR_ALLOC_SINGLE_PAGES attribute to optimize the allocations for devices which don't care about how contiguous the backing memory is. Do you think we could add an attrs argument to this function and pass it there? As ARM is being moved to the...
2007 Apr 18
0
[PATCH 1/2] Whitespace cleanup in pageattr.c
...e *split_large_page(uns pbase = (pte_t *)page_address(base); SetPagePTE(virt_to_page(pbase)); for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) { - set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, - addr == address ? prot : PAGE_KERNEL)); + set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, + addr == address ? prot : PAGE_KERNEL)); } return base; }
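
The hunk only re-indents the loop, but the logic is worth restating: when a large kernel mapping is split, every resulting PTE keeps PAGE_KERNEL except the one page whose protections are actually being changed. Reconstructed from the snippet with a comment added (the variables come from split_large_page()):

    for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
            /* only the target address gets the new protections */
            set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
                                       addr == address ? prot : PAGE_KERNEL));
    }
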
2007 Apr 18
0
[PATCH 1/2] Whitespace cleanup in pageattr.c
...e *split_large_page(uns pbase = (pte_t *)page_address(base); SetPagePTE(virt_to_page(pbase)); for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) { - set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, - addr == address ? prot : PAGE_KERNEL)); + set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, + addr == address ? prot : PAGE_KERNEL)); } return base; }
2015 Nov 11
2
[PATCH] instmem/gk20a: use DMA API CPU mapping
...- struct page *pages[npages]; - int i; - - /* phys_to_page does not exist on all platforms... */ - pages[0] = pfn_to_page(dma_to_phys(dev, node->handle) >> PAGE_SHIFT); - for (i = 1; i < npages; i++) - pages[i] = pages[0] + i; - - return vmap(pages, npages, VM_MAP, pgprot_writecombine(PAGE_KERNEL)); -} - -static void __iomem * -gk20a_instobj_cpu_map_iommu(struct nvkm_memory *memory) -{ - struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory); - int npages = nvkm_memory_size(memory) >> 12; - - return vmap(node->pages, npages, VM_MAP, - pgprot_writecombine(PAGE_KERNEL));...
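
The removed helpers hand-build a page array and map it write-combined; a minimal sketch of that mapping pattern (the function and parameter names are illustrative):

    #include <linux/mm.h>
    #include <linux/vmalloc.h>
    #include <asm/pgtable.h>

    /* Map an array of struct page into kernel space, write-combined. */
    static void *map_pages_wc(struct page **pages, unsigned int npages)
    {
            return vmap(pages, npages, VM_MAP,
                        pgprot_writecombine(PAGE_KERNEL));
    }
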
2005 Jun 23
1
[patch] pin/unpin must flush tlb
...=============================================================== --- linux-2.6.11.orig/arch/xen/i386/mm/pgtable.c 2005-06-22 16:25:17.000000000 +0200 +++ linux-2.6.11/arch/xen/i386/mm/pgtable.c 2005-06-23 18:20:45.000000000 +0200 @@ -486,7 +486,8 @@ void mm_pin(struct mm_struct *mm) mm_walk(mm, PAGE_KERNEL_RO); HYPERVISOR_update_va_mapping( (unsigned long)mm->pgd, - pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL_RO), 0); + pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL_RO), + UVMF_TLB_FLUSH); xen_pgd_pin(__pa(mm->pgd)); mm-...
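
The fix is the flags argument: the pgd's own mapping has just been switched to PAGE_KERNEL_RO, so the stale writable TLB entry must be flushed before the pgd is pinned. Restated from the hunk with a comment:

    /*
     * Passing UVMF_TLB_FLUSH (instead of 0) makes the hypervisor flush the
     * stale writable TLB entry in the same hypercall that remaps the pgd
     * read-only, before xen_pgd_pin() runs.
     */
    HYPERVISOR_update_va_mapping((unsigned long)mm->pgd,
                                 pfn_pte(virt_to_phys(mm->pgd) >> PAGE_SHIFT,
                                         PAGE_KERNEL_RO),
                                 UVMF_TLB_FLUSH);
    xen_pgd_pin(__pa(mm->pgd));
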
2007 Jan 10
1
[PATCH] linux/i386: allow CONFIG_HIGHPTE on i386 (take 2)
...; } -#endif return pte; } void pte_free(struct page *pte) { - unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT); + unsigned long pfn = page_to_pfn(pte); - if (!pte_write(*virt_to_ptep(va))) - BUG_ON(HYPERVISOR_update_va_mapping( - va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0)); + if (!PageHighMem(pte)) { + unsigned long va = (unsigned long)__va(pfn << PAGE_SHIFT); + + if (!pte_write(*virt_to_ptep(va))) + BUG_ON(HYPERVISOR_update_va_mapping( + va, pfn_pte(pfn, PAGE_KERNEL), 0)); + } else { + struct mmuext_op op; + + op.cmd = MMUEXT_UNPIN_TABLE;...
2015 Nov 11
0
[PATCH] instmem/gk20a: use DMA API CPU mapping
...t i; > - > - /* phys_to_page does not exist on all platforms... */ > - pages[0] = pfn_to_page(dma_to_phys(dev, node->handle) >> PAGE_SHIFT); > - for (i = 1; i < npages; i++) > - pages[i] = pages[0] + i; > - > - return vmap(pages, npages, VM_MAP, pgprot_writecombine(PAGE_KERNEL)); > -} > - > -static void __iomem * > -gk20a_instobj_cpu_map_iommu(struct nvkm_memory *memory) > -{ > - struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory); > - int npages = nvkm_memory_size(memory) >> 12; > - > - return vmap(node->pages, npages, VM_M...
2009 Sep 21
1
[PATCH 2/5] lguest: use set_pte/set_pmd uniformly for real page table entries
...te_page = __get_cpu_var(switcher_pte_pages); pte_t regs_pte; - unsigned long pfn; #ifdef CONFIG_X86_PAE pmd_t switcher_pmd; pmd_t *pmd_table; - /* FIXME: native_set_pmd is overkill here. */ - native_set_pmd(&switcher_pmd, pfn_pmd(__pa(switcher_pte_page) >> - PAGE_SHIFT, PAGE_KERNEL_EXEC)); + switcher_pmd = pfn_pmd(__pa(switcher_pte_page) >> PAGE_SHIFT, + PAGE_KERNEL_EXEC); /* Figure out where the pmd page is, by reading the PGD, and converting * it to a virtual address. */ @@ -1157,7 +1154,7 @@ void map_switcher_in_guest(struct lg_cpu pgdirs[cpu->...
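
The point of the hunk: switcher_pmd is an on-stack pmd_t, not a live page-table entry, so a plain assignment is enough and the native_set_pmd() call flagged by the FIXME can go. Restated with a comment:

    /*
     * Build the value by assignment; set_pmd() and friends are reserved for
     * writes into real page tables (hence the patch title).
     */
    pmd_t switcher_pmd;

    switcher_pmd = pfn_pmd(__pa(switcher_pte_page) >> PAGE_SHIFT,
                           PAGE_KERNEL_EXEC);
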
2009 Sep 21
1
[PATCH 2/5] lguest: use set_pte/set_pmd uniformly for real page table entries
...te_page = __get_cpu_var(switcher_pte_pages); pte_t regs_pte; - unsigned long pfn; #ifdef CONFIG_X86_PAE pmd_t switcher_pmd; pmd_t *pmd_table; - /* FIXME: native_set_pmd is overkill here. */ - native_set_pmd(&switcher_pmd, pfn_pmd(__pa(switcher_pte_page) >> - PAGE_SHIFT, PAGE_KERNEL_EXEC)); + switcher_pmd = pfn_pmd(__pa(switcher_pte_page) >> PAGE_SHIFT, + PAGE_KERNEL_EXEC); /* Figure out where the pmd page is, by reading the PGD, and converting * it to a virtual address. */ @@ -1157,7 +1154,7 @@ void map_switcher_in_guest(struct lg_cpu pgdirs[cpu->...
2020 Aug 24
0
[PATCH v6 45/76] x86/sev-es: Allocate and Map IST stack for #VC handler
...*data; + struct cpu_entry_area *cea; + unsigned long vaddr; + phys_addr_t pa; + + data = per_cpu(runtime_data, cpu); + cea = get_cpu_entry_area(cpu); + + /* Map #VC IST stack */ + vaddr = CEA_ESTACK_BOT(&cea->estacks, VC); + pa = __pa(data->ist_stack); + cea_set_pte((void *)vaddr, pa, PAGE_KERNEL); + + /* Map VC fall-back stack */ + vaddr = CEA_ESTACK_BOT(&cea->estacks, VC2); + pa = __pa(data->fallback_stack); + cea_set_pte((void *)vaddr, pa, PAGE_KERNEL); +} + /* Needed in vc_early_forward_exception */ void do_early_exception(struct pt_regs *regs, int trapnr); @@ -249,6 +2...
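
Both stacks are ordinary kernel memory; cea_set_pte() installs them into the per-CPU cpu_entry_area with plain PAGE_KERNEL (read-write, non-executable) protections. Restated from the hunk with comments:

    /* #VC IST stack */
    cea_set_pte((void *)CEA_ESTACK_BOT(&cea->estacks, VC),
                __pa(data->ist_stack), PAGE_KERNEL);
    /* #VC fall-back stack */
    cea_set_pte((void *)CEA_ESTACK_BOT(&cea->estacks, VC2),
                __pa(data->fallback_stack), PAGE_KERNEL);
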
2020 Sep 29
0
[PATCH v3 3/7] drm/gem: Use struct dma_buf_map in GEM vmap ops and convert GEM backends
...;dmabuf, &map); - if (!ret) - shmem->vaddr = map.vaddr; + ret = dma_buf_vmap(obj->import_attach->dmabuf, map); + if (!ret) { + if (WARN_ON(map->is_iomem)) { + ret = -EIO; + goto err_put_pages; + } + shmem->vaddr = map->vaddr; + } } else { pgprot_t prot = PAGE_KERNEL; @@ -284,6 +290,8 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem) VM_MAP, prot); if (!shmem->vaddr) ret = -ENOMEM; + else + dma_buf_map_set_vaddr(map, shmem->vaddr); } if (ret) { @@ -291,7 +299,7 @@ static void *drm_gem_shmem_vmap_locke...
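
For orientation, the series replaces raw void * returns from GEM vmap callbacks with struct dma_buf_map (later renamed struct iosys_map). A minimal sketch of a callback filling the map, assuming the linux/dma-buf-map.h helpers of that time (my_vmap and my_backend_vmap are made-up names):

    #include <drm/drm_gem.h>
    #include <linux/dma-buf-map.h>

    void *my_backend_vmap(struct drm_gem_object *obj);  /* hypothetical helper */

    static int my_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
    {
            void *vaddr = my_backend_vmap(obj);

            if (!vaddr)
                    return -ENOMEM;
            /* system-memory mapping; leaves map->is_iomem false */
            dma_buf_map_set_vaddr(map, vaddr);
            return 0;
    }
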
2020 Oct 15
1
[PATCH v4 06/10] drm/gem: Use struct dma_buf_map in GEM vmap ops and convert GEM backends
...;dmabuf, &map); - if (!ret) - shmem->vaddr = map.vaddr; + ret = dma_buf_vmap(obj->import_attach->dmabuf, map); + if (!ret) { + if (WARN_ON(map->is_iomem)) { + ret = -EIO; + goto err_put_pages; + } + shmem->vaddr = map->vaddr; + } } else { pgprot_t prot = PAGE_KERNEL; @@ -284,6 +290,8 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem) VM_MAP, prot); if (!shmem->vaddr) ret = -ENOMEM; + else + dma_buf_map_set_vaddr(map, shmem->vaddr); } if (ret) { @@ -291,7 +299,7 @@ static void *drm_gem_shmem_vmap_locke...
2007 Apr 18
1
[PATCH 1/5] Add pagetable allocation notifiers
...); + SetPagePTE(virt_to_page(pbase)); for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) { - set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, + set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, addr == address ? prot : PAGE_KERNEL)); } return base; @@ -146,6 +147,7 @@ __change_page_attr(struct page *page, pg BUG_ON(!page_count(kpte_page)); if (cpu_has_pse && (page_count(kpte_page) == 1)) { + ClearPagePTE(virt_to_page(kpte)); list_add(&kpte_page->lru, &df_list); revert_page(kpte_page,...
2007 Apr 18
1
[PATCH 1/5] Add pagetable allocation notifiers
...); + SetPagePTE(virt_to_page(pbase)); for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) { - set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, + set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, addr == address ? prot : PAGE_KERNEL)); } return base; @@ -146,6 +147,7 @@ __change_page_attr(struct page *page, pg BUG_ON(!page_count(kpte_page)); if (cpu_has_pse && (page_count(kpte_page) == 1)) { + ClearPagePTE(virt_to_page(kpte)); list_add(&kpte_page->lru, &df_list); revert_page(kpte_page,...
2019 Jun 17
0
[PATCH 07/25] memremap: validate the pagemap type passed to devm_memremap_pages
...+++++++++++ 1 file changed, 27 insertions(+) diff --git a/kernel/memremap.c b/kernel/memremap.c index 6e1970719dc2..6a2dd31a6250 100644 --- a/kernel/memremap.c +++ b/kernel/memremap.c @@ -157,6 +157,33 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) pgprot_t pgprot = PAGE_KERNEL; int error, nid, is_ram; + switch (pgmap->type) { + case MEMORY_DEVICE_PRIVATE: + if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) { + WARN(1, "Device private memory not supported\n"); + return ERR_PTR(-EINVAL); + } + break; + case MEMORY_DEVICE_PUBLIC: + if (!IS_ENABLED(CONFIG_DEVI...
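
The hunk's pattern, restated: validate pgmap->type against the compiled-in support before doing any remapping work, and fail with -EINVAL otherwise (the default case below is illustrative; the excerpt is truncated before it):

    switch (pgmap->type) {
    case MEMORY_DEVICE_PRIVATE:
            if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
                    WARN(1, "Device private memory not supported\n");
                    return ERR_PTR(-EINVAL);
            }
            break;
    default:
            WARN(1, "Invalid pgmap type %d\n", pgmap->type);
            return ERR_PTR(-EINVAL);
    }
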
2019 Jun 17
2
[PATCH 07/25] memremap: validate the pagemap type passed to devm_memremap_pages
...ns(+) > > diff --git a/kernel/memremap.c b/kernel/memremap.c > index 6e1970719dc2..6a2dd31a6250 100644 > --- a/kernel/memremap.c > +++ b/kernel/memremap.c > @@ -157,6 +157,33 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) > pgprot_t pgprot = PAGE_KERNEL; > int error, nid, is_ram; > > + switch (pgmap->type) { > + case MEMORY_DEVICE_PRIVATE: > + if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) { > + WARN(1, "Device private memory not supported\n"); > +...
2020 Sep 26
0
[PATCH 17/18] dma-iommu: implement ->alloc_noncoherent
...> > + > > + page = dma_common_alloc_pages(dev, size, handle, dir, gfp); > > + if (!page) > > + return NULL; > > + return page_address(page); > > + } > > + > > + return iommu_dma_alloc_remap(dev, size, handle, gfp | __GFP_ZERO, > > + PAGE_KERNEL, 0); > > iommu_dma_alloc_remap() makes use of the DMA_ATTR_ALLOC_SINGLE_PAGES attribute > to optimize the allocations for devices which don't care about how contiguous > the backing memory is. Do you think we could add an attrs argument to this > function and pass it there? >...