search for: phys_pfn

Displaying 20 results from an estimated 32 matches for "phys_pfn".
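For reference: PHYS_PFN() converts a physical address into a page frame number by shifting out the page-offset bits, and PFN_PHYS() is its inverse. A minimal sketch of the definitions as they appear in include/linux/pfn.h:

    #define PHYS_PFN(x)  ((unsigned long)((x) >> PAGE_SHIFT))  /* phys addr -> pfn */
    #define PFN_PHYS(x)  ((phys_addr_t)(x) << PAGE_SHIFT)      /* pfn -> phys addr */

Most of the matches below use PHYS_PFN() either to index pfn-keyed data structures (e.g. the pgmap xarray) or to cross-check a pfn against __pa() of a kernel virtual address.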

2019 May 13
2
[Qemu-devel] [PATCH v8 3/6] libnvdimm: add dax_dev sync flag
...pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX + && pfn_t_to_page(pfn)->pgmap == pgmap + && pfn_t_to_page(end_pfn)->pgmap == pgmap + && pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr)) + && pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr))) dax_enabled = true; put_dev_pagemap(pgmap); Thanks, Pankaj
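The excerpt above compresses the added hunk onto one line; read back as code, the proposed condition is roughly the following (the excerpt starts mid-condition, so the leading checks are cut off):

    if (/* ...leading checks elided in the excerpt... */
            pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX
            && pfn_t_to_page(pfn)->pgmap == pgmap
            && pfn_t_to_page(end_pfn)->pgmap == pgmap
            && pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr))
            && pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr)))
        dax_enabled = true;
    put_dev_pagemap(pgmap);

Here PHYS_PFN(__pa(kaddr)) recomputes the page frame number of the directly-mapped kernel address so it can be compared against the pfn the DAX driver reported.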
2020 Nov 03
0
[patch V3 20/37] io-mapping: Cleanup atomic iomap
...ic_pfn_prot); +EXPORT_SYMBOL_GPL(__iomap_local_pfn_prot); --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h @@ -69,13 +69,17 @@ io_mapping_map_atomic_wc(struct io_mappi BUG_ON(offset >= mapping->size); phys_addr = mapping->base + offset; - return iomap_atomic_pfn_prot(PHYS_PFN(phys_addr), mapping->prot); + preempt_disable(); + pagefault_disable(); + return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot); } static inline void io_mapping_unmap_atomic(void __iomem *vaddr) { - iounmap_atomic(vaddr); + kunmap_local_indexed((void __force *)vaddr); + page...
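Unpacked, the io_mapping_map_atomic_wc() side of this hunk reads as below; the matching io_mapping_unmap_atomic() change is cut off in the excerpt after the kunmap_local_indexed() call:

    static inline void __iomem *
    io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset)
    {
            resource_size_t phys_addr;

            BUG_ON(offset >= mapping->size);
            phys_addr = mapping->base + offset;
            /* replaces iomap_atomic_pfn_prot(): take the atomic-context
             * guarantees explicitly, then map through the local-kmap path */
            preempt_disable();
            pagefault_disable();
            return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
    }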
2019 May 13
0
[Qemu-devel] [PATCH v8 3/6] libnvdimm: add dax_dev sync flag
...gmap && pgmap->type == MEMORY_DEVICE_FS_DAX > + && pfn_t_to_page(pfn)->pgmap == pgmap > + && pfn_t_to_page(end_pfn)->pgmap == pgmap > + && pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr)) > + && pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr))) Ugh, yes, device-mapper continues to be an awkward fit for dax (or vice versa). We would either need a way to have a multi-level pfn to pagemap lookup for composite devices, or a way to...
2020 Sep 25
0
[PATCH 2/2] mm: remove extra ZONE_DEVICE struct page refcount
..._key); - return 0; -} -#else -static int devmap_managed_enable_get(struct dev_pagemap *pgmap) -{ - return -EINVAL; -} -static void devmap_managed_enable_put(void) -{ -} -#endif /* CONFIG_DEV_PAGEMAP_OPS */ - static void pgmap_array_delete(struct resource *res) { xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end), @@ -90,16 +58,6 @@ static unsigned long pfn_end(struct dev_pagemap *pgmap) return (res->start + resource_size(res)) >> PAGE_SHIFT; } -static unsigned long pfn_next(unsigned long pfn) -{ - if (pfn % 1024 == 0) - cond_resched(); - return pfn + 1;...
2019 May 11
2
[PATCH v8 3/6] libnvdimm: add dax_dev sync flag
On Fri, May 10, 2019 at 5:45 PM Pankaj Gupta <pagupta at redhat.com> wrote: > > > > > > > > > This patch adds 'DAXDEV_SYNC' flag which is set > > > for nd_region doing synchronous flush. This later > > > is used to disable MAP_SYNC functionality for > > > ext4 & xfs filesystem for devices don't support > > >
2020 Sep 16
0
[PATCH] mm: remove extra ZONE_DEVICE struct page refcount
...d_key); - return 0; -} -#else -static int devmap_managed_enable_get(struct dev_pagemap *pgmap) -{ - return -EINVAL; -} -static void devmap_managed_enable_put(void) -{ -} -#endif /* CONFIG_DEV_PAGEMAP_OPS */ - static void pgmap_array_delete(struct range *range) { xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end), @@ -181,7 +152,6 @@ void memunmap_pages(struct dev_pagemap *pgmap) pageunmap_range(pgmap, i); WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n"); - devmap_managed_enable_put(); } EXPORT_SYMBOL_GPL(memunmap_pages);...
2019 Jun 13
0
[PATCH 10/22] memremap: add a migrate callback to struct dev_pagemap_ops
...ore in-depth description of what that callback can and - * cannot do, in include/linux/memremap.h - */ - return devmem->page_fault(vma, addr, page, flags, pmdp); -} -#endif /* CONFIG_DEVICE_PRIVATE */ - static void pgmap_array_delete(struct resource *res) { xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end), diff --git a/mm/hmm.c b/mm/hmm.c index 6dc769feb2e1..aab799677c7d 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -1330,15 +1330,12 @@ static void hmm_devmem_ref_kill(struct dev_pagemap *pgmap) percpu_ref_kill(pgmap->ref); } -static vm_fault_t hmm_devmem_fault...
2020 Sep 17
0
[PATCH] mm: remove extra ZONE_DEVICE struct page refcount
...nable_get(struct dev_pagemap *pgmap) > -{ > - return -EINVAL; > -} > -static void devmap_managed_enable_put(void) > -{ > -} > -#endif /* CONFIG_DEV_PAGEMAP_OPS */ > - > static void pgmap_array_delete(struct range *range) > { > xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end), > @@ -181,7 +152,6 @@ void memunmap_pages(struct dev_pagemap *pgmap) > pageunmap_range(pgmap, i); > > WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n"); > - devmap_managed_enable_put(); > } >...
2019 Jun 26
0
[PATCH 12/25] memremap: add a migrate_to_ram method to struct dev_pagemap_ops
...ore in-depth description of what that callback can and - * cannot do, in include/linux/memremap.h - */ - return devmem->page_fault(vma, addr, page, flags, pmdp); -} -#endif /* CONFIG_DEVICE_PRIVATE */ - static void pgmap_array_delete(struct resource *res) { xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end), @@ -193,6 +162,10 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) WARN(1, "Device private memory not supported\n"); return ERR_PTR(-EINVAL); } + if (!pgmap->ops || !pgmap->ops->migrate_to_ram) { + WA...
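The added check (in the device-private branch of devm_memremap_pages()) rejects a pagemap whose ops lack a migrate_to_ram handler. For orientation, this is the shape of what a driver supplies; the names here are hypothetical, only the ops layout follows struct dev_pagemap_ops:

    /* hypothetical driver callback: move the faulting device-private page
     * back to system RAM so the CPU fault can be retried */
    static vm_fault_t my_devmem_migrate_to_ram(struct vm_fault *vmf)
    {
            return 0;       /* or VM_FAULT_SIGBUS on failure */
    }

    static const struct dev_pagemap_ops my_devmem_ops = {
            .migrate_to_ram = my_devmem_migrate_to_ram,
    };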
2019 Jun 13
1
[PATCH 10/22] memremap: add a migrate callback to struct dev_pagemap_ops
...an and > - * cannot do, in include/linux/memremap.h > - */ > - return devmem->page_fault(vma, addr, page, flags, pmdp); > -} > -#endif /* CONFIG_DEVICE_PRIVATE */ > - > static void pgmap_array_delete(struct resource *res) > { > xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end), > diff --git a/mm/hmm.c b/mm/hmm.c > index 6dc769feb2e1..aab799677c7d 100644 > --- a/mm/hmm.c > +++ b/mm/hmm.c > @@ -1330,15 +1330,12 @@ static void hmm_devmem_ref_kill(struct dev_pagemap *pgmap) > percpu_ref_kill(pgmap->ref); > }...
2020 Sep 25
6
[RFC PATCH v2 0/2] mm: remove extra ZONE_DEVICE struct page refcount
Matthew Wilcox, Ira Weiny, and others have complained that ZONE_DEVICE struct page reference counting is ugly because they are "free" when the reference count is one instead of zero. This leads to explicit checks for ZONE_DEVICE pages in places like put_page(), GUP, THP splitting, and page migration which have to adjust the expected reference count when determining if the page is
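The "count of one means free" special case the cover letter complains about looks roughly like this in release paths of that era (a sketch for orientation, not part of the patch):

    static inline void put_page(struct page *page)
    {
            page = compound_head(page);

            /* devmap/ZONE_DEVICE pages are "free" at refcount 1, not 0,
             * so they cannot go through the normal testzero path */
            if (page_is_devmap_managed(page)) {
                    put_devmap_managed_page(page);  /* fires on the 2 -> 1 drop */
                    return;
            }

            if (put_page_testzero(page))
                    __put_page(page);
    }

Removing the bias is what lets ZONE_DEVICE pages fall through to the ordinary refcount-reaches-zero path.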
2020 Sep 14
5
[PATCH] mm: remove extra ZONE_DEVICE struct page refcount
ZONE_DEVICE struct pages have an extra reference count that complicates the code for put_page() and several places in the kernel that need to check the reference count to see that a page is not being used (gup, compaction, migration, etc.). Clean up the code so the reference count doesn't need to be treated specially for ZONE_DEVICE. Signed-off-by: Ralph Campbell <rcampbell at
2020 Oct 01
0
[RFC PATCH v3 2/2] mm: remove extra ZONE_DEVICE struct page refcount
...d_key); - return 0; -} -#else -static int devmap_managed_enable_get(struct dev_pagemap *pgmap) -{ - return -EINVAL; -} -static void devmap_managed_enable_put(void) -{ -} -#endif /* CONFIG_DEV_PAGEMAP_OPS */ - static void pgmap_array_delete(struct range *range) { xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end), @@ -91,13 +62,6 @@ static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id) return (range->start + range_len(range)) >> PAGE_SHIFT; } -static unsigned long pfn_next(unsigned long pfn) -{ - if (pfn % 1024 == 0) - cond_resched();...
2008 Jan 18
0
[PATCH] minios: support COW for a zero page
...page); +#endif + page = tab[l2_table_offset(addr)]; + if (!(page & _PAGE_PRESENT)) + return 0; + tab = pte_to_virt(page); + + page = tab[l1_table_offset(addr)]; + if (!(page & _PAGE_PRESENT)) + return 0; + /* Only support CoW for the zero page. */ + if (PHYS_PFN(page) != mfn_zero) + return 0; + + new_page = alloc_pages(0); + memset((void*) new_page, 0, PAGE_SIZE); + + rc = HYPERVISOR_update_va_mapping(addr & PAGE_MASK, __pte(virt_to_mach(new_page) | L1_PROT), UVMF_INVLPG); + if (!rc) + return 1; + + printk("Map zero page to %lx failed: %d.\n"...
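Unpacked, the visible part of this CoW fault handler reads as follows (the upper-level page-table walk and the final printk arguments are cut off in the excerpt):

    page = tab[l2_table_offset(addr)];
    if (!(page & _PAGE_PRESENT))
            return 0;
    tab = pte_to_virt(page);

    page = tab[l1_table_offset(addr)];
    if (!(page & _PAGE_PRESENT))
            return 0;
    /* Only support CoW for the zero page. */
    if (PHYS_PFN(page) != mfn_zero)
            return 0;

    new_page = alloc_pages(0);
    memset((void *) new_page, 0, PAGE_SIZE);

    rc = HYPERVISOR_update_va_mapping(addr & PAGE_MASK,
                    __pte(virt_to_mach(new_page) | L1_PROT), UVMF_INVLPG);
    if (!rc)
            return 1;

PHYS_PFN() here extracts the frame number from the PTE so it can be compared against the recorded frame of the shared zero page.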
2020 Jul 28
0
[vhost:vhost 38/45] include/linux/vdpa.h:43:21: error: expected ':', ',', ';', '}' or '__attribute__' before '.' token
...note: in expansion of macro 'virt_addr_valid' 170 | #define pfn_valid(pfn) virt_addr_valid(pfn_to_virt(pfn)) | ^~~~~~~~~~~~~~~ include/linux/dma-mapping.h:352:19: note: in expansion of macro 'pfn_valid' 352 | if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr)))) | ^~~~~~~~~ -- In file included from drivers/vdpa/vdpa.c:13: >> include/linux/vdpa.h:43:21: error: expected ':', ',', ';', '}' or '__attribute__' before '.' token 43 | bool features_valid....
2023 Jan 06
8
[PATCH 0/8] Let iommufd charge IOPTE allocations to the memory cgroup
iommufd follows the same design as KVM and uses memory cgroups to limit the amount of kernel memory a iommufd file descriptor can pin down. The various internal data structures already use GFP_KERNEL_ACCOUNT to charge its own memory. However, one of the biggest consumers of kernel memory is the IOPTEs stored under the iommu_domain and these allocations are not tracked. This series is the first
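"Charging" here means allocating with __GFP_ACCOUNT (usually via GFP_KERNEL_ACCOUNT) so the allocator bills the current task's memory cgroup. A minimal sketch of the pattern; the helper name is hypothetical and not taken from the series:

    #include <linux/gfp.h>

    /* hypothetical helper: one zeroed page for an IOPTE table, charged to
     * the caller's memory cgroup via __GFP_ACCOUNT */
    static void *iopte_table_alloc(int nid)
    {
            struct page *page;

            page = alloc_pages_node(nid, GFP_KERNEL_ACCOUNT | __GFP_ZERO, 0);
            return page ? page_address(page) : NULL;
    }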
2019 Jun 26
0
[PATCH 14/25] memremap: replace the altmap_valid field with a PGMAP_ALTMAP_VALID flag
...d_pfn->mode == PFN_MODE_PMEM) { nd_pfn->npfns = PFN_SECTION_ALIGN_UP((resource_size(res) - offset) / PAGE_SIZE); @@ -634,7 +633,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap) memcpy(altmap, &__altmap, sizeof(*altmap)); altmap->free = PHYS_PFN(offset - reserve); altmap->alloc = 0; - pgmap->altmap_valid = true; + pgmap->flags |= PGMAP_ALTMAP_VALID; } else return -ENXIO; diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 093408ce40ad..e7d8cc9f41e8 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pm...
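For reference, the PGMAP_ALTMAP_VALID flag set here is normally consumed through a small accessor rather than tested directly; roughly (as in mainline's include/linux/memremap.h, not shown in the excerpt):

    static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
    {
            if (pgmap->flags & PGMAP_ALTMAP_VALID)
                    return &pgmap->altmap;
            return NULL;
    }

so callers ask for the altmap and get NULL when none was reserved, replacing the removed altmap_valid boolean.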