search for: pfn_to_page

Displaying 15 results from an estimated 419 matches for "pfn_to_page".

2020 Mar 10
2
[PATCH v1 07/11] virtio-mem: Allow to offline partially unplugged memory blocks
...+ * offlined and add the unplugged pages to the managed
> + * page counters (so offlining code can correctly subtract
> + * them again).
> + */
> + pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
> +                sb_id * vm->subblock_size);
> + adjust_managed_page_count(pfn_to_page(pfn), nr_pages);
> + for (i = 0; i < nr_pages; i++)
> +         page_ref_dec(pfn_to_page(pfn + i));

Is there ever a situation where this might be anything other than a 1->0 transition?

--
Michal Hocko
SUSE Labs
2020 Mar 10
1
[PATCH v1 07/11] virtio-mem: Allow to offline partially unplugged memory blocks
...aged
> >> + * page counters (so offlining code can correctly subtract
> >> + * them again).
> >> + */
> >> + pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
> >> +                sb_id * vm->subblock_size);
> >> + adjust_managed_page_count(pfn_to_page(pfn), nr_pages);
> >> + for (i = 0; i < nr_pages; i++)
> >> +         page_ref_dec(pfn_to_page(pfn + i));
> >
> > Is there ever a situation where this might be anything other than a 1->0 transition?
>
> Only if some other code would be taking a reference. At least not from...
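
For reference, a minimal sketch of the pattern under discussion, with hypothetical names (drop_unplugged_range, range_pfn, range_pages) standing in for the virtio-mem code quoted above:

    /*
     * Illustrative sketch only: a driver that took exactly one reference
     * on each page of an unplugged range now gives the pages back so the
     * offlining code sees them as unused. The page_ref_dec() below is a
     * 1->0 transition only as long as nothing else grabbed its own
     * reference, which is exactly the point raised in this thread.
     */
    static void drop_unplugged_range(unsigned long range_pfn,
                                     unsigned long range_pages)
    {
            unsigned long i;

            /* re-add the pages to the managed page counters ... */
            adjust_managed_page_count(pfn_to_page(range_pfn), range_pages);

            /* ... and drop the reference taken at unplug time */
            for (i = 0; i < range_pages; i++)
                    page_ref_dec(pfn_to_page(range_pfn + i));
    }
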
2023 Jan 30
2
[PATCH 22/23] vring: use bvec_set_page to initialize a bvec
...e27da544814a 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -1126,9 +1126,8 @@ static int iotlb_translate(const struct vringh *vrh,
 		size = map->size - addr + map->start;
 		pa = map->addr + addr - map->start;
 		pfn = pa >> PAGE_SHIFT;
-		iov[ret].bv_page = pfn_to_page(pfn);
-		iov[ret].bv_len = min(len - s, size);
-		iov[ret].bv_offset = pa & (PAGE_SIZE - 1);
+		bvec_set_page(&iov[ret], pfn_to_page(pfn), min(len - s, size),
+			      pa & (PAGE_SIZE - 1));
 		s += size;
 		addr += size;
 		++ret;
--
2.39.0
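
The helper being converted to is a trivial initializer; a sketch of its definition as added to include/linux/bvec.h by the same series (paraphrased from memory, so treat the exact signature as an assumption):

    /* Fill all three bio_vec fields in one call instead of open-coding. */
    static inline void bvec_set_page(struct bio_vec *bv, struct page *page,
                                     unsigned int len, unsigned int offset)
    {
            bv->bv_page = page;
            bv->bv_len = len;
            bv->bv_offset = offset;
    }
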
2020 Mar 02
0
[PATCH v1 07/11] virtio-mem: Allow to offline partially unplugged memory blocks
...he pages so the memory can get
+	 * offlined and add the unplugged pages to the managed
+	 * page counters (so offlining code can correctly subtract
+	 * them again).
+	 */
+	pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
+		       sb_id * vm->subblock_size);
+	adjust_managed_page_count(pfn_to_page(pfn), nr_pages);
+	for (i = 0; i < nr_pages; i++)
+		page_ref_dec(pfn_to_page(pfn + i));
+	}
+}
+
+static void virtio_mem_notify_cancel_offline(struct virtio_mem *vm,
+					     unsigned long mb_id)
+{
+	const unsigned long nr_pages = PFN_DOWN(vm->subblock_size);
+	unsigned long pfn;
+	int...
2019 Jun 26
0
[PATCH 14/25] memremap: replace the altmap_valid field with a PGMAP_ALTMAP_VALID flag
...5 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -131,17 +131,9 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
-	struct page *page;
+	struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
 	int ret;

-	/*
-	 * If we have an altmap then we need to skip over any reserved PFNs
-	 * when querying the zone.
-	 */
-	page = pfn_to_page(start_pfn);
-	if (altmap)
-		page += vmem_altmap_offset(altmap);
-	__remove_pages(page_zone(page), start_pfn, nr...
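
The consolidated assignment above is only safe if the offset helper tolerates a NULL altmap; a sketch of the shape that makes this legal (mirroring the mainline helper, but verify against the actual tree):

    static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
    {
            /* number of pfns from the base where pfn_to_page() is valid */
            if (altmap)
                    return altmap->reserve + altmap->free;
            return 0;
    }
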
2020 Sep 25
1
[PATCH 1/2] ext4/xfs: add page refcount helper
...ged, 11 insertions(+), 6 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index 994ab66a9907..8eddbcc0e149 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -358,7 +358,7 @@ static void dax_disassociate_entry(void *entry, struct address_space *mapping,
 	for_each_mapped_pfn(entry, pfn) {
 		struct page *page = pfn_to_page(pfn);

-		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
+		WARN_ON_ONCE(trunc && !dax_layout_is_idle_page(page));
 		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
 		page->mapping = NULL;
 		page->index = 0;
@@ -372,7 +372,7 @@ static struct pag...
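
The open-coded refcount check is replaced by a named helper; a plausible reconstruction of it, assuming (as this series argues) that a DAX page holding only its base ZONE_DEVICE reference counts as idle:

    /* Hypothetical reconstruction of the helper added by this patch. */
    static inline bool dax_layout_is_idle_page(struct page *page)
    {
            /* refcount 1 == only the base ZONE_DEVICE reference remains */
            return page_ref_count(page) <= 1;
    }
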
2020 Oct 08
2
[PATCH] mm: make device private reference counts zero based
.../powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index 84e5a2dc8be5..a0d08b1d8c1e 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -711,7 +711,7 @@ static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
 	dpage = pfn_to_page(uvmem_pfn);
 	dpage->zone_device_data = pvt;

-	get_page(dpage);
+	init_page_count(dpage);
 	lock_page(dpage);
 	return dpage;
 out_clear:
@@ -1151,6 +1151,7 @@ int kvmppc_uvmem_init(void)
 	struct resource *res;
 	void *addr;
 	unsigned long pfn_last, pfn_first;
+	unsigned long pfn;

 	size = k...
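
The get_page() to init_page_count() swap follows from the zero-based scheme: a freshly allocated device-private page now starts at refcount 0, so incrementing would be wrong and the driver instead sets the count outright. init_page_count() is the stock mm helper:

    /* include/linux/mm.h: start the page out at refcount 1 */
    static inline void init_page_count(struct page *page)
    {
            set_page_count(page, 1);
    }
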
2020 Jul 16
0
[PATCH vhost next 09/10] vdpa/mlx5: Add shared memory registration code
...rs/vdpa/mlx5/core/mr.c:4:
drivers/vdpa/mlx5/core/mr.c: In function 'map_direct_mr':
>> drivers/vdpa/mlx5/core/mr.c:254:21: error: implicit declaration of function '__phys_to_pfn'; did you mean 'page_to_pfn'? [-Werror=implicit-function-declaration]
     254 |  pg = pfn_to_page(__phys_to_pfn(pa));
         |                   ^~~~~~~~~~~~~
   arch/ia64/include/asm/page.h:108:40: note: in definition of macro 'pfn_to_page'
     108 | # define pfn_to_page(pfn)  (vmem_map + (pfn))
         |                                         ^~~
drivers/vdpa/mlx5/core/mr.c: A...
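
The build failure is ia64-specific: most architectures get __phys_to_pfn() via the asm-generic headers, which ia64 did not pull in. A portable spelling that sidesteps the helper, assuming pa holds a physical address (PHYS_PFN() from linux/pfn.h would work equally well):

    /* open-coded physical address -> pfn -> struct page conversion */
    pg = pfn_to_page(pa >> PAGE_SHIFT);
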
2012 Nov 20
12
[PATCH v2 00/11] xen: Initial kexec/kdump implementation
Hi, this set of patches contains the initial kexec/kdump implementation for Xen, v2 (the previous version was posted to a few people by mistake; sorry for that). Currently only dom0 is supported; however, almost all infrastructure required for domU support is ready. Jan Beulich suggested merging the Xen x86 assembler code with the bare-metal x86 code. This could simplify the code and slightly reduce the kernel size.
2020 Sep 25
6
[RFC PATCH v2 0/2] mm: remove extra ZONE_DEVICE struct page refcount
Matthew Wilcox, Ira Weiny, and others have complained that ZONE_DEVICE struct page reference counting is ugly because such pages are "free" when the reference count is one instead of zero. This leads to explicit checks for ZONE_DEVICE pages in places like put_page(), GUP, THP splitting, and page migration, which have to adjust the expected reference count when determining if the page is...
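
As a concrete example of the special-casing the cover letter complains about, put_page() at the time had to divert device-managed pages so they could be "freed" at refcount 1 (a sketch modeled on the v5.8-era include/linux/mm.h; details may differ):

    static inline void put_page(struct page *page)
    {
            page = compound_head(page);

            /*
             * Device-managed ZONE_DEVICE pages are "free" at refcount 1,
             * not 0, so they cannot go through put_page_testzero() and
             * need their own release path.
             */
            if (page_is_devmap_managed(page)) {
                    put_devmap_managed_page(page);
                    return;
            }

            if (put_page_testzero(page))
                    __put_page(page);
    }
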
2007 Apr 18
1
[PATCH 1/5] Add pagetable allocation notifiers
...lloc(struct mm_struct *mm)
 		pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
 		if (!pmd)
 			goto out_oom;
+		SetPagePDE(virt_to_page(pmd));
 		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
 	}
 	return pgd;

 out_oom:
-	for (i--; i >= 0; i--)
+	for (i--; i >= 0; i--) {
+		ClearPagePDE(pfn_to_page(pgd_val(pgd[i]) >> PAGE_SHIFT));
 		kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
+	}
 	kmem_cache_free(pgd_cache, pgd);
 	return NULL;
 }

@@ -261,8 +266,10 @@ void pgd_free(pgd_t *pgd)
 	/* in the PAE case user pgd entries are overwritten before usage */
 	if (PTRS_PER_PMD &...
2020 Oct 01
0
[RFC PATCH v3 1/2] ext4/xfs: add page refcount helper
...ged, 14 insertions(+), 9 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index 5b47834f2e1b..85c63f735909 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -358,7 +358,7 @@ static void dax_disassociate_entry(void *entry, struct address_space *mapping,
 	for_each_mapped_pfn(entry, pfn) {
 		struct page *page = pfn_to_page(pfn);

-		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
+		WARN_ON_ONCE(trunc && !dax_layout_is_idle_page(page));
 		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
 		page->mapping = NULL;
 		page->index = 0;
@@ -372,7 +372,7 @@ static struct pag...
2020 Mar 10
0
[PATCH v1 07/11] virtio-mem: Allow to offline partially unplugged memory blocks
...the unplugged pages to the managed
>> + * page counters (so offlining code can correctly subtract
>> + * them again).
>> + */
>> + pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
>> +                sb_id * vm->subblock_size);
>> + adjust_managed_page_count(pfn_to_page(pfn), nr_pages);
>> + for (i = 0; i < nr_pages; i++)
>> +         page_ref_dec(pfn_to_page(pfn + i));
>
> Is there ever a situation where this might be anything other than a 1->0 transition?

Only if some other code would be taking a reference. At least not from virtio-mem's perspective.

--...
2007 Apr 18
0
[PATCH 2/2] Use page present for pae pdpes
...14 +247,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 		if (!pmd)
 			goto out_oom;
 		SetPagePDE(virt_to_page(pmd));
-		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
+		set_pgd(&pgd[i], __pgd(_PAGE_PRESENT | __pa(pmd)));
 	}
 	return pgd;

 out_oom:
 	for (i--; i >= 0; i--) {
 		ClearPagePDE(pfn_to_page(pgd_val(pgd[i]) >> PAGE_SHIFT));
-		kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
+		kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i]) & PAGE_MASK));
 	}
 	kmem_cache_free(pgd_cache, pgd);
 	return NULL;

@@ -268,7 +268,7 @@ void pgd_free(pgd_t *pgd)
 	if (PTRS_PER_PMD...
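
Both hunks hinge on how a PAE pgd entry encodes its pmd page: the pmd's physical address with the present bit (bit 0) set. The pfn falls out of a shift and the virtual address out of a mask; a standalone sketch with hypothetical helper names:

    /* Hypothetical helpers illustrating the encoding used above. */
    static unsigned long pgd_entry_to_pfn(pgd_t entry)
    {
            /* the low flag bits drop off the bottom of the shift */
            return pgd_val(entry) >> PAGE_SHIFT;
    }

    static void *pgd_entry_to_virt(pgd_t entry)
    {
            /*
             * Masking with PAGE_MASK is the robust form; the old "- 1"
             * only worked because _PAGE_PRESENT (bit 0) was the sole
             * flag bit set.
             */
            return __va(pgd_val(entry) & PAGE_MASK);
    }
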
2020 Oct 12
2
[PATCH v2] mm/hmm: make device private reference counts zero based
.../powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index 84e5a2dc8be5..a0d08b1d8c1e 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -711,7 +711,7 @@ static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
 	dpage = pfn_to_page(uvmem_pfn);
 	dpage->zone_device_data = pvt;

-	get_page(dpage);
+	init_page_count(dpage);
 	lock_page(dpage);
 	return dpage;
 out_clear:
@@ -1151,6 +1151,7 @@ int kvmppc_uvmem_init(void)
 	struct resource *res;
 	void *addr;
 	unsigned long pfn_last, pfn_first;
+	unsigned long pfn;

 	size = k...