search for: page_to_pfn

Displaying 20 results from an estimated 547 matches for "page_to_pfn".

2011 Oct 05
0
[PATCH 3/8] xen: netfront: convert to SKB paged frag API.
...ak; } - skb_shinfo(skb)->frags[0].page = page; + __skb_fill_page_desc(skb, 0, page, 0, 0); skb_shinfo(skb)->nr_frags = 1; __skb_queue_tail(&np->rx_batch, skb); } @@ -309,8 +309,8 @@ no_skb: BUG_ON((signed short)ref < 0); np->grant_rx_ref[id] = ref; - pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page); - vaddr = page_address(skb_shinfo(skb)->frags[0].page); + pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0])); + vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0])); req = RING_GET_REQUEST(&np->rx, req_prod + i...
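
For context, a minimal sketch (not the xen-netfront code itself) of the accessor pattern this hunk converts to: the fragment page is installed with __skb_fill_page_desc() and read back through skb_frag_page() before page_to_pfn()/page_address() are applied, instead of touching frags[0].page directly. The function name and the zero offset/size are illustrative assumptions.

#include <linux/skbuff.h>
#include <linux/mm.h>

/* Attach a page as fragment 0 and derive its PFN via the paged-frag API. */
static void example_fill_rx_frag(struct sk_buff *skb, struct page *page)
{
	unsigned long pfn;
	void *vaddr;

	__skb_fill_page_desc(skb, 0, page, 0, 0);
	skb_shinfo(skb)->nr_frags = 1;

	pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
	vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0]));
	(void)pfn;
	(void)vaddr;
}
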
2010 Aug 31
2
[PATCH 2/5] staging: hv: Fixed lockup problem with bounce_buffer scatter list - RESEND
...request->DataBuffer.Offset = sgl[0].offset; - for (i = 0; i < scsi_sg_count(scmnd); i++) { + for (i = 0; i < sg_count; i++) { DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d\n", i, sgl[i].length, sgl[i].offset); - request->DataBuffer.PfnArray[i] = - page_to_pfn(sg_page((&sgl[i]))); + request->DataBuffer.PfnArray[i] = + page_to_pfn(sg_page((&sgl[i]))); } } else if (scsi_sglist(scmnd)) { /* ASSERT(scsi_bufflen(scmnd) <= PAGE_SIZE); */ -- 1.6.0.2
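
A hedged sketch of the pattern the hunk preserves while switching the loop bound from scsi_sg_count() to the bounce-buffer count: each scatterlist entry is resolved to its backing page with sg_page() and then to a PFN for the host's PfnArray. The function and parameter names below are illustrative, not the storvsc driver's own.

#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/mm.h>

/* Fill a PFN array, one entry per scatterlist element. */
static void example_fill_pfn_array(struct scatterlist *sgl, unsigned int sg_count,
				   u64 *pfn_array)
{
	unsigned int i;

	for (i = 0; i < sg_count; i++)
		pfn_array[i] = page_to_pfn(sg_page(&sgl[i]));
}
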
2013 Jun 24
3
[konrad.wilk@oracle.com: [PATCH] drm/i915: make compact dma scatter lists creation work with SWIOTLB backend.]
...15_gem_object *obj) gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD; gfp &= ~(__GFP_IO | __GFP_WAIT); } - +#ifdef CONFIG_SWIOTLB + if (swiotlb_nr_tbl()) { + st->nents++; + sg_set_page(sg, page, PAGE_SIZE, 0); + sg = sg_next(sg); + continue; + } +#endif if (!i || page_to_pfn(page) != last_pfn + 1) { if (i) sg = sg_next(sg); @@ -1812,8 +1819,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) } last_pfn = page_to_pfn(page); } - - sg_mark_end(sg); +#ifdef CONFIG_SWIOTLB + if (!swiotlb_nr_tbl()) +#endif + sg_mark_end(sg); obj->pages...
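
A sketch, under assumed names, of the coalescing rule this patch special-cases: a new scatterlist entry is started only when the next page is not physically contiguous with the previous one (its PFN is not last_pfn + 1); with SWIOTLB active the patch instead emits one entry per page so the bounce buffer never sees an oversized segment.

#include <linux/scatterlist.h>
#include <linux/mm.h>

/* Build a compact scatterlist by merging physically contiguous pages. */
static void example_build_sg(struct scatterlist *sg, struct page **pages, int n)
{
	unsigned long last_pfn = 0;
	int i;

	for (i = 0; i < n; i++) {
		struct page *page = pages[i];

		if (!i || page_to_pfn(page) != last_pfn + 1) {
			if (i)
				sg = sg_next(sg);
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);
	}
	sg_mark_end(sg);
}
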
2017 Aug 03
4
[PATCH v13 4/5] mm: support reporting free page blocks
...order) { struct free_area *free_area = &zone->free_area[order]; enum migratetype mt; struct page *page; if (!free_area->nr_pages) continue; for_each_migratetype_order(order, mt) { list_for_each_entry(page, &free_area->free_list[mt], lru) { pfn = page_to_pfn(page); visit(opaque2, prn, 1<<order); } } } spin_unlock_irqrestore(&zone->lock, flags); } [...] > +/* > + * Walk through the free page blocks in the system. The @visit callback is > + * invoked to handle each free page block. > + * > + * Note: some pa...
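
A minimal sketch of the walk under discussion, assuming the free lists of that era still hang off page->lru: every free block in a zone is visited under zone->lock and its starting PFN and size are handed to a callback. The callback signature is a placeholder, not the patch's actual interface.

#include <linux/mmzone.h>
#include <linux/mm.h>
#include <linux/spinlock.h>

/* Report each free block (start PFN, page count) of a zone to a callback. */
static void example_walk_zone_free_pages(struct zone *zone,
					 void (*report)(unsigned long pfn,
							unsigned long nr_pages))
{
	unsigned int order, mt;
	struct page *page;
	unsigned long flags;

	spin_lock_irqsave(&zone->lock, flags);
	for_each_migratetype_order(order, mt) {
		list_for_each_entry(page,
				    &zone->free_area[order].free_list[mt], lru)
			report(page_to_pfn(page), 1UL << order);
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
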
2019 Jul 29
0
[PATCH 8/9] mm: remove the unused MIGRATE_PFN_DEVICE flag
...veau_dmem.c index 6cb930755970..f04686a2c21f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -582,8 +582,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm, *dma_addr)) goto out_dma_unmap; - return migrate_pfn(page_to_pfn(dpage)) | - MIGRATE_PFN_LOCKED | MIGRATE_PFN_DEVICE; + return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED; out_dma_unmap: dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL); diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 229153c2c496..8b46cfdb1a0e 1006...
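
For reference, a hedged sketch of the encoding this hunk touches: the destination page is turned into a migrate PFN entry with migrate_pfn(), and per-entry flags (MIGRATE_PFN_LOCKED in the kernels this series targets) are OR'ed in; the patch only drops the unused MIGRATE_PFN_DEVICE bit.

#include <linux/migrate.h>
#include <linux/mm.h>

/* Encode a destination device page for the migrate_vma machinery. */
static unsigned long example_encode_dst(struct page *dpage)
{
	return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
}
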
2019 Aug 14
0
[PATCH 09/10] mm: remove the unused MIGRATE_PFN_DEVICE flag
...veau_dmem.c index d96b987b9982..fa1439941596 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -580,8 +580,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm, *dma_addr)) goto out_dma_unmap; - return migrate_pfn(page_to_pfn(dpage)) | - MIGRATE_PFN_LOCKED | MIGRATE_PFN_DEVICE; + return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED; out_dma_unmap: dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL); diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 1e67dcfd318f..72120061b7d4 1006...
2013 Aug 27
1
[PATCH] xen/balloon: don't set P2M entry for auto translated guest
...rivers/xen/balloon.c @@ -430,8 +430,13 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) /* No more mappings: invalidate P2M and add to balloon. */ for (i = 0; i < nr_pages; i++) { pfn = mfn_to_pfn(frame_list[i]); - __set_phys_to_machine(pfn, - pfn_to_mfn(page_to_pfn(__get_cpu_var(balloon_scratch_page)))); + if (!xen_feature(XENFEAT_auto_translated_physmap)) { + unsigned long p; + struct page *pg; + pg = __get_cpu_var(balloon_scratch_page); + p = page_to_pfn(pg); + __set_phys_to_machine(pfn, pfn_to_mfn(p)); + } balloon_append(pfn_to_page(pfn));...
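
A minimal sketch of the guarded P2M update the patch introduces: only a PV guest (one without an auto-translated physmap) rewrites the P2M entry to point at the per-CPU scratch page; an auto-translated guest skips the update entirely. Names follow the hunk above; the scratch page is passed in rather than fetched from the per-CPU variable.

#include <xen/features.h>
#include <asm/xen/page.h>

/* Invalidate a ballooned page's P2M entry, PV guests only. */
static void example_invalidate_p2m(unsigned long pfn, struct page *scratch_page)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		__set_phys_to_machine(pfn,
				      pfn_to_mfn(page_to_pfn(scratch_page)));
}
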
2019 Jul 29
0
[PATCH 3/9] nouveau: factor out device memory address calculation
...rm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -102,6 +102,14 @@ struct nouveau_migrate { unsigned long dma_nr; }; +static unsigned long nouveau_dmem_page_addr(struct page *page) +{ + struct nouveau_dmem_chunk *chunk = page->zone_device_data; + unsigned long idx = page_to_pfn(page) - chunk->pfn_first; + + return (idx << PAGE_SHIFT) + chunk->bo->bo.offset; +} + static void nouveau_dmem_page_free(struct page *page) { struct nouveau_dmem_chunk *chunk = page->zone_device_data; @@ -169,9 +177,7 @@ nouveau_dmem_fault_alloc_and_copy(struct vm_area_struct...
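
The helper factored out above computes a device-relative address from a page's position inside its chunk. A hedged restatement with only the two fields the calculation needs spelled out (the real struct nouveau_dmem_chunk carries more state):

#include <linux/types.h>
#include <linux/mm.h>

struct example_chunk {
	unsigned long pfn_first;  /* PFN of the chunk's first device page */
	u64 bo_offset;            /* VRAM offset of the chunk's buffer object */
};

/* The idx-th page of the chunk lives at bo_offset + idx * PAGE_SIZE. */
static u64 example_dmem_page_addr(struct page *page, struct example_chunk *chunk)
{
	unsigned long idx = page_to_pfn(page) - chunk->pfn_first;

	return ((u64)idx << PAGE_SHIFT) + chunk->bo_offset;
}
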
2007 Apr 18
0
[PATCH 1/5] Paravirt page alloc.patch
...===================================================== --- a/arch/i386/mm/pageattr.c +++ b/arch/i386/mm/pageattr.c @@ -60,6 +60,7 @@ static struct page *split_large_page(uns address = __pa(address); addr = address & LARGE_PAGE_MASK; pbase = (pte_t *)page_address(base); + paravirt_alloc_pt(page_to_pfn(base)); for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) { set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, addr == address ? prot : ref_prot)); @@ -166,6 +167,7 @@ __change_page_attr(struct page *page, pg if (!PageReser...
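
A sketch of the hook pattern this hunk adds, using the i386 paravirt interface of that era (paravirt_alloc_pt() took only a PFN; later kernels renamed and extended it): before the freshly split page is wired up as a page table, its PFN is reported so a hypervisor backend can pin or shadow it.

#include <asm/paravirt.h>
#include <linux/mm.h>

/* Tell the paravirt backend that this page is about to become a page table. */
static void example_register_pte_page(struct page *base)
{
	paravirt_alloc_pt(page_to_pfn(base));
}
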
2007 Apr 18
0
[PATCH 1/6] Page allocation hooks for VMI backend
...===================================================== --- a/arch/i386/mm/pageattr.c +++ b/arch/i386/mm/pageattr.c @@ -60,6 +60,7 @@ static struct page *split_large_page(uns address = __pa(address); addr = address & LARGE_PAGE_MASK; pbase = (pte_t *)page_address(base); + paravirt_alloc_pt(page_to_pfn(base)); for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) { set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, addr == address ? prot : ref_prot)); @@ -166,6 +167,7 @@ __change_page_attr(struct page *page, pg if (!PageReser...
2016 Jul 27
4
[PATCH v2 repost 6/7] mm: add the related functions to get free page info
On 07/26/2016 06:23 PM, Liang Li wrote: > + for_each_migratetype_order(order, t) { > + list_for_each(curr, &zone->free_area[order].free_list[t]) { > + pfn = page_to_pfn(list_entry(curr, struct page, lru)); > + if (pfn >= start_pfn && pfn <= end_pfn) { > + page_num = 1UL << order; > + if (pfn + page_num > end_pfn) > + page_num = end_pfn - pfn; > + bitmap_set(bitmap, pfn - start_pfn, page_num); > + } > +...
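
A hedged sketch of the quoted loop, rewritten with list_for_each_entry for brevity: each free block whose starting PFN falls inside [start_pfn, end_pfn] is marked in a caller-supplied bitmap, and blocks that straddle end_pfn are clamped. Locking and bitmap allocation, which the patch handles elsewhere, are omitted.

#include <linux/mmzone.h>
#include <linux/bitmap.h>
#include <linux/mm.h>

/* Set bitmap bits for free pages of a zone that lie in [start_pfn, end_pfn]. */
static void example_mark_free_pages(struct zone *zone, unsigned long start_pfn,
				    unsigned long end_pfn, unsigned long *bitmap)
{
	unsigned int order, t;
	struct page *page;
	unsigned long pfn, page_num;

	for_each_migratetype_order(order, t) {
		list_for_each_entry(page,
				    &zone->free_area[order].free_list[t], lru) {
			pfn = page_to_pfn(page);
			if (pfn >= start_pfn && pfn <= end_pfn) {
				page_num = 1UL << order;
				if (pfn + page_num > end_pfn)
					page_num = end_pfn - pfn;
				bitmap_set(bitmap, pfn - start_pfn, page_num);
			}
		}
	}
}
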
2020 Nov 03
0
[PATCH 1/2] Revert "vhost-vdpa: fix page pinning leakage in error path"
...lse { > - unpin_user_pages(page_list, pinned); > - ret = -ENOMEM; > - } > - goto unlock; > + if (locked > lock_limit) { > + ret = -ENOMEM; > + goto out; > } > > + cur_base = msg->uaddr & PAGE_MASK; > iova &= PAGE_MASK; > - map_pfn = page_to_pfn(page_list[0]); > - > - /* One more iteration to avoid extra vdpa_map() call out of loop. */ > - for (i = 0; i <= npages; i++) { > - unsigned long this_pfn; > - u64 csize; > - > - /* The last chunk may have no valid PFN next to it */ > - this_pfn = i < npages ? page...
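
The batching idea the revert restores can be sketched as follows, with the map callback standing in for the driver's vdpa IOTLB update (not its real interface): pinned pages are walked in order, page_to_pfn() extends the current run while PFNs stay contiguous, and one mapping is issued per physically contiguous chunk.

#include <linux/mm.h>

/* Map pinned pages as few, physically contiguous chunks as possible. */
static int example_map_contiguous_chunks(struct page **pages, unsigned long npages,
					 int (*map_chunk)(unsigned long first_pfn,
							  unsigned long nr_pfns))
{
	unsigned long i, map_pfn = 0, last_pfn = 0;
	int ret;

	for (i = 0; i < npages; i++) {
		unsigned long this_pfn = page_to_pfn(pages[i]);

		if (!i) {
			map_pfn = this_pfn;
		} else if (this_pfn != last_pfn + 1) {
			/* Run broken: flush the chunk so far, start a new one. */
			ret = map_chunk(map_pfn, last_pfn - map_pfn + 1);
			if (ret)
				return ret;
			map_pfn = this_pfn;
		}
		last_pfn = this_pfn;
	}
	/* Flush the final chunk, if any pages were supplied. */
	return npages ? map_chunk(map_pfn, last_pfn - map_pfn + 1) : 0;
}
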
2007 Jan 10
1
[PATCH] linux/i386: allow CONFIG_HIGHPTE on i386 (take 2)
...8,41 @@ struct page *pte_alloc_one(struct mm_str #ifdef CONFIG_HIGHPTE pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0); + if (pte && PageHighMem(pte)) { + struct mmuext_op op; + + kmap_flush_unused(); + op.cmd = MMUEXT_PIN_L1_TABLE; + op.arg1.mfn = pfn_to_mfn(page_to_pfn(pte)); + BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0); + } #else pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0); +#endif if (pte) { SetPageForeign(pte, pte_free); set_page_count(pte, 1); } -#endif return pte; } void pte_free(struct page *pte) { -...
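
A sketch of the Xen-specific step the patch adds for highmem PTE pages, assuming the mainline header layout rather than the XenLinux tree the patch was written against: stale kmaps are flushed, then the hypervisor is asked to pin the new page as an L1 page table, addressed by the MFN derived from its PFN. Error handling is reduced to the BUG_ON used in the hunk.

#include <linux/highmem.h>
#include <xen/interface/xen.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>

/* Pin a freshly allocated highmem PTE page as an L1 table with Xen. */
static void example_pin_l1_table(struct page *pte_page)
{
	struct mmuext_op op;

	kmap_flush_unused();
	op.cmd = MMUEXT_PIN_L1_TABLE;
	op.arg1.mfn = pfn_to_mfn(page_to_pfn(pte_page));
	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
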
2020 Oct 01
0
[PATCH] vhost-vdpa: fix page pinning leakage in error path
...user_pages(page_list, pinned); + ret = -ENOMEM; + } + goto unlock; + } - while (npages) { - pinned = min_t(unsigned long, npages, list_size); - ret = pin_user_pages(cur_base, pinned, - gup_flags, page_list, NULL); - if (ret != pinned) - goto out; - - if (!last_pfn) - map_pfn = page_to_pfn(page_list[0]); - - for (i = 0; i < ret; i++) { - unsigned long this_pfn = page_to_pfn(page_list[i]); - u64 csize; - - if (last_pfn && (this_pfn != last_pfn + 1)) { - /* Pin a contiguous chunk of memory */ - csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT; - if (vhost...
2020 Oct 01
0
[PATCH v2] vhost-vdpa: fix page pinning leakage in error path
...user_pages(page_list, pinned); + ret = -ENOMEM; + } + goto unlock; + } - while (npages) { - pinned = min_t(unsigned long, npages, list_size); - ret = pin_user_pages(cur_base, pinned, - gup_flags, page_list, NULL); - if (ret != pinned) - goto out; - - if (!last_pfn) - map_pfn = page_to_pfn(page_list[0]); - - for (i = 0; i < ret; i++) { - unsigned long this_pfn = page_to_pfn(page_list[i]); - u64 csize; - - if (last_pfn && (this_pfn != last_pfn + 1)) { - /* Pin a contiguous chunk of memory */ - csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT; - if (vhost...