search for: __free_page

Displaying 20 results from an estimated 206 matches for "__free_page".

2019 Jul 31
1
[PATCH 5/9] nouveau: simplify nouveau_dmem_migrate_to_ram
...> - dpage = migrate_pfn_to_page(dst_pfns[i]);
> - if (!dpage || dst_pfns[i] == MIGRATE_PFN_ERROR)
> - continue;
> -
> - spage = migrate_pfn_to_page(src_pfns[i]);
> - if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE)) {
> - dst_pfns[i] = MIGRATE_PFN_ERROR;
> - __free_page(dpage);
> - continue;
> - }
> -
> - fault->dma[fault->npages] =
> - dma_map_page_attrs(dev, dpage, 0, PAGE_SIZE,
> - PCI_DMA_BIDIRECTIONAL,
> - DMA_ATTR_SKIP_CPU_SYNC);
> - if (dma_mapping_error(dev, fault->dma[fault->npages])) {
> - dst...
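For context on the code this series deletes: the old nouveau_dmem fault path mapped each freshly allocated destination page for DMA and freed it again when the mapping failed. A minimal sketch of that map-or-free idiom, with placeholder names and the generic DMA_BIDIRECTIONAL constant instead of the legacy PCI_DMA_BIDIRECTIONAL (not the actual nouveau helper):

/* Map a destination page; on failure drop the page so the caller can
 * mark the slot MIGRATE_PFN_ERROR and continue with the next one. */
static int map_dst_page(struct device *dev, struct page *dpage, dma_addr_t *out)
{
	*out = dma_map_page_attrs(dev, dpage, 0, PAGE_SIZE,
				  DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, *out)) {
		__free_page(dpage);
		return -ENOMEM;
	}
	return 0;
}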
2019 Aug 08
0
[PATCH 6/9] nouveau: simplify nouveau_dmem_migrate_to_ram
...i++) {
- struct page *spage, *dpage;
-
- dpage = migrate_pfn_to_page(dst_pfns[i]);
- if (!dpage || dst_pfns[i] == MIGRATE_PFN_ERROR)
- continue;
-
- spage = migrate_pfn_to_page(src_pfns[i]);
- if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE)) {
- dst_pfns[i] = MIGRATE_PFN_ERROR;
- __free_page(dpage);
- continue;
- }
-
- fault->dma[fault->npages] =
- dma_map_page_attrs(dev, dpage, 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL,
- DMA_ATTR_SKIP_CPU_SYNC);
- if (dma_mapping_error(dev, fault->dma[fault->npages])) {
- dst_pfns[i] = MIGRATE_PFN_ERROR;
- __free_page...
2019 Jul 29
0
[PATCH 5/9] nouveau: simplify nouveau_dmem_migrate_to_ram
...i++) {
- struct page *spage, *dpage;
-
- dpage = migrate_pfn_to_page(dst_pfns[i]);
- if (!dpage || dst_pfns[i] == MIGRATE_PFN_ERROR)
- continue;
-
- spage = migrate_pfn_to_page(src_pfns[i]);
- if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE)) {
- dst_pfns[i] = MIGRATE_PFN_ERROR;
- __free_page(dpage);
- continue;
- }
-
- fault->dma[fault->npages] =
- dma_map_page_attrs(dev, dpage, 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL,
- DMA_ATTR_SKIP_CPU_SYNC);
- if (dma_mapping_error(dev, fault->dma[fault->npages])) {
- dst_pfns[i] = MIGRATE_PFN_ERROR;
- __free_page...
2019 Aug 08
1
[PATCH 6/9] nouveau: simplify nouveau_dmem_migrate_to_ram
...> - dpage = migrate_pfn_to_page(dst_pfns[i]);
> - if (!dpage || dst_pfns[i] == MIGRATE_PFN_ERROR)
> - continue;
> -
> - spage = migrate_pfn_to_page(src_pfns[i]);
> - if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE)) {
> - dst_pfns[i] = MIGRATE_PFN_ERROR;
> - __free_page(dpage);
> - continue;
> - }
> -
> - fault->dma[fault->npages] =
> - dma_map_page_attrs(dev, dpage, 0, PAGE_SIZE,
> - PCI_DMA_BIDIRECTIONAL,
> - DMA_ATTR_SKIP_CPU_SYNC);
> - if (dma_mapping_error(dev, fault->dma[fault->npages])) {
> - dst...
2013 Jun 21
5
[patch] xen-netback: double free on unload
...a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1968,8 +1968,8 @@ static void __exit netback_fini(void)
del_timer_sync(&netbk->net_timer);
kthread_stop(netbk->task);
for (j = 0; j < MAX_PENDING_REQS; j++) {
- if (netbk->mmap_pages[i])
- __free_page(netbk->mmap_pages[i]);
+ if (netbk->mmap_pages[j])
+ __free_page(netbk->mmap_pages[j]);
}
}
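The bug being fixed: the cleanup loop iterates with j, but the original body indexed mmap_pages with the stale outer counter i, so each group freed the same slot MAX_PENDING_REQS times (the double free in the subject) and leaked the rest. A sketch of the corrected per-group teardown, with the surrounding structure assumed from context rather than quoted from the driver:

static void netback_free_mmap_pages(struct xen_netbk *netbk)
{
	int j;

	for (j = 0; j < MAX_PENDING_REQS; j++) {
		if (netbk->mmap_pages[j]) {
			__free_page(netbk->mmap_pages[j]);
			netbk->mmap_pages[j] = NULL; /* defensive, not in the patch */
		}
	}
}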
2016 Sep 26
6
[PATCH v4 0/3] drm/nouveau: set DMA mask before mapping scratch page
This v4 is now a 3 piece series, after Alexandre pointed out that both GF 100 and NV50 are affected by the same issue, and that a related issue has been solved already for Tegra in commit 9d0394c6bed5 ("drm/nouveau/instmem/gk20a: set DMA mask early"). The issue that this series addresses is the fact that the Nouveau driver invokes the DMA API before setting the DMA mask. In both cases
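The ordering the series enforces is simply "set the DMA mask, then touch the DMA API". A minimal sketch of that order for a single scratch page; the device pointer and the 40-bit mask are placeholders, not the values nouveau actually uses:

static int map_scratch_page(struct device *dev, struct page *page, dma_addr_t *addr)
{
	int ret;

	/* Configure addressing limits before the first mapping call. */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
	if (ret)
		return ret;

	*addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *addr))
		return -ENOMEM;
	return 0;
}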
2016 Jun 21
1
[RFC PATCH v2] drm/nouveau/fb/nv50: set DMA mask before mapping scratch page
...m_fb *base)
struct nv50_fb *fb = nv50_fb(base);
struct nvkm_device *device = fb->base.subdev.device;
- if (fb->r100c08_page) {
+ if (fb->r100c08 && fb->r100c08 != DMA_ERROR_CODE)
dma_unmap_page(device->dev, fb->r100c08, PAGE_SIZE, DMA_BIDIRECTIONAL);
- __free_page(fb->r100c08_page);
- }
+
+ __free_page(fb->r100c08_page);
return fb;
}
@@ -264,13 +283,9 @@ nv50_fb_new_(const struct nv50_fb_func *func, struct nvkm_device *device,
*pfb = &fb->base;
fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (fb->r100c08_page) {
-...
2013 May 26
6
[PATCH v8, part3 12/14] mm: correctly update zone->managed_pages
...balloon_low--;
- totalram_pages++;
+ adjust_managed_page_count(page, 1);
return page;
}
@@ -372,9 +361,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
#endif
/* Relinquish the page back to the allocator. */
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
+ __free_reserved_page(page);
}
balloon_stats.current_pages += rc;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f8feeec..95c5a6b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1246,7 +1246,7 @@ static void __init gather_bootmem_prealloc(void)
* side-effects, like CommitLimit g...
2013 May 26
6
[PATCH v8, part3 12/14] mm: correctly update zone->managed_pages
...balloon_low--;
- totalram_pages++;
+ adjust_managed_page_count(page, 1);
return page;
}
@@ -372,9 +361,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
#endif
/* Relinquish the page back to the allocator. */
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
+ __free_reserved_page(page);
}
balloon_stats.current_pages += rc;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f8feeec..95c5a6b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1246,7 +1246,7 @@ static void __init gather_bootmem_prealloc(void)
* side-effects, like CommitLimit g...
2013 May 26
6
[PATCH v8, part3 12/14] mm: correctly update zone->managed_pages
...balloon_low--;
- totalram_pages++;
+ adjust_managed_page_count(page, 1);
return page;
}
@@ -372,9 +361,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
#endif
/* Relinquish the page back to the allocator. */
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
+ __free_reserved_page(page);
}
balloon_stats.current_pages += rc;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f8feeec..95c5a6b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1246,7 +1246,7 @@ static void __init gather_bootmem_prealloc(void)
* side-effects, like CommitLimit g...
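These hunks replace the open-coded "unreserve, reset refcount, free" sequence with the helper the series introduces, and the bare totalram_pages++ with adjust_managed_page_count() so zone->managed_pages stays in sync. From memory of include/linux/mm.h of that era, the helper is essentially:

static inline void __free_reserved_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
}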
2016 Sep 26
0
[PATCH v4 2/3] drm/nouveau/fb/gf100: defer DMA mapping of scratch page to init() hook
...3,12 +114,13 @@ gf100_fb_dtor(struct nvkm_fb *base)
struct gf100_fb *fb = gf100_fb(base);
struct nvkm_device *device = fb->base.subdev.device;
- if (fb->r100c10_page) {
+ if (fb->r100c10) {
dma_unmap_page(device->dev, fb->r100c10, PAGE_SIZE, DMA_BIDIRECTIONAL);
- __free_page(fb->r100c10_page);
}
+ __free_page(fb->r100c10_page);
+
return fb;
}
@@ -124,11 +136,9 @@ gf100_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
*pfb = &fb->base;
fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (fb->r100c10_page) {...
2012 Oct 11
0
[PATCH] Btrfs: Fix wrong error handling code
...ns(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 64dc93f..a32ebfe 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4104,8 +4104,8 @@ struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
return eb;
err:
- for (i--; i >= 0; i--)
- __free_page(eb->pages[i]);
+ for (; i > 0; i--)
+ __free_page(eb->pages[i - 1]);
__free_extent_buffer(eb);
return NULL;
}
--
1.7.12.3
--
To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in the body of a message to majordomo@vger.kernel.org
More majordomo info at...
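The error path above unwinds a partially built page array. With an unsigned index the old form for (i--; i >= 0; i--) never terminates, because i >= 0 is always true and i wraps around instead of going negative. The fixed idiom, sketched generically (free the i pages that were successfully allocated, from pages[i - 1] down to pages[0]):

static void free_partial(struct page **pages, unsigned long i)
{
	for (; i > 0; i--)
		__free_page(pages[i - 1]);
}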
2014 Jul 31
2
[PATCH v5] drm/nouveau: map pages using DMA API
...@@ -268,7 +270,8 @@ nv50_fb_dtor(struct nouveau_object *object)
struct nv50_fb_priv *priv = (void *)object;
if (priv->r100c08_page) {
- nv_device_unmap_page(device, priv->r100c08);
+ dma_unmap_page(nv_device_base(device), priv->r100c08, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
__free_page(priv->r100c08_page);
}
diff --git a/nvkm/subdev/fb/nvc0.c b/nvkm/subdev/fb/nvc0.c
index 0670ae33ee45..9f5f3ac8d4c6 100644
--- a/nvkm/subdev/fb/nvc0.c
+++ b/nvkm/subdev/fb/nvc0.c
@@ -70,7 +70,8 @@ nvc0_fb_dtor(struct nouveau_object *object)
struct nvc0_fb_priv *priv = (void *)object;
if...
2016 Sep 26
0
[PATCH v4 3/3] drm/nouveau/fb/nv50: defer DMA mapping of scratch page to init() hook
...@ -233,11 +245,11 @@ nv50_fb_dtor(struct nvkm_fb *base)
struct nv50_fb *fb = nv50_fb(base);
struct nvkm_device *device = fb->base.subdev.device;
- if (fb->r100c08_page) {
+ if (fb->r100c08)
dma_unmap_page(device->dev, fb->r100c08, PAGE_SIZE, DMA_BIDIRECTIONAL);
- __free_page(fb->r100c08_page);
- }
+
+ __free_page(fb->r100c08_page);
return fb;
}
@@ -264,13 +276,9 @@ nv50_fb_new_(const struct nv50_fb_func *func, struct nvkm_device *device,
*pfb = &fb->base;
fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (fb->r100c08_page) {
-...
2019 Aug 08
10
turn hmm migrate_vma upside down v2
Hi Jérôme, Ben and Jason,

below is a series against the hmm tree which starts revamping the migrate_vma functionality. The prime idea is to export three slightly lower level functions and thus avoid the need for migrate_vma_ops callbacks.

Diffstat: 5 files changed, 281 insertions(+), 607 deletions(-)

A git tree is also available at: git://git.infradead.org/users/hch/misc.git
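The driver-side flow that results from exporting those three helpers looks roughly like the sketch below; the function names migrate_vma_setup/migrate_vma_pages/migrate_vma_finalize and the struct migrate_vma fields are my reading of the series, not quoted from it:

static int migrate_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end, unsigned long *src,
			 unsigned long *dst)
{
	struct migrate_vma args = {
		.vma   = vma,
		.start = start,
		.end   = end,
		.src   = src,
		.dst   = dst,
	};
	int ret;

	ret = migrate_vma_setup(&args);	/* collect and isolate source pages */
	if (ret)
		return ret;

	/* Driver allocates destination pages and copies the data here for
	 * every entry still flagged MIGRATE_PFN_MIGRATE in args.src. */

	migrate_vma_pages(&args);	/* install the destination pages */
	migrate_vma_finalize(&args);	/* release source pages and clean up */
	return 0;
}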
2016 Jul 07
3
[PATCH v3] drm/nouveau/fb/nv50: set DMA mask before mapping scratch page
...@ -233,11 +255,11 @@ nv50_fb_dtor(struct nvkm_fb *base)
struct nv50_fb *fb = nv50_fb(base);
struct nvkm_device *device = fb->base.subdev.device;
- if (fb->r100c08_page) {
+ if (fb->r100c08)
dma_unmap_page(device->dev, fb->r100c08, PAGE_SIZE, DMA_BIDIRECTIONAL);
- __free_page(fb->r100c08_page);
- }
+
+ __free_page(fb->r100c08_page);
return fb;
}
@@ -264,13 +286,9 @@ nv50_fb_new_(const struct nv50_fb_func *func, struct nvkm_device *device,
*pfb = &fb->base;
fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (fb->r100c08_page) {
-...
2017 Dec 26
0
[PATCH v20 4/7] virtio-balloon: VIRTIO_BALLOON_F_SG
...op(&pages))) {
>>>>>> balloon_page_enqueue(&vb->vb_dev_info, page);
>>>>>> + if (use_sg) {
>>>>>> + if (xb_set_page(vb, page, &pfn_min, &pfn_max) < 0) {
>>>>>> + __free_page(page);
>>>>>> + continue;
>>>>>> + }
>>>>>> + } else {
>>>>>> + set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
>>>>>> + }
>>>>> Is thi...
2012 Apr 13
0
[PATCH] virtio_balloon: fix handling of PAGE_SIZE != 4k
...vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
totalram_pages--;
- vb->num_pages++;
list_add(&page->lru, &vb->pages);
}
@@ -130,8 +154,9 @@ static void release_pages_by_pfn(const u32 pfns[], unsigned int num)
{
unsigned int i;
- for (i = 0; i < num; i++) {
- __free_page(pfn_to_page(pfns[i]));
+ /* Find pfns pointing at start of each page, get pages and free them. */
+ for (i = 0; i < num; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
+ __free_page(balloon_pfn_to_page(pfns[i]));
totalram_pages++;
}
}
@@ -143,11 +168,12 @@ static void leak_balloon(struct virtio_ba...
2012 Apr 13
0
[PATCH] virtio_balloon: fix handling of PAGE_SIZE != 4k
...vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
totalram_pages--;
- vb->num_pages++;
list_add(&page->lru, &vb->pages);
}
@@ -130,8 +154,9 @@ static void release_pages_by_pfn(const u32 pfns[], unsigned int num)
{
unsigned int i;
- for (i = 0; i < num; i++) {
- __free_page(pfn_to_page(pfns[i]));
+ /* Find pfns pointing at start of each page, get pages and free them. */
+ for (i = 0; i < num; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
+ __free_page(balloon_pfn_to_page(pfns[i]));
totalram_pages++;
}
}
@@ -143,11 +168,12 @@ static void leak_balloon(struct virtio_ba...
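The patch works because the virtio balloon protocol always speaks 4 KiB frame numbers (VIRTIO_BALLOON_PFN_SHIFT is 12 in the virtio_balloon ABI header), so on a kernel with a larger PAGE_SIZE each struct page covers several balloon pfns and only the first pfn of each group maps back to a page. A sketch of the conversion helper, reconstructed from memory of the driver rather than copied from the patch:

#define VIRTIO_BALLOON_PAGES_PER_PAGE (PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)

static struct page *balloon_pfn_to_page(u32 pfn)
{
	/* Only pfns that start a Linux page are ever handed back to us. */
	BUG_ON(pfn % VIRTIO_BALLOON_PAGES_PER_PAGE);
	return pfn_to_page(pfn / VIRTIO_BALLOON_PAGES_PER_PAGE);
}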
2012 Mar 09
2
[PATCH] linux-2.6.18/gnttab: add deferred freeing logic
..._irqrestore(&gnttab_list_lock, flags);
+ if (_gnttab_end_foreign_access_ref(entry->ref)) {
+ put_free_entry(entry->ref);
+ if (entry->page) {
+ printk(KERN_DEBUG
+ "freeing g.e. %#x (pfn %#lx)\n",
+ entry->ref, page_to_pfn(entry->page));
+ __free_page(entry->page);
+ } else
+ printk(KERN_DEBUG "freeing g.e. %#x\n",
+ entry->ref);
+ kfree(entry);
+ entry = NULL;
+ } else {
+ if (!--entry->warn_delay)
+ printk(KERN_INFO "g.e. %#x still pending\n",
+ entry->ref);
+ if (!first)
+...
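Condensed, the deferral logic in the hunk above amounts to: only release the grant reference and free the backing page once the foreign domain has really dropped its access, otherwise keep the bookkeeping entry queued and warn periodically. A sketch using the names visible in the snippet (the struct type and the list/timer plumbing around it are assumed, not quoted):

static bool try_free_deferred_entry(struct deferred_entry *entry)
{
	if (!_gnttab_end_foreign_access_ref(entry->ref))
		return false; /* still in use by the other domain, retry later */

	put_free_entry(entry->ref);
	if (entry->page)
		__free_page(entry->page);
	kfree(entry);
	return true;
}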