Displaying 20 results from an estimated 335 matches for "npages".
2024 Mar 06
1
[PATCH v3] nouveau/dmem: handle kcalloc() allocation failure
...ouveau_dmem.c
index 12feecf71e7..6fb65b01d77 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -378,9 +378,9 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
dma_addr_t *dma_addrs;
struct nouveau_fence *fence;
- src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
- dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
- dma_addrs = kcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL);
+ src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
+ dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __G...
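A minimal sketch of the pattern the v3 patch adopts, assuming only the kernel's kvcalloc()/kvfree() helpers (the wrapper name below is hypothetical): kvcalloc() may fall back to vmalloc for large npages, and __GFP_NOFAIL blocks until the allocation succeeds, so an eviction path with no way to report failure never sees a NULL array.

#include <linux/slab.h>

static unsigned long *alloc_pfn_array(unsigned long npages)
{
	/* Blocks rather than returning NULL; may be vmalloc-backed. */
	return kvcalloc(npages, sizeof(unsigned long),
			GFP_KERNEL | __GFP_NOFAIL);
}

Whichever backing store kvcalloc() chose, kvfree() releases it.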
2008 Jan 14
6
[PATCH] KVM virtio balloon driver
...+ __func__);
+ goto out;
+ }
+
+ /* TODO: kick several balloon buffers at once */
+ v->vq->vq_ops->kick(v->vq);
+out:
+ spin_unlock_irq(&v->queue_lock);
+ atomic_inc(&v->inflight_bufs);
+ return err;
+}
+
+static int kvm_balloon_inflate(struct virtballoon *v, int32_t npages)
+{
+ LIST_HEAD(tmp_list);
+ struct page *page, *tmp;
+ struct balloon_buf *buf;
+ u32 *pfn;
+ int allocated = 0;
+ int i, r = -ENOMEM;
+
+ buf = alloc_balloon_buf(v->vdev, GFP_KERNEL);
+ if (!buf)
+ return r;
+
+ pfn = (u32 *)&buf->data;
+ *pfn++ = (u32)npages;
+
+ for (i = 0; i < np...
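From the snippet, the inflate path's buffer layout is recoverable: a leading u32 page count followed by one 32-bit guest PFN per allocated page. A hedged sketch of just that layout, with a hypothetical fill_balloon_pfns() helper:

#include <linux/gfp.h>
#include <linux/mm.h>

static int fill_balloon_pfns(u32 *data, int32_t npages)
{
	u32 *pfn = data;
	int i;

	*pfn++ = (u32)npages;	/* header: how many PFNs follow */
	for (i = 0; i < npages; i++) {
		struct page *page = alloc_page(GFP_HIGHUSER);

		if (!page)
			return i;	/* caller frees the i pages already taken */
		*pfn++ = (u32)page_to_pfn(page);	/* old protocol: 32-bit PFNs */
	}
	return npages;
}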
2020 Nov 03
0
[PATCH 1/2] Revert "vhost-vdpa: fix page pinning leakage in error path"
...t; struct vhost_iotlb *iotlb = dev->iotlb;
> struct page **page_list;
> - struct vm_area_struct **vmas;
> + unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
> unsigned int gup_flags = FOLL_LONGTERM;
> - unsigned long map_pfn, last_pfn = 0;
> - unsigned long npages, lock_limit;
> - unsigned long i, nmap = 0;
> + unsigned long npages, cur_base, map_pfn, last_pfn = 0;
> + unsigned long locked, lock_limit, pinned, i;
> u64 iova = msg->iova;
> - long pinned;
> int ret = 0;
>
> if (vhost_iotlb_itree_first(iotlb, msg->iova,...
2024 Mar 03
1
[PATCH] nouveau/dmem: handle kcalloc() allocation failure
...nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 12feecf71e7..9a578262c6d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -374,13 +374,13 @@ static void
nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
{
unsigned long i, npages = range_len(&chunk->pagemap.range) >> PAGE_SHIFT;
- unsigned long *src_pfns, *dst_pfns;
- dma_addr_t *dma_addrs;
+ unsigned long src_pfns[npages], dst_pfns[npages];
+ dma_addr_t dma_addrs[npages];
struct nouveau_fence *fence;
- src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERN...
2020 Oct 01
0
[PATCH] vhost-vdpa: fix page pinning leakage in error path
...lb_update(struct vhost_vdpa *v,
struct vhost_dev *dev = &v->vdev;
struct vhost_iotlb *iotlb = dev->iotlb;
struct page **page_list;
- unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
+ struct vm_area_struct **vmas;
unsigned int gup_flags = FOLL_LONGTERM;
- unsigned long npages, cur_base, map_pfn, last_pfn = 0;
- unsigned long locked, lock_limit, pinned, i;
+ unsigned long map_pfn, last_pfn = 0;
+ unsigned long npages, lock_limit;
+ unsigned long i, nmap = 0;
u64 iova = msg->iova;
+ long pinned;
int ret = 0;
if (vhost_iotlb_itree_first(iotlb, msg->iova,...
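The leak this fix addresses is a partial pin that is never undone on the error path. A minimal sketch of the cleanup rule, using the current mainline signature of pin_user_pages() (the 2020-era version also took a vmas argument); pin_range() is hypothetical and the caller is assumed to hold mmap_read_lock():

#include <linux/mm.h>

static long pin_range(unsigned long uaddr, unsigned long npages,
		      struct page **pages)
{
	long pinned = pin_user_pages(uaddr, npages,
				     FOLL_WRITE | FOLL_LONGTERM, pages);

	if (pinned < 0)
		return pinned;		/* nothing pinned, nothing to undo */
	if (pinned != npages) {
		/* partial pin: release it so no page reference leaks */
		unpin_user_pages(pages, pinned);
		return -ENOMEM;
	}
	return pinned;
}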
2020 Oct 01
0
[PATCH v2] vhost-vdpa: fix page pinning leakage in error path
...lb_update(struct vhost_vdpa *v,
struct vhost_dev *dev = &v->vdev;
struct vhost_iotlb *iotlb = dev->iotlb;
struct page **page_list;
- unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
+ struct vm_area_struct **vmas;
unsigned int gup_flags = FOLL_LONGTERM;
- unsigned long npages, cur_base, map_pfn, last_pfn = 0;
- unsigned long locked, lock_limit, pinned, i;
+ unsigned long map_pfn, last_pfn = 0;
+ unsigned long npages, lock_limit;
+ unsigned long i, nmap = 0;
u64 iova = msg->iova;
+ long pinned;
int ret = 0;
if (vhost_iotlb_itree_first(iotlb, msg->iova,...
2014 May 30
0
[PATCH] drm/gk20a/fb: use dma_alloc_coherent() for VRAM
...kfree(mem);
}
@@ -58,11 +58,9 @@ gk20a_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
u32 memtype, struct nouveau_mem **pmem)
{
struct device *dev = nv_device_base(nv_device(pfb));
- struct nouveau_mem *mem;
- int type = memtype & 0xff;
- dma_addr_t dma_addr;
- int npages;
- int order;
+ struct gk20a_mem *mem;
+ u32 type = memtype & 0xff;
+ u32 npages, order;
int i;
nv_debug(pfb, "%s: size: %llx align: %x, ncmin: %x\n", __func__, size,
@@ -80,59 +78,48 @@ gk20a_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
order = fls(align);...
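As the subject says, the patch's direction is to back VRAM with dma_alloc_coherent(). A hedged sketch of that allocation style, assuming a struct device *dev and a size in bytes:

#include <linux/dma-mapping.h>

static void *vram_alloc(struct device *dev, size_t size,
			dma_addr_t *dma_handle)
{
	/*
	 * One coherent buffer: the returned CPU pointer and
	 * *dma_handle describe the same memory, contiguous in
	 * DMA address space.
	 */
	return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}

/* Release with dma_free_coherent(dev, size, cpu_addr, dma_handle). */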
2019 Aug 07
4
[PATCH] nouveau/hmm: map pages after migration
...rm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
DMA_BIDIRECTIONAL);
}
- /*
- * FIXME optimization: update GPU page table to point to newly migrated
- * memory.
- */
migrate_vma_finalize(args);
}
@@ -631,11 +635,12 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
unsigned long npages = (end - start) >> PAGE_SHIFT;
unsigned long max = min(SG_MAX_SINGLE_ALLOC, npages);
dma_addr_t *dma_addrs;
+ u64 *pfns;
struct migrate_vma args = {
.vma = vma,
.start = start,
};
- unsigned long c, i;
+ unsigned long i;
int ret = -ENOMEM;
args.src = kcalloc(max, sizeof(...
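For context, the snippet sits inside nouveau's migrate_vma flow. A hedged sketch of that flow using the kernel's migrate_vma_setup()/migrate_vma_pages()/migrate_vma_finalize() helpers and only the fields visible in the diff; later kernels additionally require .flags and .pgmap_owner:

static int migrate_chunk(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end, unsigned long *src_pfns,
			 unsigned long *dst_pfns)
{
	struct migrate_vma args = {
		.vma   = vma,
		.start = start,
		.end   = end,
		.src   = src_pfns,
		.dst   = dst_pfns,
	};

	if (migrate_vma_setup(&args))
		return -EBUSY;
	/* allocate device pages and fill args.dst here */
	migrate_vma_pages(&args);
	/* the removed FIXME: the patch updates the GPU page table here */
	migrate_vma_finalize(&args);
	return 0;
}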
2019 Sep 11
6
[PATCH 0/4] HMM tests and minor fixes
These changes are based on Jason's latest hmm branch.
Patch 1 was previously posted here [1] but was dropped from the original
series. Hopefully, the tests will reduce concerns about edge conditions.
I'm sure more tests could be usefully added but I thought this was a good
starting point.
[1] https://lore.kernel.org/linux-mm/20190726005650.2566-6-rcampbell at nvidia.com/
Ralph Campbell
2008 Jan 08
1
[PATCH] kvm guest balloon driver
...tk("%s: add_buf err\n", __func__);
+ goto out;
+ }
+ atomic_inc(&virtballoon.inflight_bufs);
+
+ /* TODO: kick several balloon buffers at once */
+ vq->vq_ops->kick(vq);
+out:
+ spin_unlock_irq(&balloon_queue_lock);
+ return err;
+}
+
+static int kvm_balloon_inflate(int32_t npages)
+{
+ LIST_HEAD(tmp_list);
+ struct balloon_page *node, *tmp;
+ struct balloon_buf *buf;
+ u32 *pfn;
+ int allocated = 0;
+ int i, r = -ENOMEM;
+
+ buf = alloc_balloon_buf();
+ if (!buf)
+ return r;
+
+ pfn = (u32 *)&buf->data;
+ *pfn++ = (u32)npages;
+
+ for (i = 0; i < npages; i++) {
+...
2017 Oct 22
2
[PATCH v1 2/3] virtio-balloon: deflate up to oom_pages on OOM
...ges, "pages to free on OOM");
>
> #ifdef CONFIG_BALLOON_COMPACTION
> @@ -359,16 +359,20 @@ static int virtballoon_oom_notify(struct notifier_block *self,
> {
> struct virtio_balloon *vb;
> unsigned long *freed;
> - unsigned num_freed_pages;
> + unsigned int npages = oom_pages;
>
> vb = container_of(self, struct virtio_balloon, nb);
> if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
> return NOTIFY_OK;
>
> freed = parm;
> - num_freed_pages = leak_balloon(vb, oom_pages);
> +
> + /* Don't deflate...
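The callback above is invoked through the kernel's OOM notifier chain. A minimal sketch of the hookup, assuming only the nb member visible in the container_of() call; register_oom_notifier() is the real API:

#include <linux/oom.h>

static int vb_register_oom(struct virtio_balloon *vb)
{
	vb->nb.notifier_call = virtballoon_oom_notify;
	return register_oom_notifier(&vb->nb);	/* fires on system OOM */
}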
2024 Mar 08
0
[PATCH v3] nouveau/dmem: handle kcalloc() allocation failure
...b01d77 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
> @@ -378,9 +378,9 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
> dma_addr_t *dma_addrs;
> struct nouveau_fence *fence;
>
> - src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
> - dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
> - dma_addrs = kcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL);
> + src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
> + dst_pfns = kvcalloc(npages, sizeof(*dst_pfn...
2020 Aug 18
2
[PATCH 1/2] drm: allow limiting the scatter list size.
...pu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index f06e19e7be04..e5b6e7996f80 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -103,7 +103,8 @@ struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
struct sg_table *sgt;
- sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
+ sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages,
+ etnaviv_obj->base.max_segment);
if (IS_ERR(sgt)) {
dev_err(dev->dev, "...
2018 Jul 20
0
[PATCH RFC V4 3/3] KVM: X86: Adding skeleton for Memory ROE
.../
lockdep_assert_held(&kvm->slots_lock);
if (is_dirty)
@@ -6672,7 +6672,98 @@ static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
}
#endif
-/*
+#ifdef CONFIG_KVM_MROE
+static void kvm_mroe_protect_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
+ gfn_t gfn, u64 npages)
+{
+ int i;
+
+ for (i = gfn - slot->base_gfn; i < gfn + npages - slot->base_gfn; i++)
+ set_bit(i, slot->mroe_bitmap);
+ kvm_mmu_slot_apply_write_access(kvm, slot);
+ kvm_arch_flush_shadow_memslot(kvm, slot);
+}
+
+static int __kvm_mroe_protect_range(struct kvm *kvm, gpa_t gpa, u64 n...
2020 Sep 07
2
[PATCH v4 1/1] drm: allow limiting the scatter list size.
...pu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index f06e19e7be04..ea19f1d27275 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -103,7 +103,8 @@ struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
struct sg_table *sgt;
- sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
+ sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
+ etnaviv_obj->pages, npages);
if (IS_ERR(sgt)) {
dev_err(dev->dev, "failed t...
2020 Jan 13
9
[PATCH v6 0/6] mm/hmm/test: add self tests for HMM
This series adds new functions to the mmu interval notifier API to
allow device drivers with MMUs to dynamically mirror a process' page
tables based on device faults and invalidation callbacks. The Nouveau
driver is updated to use the extended API and a set of standalone self-tests
is added to help validate and maintain correctness.
The patches are based on linux-5.5.0-rc6 and are for
2020 Aug 18
2
[PATCH v2 1/2] drm: allow limiting the scatter list size.
...pu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index f06e19e7be04..90654246b335 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -103,7 +103,8 @@ struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
struct sg_table *sgt;
- sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
+ sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages,
+ etnaviv_obj->base.dev->max_segment);
if (IS_ERR(sgt)) {
dev_err(dev->dev...