search for: imem

Displaying 20 results from an estimated 152 matches for "imem".

2017 Jan 30
2
[PATCH] drm/nouveau: gk20a: Turn instmem lock into mutex
...struct gk20a_instmem { struct nvkm_instmem base; /* protects vaddr_* and gk20a_instobj::vaddr* */ - spinlock_t lock; + struct mutex lock; /* CPU mappings LRU */ unsigned int vaddr_use; @@ -184,11 +184,10 @@ gk20a_instobj_acquire_iommu(struct nvkm_memory *memory) struct gk20a_instmem *imem = node->base.imem; struct nvkm_ltc *ltc = imem->base.subdev.device->ltc; const u64 size = nvkm_memory_size(memory); - unsigned long flags; nvkm_ltc_flush(ltc); - spin_lock_irqsave(&imem->lock, flags); + mutex_lock(&imem->lock); if (node->base.vaddr) { if (...
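The shape of this change, stripped of the nouveau specifics, is a spinlock-to-mutex conversion: the critical section protecting the CPU-mapping LRU ends up calling code that can sleep, so an irq-saving spinlock is no longer appropriate. A minimal sketch of the before/after pattern, with hypothetical names (not the actual nouveau code):

    #include <linux/spinlock.h>
    #include <linux/mutex.h>

    /* Before: an irq-saving spinlock; nothing in the section may sleep. */
    static void acquire_with_spinlock(spinlock_t *lock)
    {
            unsigned long flags;

            spin_lock_irqsave(lock, flags);
            /* bookkeeping only, no sleeping calls allowed here */
            spin_unlock_irqrestore(lock, flags);
    }

    /* After: a mutex; the section may now call sleeping code such as
     * the CPU-mapping helpers used when recycling LRU entries. */
    static void acquire_with_mutex(struct mutex *lock)
    {
            mutex_lock(lock);
            /* may sleep, e.g. vmap()/vunmap() */
            mutex_unlock(lock);
    }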
2015 Nov 11
2
[PATCH] instmem/gk20a: use DMA API CPU mapping
...19 @@ gk20a_instobj_size(struct nvkm_memory *memory) return (u64)gk20a_instobj(memory)->mem.size << 12; } -static void __iomem * -gk20a_instobj_cpu_map_dma(struct nvkm_memory *memory) -{ - struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory); - struct device *dev = node->base.imem->base.subdev.device->dev; - int npages = nvkm_memory_size(memory) >> 12; - struct page *pages[npages]; - int i; - - /* phys_to_page does not exist on all platforms... */ - pages[0] = pfn_to_page(dma_to_phys(dev, node->handle) >> PAGE_SHIFT); - for (i = 1; i < npages; i++) -...
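The helper being removed rebuilt struct page pointers from the DMA handle (via dma_to_phys()/pfn_to_page() plus a variable-length array on the stack) just to vmap() the buffer. The replacement idea is to let the DMA API hand back a usable kernel mapping directly. A hedged sketch of that pattern, using the current attrs-as-bitmask signature and illustrative names:

    #include <linux/dma-mapping.h>

    static void *alloc_cpu_mapped(struct device *dev, size_t size,
                                  dma_addr_t *handle)
    {
            /* Without DMA_ATTR_NO_KERNEL_MAPPING the returned pointer is a
             * CPU-usable kernel virtual address; no manual vmap() needed. */
            return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, 0);
    }

    static void free_cpu_mapped(struct device *dev, size_t size,
                                void *cpu_addr, dma_addr_t handle)
    {
            dma_free_attrs(dev, size, cpu_addr, handle, 0);
    }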
2015 Nov 09
2
[PATCH] instmem/gk20a: fix race conditions
...35 @@ gk20a_instobj_cpu_map_iommu(struct nvkm_memory *memory) } /* - * Must be called while holding gk20a_instmem_lock + * Recycle the vaddr of obj. Must be called with gk20a_instmem::lock held. + */ +static void +gk20a_instobj_recycle_vaddr(struct gk20a_instobj *obj) +{ + struct gk20a_instmem *imem = obj->imem; + /* there should not be any user left... */ + WARN_ON(obj->use_cpt); + list_del(&obj->vaddr_node); + vunmap(obj->vaddr); + obj->vaddr = NULL; + imem->vaddr_use -= nvkm_memory_size(&obj->memory); + nvkm_debug(&imem->base.subdev, "vaddr used: %x/...
2017 Feb 24
1
[PATCH] drm/nouveau: gk20a: Turn instmem lock into mutex
...otects vaddr_* and gk20a_instobj::vaddr* */ >> - spinlock_t lock; >> + struct mutex lock; >> >> /* CPU mappings LRU */ >> unsigned int vaddr_use; >> @@ -184,11 +184,10 @@ gk20a_instobj_acquire_iommu(struct nvkm_memory *memory) >> struct gk20a_instmem *imem = node->base.imem; >> struct nvkm_ltc *ltc = imem->base.subdev.device->ltc; >> const u64 size = nvkm_memory_size(memory); >> - unsigned long flags; >> >> nvkm_ltc_flush(ltc); >> >> - spin_lock_irqsave(&imem->lock, flags); >> + mu...
2015 Nov 11
0
[PATCH] instmem/gk20a: use DMA API CPU mapping
...ory *memory) > return (u64)gk20a_instobj(memory)->mem.size << 12; > } > > -static void __iomem * > -gk20a_instobj_cpu_map_dma(struct nvkm_memory *memory) > -{ > - struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory); > - struct device *dev = node->base.imem->base.subdev.device->dev; > - int npages = nvkm_memory_size(memory) >> 12; > - struct page *pages[npages]; > - int i; > - > - /* phys_to_page does not exist on all platforms... */ > - pages[0] = pfn_to_page(dma_to_phys(dev, node->handle) >> PAGE_SHIFT); > -...
2015 Nov 11
0
[PATCH] instmem/gk20a: fix race conditions
...ry *memory) > } > > /* > - * Must be called while holding gk20a_instmem_lock > + * Recycle the vaddr of obj. Must be called with gk20a_instmem::lock held. > + */ > +static void > +gk20a_instobj_recycle_vaddr(struct gk20a_instobj *obj) > +{ > + struct gk20a_instmem *imem = obj->imem; > + /* there should not be any user left... */ > + WARN_ON(obj->use_cpt); > + list_del(&obj->vaddr_node); > + vunmap(obj->vaddr); > + obj->vaddr = NULL; > + imem->vaddr_use -= nvkm_memory_size(&obj->memory); > + nvkm_debug(&imem->...
2015 Oct 26
2
[PATCH] instmem/gk20a: exclusively acquire instobjs
...{ /* protects vaddr_* and gk20a_instobj::vaddr* */ spinlock_t lock; + unsigned long flags; /* CPU mappings LRU */ unsigned int vaddr_use; @@ -188,12 +189,11 @@ gk20a_instobj_acquire(struct nvkm_memory *memory) struct gk20a_instobj *node = gk20a_instobj(memory); struct gk20a_instmem *imem = node->imem; struct nvkm_ltc *ltc = imem->base.subdev.device->ltc; - const u64 size = nvkm_memory_size(memory); - unsigned long flags; + u64 size; nvkm_ltc_flush(ltc); - spin_lock_irqsave(&imem->lock, flags); + spin_lock_irqsave(&imem->lock, imem->flags); if (...
2017 Feb 23
0
[PATCH] drm/nouveau: gk20a: Turn instmem lock into mutex
...base; > > /* protects vaddr_* and gk20a_instobj::vaddr* */ > - spinlock_t lock; > + struct mutex lock; > > /* CPU mappings LRU */ > unsigned int vaddr_use; > @@ -184,11 +184,10 @@ gk20a_instobj_acquire_iommu(struct nvkm_memory *memory) > struct gk20a_instmem *imem = node->base.imem; > struct nvkm_ltc *ltc = imem->base.subdev.device->ltc; > const u64 size = nvkm_memory_size(memory); > - unsigned long flags; > > nvkm_ltc_flush(ltc); > > - spin_lock_irqsave(&imem->lock, flags); > + mutex_lock(&imem->lock)...
2015 Nov 04
0
[PATCH] instmem/gk20a: exclusively acquire instobjs
...obj::vaddr* */ > spinlock_t lock; > + unsigned long flags; > > /* CPU mappings LRU */ > unsigned int vaddr_use; > @@ -188,12 +189,11 @@ gk20a_instobj_acquire(struct nvkm_memory *memory) > struct gk20a_instobj *node = gk20a_instobj(memory); > struct gk20a_instmem *imem = node->imem; > struct nvkm_ltc *ltc = imem->base.subdev.device->ltc; > - const u64 size = nvkm_memory_size(memory); > - unsigned long flags; > + u64 size; > > nvkm_ltc_flush(ltc); > > - spin_lock_irqsave(&imem->lock, flags); > + spin_lock_irqsave(...
2023 Dec 08
1
[PATCH] drm/nouveau: Fixup gk20a instobj hierarchy
From: Thierry Reding <treding at nvidia.com> Commit 12c9b05da918 ("drm/nouveau/imem: support allocations not preserved across suspend") uses container_of() to cast from struct nvkm_memory to struct nvkm_instobj, assuming that all instance objects are derived from struct nvkm_instobj. For the gk20a family that's not the case and they are derived from struct nvkm_memory ins...
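The underlying hazard is that container_of() is pure pointer arithmetic: it is only valid if the outer structure really embeds the member you start from. A small illustration of the mismatch described above, with hypothetical types (not the actual nouveau ones):

    #include <linux/kernel.h>

    struct memory_like { size_t size; };

    /* Most instance objects wrap the memory in an intermediate type ... */
    struct instobj_like {
            struct memory_like memory;
            int preserve;
    };

    /* ... but some derive from the memory type directly. */
    struct direct_obj {
            struct memory_like memory;
            void *vaddr;
    };

    static int is_preserved(struct memory_like *mem)
    {
            /* Valid solely when mem really sits inside an instobj_like;
             * called on a direct_obj this reads unrelated data, which is
             * the kind of bug the hierarchy fixup addresses. */
            struct instobj_like *iobj =
                    container_of(mem, struct instobj_like, memory);

            return iobj->preserve;
    }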
2017 Aug 17
0
[PATCH 08/13] drm/nouveau/imem/gk20a: Use sychronized interface of the IOMMU-API
...pu/drm/nouveau/nvkm/subdev/instmem/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c @@ -322,8 +322,9 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory) /* Unmap pages from GPU address space and free them */ for (i = 0; i < node->base.mem.size; i++) { - iommu_unmap(imem->domain, - (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE); + iommu_unmap_sync(imem->domain, + (r->offset + i) << imem->iommu_pgshift, + PAGE_SIZE); dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL); __free_page...
2016 Jun 10
0
[PATCH v4 14/44] drm/nouveau: dma-mapping: Use unsigned long for dma_attrs
...gned long attrs; }; #define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base) @@ -293,7 +293,7 @@ gk20a_instobj_dtor_dma(struct nvkm_memory *memory) goto out; dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->base.vaddr, - node->handle, &imem->attrs); + node->handle, imem->attrs); out: return node; @@ -386,7 +386,7 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align, node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT, &node->handle, GFP_KERNEL, -...
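For context, this series changes the DMA attributes argument from a struct dma_attrs pointer to a plain unsigned long bitmask of DMA_ATTR_* flags, which is why &imem->attrs becomes imem->attrs above. With the bitmask API the attributes are simply OR-ed together; a hedged sketch with illustrative names and flags:

    #include <linux/dma-mapping.h>

    static void *alloc_unmapped(struct device *dev, size_t size,
                                dma_addr_t *handle)
    {
            /* attrs is just a bitmask; no init_dma_attrs()/dma_set_attr() */
            unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING |
                                  DMA_ATTR_WRITE_COMBINE;

            return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, attrs);
    }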
2016 Jun 30
0
[PATCH v5 14/44] drm/nouveau: dma-mapping: Use unsigned long for dma_attrs
...gned long attrs; }; #define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base) @@ -293,7 +293,7 @@ gk20a_instobj_dtor_dma(struct nvkm_memory *memory) goto out; dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->base.vaddr, - node->handle, &imem->attrs); + node->handle, imem->attrs); out: return node; @@ -386,7 +386,7 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align, node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT, &node->handle, GFP_KERNEL, -...
2016 Jul 13
0
[PATCH v6 15/46] drm/nouveau: dma-mapping: Use unsigned long for dma_attrs
...gned long attrs; }; #define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base) @@ -293,7 +293,7 @@ gk20a_instobj_dtor_dma(struct nvkm_memory *memory) goto out; dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->base.vaddr, - node->handle, &imem->attrs); + node->handle, imem->attrs); out: return node; @@ -386,7 +386,7 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align, node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT, &node->handle, GFP_KERNEL, -...
2023 Dec 14
1
[PATCH] drm/nouveau: Fixup gk20a instobj hierarchy
On 08/12/2023 10:46, Thierry Reding wrote: > From: Thierry Reding <treding at nvidia.com> > > Commit 12c9b05da918 ("drm/nouveau/imem: support allocations not > preserved across suspend") uses container_of() to cast from struct > nvkm_memory to struct nvkm_instobj, assuming that all instance objects > are derived from struct nvkm_instobj. For the gk20a family that's not > the case and they are derived from st...
2016 Dec 12
1
[bug report] drm/nouveau/imem: convert to new-style nvkm_subdev
[ No idea why it's only complaining about this a year later... -dan ] Hello Ben Skeggs, This is a semi-automatic email about new static checker warnings. The patch b7a2bc1886d0: "drm/nouveau/imem: convert to new-style nvkm_subdev" from Aug 20, 2015, leads to the following Smatch complaint: drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c:536 gk20a_instobj_new() error: we previously assumed 'node' could be null (see line 532) drivers/gpu/drm/nouveau/nvkm/subdev/instmem/...
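The Smatch warning describes the classic check-after-use pattern: a pointer is dereferenced on one line and only tested for NULL a few lines later, so the test either arrives too late or is unnecessary. A hedged sketch of the general shape and its usual fix (illustrative names, not the actual nouveau code):

    #include <linux/slab.h>

    struct obj { int val; };

    /* Shape Smatch complains about: dereference first, NULL test later. */
    static int new_obj_bad(struct obj **out)
    {
            struct obj *node = kzalloc(sizeof(*node), GFP_KERNEL);

            node->val = 1;          /* dereference ...                     */
            if (!node)              /* ... so this check arrives too late  */
                    return -ENOMEM;
            *out = node;
            return 0;
    }

    /* Usual fix: check before the first use. */
    static int new_obj_good(struct obj **out)
    {
            struct obj *node = kzalloc(sizeof(*node), GFP_KERNEL);

            if (!node)
                    return -ENOMEM;
            node->val = 1;
            *out = node;
            return 0;
    }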
2016 Feb 20
0
[PATCH v4 3/6] iccsense: implement for ina209, ina219 and ina3221
...nouveau/nvkm/engine/device/base.c index a239e49..2536890 100644 --- a/drm/nouveau/nvkm/engine/device/base.c +++ b/drm/nouveau/nvkm/engine/device/base.c @@ -1347,6 +1347,7 @@ nvc0_chipset = { .gpio = g94_gpio_new, .i2c = g94_i2c_new, .ibus = gf100_ibus_new, + .iccsense = gf100_iccsense_new, .imem = nv50_instmem_new, .ltc = gf100_ltc_new, .mc = gf100_mc_new, @@ -1383,6 +1384,7 @@ nvc1_chipset = { .gpio = g94_gpio_new, .i2c = g94_i2c_new, .ibus = gf100_ibus_new, + .iccsense = gf100_iccsense_new, .imem = nv50_instmem_new, .ltc = gf100_ltc_new, .mc = gf100_mc_new, @@ -1418,6 +14...
2017 Dec 08
3
[PATCH] drm/nouveau/imem/nv50: fix incorrect use of refcount API
Commit be55287aa5b ("drm/nouveau/imem/nv50: embed nvkm_instobj directly into nv04_instobj") introduced some new calls to the refcount api to the nv50 mapping code. In one particular instance, it does the following: if (!refcount_inc_not_zero(&iobj->maps)) { ... refcount_inc(&iobj->maps);...
2017 Dec 18
1
[PATCH] drm/nouveau/imem/nv50: fix incorrect use of refcount API
.... Best regards, Pierre [0]: https://github.com/skeggsb/nouveau/commit/9068f1df2394f0e4ab2b2a28cac06b462fe0a0aa On 2017-12-18 — 09:27, Ard Biesheuvel wrote: > On 8 December 2017 at 19:30, Ard Biesheuvel <ard.biesheuvel at linaro.org> wrote: > > Commit be55287aa5b ("drm/nouveau/imem/nv50: embed nvkm_instobj directly > > into nv04_instobj") introduced some new calls to the refcount api to > > the nv50 mapping code. In one particular instance, it does the > > following: > > > > if (!refcount_inc_not_zero(&iobj->maps)) { > >...
2015 Sep 04
4
[PATCH 0/4] tegra: DMA mask and IOMMU bit fixes
These 4 patches fix two issues that existed on Tegra regarding DMA: 1) The bit indicating whether to use an IOMMU or not was hardcoded; make this a platform property and use it in instmem. 2) The DMA mask was not set for platform devices. Fix this by converting more pci_dma* to the DMA API, and use that more generic code to set the DMA mask properly for all platforms. Tested on both x86
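The DMA-mask half of the series amounts to setting the mask through the generic DMA API rather than relying on pci_dma* defaults that never apply to platform devices. A minimal sketch of what setting the mask looks like for a platform device (illustrative device and mask width, not the exact Tegra patch):

    #include <linux/dma-mapping.h>
    #include <linux/platform_device.h>

    static int my_probe(struct platform_device *pdev)
    {
            int ret;

            /* Declare how much address space the device can reach; this
             * covers both streaming and coherent mappings. The width
             * depends on the SoC. */
            ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
            if (ret)
                    return ret;

            return 0;
    }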