search for: nvkm_memory_size

Displaying 15 results from an estimated 15 matches for "nvkm_memory_size".

2015 Nov 09
2
[PATCH] instmem/gk20a: fix race conditions
...
+static void
+gk20a_instobj_recycle_vaddr(struct gk20a_instobj *obj)
+{
+	struct gk20a_instmem *imem = obj->imem;
+	/* there should not be any user left... */
+	WARN_ON(obj->use_cpt);
+	list_del(&obj->vaddr_node);
+	vunmap(obj->vaddr);
+	obj->vaddr = NULL;
+	imem->vaddr_use -= nvkm_memory_size(&obj->memory);
+	nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n", imem->vaddr_use,
+		   imem->vaddr_max);
+}
+
+/*
+ * Must be called while holding gk20a_instmem::lock
 */
 static void
 gk20a_instmem_vaddr_gc(struct gk20a_instmem *imem, const u64 size)
 {
 	whil...
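
For context, the pattern in this hunk is a cache of CPU mappings: each instobj's vmap()ed address sits on an LRU list while unused, and imem->vaddr_use tracks how many bytes are currently mapped, with nvkm_memory_size() supplying each object's share. A minimal sketch of that recycle/garbage-collect bookkeeping, using simplified stand-in types (demo_imem, demo_obj) rather than the real gk20a structures:

/* Simplified sketch only -- the real code lives in
 * drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c. */
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

struct demo_imem {
	spinlock_t lock;		/* protects the LRU list and the counters */
	struct list_head vaddr_lru;	/* unused-but-still-mapped objects */
	unsigned int vaddr_use;		/* bytes of instobj memory currently vmap()ed */
	unsigned int vaddr_max;		/* soft limit that triggers garbage collection */
};

struct demo_obj {
	struct demo_imem *imem;
	struct list_head vaddr_node;	/* position on imem->vaddr_lru when idle */
	void *vaddr;			/* CPU mapping from vmap(), or NULL */
	unsigned int use_cpt;		/* outstanding acquires of the mapping */
	u64 size;			/* stands in for nvkm_memory_size() */
};

/* Drop an idle CPU mapping; the caller must hold imem->lock. */
static void demo_recycle_vaddr(struct demo_obj *obj)
{
	struct demo_imem *imem = obj->imem;

	WARN_ON(obj->use_cpt);		/* nobody may still be using the mapping */
	list_del(&obj->vaddr_node);	/* off the LRU */
	vunmap(obj->vaddr);
	obj->vaddr = NULL;
	imem->vaddr_use -= obj->size;	/* shrink the mapped-bytes accounting */
}

/* Recycle least-recently-used mappings until `size` more bytes fit. */
static void demo_vaddr_gc(struct demo_imem *imem, u64 size)
{
	while (imem->vaddr_use + size > imem->vaddr_max &&
	       !list_empty(&imem->vaddr_lru))
		demo_recycle_vaddr(list_first_entry(&imem->vaddr_lru,
						    struct demo_obj,
						    vaddr_node));
}
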
2015 Nov 11
2
[PATCH] instmem/gk20a: use DMA API CPU mapping
...return (u64)gk20a_instobj(memory)->mem.size << 12;
 }
-static void __iomem *
-gk20a_instobj_cpu_map_dma(struct nvkm_memory *memory)
-{
-	struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory);
-	struct device *dev = node->base.imem->base.subdev.device->dev;
-	int npages = nvkm_memory_size(memory) >> 12;
-	struct page *pages[npages];
-	int i;
-
-	/* phys_to_page does not exist on all platforms... */
-	pages[0] = pfn_to_page(dma_to_phys(dev, node->handle) >> PAGE_SHIFT);
-	for (i = 1; i < npages; i++)
-		pages[i] = pages[0] + i;
-
-	return vmap(pages, npages, VM_MAP,...
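
The helper being removed rebuilt a struct page array from the DMA handle (via dma_to_phys()/pfn_to_page(), which are not available on every platform) and then vmap()ed it. The point of the patch, per its subject, is to rely on the CPU mapping the DMA API already provides. A hedged sketch of that direction, with a made-up demo_dma_buf wrapper rather than the driver's real types:

/* Minimal sketch: allocate instance memory through the DMA API and reuse
 * the kernel mapping it hands back, instead of vmap()ing pages by hand. */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

struct demo_dma_buf {
	void *cpu;		/* CPU mapping returned by dma_alloc_coherent() */
	dma_addr_t handle;	/* DMA address for the GPU side */
	size_t size;
};

static int demo_dma_buf_alloc(struct device *dev, size_t size,
			      struct demo_dma_buf *buf)
{
	buf->size = size;
	buf->cpu = dma_alloc_coherent(dev, size, &buf->handle, GFP_KERNEL);
	if (!buf->cpu)
		return -ENOMEM;
	/* buf->cpu is already a valid kernel mapping of the buffer, so no
	 * extra struct page array + vmap() pass is needed. */
	return 0;
}

static void demo_dma_buf_free(struct device *dev, struct demo_dma_buf *buf)
{
	dma_free_coherent(dev, buf->size, buf->cpu, buf->handle);
	buf->cpu = NULL;
}
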
2015 Oct 26
2
[PATCH] instmem/gk20a: exclusively acquire instobjs
...CPU mappings LRU */
 	unsigned int vaddr_use;
@@ -188,12 +189,11 @@ gk20a_instobj_acquire(struct nvkm_memory *memory)
 	struct gk20a_instobj *node = gk20a_instobj(memory);
 	struct gk20a_instmem *imem = node->imem;
 	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
-	const u64 size = nvkm_memory_size(memory);
-	unsigned long flags;
+	u64 size;
 	nvkm_ltc_flush(ltc);
-	spin_lock_irqsave(&imem->lock, flags);
+	spin_lock_irqsave(&imem->lock, imem->flags);
 	if (node->vaddr) {
 		/* remove us from the LRU list since we cannot be unmapped */
@@ -202,6 +202,8 @@ gk20a_instob...
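
This earlier revision introduces the use_cpt counter that the race fix above builds on: a mapping may only be reclaimed once every acquirer has released it. A rough sketch of the acquire/release pairing, reusing pared-down versions of the demo_imem/demo_obj stand-ins from the first sketch; it keeps the conventional on-stack `flags` rather than the imem->flags field this particular patch uses, and it assumes the mapping already exists and idle objects sit on the LRU:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_imem {
	spinlock_t lock;
	struct list_head vaddr_lru;	/* idle, reclaimable CPU mappings */
};

struct demo_obj {
	struct list_head vaddr_node;
	void *vaddr;			/* assumed already vmap()ed here */
	unsigned int use_cpt;		/* outstanding acquires */
};

static void *demo_acquire(struct demo_imem *imem, struct demo_obj *obj)
{
	unsigned long flags;

	spin_lock_irqsave(&imem->lock, flags);
	if (obj->use_cpt++ == 0)
		list_del(&obj->vaddr_node);	/* first user: off the reclaim LRU */
	spin_unlock_irqrestore(&imem->lock, flags);

	return obj->vaddr;
}

static void demo_release(struct demo_imem *imem, struct demo_obj *obj)
{
	unsigned long flags;

	spin_lock_irqsave(&imem->lock, flags);
	if (--obj->use_cpt == 0)		/* last user: reclaimable again */
		list_add_tail(&obj->vaddr_node, &imem->vaddr_lru);
	spin_unlock_irqrestore(&imem->lock, flags);
}
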
2015 Nov 11
0
[PATCH] instmem/gk20a: use DMA API CPU mapping
...mem.size << 12;
> }
>
> -static void __iomem *
> -gk20a_instobj_cpu_map_dma(struct nvkm_memory *memory)
> -{
> -	struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory);
> -	struct device *dev = node->base.imem->base.subdev.device->dev;
> -	int npages = nvkm_memory_size(memory) >> 12;
> -	struct page *pages[npages];
> -	int i;
> -
> -	/* phys_to_page does not exist on all platforms... */
> -	pages[0] = pfn_to_page(dma_to_phys(dev, node->handle) >> PAGE_SHIFT);
> -	for (i = 1; i < npages; i++)
> -		pages[i] = pages[0] + i;
> ...
2015 Nov 11
0
[PATCH] instmem/gk20a: fix race conditions
...r(struct gk20a_instobj *obj)
> +{
> +	struct gk20a_instmem *imem = obj->imem;
> +	/* there should not be any user left... */
> +	WARN_ON(obj->use_cpt);
> +	list_del(&obj->vaddr_node);
> +	vunmap(obj->vaddr);
> +	obj->vaddr = NULL;
> +	imem->vaddr_use -= nvkm_memory_size(&obj->memory);
> +	nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n", imem->vaddr_use,
> +		   imem->vaddr_max);
> +}
> +
> +/*
> + * Must be called while holding gk20a_instmem::lock
> */
> static void
> gk20a_instmem_vaddr_gc(struct g...
2023 Dec 08
1
[PATCH] drm/nouveau: Fixup gk20a instobj hierarchy
...), struct gk20a_instobj, base.memory)
 /*
  * Used for objects allocated using the DMA API
@@ -148,7 +148,7 @@ gk20a_instobj_iommu_recycle_vaddr(struct gk20a_instobj_iommu *obj)
 	list_del(&obj->vaddr_node);
 	vunmap(obj->base.vaddr);
 	obj->base.vaddr = NULL;
-	imem->vaddr_use -= nvkm_memory_size(&obj->base.memory);
+	imem->vaddr_use -= nvkm_memory_size(&obj->base.base.memory);
 	nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n", imem->vaddr_use,
 		   imem->vaddr_max);
 }
@@ -283,7 +283,7 @@ gk20a_instobj_map(struct nvkm_memory *memory, u64 offset...
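
The change from base.memory to base.base.memory reflects one extra level of struct embedding after the hierarchy fixup: the IOMMU flavour embeds a common gk20a_instobj, which in turn embeds the object that actually carries the nvkm_memory. A schematic sketch of that nesting and of the container_of() walk shown in the first hunk (demo_* names are simplified stand-ins, not the exact nouveau definitions):

#include <linux/kernel.h>
#include <linux/types.h>

struct demo_memory {			/* ~ struct nvkm_memory */
	u64 size;
};

struct demo_instobj_base {		/* ~ struct nvkm_instobj */
	struct demo_memory memory;
};

struct demo_instobj {			/* ~ struct gk20a_instobj */
	struct demo_instobj_base base;	/* memory now sits one level deeper */
};

struct demo_instobj_iommu {		/* ~ struct gk20a_instobj_iommu */
	struct demo_instobj base;
	void *vaddr;
};

/* From a demo_memory pointer back to the gk20a-level object, mirroring the
 * gk20a_instobj() macro in the hunk above. */
#define demo_instobj(p) container_of((p), struct demo_instobj, base.memory)

/* With the extra embedding, the size lookup in the IOMMU recycle path has
 * to spell out base.base.memory rather than base.memory. */
static u64 demo_iommu_size(struct demo_instobj_iommu *obj)
{
	return obj->base.base.memory.size;
}
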
2015 Nov 04
0
[PATCH] instmem/gk20a: exclusively acquire instobjs
...signed int vaddr_use;
> @@ -188,12 +189,11 @@ gk20a_instobj_acquire(struct nvkm_memory *memory)
> 	struct gk20a_instobj *node = gk20a_instobj(memory);
> 	struct gk20a_instmem *imem = node->imem;
> 	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
> -	const u64 size = nvkm_memory_size(memory);
> -	unsigned long flags;
> +	u64 size;
>
> 	nvkm_ltc_flush(ltc);
>
> -	spin_lock_irqsave(&imem->lock, flags);
> +	spin_lock_irqsave(&imem->lock, imem->flags);
>
> 	if (node->vaddr) {
> 		/* remove us from the LRU list since we cann...
2017 Jan 30
2
[PATCH] drm/nouveau: gk20a: Turn instmem lock into mutex
...pinlock_t lock;
+	struct mutex lock;
 	/* CPU mappings LRU */
 	unsigned int vaddr_use;
@@ -184,11 +184,10 @@ gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
 	struct gk20a_instmem *imem = node->base.imem;
 	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
 	const u64 size = nvkm_memory_size(memory);
-	unsigned long flags;
 	nvkm_ltc_flush(ltc);
-	spin_lock_irqsave(&imem->lock, flags);
+	mutex_lock(&imem->lock);
 	if (node->base.vaddr) {
 		if (!node->use_cpt) {
@@ -216,7 +215,7 @@ gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
 out:
 	node->use_...
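
The hunk swaps the instmem spinlock for a mutex: the flags variable disappears because the lock is no longer taken with interrupts disabled, and (presumably, given the direction of the patch) the critical section is then allowed to sleep, e.g. around vmap(). A small before/after sketch of just the locking change, with placeholder names:

#include <linux/mutex.h>

struct demo_imem {
	struct mutex lock;	/* was: spinlock_t lock; initialised with
				 * mutex_init() when the instmem is created */
};

static void demo_acquire_mapping(struct demo_imem *imem)
{
	/* Before the patch: an IRQ-safe spinlock, so no sleeping allowed
	 * inside the section:
	 *
	 *	unsigned long flags;
	 *	spin_lock_irqsave(&imem->lock, flags);
	 *	...
	 *	spin_unlock_irqrestore(&imem->lock, flags);
	 */

	/* After the patch: a mutex, so the section may sleep. */
	mutex_lock(&imem->lock);
	/* ... look up or create the CPU mapping, possibly sleeping ... */
	mutex_unlock(&imem->lock);
}
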
2023 Dec 14
1
[PATCH] drm/nouveau: Fixup gk20a instobj hierarchy
...
> /*
>  * Used for objects allocated using the DMA API
> @@ -148,7 +148,7 @@ gk20a_instobj_iommu_recycle_vaddr(struct gk20a_instobj_iommu *obj)
> 	list_del(&obj->vaddr_node);
> 	vunmap(obj->base.vaddr);
> 	obj->base.vaddr = NULL;
> -	imem->vaddr_use -= nvkm_memory_size(&obj->base.memory);
> +	imem->vaddr_use -= nvkm_memory_size(&obj->base.base.memory);
> 	nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n", imem->vaddr_use,
> 		   imem->vaddr_max);
> }
> @@ -283,7 +283,7 @@ gk20a_instobj_map(struct nvk...
2017 Feb 24
1
[PATCH] drm/nouveau: gk20a: Turn instmem lock into mutex
...PU mappings LRU */
>> 	unsigned int vaddr_use;
>> @@ -184,11 +184,10 @@ gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
>> 	struct gk20a_instmem *imem = node->base.imem;
>> 	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
>> 	const u64 size = nvkm_memory_size(memory);
>> -	unsigned long flags;
>>
>> 	nvkm_ltc_flush(ltc);
>>
>> -	spin_lock_irqsave(&imem->lock, flags);
>> +	mutex_lock(&imem->lock);
>>
>> 	if (node->base.vaddr) {
>> 		if (!node->use_cpt) {
>> @@ -216,7 +215,...
2019 Nov 08
1
[PATCH] RFC: drm/nouveau: Make BAR1 support optional
...struct nvkm_device *device = subdev->device;
-	struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
 	int engn, runl, pbid, ret, i, j;
 	enum nvkm_devidx engidx;
 	u32 *map;
@@ -967,12 +966,19 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
 	if (ret)
 		return ret;
-	ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem),
-			   &fifo->user.bar);
-	if (ret)
-		return ret;
+	if (device->bar) {
+		struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
+
+		ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem),
+				   &fifo->user.bar);
+		if (ret)
+			return ret;
+
+		return n...
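
This RFC makes the BAR1 step conditional: the nvkm_vmm_get() reservation (sized with nvkm_memory_size(), at a 4 KiB page shift of 12) only happens when the device actually exposes a BAR. A condensed sketch of that guard, assuming the nouveau-internal nvkm headers; demo_map_usermem_through_bar1() is a made-up wrapper, and the continuation is left out because the hunk is truncated above:

#include <core/device.h>	/* nouveau-internal include paths, as used in nvkm */
#include <core/memory.h>
#include <subdev/bar.h>
#include <subdev/mmu.h>

static int demo_map_usermem_through_bar1(struct nvkm_device *device,
					 struct nvkm_memory *usermem,
					 struct nvkm_vma **pvma)
{
	/* Only devices that actually expose BAR1 get a CPU-visible window. */
	if (device->bar) {
		struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
		int ret;

		/* Reserve a BAR1 range (4 KiB pages) big enough for the
		 * whole user memory object. */
		ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(usermem), pvma);
		if (ret)
			return ret;

		/* The patch then goes on to map usermem into that range;
		 * that part of the hunk is cut off in the excerpt. */
	}

	return 0;
}
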
2017 Feb 23
0
[PATCH] drm/nouveau: gk20a: Turn instmem lock into mutex
...;
>
> 	/* CPU mappings LRU */
> 	unsigned int vaddr_use;
> @@ -184,11 +184,10 @@ gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
> 	struct gk20a_instmem *imem = node->base.imem;
> 	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
> 	const u64 size = nvkm_memory_size(memory);
> -	unsigned long flags;
>
> 	nvkm_ltc_flush(ltc);
>
> -	spin_lock_irqsave(&imem->lock, flags);
> +	mutex_lock(&imem->lock);
>
> 	if (node->base.vaddr) {
> 		if (!node->use_cpt) {
> @@ -216,7 +215,7 @@ gk20a_instobj_acquire_iommu(s...
2018 Mar 10
17
[RFC PATCH 00/13] SVM (share virtual memory) with HMM in nouveau
From: Jérôme Glisse <jglisse at redhat.com> (mm is cced just to allow exposure of device driver work without ccing a long list of people. I do not think there is anything useful to discuss from the mm point of view, but I might be wrong, so just for the curious :)). git://people.freedesktop.org/~glisse/linux branch: nouveau-hmm-v00
2016 Jan 18
6
[PATCH v2 0/5] nouveau: add secure boot support for dGPU and Tegra
This is a heavily revised version of the first patch series that adds secure boot support to Nouveau. This code still depends on NVIDIA releasing official firmware files, but the files released with SHIELD TV and Pixel C can already be used on a Jetson TX1. As you know, we are working hard to release the official firmware files; in the meantime, however, it doesn't hurt to review the code so it
2016 Feb 24
11
[PATCH v3 00/11] nouveau: add secure boot support for dGPU and Tegra
New version of the secure boot code that works with the blobs just merged into linux-firmware. Since the required Mesa patches are also merged, this set is the last piece of the puzzle to get out-of-the-box accelerated Maxwell 2. The basic code remains the same, with a few improvements with respect to how secure falcons are started. Hopefully the patchset is better split too. I have a