Displaying 20 results from an estimated 34 matches for "nvkm_done".
2018 May 24
3
[PATCH] drm/nouveau/secboot/acr: Remove VLA usage
...unc =
acr->func->ls_func[_img->falcon_id];
- u8 gdesc[ls_func->bl_desc_size];
nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
sizeof(img->wpr_header));
@@ -447,6 +460,8 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
nvkm_done(wpr_blob);
+ kfree(gdesc);
+
return 0;
}
@@ -771,7 +786,11 @@ acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon,
struct fw_bl_desc *hsbl_desc;
void *bl, *blob_data, *hsbl_code, *hsbl_data;
u32 code_size;
- u8 bl_desc[bl_desc_size];
+ u8 *bl_desc;
+
+ bl_desc = kzalloc(bl_...
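The shape of this conversion, as a minimal sketch (the error path and GFP_KERNEL context are assumptions; bl_desc_size is the size variable from the removed VLA above): a stack array sized at runtime becomes a heap allocation with an explicit failure check and a matching kfree().

	u8 *bl_desc;

	/* Runtime-sized buffer replacing the on-stack VLA. */
	bl_desc = kzalloc(bl_desc_size, GFP_KERNEL);
	if (!bl_desc)
		return -ENOMEM;

	/* ... fill and use bl_desc exactly as the VLA was used ... */

	kfree(bl_desc);
	return 0;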
2020 Jul 01
0
[PATCH v3 3/5] nouveau: fix mapping 2MB sysmem pages
...+ u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 4);
+ u64 data = (u64)datahi << 32 | datalo;
+
+ if ((data & (3ULL << 1)) != 0) {
+ addr = (data >> 8) << 12;
+ dma_unmap_page(dev, addr, 1UL << 21, DMA_BIDIRECTIONAL);
+ }
+ ptei++;
+ }
+ nvkm_done(pt->memory);
+}
+
+static bool
+gp100_vmm_pd0_pfn_clear(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ bool dma = false;
+
+ nvkm_kmap(pt->memory);
+ while (ptes--) {
+ u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 0);
+ u32 datahi = nvkm_ro32(p...
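Decoded, the unmap walk above reads each 16-byte dual PTE as two 32-bit words, rebuilds the 64-bit entry, and for system-memory entries recovers the DMA address before unmapping the 2 MiB page. A simplified sketch of one iteration (bit positions are taken from the hunk, not from hardware documentation):

	u64 data = (u64)datahi << 32 | datalo;

	/* Bits 2:1 nonzero => the entry targets system memory. */
	if ((data & (3ULL << 1)) != 0) {
		/* Address field starts at bit 8, in units of 4 KiB pages. */
		dma_addr_t addr = (data >> 8) << 12;
		dma_unmap_page(dev, addr, 1UL << 21, DMA_BIDIRECTIONAL); /* 2 MiB */
	}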
2016 Mar 01
1
[PATCH 1/2] fifo/gf100: take runlist target into account
...struct gf100_fifo *fifo)
struct nvkm_device *device = subdev->device;
struct nvkm_memory *cur;
int nr = 0;
+ int target;
mutex_lock(&subdev->mutex);
cur = fifo->runlist.mem[fifo->runlist.active];
@@ -67,7 +68,10 @@ gf100_fifo_runlist_commit(struct gf100_fifo *fifo)
}
nvkm_done(cur);
- nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
+ target = (nvkm_memory_target(cur) == NVKM_MEM_TARGET_HOST) ? 0x3 : 0x0;
+
+ nvkm_wr32(device, 0x002270, (nvkm_memory_addr(cur) >> 12) |
+ (target << 28));
nvkm_wr32(device, 0x002274, 0x01f00000 | nr);...
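The encoding this fixes, sketched out (the target field sitting in bits 29:28 is inferred from the hunk itself): the runlist commit register takes the base address in 4 KiB units in the low bits, and before the patch the aperture field was implicitly left at 0 (VRAM), which broke host-backed runlists.

	u32 target = (nvkm_memory_target(cur) == NVKM_MEM_TARGET_HOST) ? 0x3 : 0x0;

	/* Base address (4 KiB units) in the low bits, aperture in bits 29:28. */
	nvkm_wr32(device, 0x002270, (nvkm_memory_addr(cur) >> 12) | (target << 28));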
2020 Jul 06
0
[PATCH 1/5] nouveau: fix storing invalid ptes
...+
+ if (!(*map->pfn & NVKM_VMM_PFN_V))
+ continue;
+
if (!(*map->pfn & NVKM_VMM_PFN_W))
data |= BIT_ULL(6); /* RO. */
@@ -100,7 +104,6 @@ gp100_vmm_pgt_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
}
VMM_WO064(pt, vmm, ptei++ * 8, data);
- map->pfn++;
}
nvkm_done(pt->memory);
}
@@ -310,9 +313,12 @@ gp100_vmm_pd0_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
dma_addr_t addr;
nvkm_kmap(pt->memory);
- while (ptes--) {
+ for (; ptes; ptes--, map->pfn++) {
u64 data = 0;
+ if (!(*map->pfn & NVKM_VMM_PFN_V))
+ continue;
+
if (...
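The reason for the loop restructuring: with "while (ptes--)" and an unconditional "map->pfn++" at the bottom of the body, adding a "continue" for invalid PFNs would skip the increment and desynchronize the PFN array from the PTE index. Moving the increments into the for-loop header keeps the cursors in lock-step. A simplified sketch (advancing ptei in the header as well is an assumption of this sketch; the hunk above keeps ptei++ at the write):

	for (; ptes; ptes--, map->pfn++, ptei++) {
		u64 data = 0;

		if (!(*map->pfn & NVKM_VMM_PFN_V))
			continue;		/* invalid: skip, cursors still advance */

		if (!(*map->pfn & NVKM_VMM_PFN_W))
			data |= BIT_ULL(6);	/* read-only */

		/* ... assemble the remaining fields, then write the 8-byte PTE ... */
		VMM_WO064(pt, vmm, ptei * 8, data);
	}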
2020 Jul 13
0
[PATCH v2 1/5] nouveau: fix storing invalid ptes
...+
+ if (!(*map->pfn & NVKM_VMM_PFN_V))
+ continue;
+
if (!(*map->pfn & NVKM_VMM_PFN_W))
data |= BIT_ULL(6); /* RO. */
@@ -100,7 +104,6 @@ gp100_vmm_pgt_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
}
VMM_WO064(pt, vmm, ptei++ * 8, data);
- map->pfn++;
}
nvkm_done(pt->memory);
}
@@ -310,9 +313,12 @@ gp100_vmm_pd0_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
dma_addr_t addr;
nvkm_kmap(pt->memory);
- while (ptes--) {
+ for (; ptes; ptes--, map->pfn++) {
u64 data = 0;
+ if (!(*map->pfn & NVKM_VMM_PFN_V))
+ continue;
+
if (...
2020 Jul 23
0
[PATCH v4 1/6] nouveau: fix storing invalid ptes
...+
+ if (!(*map->pfn & NVKM_VMM_PFN_V))
+ continue;
+
if (!(*map->pfn & NVKM_VMM_PFN_W))
data |= BIT_ULL(6); /* RO. */
@@ -100,7 +104,6 @@ gp100_vmm_pgt_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
}
VMM_WO064(pt, vmm, ptei++ * 8, data);
- map->pfn++;
}
nvkm_done(pt->memory);
}
@@ -310,9 +313,12 @@ gp100_vmm_pd0_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
dma_addr_t addr;
nvkm_kmap(pt->memory);
- while (ptes--) {
+ for (; ptes; ptes--, map->pfn++) {
u64 data = 0;
+ if (!(*map->pfn & NVKM_VMM_PFN_V))
+ continue;
+
if (...
2020 Apr 24
1
[PATCH] drm/nouveau/mmu: Remove unneeded semicolon
...\
PTEI += _ptes; \
PTEN -= _ptes; \
- }; \
+ } \
nvkm_done((PT)->memory); \
} while(0)
--
2.26.0.106.g9fadedd
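For context, a toy macro showing the class of issue being removed (names are illustrative, not from the patch): the ';' after the inner block's closing brace is a null statement inside the do/while body. It is harmless here, but style checkers flag it, and the same stray semicolon between an if block and its else would be an outright syntax error.

	#define ZERO_PTES(pt, n)			\
	do {						\
		unsigned int _i = 0;			\
		while (_i < (n)) {			\
			(pt)[_i++] = 0;			\
		}	/* was: "};" */			\
	} while (0)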
2019 Dec 18
1
[PATCH v2] drm/nouveau/mmu: Remove unneeded semicolon
...\
PTEI += _ptes; \
PTEN -= _ptes; \
- }; \
+ } \
nvkm_done((PT)->memory); \
} while(0)
--
2.7.4
2019 Dec 16
1
[PATCH] drm/nouveau/mmu: Remove unneeded semicolon
...\
PTEI += _ptes; \
PTEN -= _ptes; \
- }; \
+ } \
nvkm_done((PT)->memory); \
} while(0)
--
2.7.4
2018 Jun 22
0
[PATCH] drm/nouveau/secboot/acr: Remove VLA usage
...or the changes in the r367 file.
> nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
> sizeof(img->wpr_header));
> @@ -447,6 +460,8 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
>
> nvkm_done(wpr_blob);
>
> + kfree(gdesc);
> +
> return 0;
> }
>
> @@ -771,7 +786,11 @@ acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon,
> struct fw_bl_desc *hsbl_desc;
> void *bl, *blob_data, *hsbl_code, *hsbl_data;
> u32 cod...
2020 Jul 01
8
[PATCH v3 0/5] mm/hmm/nouveau: add PMD system memory mapping
The goal for this series is to introduce the hmm_pfn_to_map_order()
function. This allows a device driver to know that a given 4K PFN is
actually mapped by the CPU using a larger-sized CPU page table entry, and
therefore the device driver can safely map system memory using larger
device MMU PTEs.
The series is based on 5.8.0-rc3 and is intended for Jason Gunthorpe's
hmm tree. These were
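In terms of the API, hmm_pfn_to_map_order() reports how many 4K PFNs the CPU's page table entry covers, as a log2 order. A hedged sketch of how a driver might consume it after hmm_range_fault() (emit_device_pte() is a hypothetical driver hook, not part of the series):

	unsigned long pfn = range->hmm_pfns[i];

	if (pfn & HMM_PFN_VALID) {
		unsigned int order = hmm_pfn_to_map_order(pfn);

		if (order >= 9)	/* CPU uses a PMD: 2^9 * 4 KiB = 2 MiB */
			emit_device_pte(vmm, addr, pfn, 21);	/* 2 MiB device PTE */
		else
			emit_device_pte(vmm, addr, pfn, 12);	/* 4 KiB device PTE */
	}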
2019 Nov 08
1
[PATCH] RFC: drm/nouveau: Make BAR1 support optional
...->rd32()/->wr32() callbacks nor unmapping via ->unmap(). That means we
effectively don't have a good point where we could emit the memory
barriers.
I see two possibilities here: 1) make all accesses go through the
accessors or 2) guard each series of accesses with a pair of nvkm_map()
and nvkm_done() calls. Both of those would mean that all code paths need
to be carefully audited.
One other thing I'm wondering is if it's okay to put all of this into
the gk104_fifo implementation. I think the result of parameterizing on
device->bar is pretty neat. Also, it seems like most of the re...
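Option 2 above, sketched with the accessor pair nouveau already has (nvkm_kmap()/nvkm_done(); the message says nvkm_map(), but the in-tree helpers are spelled with the k): bracketing each series of accesses gives nvkm_done() a well-defined point to emit whatever barrier or flush the backing implementation needs.

	nvkm_kmap(mem);
	nvkm_wo32(mem, 0x00, lower_32_bits(addr));
	nvkm_wo32(mem, 0x04, upper_32_bits(addr));
	nvkm_done(mem);		/* natural place for a memory barrier/flush */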
2019 Dec 18
0
[PATCH v2] drm/nouveau/mmu: Remove unneeded semicolon
...+= _ptes; \
> PTEN -= _ptes; \
> - }; \
> + } \
> nvkm_done((PT)->memory); \
> } while(0)
>
> --
> 2.7.4
>
2020 May 04
0
[PATCH] drm/nouveau/mmu: remove unneeded semicolon
...\
PTEI += _ptes; \
PTEN -= _ptes; \
- }; \
+ } \
nvkm_done((PT)->memory); \
} while(0)
--
2.21.1
2020 Jun 30
6
[PATCH v2 0/5] mm/hmm/nouveau: add PMD system memory mapping
The goal for this series is to introduce the hmm_range_fault() output
array flags HMM_PFN_PMD and HMM_PFN_PUD. This allows a device driver to
know that a given 4K PFN is actually mapped by the CPU using either a
PMD-sized or PUD-sized CPU page table entry, and therefore the device
driver can safely map system memory using larger device MMU PTEs.
The series is based on 5.8.0-rc3 and is intended for
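As a contrast with the v3 interface above, a brief sketch of this v2 flag-based variant (HMM_PFN_PMD/HMM_PFN_PUD are the flags proposed here; per the v3 entry above they were later replaced by hmm_pfn_to_map_order()):

	unsigned int page_shift;
	unsigned long pfn = range->hmm_pfns[i];

	if (pfn & HMM_PFN_PUD)
		page_shift = 30;	/* PUD-sized CPU mapping, 1 GiB */
	else if (pfn & HMM_PFN_PMD)
		page_shift = 21;	/* PMD-sized CPU mapping, 2 MiB */
	else
		page_shift = 12;	/* base 4 KiB page */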
2020 Jul 06
8
[PATCH 0/5] mm/migrate: avoid device private invalidations
The goal for this series is to avoid device private memory TLB
invalidations when migrating a range of addresses from system
memory to device private memory and some of those pages have already
been migrated. The approach taken is to introduce a new mmu notifier
invalidation event type and use that in the device driver to skip
invalidation callbacks from migrate_vma_setup(). The device driver is
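The driver-side filter this enables, as a hedged sketch (the event and field names follow this series' mmu_notifier changes; my_dev stands in for the device-private pgmap owner the driver registered):

	static bool my_svm_invalidate(struct mmu_interval_notifier *mni,
				      const struct mmu_notifier_range *range,
				      unsigned long cur_seq)
	{
		/* Our own migration already updated the device page tables;
		 * skip the redundant invalidation. */
		if (range->event == MMU_NOTIFY_MIGRATE &&
		    range->migrate_pgmap_owner == my_dev)
			return true;

		mmu_interval_set_seq(mni, cur_seq);
		/* ... invalidate device PTEs for range->start..range->end ... */
		return true;
	}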
2020 Jul 21
6
[PATCH v3 0/5] mm/migrate: avoid device private invalidations
The goal for this series is to avoid device private memory TLB
invalidations when migrating a range of addresses from system
memory to device private memory and some of those pages have already
been migrated. The approach taken is to introduce a new mmu notifier
invalidation event type and use that in the device driver to skip
invalidation callbacks from migrate_vma_setup(). The device driver is
2020 Jul 13
9
[PATCH v2 0/5] mm/migrate: avoid device private invalidations
The goal for this series is to avoid device private memory TLB
invalidations when migrating a range of addresses from system
memory to device private memory and some of those pages have already
been migrated. The approach taken is to introduce a new mmu notifier
invalidation event type and use that in the device driver to skip
invalidation callbacks from migrate_vma_setup(). The device driver is
2020 Jun 19
0
[PATCH 10/16] nouveau/hmm: support mapping large sysmem pages
...L << 1; /* SYSTEM_COHERENT_MEMORY. */
@@ -99,7 +106,7 @@ gp100_vmm_pgt_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
data |= BIT_ULL(0); /* VALID. */
}
- VMM_WO064(pt, vmm, ptei++ * 8, data);
+ VMM_WO064(pt, vmm, ptei++ << pt->ptei_shift, data);
map->pfn++;
}
nvkm_done(pt->memory);
@@ -264,6 +271,9 @@ gp100_vmm_desc_pd0 = {
.sparse = gp100_vmm_pd0_sparse,
.pde = gp100_vmm_pd0_pde,
.mem = gp100_vmm_pd0_mem,
+ .pfn = gp100_vmm_pgt_pfn,
+ .pfn_clear = gp100_vmm_pfn_clear,
+ .pfn_unmap = gp100_vmm_pfn_unmap,
};
static void
@@ -286,6 +296,9 @@ gp100_vmm_d...
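The key change in this hunk is the stride parameterization: leaf page tables use 8-byte PTEs, while the PD0 level (reused here for 2 MiB mappings) uses 16-byte dual entries, so the hard-coded "* 8" becomes "<< pt->ptei_shift". A sketch of the idea (ptei_shift is the per-table field this patch introduces; the 3-vs-4 values are the implied assumption):

	/* Byte offset of PTE index 'ptei' within table 'pt':
	 * ptei_shift == 3 for 8-byte leaf PTEs, 4 for 16-byte PD0 entries. */
	static inline u32 pte_byte_offset(const struct nvkm_mmu_pt *pt, u32 ptei)
	{
		return ptei << pt->ptei_shift;
	}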
2020 Jul 23
9
[PATCH v4 0/6] mm/migrate: avoid device private invalidations
The goal for this series is to avoid device private memory TLB
invalidations when migrating a range of addresses from system
memory to device private memory and some of those pages have already
been migrated. The approach taken is to introduce a new mmu notifier
invalidation event type and use that in the device driver to skip
invalidation callbacks from migrate_vma_setup(). The device driver is