Hi guys,

so I got some hardware and tested this and after hammering out tons of
typos it now seems to work fine.

Could you give it more testing?

Thanks in advance,
Christian
Christian König
2020-Aug-21 16:00 UTC
[Nouveau] [PATCH 1/3] drm/ttm: make sure that we always zero init mem.bus
We are trying to remove the io_reserve_lru handling and depend on zero
init base and offset here.

Signed-off-by: Christian König <christian.koenig at amd.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index e3931e515906..60b583d07b04 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -650,6 +650,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
 	evict_mem.mm_node = NULL;
 	evict_mem.bus.io_reserved_vm = false;
 	evict_mem.bus.io_reserved_count = 0;
+	evict_mem.bus.base = 0;
+	evict_mem.bus.offset = 0;
 
 	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
 	if (ret) {
@@ -1084,6 +1086,8 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	mem.page_alignment = bo->mem.page_alignment;
 	mem.bus.io_reserved_vm = false;
 	mem.bus.io_reserved_count = 0;
+	mem.bus.base = 0;
+	mem.bus.offset = 0;
 	mem.mm_node = NULL;
 
 	/*
@@ -1243,6 +1247,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	bo->mem.page_alignment = page_alignment;
 	bo->mem.bus.io_reserved_vm = false;
 	bo->mem.bus.io_reserved_count = 0;
+	bo->mem.bus.base = 0;
+	bo->mem.bus.offset = 0;
 	bo->moving = NULL;
 	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
 	bo->acc_size = acc_size;
--
2.17.1
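The zero init matters because patch 2 turns bus.base/bus.offset into the
"already mapped" indicator for the io region. A minimal sketch of that
dependency, using a simplified stand-in for struct ttm_bus_placement
rather than the real TTM types:

/* Simplified stand-in for struct ttm_bus_placement; illustration only. */
struct bus_placement {
	unsigned long base;   /* 0 until the io region is reserved */
	unsigned long offset; /* 0 until the io region is reserved */
};

/* The driver-side check from patch 2 relies on both fields starting
 * out zeroed; stale garbage here would wrongly skip the reserve. */
static bool already_mapped(const struct bus_placement *bus)
{
	return bus->base || bus->offset;
}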
Christian König
2020-Aug-21 16:00 UTC
[Nouveau] [PATCH 2/3] drm/nouveau: move io_reserve_lru handling into the driver v4
While working on TTM cleanups I've found that the io_reserve_lru used by
Nouveau is actually not working at all.

In general we should remove driver specific handling from the memory
management, so this patch moves the io_reserve_lru handling into Nouveau
instead.

v2: don't call ttm_bo_unmap_virtual in nouveau_ttm_io_mem_reserve
v3: rebased and use both base and offset in the check
v4: fix small typos and test the patch

Signed-off-by: Christian König <christian.koenig at amd.com>
Acked-by: Daniel Vetter <daniel.vetter at ffwll.ch>
---
 drivers/gpu/drm/nouveau/nouveau_bo.c  | 111 ++++++++++++++++++++------
 drivers/gpu/drm/nouveau/nouveau_bo.h  |   3 +
 drivers/gpu/drm/nouveau/nouveau_drv.h |   2 +
 drivers/gpu/drm/nouveau/nouveau_ttm.c |  44 +++++++++-
 4 files changed, 135 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 5392e5fea5d4..ee0e135ddcbb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -137,6 +137,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 
 	WARN_ON(nvbo->pin_refcnt > 0);
+	nouveau_bo_del_io_reserve_lru(bo);
 	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
 
 	/*
@@ -304,6 +305,7 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
 
 	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
 	nouveau_bo_placement_set(nvbo, flags, 0);
+	INIT_LIST_HEAD(&nvbo->io_reserve_lru);
 
 	ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
 			  &nvbo->placement, align >> PAGE_SHIFT, false,
@@ -574,6 +576,26 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
 					   PAGE_SIZE, DMA_FROM_DEVICE);
 }
 
+void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
+{
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+	mutex_lock(&drm->ttm.io_reserve_mutex);
+	list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru);
+	mutex_unlock(&drm->ttm.io_reserve_mutex);
+}
+
+void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo)
+{
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+	mutex_lock(&drm->ttm.io_reserve_mutex);
+	list_del_init(&nvbo->io_reserve_lru);
+	mutex_unlock(&drm->ttm.io_reserve_mutex);
+}
+
 int
 nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
 		    bool no_wait_gpu)
@@ -888,6 +910,8 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
 	if (bo->destroy != nouveau_bo_del_ttm)
 		return;
 
+	nouveau_bo_del_io_reserve_lru(bo);
+
 	if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
 	    mem->mem.page == nvbo->page) {
 		list_for_each_entry(vma, &nvbo->vma_list, head) {
@@ -1018,23 +1042,54 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 					  filp->private_data);
 }
 
+static void
+nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm,
+			       struct ttm_resource *reg)
+{
+	struct nouveau_mem *mem = nouveau_mem(reg);
+
+	if (!reg->bus.base && !reg->bus.offset)
+		return; /* already freed */
+
+	if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
+		switch (reg->mem_type) {
+		case TTM_PL_TT:
+			if (mem->kind)
+				nvif_object_unmap_handle(&mem->mem.object);
+			break;
+		case TTM_PL_VRAM:
+			nvif_object_unmap_handle(&mem->mem.object);
+			break;
+		default:
+			break;
+		}
+	}
+	reg->bus.base = 0;
+	reg->bus.offset = 0;
+}
+
 static int
 nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
 	struct nvkm_device *device = nvxx_device(&drm->client.device);
 	struct nouveau_mem *mem = nouveau_mem(reg);
+	int ret;
+
+	if (reg->bus.base || reg->bus.offset)
+		return 0; /* already mapped */
 
 	reg->bus.addr = NULL;
-	reg->bus.offset = 0;
 	reg->bus.size = reg->num_pages << PAGE_SHIFT;
-	reg->bus.base = 0;
 	reg->bus.is_iomem = false;
 
+	mutex_lock(&drm->ttm.io_reserve_mutex);
+retry:
 	switch (reg->mem_type) {
 	case TTM_PL_SYSTEM:
 		/* System memory */
-		return 0;
+		ret = 0;
+		goto out;
 	case TTM_PL_TT:
 #if IS_ENABLED(CONFIG_AGP)
 		if (drm->agp.bridge) {
@@ -1043,9 +1098,12 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
 			reg->bus.is_iomem = !drm->agp.cma;
 		}
 #endif
-		if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || !mem->kind)
+		if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 ||
+		    !mem->kind) {
 			/* untiled */
+			ret = 0;
 			break;
+		}
 		fallthrough;	/* tiled memory */
 	case TTM_PL_VRAM:
 		reg->bus.offset = reg->start << PAGE_SHIFT;
@@ -1058,7 +1116,6 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
 		} args;
 		u64 handle, length;
 		u32 argc = 0;
-		int ret;
 
 		switch (mem->mem.object.oclass) {
 		case NVIF_CLASS_MEM_NV50:
@@ -1084,39 +1141,47 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
 					 &handle, &length);
 		if (ret != 1) {
 			if (WARN_ON(ret == 0))
-				return -EINVAL;
-			return ret;
+				ret = -EINVAL;
+			goto out;
 		}
 
 		reg->bus.base = 0;
 		reg->bus.offset = handle;
+		ret = 0;
 	}
 	break;
 	default:
-		return -EINVAL;
+		ret = -EINVAL;
 	}
-	return 0;
+
+out:
+	if (ret == -ENOSPC) {
+		struct nouveau_bo *nvbo;
+
+		nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru,
+						typeof(*nvbo),
+						io_reserve_lru);
+		if (nvbo) {
+			list_del_init(&nvbo->io_reserve_lru);
+			drm_vma_node_unmap(&nvbo->bo.base.vma_node,
+					   bdev->dev_mapping);
+			nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem);
+			goto retry;
+		}
+
+	}
+	mutex_unlock(&drm->ttm.io_reserve_mutex);
+	return ret;
 }
 
 static void
 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
-	struct nouveau_mem *mem = nouveau_mem(reg);
 
-	if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
-		switch (reg->mem_type) {
-		case TTM_PL_TT:
-			if (mem->kind)
-				nvif_object_unmap_handle(&mem->mem.object);
-			break;
-		case TTM_PL_VRAM:
-			nvif_object_unmap_handle(&mem->mem.object);
-			break;
-		default:
-			break;
-		}
-	}
+	mutex_lock(&drm->ttm.io_reserve_mutex);
+	nouveau_ttm_io_mem_free_locked(drm, reg);
+	mutex_unlock(&drm->ttm.io_reserve_mutex);
 }
 
 static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index aecb7481df0d..ae90aca33fef 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -18,6 +18,7 @@ struct nouveau_bo {
 	bool force_coherent;
 	struct ttm_bo_kmap_obj kmap;
 	struct list_head head;
+	struct list_head io_reserve_lru;
 
 	/* protected by ttm_bo_reserve() */
 	struct drm_file *reserved_by;
@@ -96,6 +97,8 @@ int  nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
 			 bool no_wait_gpu);
 void nouveau_bo_sync_for_device(struct nouveau_bo *nvbo);
 void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo);
+void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo);
+void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo);
 
 /* TODO: submit equivalent to TTM generic API upstream? */
 static inline void __iomem *
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index f63ac72aa556..26a2c1090045 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -164,6 +164,8 @@ struct nouveau_drm {
 		int type_vram;
 		int type_host[2];
 		int type_ncoh[2];
+		struct mutex io_reserve_mutex;
+		struct list_head io_reserve_lru;
 	} ttm;
 
 	/* GEM interface support */
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 53c6f8827322..a62f37b1131c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -123,13 +123,51 @@ const struct ttm_resource_manager_func nv04_gart_manager = {
 	.free = nouveau_manager_del,
 };
 
+static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	struct ttm_buffer_object *bo = vma->vm_private_data;
+	pgprot_t prot;
+	vm_fault_t ret;
+
+	ret = ttm_bo_vm_reserve(bo, vmf);
+	if (ret)
+		return ret;
+
+	nouveau_bo_del_io_reserve_lru(bo);
+
+	prot = vm_get_page_prot(vma->vm_flags);
+	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
+	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+		return ret;
+
+	nouveau_bo_add_io_reserve_lru(bo);
+
+	dma_resv_unlock(bo->base.resv);
+
+	return ret;
+}
+
+static struct vm_operations_struct nouveau_ttm_vm_ops = {
+	.fault = nouveau_ttm_fault,
+	.open = ttm_bo_vm_open,
+	.close = ttm_bo_vm_close,
+	.access = ttm_bo_vm_access
+};
+
 int
 nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	struct drm_file *file_priv = filp->private_data;
 	struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
+	int ret;
 
-	return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
+	ret = ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
+	if (ret)
+		return ret;
+
+	vma->vm_ops = &nouveau_ttm_vm_ops;
+	return 0;
 }
 
 static int
@@ -173,7 +211,6 @@ nouveau_ttm_init_vram(struct nouveau_drm *drm)
 	}
 
 	man->func = &nouveau_vram_manager;
-	man->use_io_reserve_lru = true;
 
 	ttm_resource_manager_init(man,
 				  drm->gem.vram_available >> PAGE_SHIFT);
@@ -339,6 +376,9 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 		return ret;
 	}
 
+	mutex_init(&drm->ttm.io_reserve_mutex);
+	INIT_LIST_HEAD(&drm->ttm.io_reserve_lru);
+
 	NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
 	NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
 	return 0;
--
2.17.1
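The core of the driver-side handling is the evict-and-retry loop: when
mapping fails with -ENOSPC, the least recently faulted BO is unmapped and
its io region freed, then the mapping is retried. A condensed sketch of
that pattern, with hypothetical helpers map_resource() and
evict_one_lru_entry() standing in for the real bodies in
nouveau_ttm_io_mem_reserve():

static int reserve_with_lru_eviction(struct nouveau_drm *drm,
				     struct ttm_resource *reg)
{
	int ret;

	mutex_lock(&drm->ttm.io_reserve_mutex);
	do {
		/* may fail with -ENOSPC when BAR space is exhausted */
		ret = map_resource(drm, reg);
		if (ret != -ENOSPC)
			break;
		/* unmap the least recently faulted BO to make room */
	} while (evict_one_lru_entry(drm) == 0);
	mutex_unlock(&drm->ttm.io_reserve_mutex);

	return ret;
}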
Christian König
2020-Aug-21 16:00 UTC
[Nouveau] [PATCH 3/3] drm/ttm: remove io_reserve_lru handling
From: Christian König <ckoenig.leichtzumerken at gmail.com>

That is not used any more.

Signed-off-by: Christian König <christian.koenig at amd.com>
Acked-by: Daniel Vetter <daniel.vetter at ffwll.ch>
---
 drivers/gpu/drm/ttm/ttm_bo.c       |  34 ++-------
 drivers/gpu/drm/ttm/ttm_bo_util.c  | 108 +----------------------------
 drivers/gpu/drm/ttm/ttm_bo_vm.c    |  39 +++--------
 drivers/gpu/drm/ttm/ttm_resource.c |   3 -
 include/drm/ttm/ttm_bo_api.h       |   1 -
 include/drm/ttm/ttm_bo_driver.h    |   5 --
 include/drm/ttm/ttm_resource.h     |  16 -----
 7 files changed, 18 insertions(+), 188 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 60b583d07b04..8257421fc9a0 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -263,11 +263,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 	struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type);
 	int ret;
 
-	ret = ttm_mem_io_lock(old_man, true);
-	if (unlikely(ret != 0))
-		goto out_err;
-	ttm_bo_unmap_virtual_locked(bo);
-	ttm_mem_io_unlock(old_man);
+	ttm_bo_unmap_virtual(bo);
 
 	/*
 	 * Create and bind a ttm if required.
@@ -538,7 +534,6 @@ static void ttm_bo_release(struct kref *kref)
 	struct ttm_buffer_object *bo =
 	    container_of(kref, struct ttm_buffer_object, kref);
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_resource_manager *man = ttm_manager_type(bdev, bo->mem.mem_type);
 	size_t acc_size = bo->acc_size;
 	int ret;
 
@@ -556,9 +551,7 @@ static void ttm_bo_release(struct kref *kref)
 			bo->bdev->driver->release_notify(bo);
 
 		drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
-		ttm_mem_io_lock(man, false);
-		ttm_mem_io_free_vm(bo);
-		ttm_mem_io_unlock(man);
+		ttm_mem_io_free(bdev, &bo->mem);
 	}
 
 	if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
@@ -648,8 +641,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
 
 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;
-	evict_mem.bus.io_reserved_vm = false;
-	evict_mem.bus.io_reserved_count = 0;
 	evict_mem.bus.base = 0;
 	evict_mem.bus.offset = 0;
 
@@ -1084,8 +1075,6 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
 	mem.page_alignment = bo->mem.page_alignment;
-	mem.bus.io_reserved_vm = false;
-	mem.bus.io_reserved_count = 0;
 	mem.bus.base = 0;
 	mem.bus.offset = 0;
 	mem.mm_node = NULL;
 
 	/*
@@ -1236,7 +1225,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	INIT_LIST_HEAD(&bo->lru);
 	INIT_LIST_HEAD(&bo->ddestroy);
 	INIT_LIST_HEAD(&bo->swap);
-	INIT_LIST_HEAD(&bo->io_reserve_lru);
 	bo->bdev = bdev;
 	bo->type = type;
 	bo->num_pages = num_pages;
@@ -1245,8 +1233,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	bo->mem.num_pages = bo->num_pages;
 	bo->mem.mm_node = NULL;
 	bo->mem.page_alignment = page_alignment;
-	bo->mem.bus.io_reserved_vm = false;
-	bo->mem.bus.io_reserved_count = 0;
 	bo->mem.bus.base = 0;
 	bo->mem.bus.offset = 0;
 	bo->moving = NULL;
@@ -1551,25 +1537,13 @@ EXPORT_SYMBOL(ttm_bo_device_init);
  * buffer object vm functions.
  */
 
-void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
-{
-	struct ttm_bo_device *bdev = bo->bdev;
-
-	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
-	ttm_mem_io_free_vm(bo);
-}
-
 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_resource_manager *man = ttm_manager_type(bdev, bo->mem.mem_type);
 
-	ttm_mem_io_lock(man, false);
-	ttm_bo_unmap_virtual_locked(bo);
-	ttm_mem_io_unlock(man);
+	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
+	ttm_mem_io_free(bdev, &bo->mem);
 }
-
-
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
 int ttm_bo_wait(struct ttm_buffer_object *bo,
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 7b372ede12c2..38dbcae31024 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -91,118 +91,31 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
 
-int ttm_mem_io_lock(struct ttm_resource_manager *man, bool interruptible)
-{
-	if (likely(!man->use_io_reserve_lru))
-		return 0;
-
-	if (interruptible)
-		return mutex_lock_interruptible(&man->io_reserve_mutex);
-
-	mutex_lock(&man->io_reserve_mutex);
-	return 0;
-}
-
-void ttm_mem_io_unlock(struct ttm_resource_manager *man)
-{
-	if (likely(!man->use_io_reserve_lru))
-		return;
-
-	mutex_unlock(&man->io_reserve_mutex);
-}
-
-static int ttm_mem_io_evict(struct ttm_resource_manager *man)
-{
-	struct ttm_buffer_object *bo;
-
-	bo = list_first_entry_or_null(&man->io_reserve_lru,
-				      struct ttm_buffer_object,
-				      io_reserve_lru);
-	if (!bo)
-		return -ENOSPC;
-
-	list_del_init(&bo->io_reserve_lru);
-	ttm_bo_unmap_virtual_locked(bo);
-	return 0;
-}
-
 int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
 		       struct ttm_resource *mem)
 {
-	struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
-	int ret;
-
-	if (mem->bus.io_reserved_count++)
-		return 0;
-
 	if (!bdev->driver->io_mem_reserve)
 		return 0;
 
-retry:
-	ret = bdev->driver->io_mem_reserve(bdev, mem);
-	if (ret == -ENOSPC) {
-		ret = ttm_mem_io_evict(man);
-		if (ret == 0)
-			goto retry;
-	}
-	return ret;
+	return bdev->driver->io_mem_reserve(bdev, mem);
 }
 
 void ttm_mem_io_free(struct ttm_bo_device *bdev,
 		     struct ttm_resource *mem)
 {
-	if (--mem->bus.io_reserved_count)
-		return;
-
-	if (!bdev->driver->io_mem_free)
-		return;
-
-	bdev->driver->io_mem_free(bdev, mem);
-}
-
-int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
-{
-	struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, bo->mem.mem_type);
-	struct ttm_resource *mem = &bo->mem;
-	int ret;
-
-	if (mem->bus.io_reserved_vm)
-		return 0;
-
-	ret = ttm_mem_io_reserve(bo->bdev, mem);
-	if (unlikely(ret != 0))
-		return ret;
-	mem->bus.io_reserved_vm = true;
-	if (man->use_io_reserve_lru)
-		list_add_tail(&bo->io_reserve_lru,
-			      &man->io_reserve_lru);
-	return 0;
-}
-
-void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
-{
-	struct ttm_resource *mem = &bo->mem;
-
-	if (!mem->bus.io_reserved_vm)
-		return;
-
-	mem->bus.io_reserved_vm = false;
-	list_del_init(&bo->io_reserve_lru);
-	ttm_mem_io_free(bo->bdev, mem);
+	if (bdev->driver->io_mem_free)
+		bdev->driver->io_mem_free(bdev, mem);
 }
 
 static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
 				struct ttm_resource *mem,
 				void **virtual)
 {
-	struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
 	int ret;
 	void *addr;
 
 	*virtual = NULL;
-	(void) ttm_mem_io_lock(man, false);
 	ret = ttm_mem_io_reserve(bdev, mem);
-	ttm_mem_io_unlock(man);
 	if (ret || !mem->bus.is_iomem)
 		return ret;
@@ -216,9 +129,7 @@ static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
 			addr = ioremap(mem->bus.base + mem->bus.offset,
 				       mem->bus.size);
 		if (!addr) {
-			(void) ttm_mem_io_lock(man, false);
 			ttm_mem_io_free(bdev, mem);
-			ttm_mem_io_unlock(man);
 			return -ENOMEM;
 		}
 	}
@@ -236,9 +147,7 @@ static void ttm_resource_iounmap(struct ttm_bo_device *bdev,
 	if (virtual && mem->bus.addr == NULL)
 		iounmap(virtual);
-	(void) ttm_mem_io_lock(man, false);
 	ttm_mem_io_free(bdev, mem);
-	ttm_mem_io_unlock(man);
 }
 
 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -452,7 +361,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	INIT_LIST_HEAD(&fbo->base.ddestroy);
 	INIT_LIST_HEAD(&fbo->base.lru);
 	INIT_LIST_HEAD(&fbo->base.swap);
-	INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
 	fbo->base.moving = NULL;
 	drm_vma_node_reset(&fbo->base.base.vma_node);
 
@@ -567,8 +475,6 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 		unsigned long start_page, unsigned long num_pages,
 		struct ttm_bo_kmap_obj *map)
 {
-	struct ttm_resource_manager *man =
-		ttm_manager_type(bo->bdev, bo->mem.mem_type);
 	unsigned long offset, size;
 	int ret;
 
@@ -579,9 +485,7 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 	if (start_page > bo->num_pages)
 		return -EINVAL;
 
-	(void) ttm_mem_io_lock(man, false);
 	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
-	ttm_mem_io_unlock(man);
 	if (ret)
 		return ret;
 	if (!bo->mem.bus.is_iomem) {
@@ -596,10 +500,6 @@ EXPORT_SYMBOL(ttm_bo_kmap);
 
 void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 {
-	struct ttm_buffer_object *bo = map->bo;
-	struct ttm_resource_manager *man =
-		ttm_manager_type(bo->bdev, bo->mem.mem_type);
-
 	if (!map->virtual)
 		return;
 	switch (map->bo_kmap_type) {
@@ -617,9 +517,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 	default:
 		BUG();
 	}
-	(void) ttm_mem_io_lock(man, false);
 	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
-	ttm_mem_io_unlock(man);
 	map->virtual = NULL;
 	map->page = NULL;
 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 0b805f72d18e..d3dc0682425c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -281,8 +281,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 	pgoff_t i;
 	vm_fault_t ret = VM_FAULT_NOPAGE;
 	unsigned long address = vmf->address;
-	struct ttm_resource_manager *man =
-		ttm_manager_type(bdev, bo->mem.mem_type);
 
 	/*
 	 * Refuse to fault imported pages. This should be handled
@@ -321,24 +319,17 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		if (unlikely(ret != 0))
 			return ret;
 
-	err = ttm_mem_io_lock(man, true);
+	err = ttm_mem_io_reserve(bdev, &bo->mem);
 	if (unlikely(err != 0))
-		return VM_FAULT_NOPAGE;
-	err = ttm_mem_io_reserve_vm(bo);
-	if (unlikely(err != 0)) {
-		ret = VM_FAULT_SIGBUS;
-		goto out_io_unlock;
-	}
+		return VM_FAULT_SIGBUS;
 
 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
 		vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
 	page_last = vma_pages(vma) + vma->vm_pgoff -
 		drm_vma_node_start(&bo->base.vma_node);
 
-	if (unlikely(page_offset >= bo->num_pages)) {
-		ret = VM_FAULT_SIGBUS;
-		goto out_io_unlock;
-	}
+	if (unlikely(page_offset >= bo->num_pages))
+		return VM_FAULT_SIGBUS;
 
 	prot = ttm_io_prot(bo->mem.placement, prot);
 	if (!bo->mem.bus.is_iomem) {
@@ -350,21 +341,17 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		};
 
 		ttm = bo->ttm;
-		if (ttm_tt_populate(bo->ttm, &ctx)) {
-			ret = VM_FAULT_OOM;
-			goto out_io_unlock;
-		}
+		if (ttm_tt_populate(bo->ttm, &ctx))
+			return VM_FAULT_OOM;
 	} else {
 		/* Iomem should not be marked encrypted */
 		prot = pgprot_decrypted(prot);
 	}
 
 	/* We don't prefault on huge faults. Yet. */
-	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1) {
-		ret = ttm_bo_vm_insert_huge(vmf, bo, page_offset,
-					    fault_page_size, prot);
-		goto out_io_unlock;
-	}
+	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1)
+		return ttm_bo_vm_insert_huge(vmf, bo, page_offset,
+					     fault_page_size, prot);
 
 	/*
 	 * Speculatively prefault a number of pages. Only error on
@@ -376,8 +363,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		} else {
 			page = ttm->pages[page_offset];
 			if (unlikely(!page && i == 0)) {
-				ret = VM_FAULT_OOM;
-				goto out_io_unlock;
+				return VM_FAULT_OOM;
 			} else if (unlikely(!page)) {
 				break;
 			}
@@ -404,7 +390,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		/* Never error on prefaulted PTEs */
 		if (unlikely((ret & VM_FAULT_ERROR))) {
 			if (i == 0)
-				goto out_io_unlock;
+				return VM_FAULT_NOPAGE;
 			else
 				break;
 		}
@@ -413,9 +399,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		if (unlikely(++page_offset >= page_last))
 			break;
 	}
-	ret = VM_FAULT_NOPAGE;
-out_io_unlock:
-	ttm_mem_io_unlock(man);
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 33b642532e5c..3a4602f9a03b 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -65,10 +65,7 @@ void ttm_resource_manager_init(struct ttm_resource_manager *man,
 {
 	unsigned i;
 
-	man->use_io_reserve_lru = false;
-	mutex_init(&man->io_reserve_mutex);
 	spin_lock_init(&man->move_lock);
-	INIT_LIST_HEAD(&man->io_reserve_lru);
 	man->size = p_size;
 
 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 6c580987ba16..36ff64e2736c 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -151,7 +151,6 @@ struct ttm_buffer_object {
 	struct list_head lru;
 	struct list_head ddestroy;
 	struct list_head swap;
-	struct list_head io_reserve_lru;
 
 	/**
 	 * Members protected by a bo reservation.
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index bc8d0ebb7568..9744ed9d677e 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -441,11 +441,6 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
  */
 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
 
-int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
-void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
-int ttm_mem_io_lock(struct ttm_resource_manager *man, bool interruptible);
-void ttm_mem_io_unlock(struct ttm_resource_manager *man);
-
 /**
  * ttm_bo_reserve:
  *
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index bac22a56f6cd..555ccf14f8b6 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -113,10 +113,6 @@ struct ttm_resource_manager_func {
  * @default_caching: The default caching policy used for a buffer object
  *	placed in this memory type if the user doesn't provide one.
  * @func: structure pointer implementing the range manager. See above
- * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
- * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
- *	reserved by the TTM vm system.
- * @io_reserve_lru: Optional lru list for unreserving io mem regions.
 * @move_lock: lock for move fence
 * static information. bdev::driver::io_mem_free is never used.
 * @lru: The lru list for this memory type.
@@ -134,16 +130,8 @@ struct ttm_resource_manager {
 	uint32_t available_caching;
 	uint32_t default_caching;
 	const struct ttm_resource_manager_func *func;
-	struct mutex io_reserve_mutex;
-	bool use_io_reserve_lru;
 	spinlock_t move_lock;
 
-	/*
-	 * Protected by @io_reserve_mutex:
-	 */
-
-	struct list_head io_reserve_lru;
-
 	/*
 	 * Protected by the global->lru_lock.
 	 */
@@ -164,8 +152,6 @@ struct ttm_resource_manager {
 * @is_iomem: is this io memory ?
 * @size: size in byte
 * @offset: offset from the base address
- * @io_reserved_vm: The VM system has a refcount in @io_reserved_count
- * @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve
 *
 * Structure indicating the bus placement of an object.
 */
@@ -175,8 +161,6 @@ struct ttm_bus_placement {
 	unsigned long size;
 	unsigned long offset;
 	bool is_iomem;
-	bool io_reserved_vm;
-	uint64_t io_reserved_count;
 };
 
 /**
--
2.17.1
I've also given this a spin here, seems to be okay.

I've read the patches and they all seem fine.

Reviewed-by: Dave Airlie <airlied at redhat.com> for all 3 patches.

Dave.

On Sat, 22 Aug 2020 at 02:00, Christian König
<ckoenig.leichtzumerken at gmail.com> wrote:
>
> Hi guys,
>
> so I got some hardware and tested this and after hammering out tons of
> typos it now seems to work fine.
>
> Could you give it more testing?
>
> Thanks in advance,
> Christian
Dave Airlie
2020-Aug-26 01:29 UTC
[Nouveau] [PATCH 2/3] drm/nouveau: move io_reserve_lru handling into the driver v4
On Sat, 22 Aug 2020 at 02:01, Christian König
<ckoenig.leichtzumerken at gmail.com> wrote:
>
> While working on TTM cleanups I've found that the io_reserve_lru used by
> Nouveau is actually not working at all.
>
> In general we should remove driver specific handling from the memory
> management, so this patch moves the io_reserve_lru handling into Nouveau
> instead.
>
> v2: don't call ttm_bo_unmap_virtual in nouveau_ttm_io_mem_reserve
> v3: rebased and use both base and offset in the check
> v4: fix small typos and test the patch
>
> Signed-off-by: Christian König <christian.koenig at amd.com>
> Acked-by: Daniel Vetter <daniel.vetter at ffwll.ch>

[snip]

> +static struct vm_operations_struct nouveau_ttm_vm_ops = {

should this be const?

Dave.
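For reference, the const variant being suggested would presumably look
like the sketch below; since vm_area_struct.vm_ops is a pointer to const,
the assignment in nouveau_ttm_mmap() should still compile unchanged:

static const struct vm_operations_struct nouveau_ttm_vm_ops = {
	.fault = nouveau_ttm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};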