Christian König
2019-Nov-20 13:17 UTC
[Nouveau] Move io_reserve_lru handling into the driver
Just a gentle ping on this. I already got the Acked-by from Daniel, but I need some of the nouveau guys to test this, since I can only compile-test it.
Regards,
Christian.
Christian König
2019-Nov-20 13:17 UTC
[Nouveau] [PATCH 1/2] drm/nouveau: move io_reserve_lru handling into the driver
From: Christian König <ckoenig.leichtzumerken at gmail.com>
While working on TTM cleanups I've found that the io_reserve_lru used by
Nouveau is actually not working at all.

In general we should remove driver-specific handling from the memory
management, so this patch moves the io_reserve_lru handling into Nouveau
instead.

The patch should be functionally correct, but it is only compile-tested!
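To make the idea easier to review, here is a stand-alone user-space sketch of the eviction-and-retry pattern the patch implements in nouveau_ttm_io_mem_reserve(). All names (BAR_SLOTS, hw_map, io_mem_reserve, ...) are made up for illustration, the LRU bookkeeping is simplified to an array, and buffers go onto the LRU directly on reserve rather than from the fault handler:

#include <errno.h>
#include <stdio.h>

#define BAR_SLOTS 2	/* pretend the mappable BAR has only two slots */

struct buffer { int id; int mapped; };

static struct buffer *lru[16];	/* lru[0] is coldest, lru[lru_len - 1] hottest */
static int lru_len, used_slots;

static void lru_remove(struct buffer *b)
{
	int i, j;

	for (i = 0; i < lru_len; i++) {
		if (lru[i] != b)
			continue;
		for (j = i; j < lru_len - 1; j++)
			lru[j] = lru[j + 1];
		lru_len--;
		return;
	}
}

static void lru_add_tail(struct buffer *b)
{
	lru_remove(b);		/* like list_move_tail(): most recently used last */
	lru[lru_len++] = b;
}

static int hw_map(struct buffer *b)
{
	if (used_slots == BAR_SLOTS)
		return -EAGAIN;	/* no BAR space left */
	used_slots++;
	b->mapped = 1;
	return 0;
}

static void hw_unmap(struct buffer *b)
{
	if (b->mapped) {
		b->mapped = 0;
		used_slots--;
	}
}

/* mirrors the retry loop in nouveau_ttm_io_mem_reserve() */
static int io_mem_reserve(struct buffer *b)
{
	int ret;

retry:
	ret = hw_map(b);
	if (ret == -EAGAIN && lru_len) {
		struct buffer *victim = lru[0];	/* least recently used */

		lru_remove(victim);
		hw_unmap(victim);	/* give its BAR slot back */
		goto retry;
	}
	if (!ret)
		lru_add_tail(b);
	return ret;
}

int main(void)
{
	struct buffer a = { 1, 0 }, b = { 2, 0 }, c = { 3, 0 };

	io_mem_reserve(&a);
	io_mem_reserve(&b);
	io_mem_reserve(&c);	/* kicks out 'a', then succeeds */
	printf("a=%d b=%d c=%d\n", a.mapped, b.mapped, c.mapped);	/* 0 1 1 */
	return 0;
}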
Signed-off-by: Christian König <christian.koenig at amd.com>
Acked-by: Daniel Vetter <daniel.vetter at ffwll.ch>
---
drivers/gpu/drm/nouveau/nouveau_bo.c | 106 ++++++++++++++++++++------
drivers/gpu/drm/nouveau/nouveau_bo.h | 3 +
drivers/gpu/drm/nouveau/nouveau_drv.h | 2 +
drivers/gpu/drm/nouveau/nouveau_ttm.c | 43 ++++++++++-
4 files changed, 130 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index f8015e0318d7..d144c2550a6f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -137,6 +137,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
struct nouveau_bo *nvbo = nouveau_bo(bo);
WARN_ON(nvbo->pin_refcnt > 0);
+ nouveau_bo_del_io_reserve_lru(bo);
nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
/*
@@ -304,6 +305,7 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
nouveau_bo_placement_set(nvbo, flags, 0);
+ INIT_LIST_HEAD(&nvbo->io_reserve_lru);
ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
&nvbo->placement, align >> PAGE_SHIFT, false,
@@ -574,6 +576,26 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
PAGE_SIZE, DMA_FROM_DEVICE);
}
+void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
+{
+ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+ mutex_lock(&drm->ttm.io_reserve_mutex);
+ list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru);
+ mutex_unlock(&drm->ttm.io_reserve_mutex);
+}
+
+void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo)
+{
+ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+ mutex_lock(&drm->ttm.io_reserve_mutex);
+ list_del_init(&nvbo->io_reserve_lru);
+ mutex_unlock(&drm->ttm.io_reserve_mutex);
+}
+
int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
bool no_wait_gpu)
@@ -682,8 +704,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
}
man->func = &nouveau_vram_manager;
- man->io_reserve_fastpath = false;
- man->use_io_reserve_lru = true;
} else {
man->func = &ttm_bo_manager_func;
}
@@ -1312,6 +1332,8 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
if (bo->destroy != nouveau_bo_del_ttm)
return;
+ nouveau_bo_del_io_reserve_lru(bo);
+
if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
mem->mem.page == nvbo->page) {
list_for_each_entry(vma, &nvbo->vma_list, head) {
@@ -1434,6 +1456,30 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
filp->private_data);
}
+static void
+nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm, struct ttm_mem_reg *reg)
+{
+ struct nouveau_mem *mem = nouveau_mem(reg);
+
+ if (!reg->bus.base)
+ return; /* already freed */
+
+ if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
+ switch (reg->mem_type) {
+ case TTM_PL_TT:
+ if (mem->kind)
+ nvif_object_unmap_handle(&mem->mem.object);
+ break;
+ case TTM_PL_VRAM:
+ nvif_object_unmap_handle(&mem->mem.object);
+ break;
+ default:
+ break;
+ }
+ }
+ reg->bus.base = 0;
+}
+
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
{
@@ -1441,18 +1487,26 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
struct nouveau_drm *drm = nouveau_bdev(bdev);
struct nvkm_device *device = nvxx_device(&drm->client.device);
struct nouveau_mem *mem = nouveau_mem(reg);
+ struct nouveau_bo *nvbo;
+ int ret;
+
+ if (reg->bus.base)
+ return 0; /* already mapped */
reg->bus.addr = NULL;
reg->bus.offset = 0;
reg->bus.size = reg->num_pages << PAGE_SHIFT;
- reg->bus.base = 0;
reg->bus.is_iomem = false;
if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
return -EINVAL;
+
+ mutex_lock(&drm->ttm.io_reserve_mutex);
+retry:
switch (reg->mem_type) {
case TTM_PL_SYSTEM:
/* System memory */
- return 0;
+ ret = 0;
+ goto out;
case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
if (drm->agp.bridge) {
@@ -1476,7 +1530,6 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
} args;
u64 handle, length;
u32 argc = 0;
- int ret;
switch (mem->mem.object.oclass) {
case NVIF_CLASS_MEM_NV50:
@@ -1500,38 +1553,45 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
ret = nvif_object_map_handle(&mem->mem.object,
&args, argc,
&handle, &length);
- if (ret != 1)
- return ret ? ret : -EINVAL;
+ if (ret != 1) {
+ ret = ret ? ret : -EINVAL;
+ goto out;
+ }
+ ret = 0;
reg->bus.base = 0;
reg->bus.offset = handle;
}
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
}
- return 0;
+
+out:
+ if (ret == -EAGAIN) {
+ nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru,
+ typeof(*nvbo),
+ io_reserve_lru);
+ if (nvbo) {
+ list_del_init(&nvbo->io_reserve_lru);
+ ttm_bo_unmap_virtual(&nvbo->bo);
+ nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem);
+ goto retry;
+ }
+
+ }
+ mutex_unlock(&drm->ttm.io_reserve_mutex);
+ return ret;
}
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
{
struct nouveau_drm *drm = nouveau_bdev(bdev);
- struct nouveau_mem *mem = nouveau_mem(reg);
- if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
- switch (reg->mem_type) {
- case TTM_PL_TT:
- if (mem->kind)
- nvif_object_unmap_handle(&mem->mem.object);
- break;
- case TTM_PL_VRAM:
- nvif_object_unmap_handle(&mem->mem.object);
- break;
- default:
- break;
- }
- }
+ mutex_lock(&drm->ttm.io_reserve_mutex);
+ nouveau_ttm_io_mem_free_locked(drm, reg);
+ mutex_unlock(&drm->ttm.io_reserve_mutex);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 38f9d8350963..c47fcdf80ade 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -17,6 +17,7 @@ struct nouveau_bo {
bool force_coherent;
struct ttm_bo_kmap_obj kmap;
struct list_head head;
+ struct list_head io_reserve_lru;
/* protected by ttm_bo_reserve() */
struct drm_file *reserved_by;
@@ -92,6 +93,8 @@ int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
bool no_wait_gpu);
void nouveau_bo_sync_for_device(struct nouveau_bo *nvbo);
void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo);
+void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo);
+void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo);
/* TODO: submit equivalent to TTM generic API upstream? */
static inline void __iomem *
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index da8c46e09943..cd19c8ce5939 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -158,6 +158,8 @@ struct nouveau_drm {
int type_vram;
int type_host[2];
int type_ncoh[2];
+ struct mutex io_reserve_mutex;
+ struct list_head io_reserve_lru;
} ttm;
/* GEM interface support */
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 77a0c6ad3cef..50518b48e9b4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -162,13 +162,51 @@ const struct ttm_mem_type_manager_func nv04_gart_manager = {
.debug = nouveau_manager_debug
};
+static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct ttm_buffer_object *bo = vma->vm_private_data;
+ pgprot_t prot;
+ vm_fault_t ret;
+
+ ret = ttm_bo_vm_reserve(bo, vmf);
+ if (ret)
+ return ret;
+
+ nouveau_bo_del_io_reserve_lru(bo);
+
+ prot = vm_get_page_prot(vma->vm_flags);
+ ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
+ if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ return ret;
+
+ nouveau_bo_add_io_reserve_lru(bo);
+
+ dma_resv_unlock(bo->base.resv);
+
+ return ret;
+}
+
+static struct vm_operations_struct nouveau_ttm_vm_ops = {
+ .fault = nouveau_ttm_fault,
+ .open = ttm_bo_vm_open,
+ .close = ttm_bo_vm_close,
+ .access = ttm_bo_vm_access
+};
+
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *file_priv = filp->private_data;
struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
+ int ret;
- return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
+ ret = ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
+ if (ret)
+ return ret;
+
+ vma->vm_ops = &nouveau_ttm_vm_ops;
+ return 0;
}
static int
@@ -273,6 +311,9 @@ nouveau_ttm_init(struct nouveau_drm *drm)
return ret;
}
+ mutex_init(&drm->ttm.io_reserve_mutex);
+ INIT_LIST_HEAD(&drm->ttm.io_reserve_lru);
+
NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available
>> 20));
NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available
>> 20));
return 0;
--
2.20.1
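A note for reviewers on the ordering in the new fault handler: the flow below is the same as the nouveau_ttm.c hunk above, restated (not new code) with the reasoning spelled out in comments.

static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	pgprot_t prot;
	vm_fault_t ret;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	/*
	 * Take the BO off the LRU first, so that a concurrent
	 * nouveau_ttm_io_mem_reserve() cannot pick it as an eviction
	 * victim and tear down the mapping this fault is inserting.
	 */
	nouveau_bo_del_io_reserve_lru(bo);

	prot = vm_get_page_prot(vma->vm_flags);
	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

	/*
	 * Done faulting: list_move_tail() makes the BO the most
	 * recently used entry again, so eviction starts with the
	 * buffers that have gone longest without a fault.
	 */
	nouveau_bo_add_io_reserve_lru(bo);

	dma_resv_unlock(bo->base.resv);

	return ret;
}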
Christian König
2019-Nov-20 13:17 UTC
[Nouveau] [PATCH 2/2] drm/ttm: remove io_reserve_lru handling
From: Christian König <ckoenig.leichtzumerken at gmail.com>
That is not used any more.
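With the common LRU gone, ttm_mem_io_reserve() becomes a plain forward to the driver's io_mem_reserve callback, so a driver that can exhaust its mappable window now has to resolve -EAGAIN itself, the way patch 1 does for nouveau. A minimal sketch of that contract; hypothetical_map_window() and hypothetical_evict_one_mapping() are made up for illustration:

static int hypothetical_io_mem_reserve(struct ttm_bo_device *bdev,
				       struct ttm_mem_reg *mem)
{
	int ret;

	do {
		/* try to map; fails with -EAGAIN when the window is full */
		ret = hypothetical_map_window(bdev, mem);
	} while (ret == -EAGAIN &&
		 hypothetical_evict_one_mapping(bdev) == 0);

	return ret;
}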
Signed-off-by: Christian König <christian.koenig at amd.com>
Acked-by: Daniel Vetter <daniel.vetter at ffwll.ch>
---
drivers/gpu/drm/ttm/ttm_bo.c | 37 +++-------
drivers/gpu/drm/ttm/ttm_bo_util.c | 114 +-----------------------------
drivers/gpu/drm/ttm/ttm_bo_vm.c | 33 +++------
include/drm/ttm/ttm_bo_api.h | 5 --
include/drm/ttm/ttm_bo_driver.h | 5 --
5 files changed, 20 insertions(+), 174 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 7e7925fecd9e..093f08f9072c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -311,13 +311,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
int ret = 0;
if (old_is_pci || new_is_pci ||
- ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
- ret = ttm_mem_io_lock(old_man, true);
- if (unlikely(ret != 0))
- goto out_err;
- ttm_bo_unmap_virtual_locked(bo);
- ttm_mem_io_unlock(old_man);
- }
+ ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
+ ttm_bo_unmap_virtual(bo);
/*
* Create and bind a ttm if required.
@@ -651,15 +646,12 @@ static void ttm_bo_release(struct kref *kref)
struct ttm_buffer_object *bo =
container_of(kref, struct ttm_buffer_object, kref);
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
if (bo->bdev->driver->release_notify)
bo->bdev->driver->release_notify(bo);
drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
- ttm_mem_io_lock(man, false);
- ttm_mem_io_free_vm(bo);
- ttm_mem_io_unlock(man);
+ ttm_mem_io_free(bdev, &bo->mem);
ttm_bo_cleanup_refs_or_queue(bo);
kref_put(&bo->list_kref, ttm_bo_release_list);
}
@@ -708,8 +700,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
- evict_mem.bus.io_reserved_vm = false;
- evict_mem.bus.io_reserved_count = 0;
+ evict_mem.bus.base = 0;
ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
if (ret) {
@@ -1152,8 +1143,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
mem.page_alignment = bo->mem.page_alignment;
- mem.bus.io_reserved_vm = false;
- mem.bus.io_reserved_count = 0;
+ mem.bus.base = 0;
/*
* Determine where to move the buffer.
*/
@@ -1298,8 +1288,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
bo->mem.num_pages = bo->num_pages;
bo->mem.mm_node = NULL;
bo->mem.page_alignment = page_alignment;
- bo->mem.bus.io_reserved_vm = false;
- bo->mem.bus.io_reserved_count = 0;
+ bo->mem.bus.base = 0;
bo->moving = NULL;
bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
bo->acc_size = acc_size;
@@ -1745,22 +1734,12 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
return true;
}
-void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
-{
- struct ttm_bo_device *bdev = bo->bdev;
-
- drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
- ttm_mem_io_free_vm(bo);
-}
-
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
- ttm_mem_io_lock(man, false);
- ttm_bo_unmap_virtual_locked(bo);
- ttm_mem_io_unlock(man);
+ drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
+ ttm_mem_io_free(bdev, &bo->mem);
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2b0e5a088da0..ace31ad65997 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -91,124 +91,30 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
-int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
-{
- if (likely(man->io_reserve_fastpath))
- return 0;
-
- if (interruptible)
- return mutex_lock_interruptible(&man->io_reserve_mutex);
-
- mutex_lock(&man->io_reserve_mutex);
- return 0;
-}
-
-void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
-{
- if (likely(man->io_reserve_fastpath))
- return;
-
- mutex_unlock(&man->io_reserve_mutex);
-}
-
-static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
-{
- struct ttm_buffer_object *bo;
-
- if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
- return -EAGAIN;
-
- bo = list_first_entry(&man->io_reserve_lru,
- struct ttm_buffer_object,
- io_reserve_lru);
- list_del_init(&bo->io_reserve_lru);
- ttm_bo_unmap_virtual_locked(bo);
-
- return 0;
-}
-
-
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem)
{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- int ret = 0;
-
if (!bdev->driver->io_mem_reserve)
return 0;
- if (likely(man->io_reserve_fastpath))
- return bdev->driver->io_mem_reserve(bdev, mem);
-
- if (bdev->driver->io_mem_reserve &&
- mem->bus.io_reserved_count++ == 0) {
-retry:
- ret = bdev->driver->io_mem_reserve(bdev, mem);
- if (ret == -EAGAIN) {
- ret = ttm_mem_io_evict(man);
- if (ret == 0)
- goto retry;
- }
- }
- return ret;
+
+ return bdev->driver->io_mem_reserve(bdev, mem);
}
void ttm_mem_io_free(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem)
{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-
- if (likely(man->io_reserve_fastpath))
- return;
-
- if (bdev->driver->io_mem_reserve &&
- --mem->bus.io_reserved_count == 0 &&
- bdev->driver->io_mem_free)
+ if (bdev->driver->io_mem_free)
bdev->driver->io_mem_free(bdev, mem);
-
-}
-
-int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
-{
- struct ttm_mem_reg *mem = &bo->mem;
- int ret;
-
- if (!mem->bus.io_reserved_vm) {
- struct ttm_mem_type_manager *man =
- &bo->bdev->man[mem->mem_type];
-
- ret = ttm_mem_io_reserve(bo->bdev, mem);
- if (unlikely(ret != 0))
- return ret;
- mem->bus.io_reserved_vm = true;
- if (man->use_io_reserve_lru)
- list_add_tail(&bo->io_reserve_lru,
- &man->io_reserve_lru);
- }
- return 0;
-}
-
-void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
-{
- struct ttm_mem_reg *mem = &bo->mem;
-
- if (mem->bus.io_reserved_vm) {
- mem->bus.io_reserved_vm = false;
- list_del_init(&bo->io_reserve_lru);
- ttm_mem_io_free(bo->bdev, mem);
- }
}
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void **virtual)
{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
int ret;
void *addr;
*virtual = NULL;
- (void) ttm_mem_io_lock(man, false);
ret = ttm_mem_io_reserve(bdev, mem);
- ttm_mem_io_unlock(man);
if (ret || !mem->bus.is_iomem)
return ret;
@@ -220,9 +126,7 @@ static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *m
else
addr = ioremap_nocache(mem->bus.base + mem->bus.offset,
mem->bus.size);
if (!addr) {
- (void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bdev, mem);
- ttm_mem_io_unlock(man);
return -ENOMEM;
}
}
@@ -239,9 +143,7 @@ static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *
if (virtual && mem->bus.addr == NULL)
iounmap(virtual);
- (void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bdev, mem);
- ttm_mem_io_unlock(man);
}
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -616,8 +518,6 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
unsigned long start_page, unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
- struct ttm_mem_type_manager *man =
- &bo->bdev->man[bo->mem.mem_type];
unsigned long offset, size;
int ret;
@@ -628,9 +528,7 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
if (start_page > bo->num_pages)
return -EINVAL;
- (void) ttm_mem_io_lock(man, false);
ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
- ttm_mem_io_unlock(man);
if (ret)
return ret;
if (!bo->mem.bus.is_iomem) {
@@ -645,10 +543,6 @@ EXPORT_SYMBOL(ttm_bo_kmap);
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
- struct ttm_buffer_object *bo = map->bo;
- struct ttm_mem_type_manager *man =
- &bo->bdev->man[bo->mem.mem_type];
-
if (!map->virtual)
return;
switch (map->bo_kmap_type) {
@@ -666,9 +560,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
default:
BUG();
}
- (void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
- ttm_mem_io_unlock(man);
map->virtual = NULL;
map->page = NULL;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index e6495ca2630b..508b28bcfb0e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -185,8 +185,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
pgoff_t i;
vm_fault_t ret = VM_FAULT_NOPAGE;
unsigned long address = vmf->address;
- struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
/*
* Refuse to fault imported pages. This should be handled
@@ -225,24 +223,17 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
if (unlikely(ret != 0))
return ret;
- err = ttm_mem_io_lock(man, true);
+ err = ttm_mem_io_reserve(bdev, &bo->mem);
if (unlikely(err != 0))
- return VM_FAULT_NOPAGE;
- err = ttm_mem_io_reserve_vm(bo);
- if (unlikely(err != 0)) {
- ret = VM_FAULT_SIGBUS;
- goto out_io_unlock;
- }
+ return VM_FAULT_SIGBUS;
page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
page_last = vma_pages(vma) + vma->vm_pgoff -
drm_vma_node_start(&bo->base.vma_node);
- if (unlikely(page_offset >= bo->num_pages)) {
- ret = VM_FAULT_SIGBUS;
- goto out_io_unlock;
- }
+ if (unlikely(page_offset >= bo->num_pages))
+ return VM_FAULT_SIGBUS;
cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, prot);
if (!bo->mem.bus.is_iomem) {
@@ -254,10 +245,8 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
};
ttm = bo->ttm;
- if (ttm_tt_populate(bo->ttm, &ctx)) {
- ret = VM_FAULT_OOM;
- goto out_io_unlock;
- }
+ if (ttm_tt_populate(bo->ttm, &ctx))
+ return VM_FAULT_OOM;
} else {
/* Iomem should not be marked encrypted */
cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
@@ -273,8 +262,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
} else {
page = ttm->pages[page_offset];
if (unlikely(!page && i == 0)) {
- ret = VM_FAULT_OOM;
- goto out_io_unlock;
+ return VM_FAULT_OOM;
} else if (unlikely(!page)) {
break;
}
@@ -292,7 +280,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
/* Never error on prefaulted PTEs */
if (unlikely((ret & VM_FAULT_ERROR))) {
if (i == 0)
- goto out_io_unlock;
+ return VM_FAULT_NOPAGE;
else
break;
}
@@ -301,10 +289,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
if (unlikely(++page_offset >= page_last))
break;
}
- ret = VM_FAULT_NOPAGE;
-out_io_unlock:
- ttm_mem_io_unlock(man);
- return ret;
+ return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 66ca49db9633..3c082d3d6959 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -62,8 +62,6 @@ struct ttm_lru_bulk_move;
* @is_iomem: is this io memory ?
* @size: size in byte
* @offset: offset from the base address
- * @io_reserved_vm: The VM system has a refcount in @io_reserved_count
- * @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve
*
* Structure indicating the bus placement of an object.
*/
@@ -73,11 +71,8 @@ struct ttm_bus_placement {
unsigned long size;
unsigned long offset;
bool is_iomem;
- bool io_reserved_vm;
- uint64_t io_reserved_count;
};
-
/**
* struct ttm_mem_reg
*
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index cac7a8a0825a..e294ebb749db 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -624,11 +624,6 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
*/
void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
-int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
-void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
-int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible);
-void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
-
/**
* __ttm_bo_reserve:
*
--
2.20.1