Displaying 6 results from an estimated 6 matches for "req_obj".
2025 Feb 03
1
[PATCH 0/4] drm/gpuvm: Add support for single-page-filled mappings
...;
+        op.map.gem.range = gem_range;
         return fn->sm_step_map(&op, priv);
 }
@@ -2102,7 +2103,8 @@ static int
 __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
                    const struct drm_gpuvm_ops *ops, void *priv,
                    u64 req_addr, u64 req_range,
-                   struct drm_gem_object *req_obj, u64 req_offset)
+                   struct drm_gem_object *req_obj,
+                   u64 req_offset, u64 req_gem_range)
 {
         struct drm_gpuva *va, *next;
         u64 req_end = req_addr + req_range;
@@ -2237,7 +2239,7 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
         return op_map_cb(ops, priv,...
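The hunk above threads a new req_gem_range parameter through __drm_gpuvm_sm_map() and stores it in op.map.gem.range. Below is a minimal, hedged sketch of how a driver's map step might consume that field: op->map.va.* and op->map.gem.obj/.gem.offset are existing drm_gpuvm fields, op->map.gem.range is what this series adds, and my_vm/my_write_pte are hypothetical driver helpers, not anything from the patches.

#include <drm/drm_gpuvm.h>

struct my_vm;                                    /* hypothetical driver state */
static void my_write_pte(struct my_vm *vm, u64 va,
                         struct drm_gem_object *obj, u64 offset);

static int my_sm_step_map(struct drm_gpuva_op *op, void *priv)
{
        struct my_vm *vm = priv;
        u64 va_range = op->map.va.range;
        u64 bo_range = op->map.gem.range;        /* field added by this series */
        u64 off;

        /* Assumption based on the cover letter: when the backing range
         * is smaller than the VA range, the backing repeats, so the GEM
         * offset wraps modulo bo_range. */
        for (off = 0; off < va_range; off += PAGE_SIZE)
                my_write_pte(vm, op->map.va.addr + off,
                             op->map.gem.obj,
                             op->map.gem.offset + (off % bo_range));

        return 0;
}

With a single dummy page, bo_range is PAGE_SIZE, so every PTE in the range ends up pointing at the same physical page.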
2025 Feb 02
1
[PATCH 0/4] drm/gpuvm: Add support for single-page-filled mappings
Hi Lina,
On Sun, Feb 02, 2025 at 10:34:49PM +0900, Asahi Lina wrote:
> Some hardware requires dummy page mappings to efficiently implement
> Vulkan sparse features. These mappings consist of the same physical
> memory page, repeated for a large range of address space (e.g. 16GiB).
>
> Add support for this to drm_gpuvm. Currently, drm_gpuvm expects BO
> ranges to correspond 1:1...
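As a concrete illustration of what the cover letter describes, here is a hedged caller-side sketch: one dummy page repeated across a 16 GiB sparse region. The trailing GEM-range argument to drm_gpuvm_sm_map() is inferred from the __drm_gpuvm_sm_map() hunks quoted in these results, not a confirmed public signature; my_map_sparse_region(), dummy_page_bo, and the chosen base address are hypothetical.

#include <drm/drm_gpuvm.h>

static int my_map_sparse_region(struct drm_gpuvm *gpuvm, void *priv,
                                struct drm_gem_object *dummy_page_bo)
{
        u64 sparse_base = 0x8000000000ULL;  /* driver-chosen VA            */
        u64 va_range    = 16ULL << 30;      /* 16 GiB of address space     */
        u64 gem_range   = PAGE_SIZE;        /* backing: one page, repeated */

        /* Assumed extension of drm_gpuvm_sm_map() per the quoted hunks:
         * the extra trailing argument is the GEM backing range. */
        return drm_gpuvm_sm_map(gpuvm, priv, sparse_base, va_range,
                                dummy_page_bo, 0 /* offset */, gem_range);
}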
2025 Feb 03
1
[PATCH 0/4] drm/gpuvm: Add support for single-page-filled mappings
...;
>         return fn->sm_step_map(&op, priv);
> }
> @@ -2102,7 +2103,8 @@ static int
>  __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
>                     const struct drm_gpuvm_ops *ops, void *priv,
>                     u64 req_addr, u64 req_range,
> -                   struct drm_gem_object *req_obj, u64 req_offset)
> +                   struct drm_gem_object *req_obj,
> +                   u64 req_offset, u64 req_gem_range)
>  {
>         struct drm_gpuva *va, *next;
>         u64 req_end = req_addr + req_range;
> @@ -2237,7 +2239,7 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> 
>...
2023 Jun 29
3
[PATCH drm-next v6 02/13] drm: manager to keep track of GPUs VA mappings
...DRM_GPUVA_OP_UNMAP;
+        op.unmap.va = va;
+        op.unmap.keep = merge;
+
+        return fn->sm_step_unmap(&op, priv);
+}
+
+static int
+__drm_gpuva_sm_map(struct drm_gpuva_manager *mgr,
+                   const struct drm_gpuva_fn_ops *ops, void *priv,
+                   u64 req_addr, u64 req_range,
+                   struct drm_gem_object *req_obj, u64 req_offset)
+{
+        struct drm_gpuva *va, *next, *prev = NULL;
+        u64 req_end = req_addr + req_range;
+        int ret;
+
+        if (unlikely(!drm_gpuva_range_valid(mgr, req_addr, req_range)))
+                return -EINVAL;
+
+        drm_gpuva_for_each_va_range_safe(va, next, mgr, req_addr, req_end) {
+                struct drm_gem_object...
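The walk above does not touch page tables itself; it synthesizes map/remap/unmap operations and hands each one to the driver. A minimal, hedged sketch of the driver side follows, with callback and struct names taken from the quoted code and placeholder bodies; the header name and my_* identifiers are assumptions, not the series' actual files.

#include <drm/drm_gpuva_mgr.h>  /* header added by this series (assumed name) */

static int my_step_map(struct drm_gpuva_op *op, void *priv)
{
        /* program PTEs for op->map.va.addr .. +op->map.va.range */
        return 0;
}

static int my_step_remap(struct drm_gpuva_op *op, void *priv)
{
        /* split an existing mapping via op->remap.prev/.next/.unmap */
        return 0;
}

static int my_step_unmap(struct drm_gpuva_op *op, void *priv)
{
        /* tear down op->unmap.va; per the snippet above, op->unmap.keep
         * is set when the PTEs may survive a merge */
        return 0;
}

static const struct drm_gpuva_fn_ops my_gpuva_ops = {
        .sm_step_map   = my_step_map,
        .sm_step_remap = my_step_remap,
        .sm_step_unmap = my_step_unmap,
};

A driver would then invoke the split/merge walk with these ops so that each map, remap, or unmap step calls back into the stubs above.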
2023 Jul 13
1
[PATCH drm-next v7 02/13] drm: manager to keep track of GPUs VA mappings
...DRM_GPUVA_OP_UNMAP;
+        op.unmap.va = va;
+        op.unmap.keep = merge;
+
+        return fn->sm_step_unmap(&op, priv);
+}
+
+static int
+__drm_gpuva_sm_map(struct drm_gpuva_manager *mgr,
+                   const struct drm_gpuva_fn_ops *ops, void *priv,
+                   u64 req_addr, u64 req_range,
+                   struct drm_gem_object *req_obj, u64 req_offset)
+{
+        struct drm_gpuva *va, *next, *prev = NULL;
+        u64 req_end = req_addr + req_range;
+        int ret;
+
+        if (unlikely(!drm_gpuva_range_valid(mgr, req_addr, req_range)))
+                return -EINVAL;
+
+        drm_gpuva_for_each_va_range_safe(va, next, mgr, req_addr, req_end) {
+                struct drm_gem_object...
2023 Jul 20
2
[PATCH drm-misc-next v8 01/12] drm: manager to keep track of GPUs VA mappings
...DRM_GPUVA_OP_UNMAP;
+        op.unmap.va = va;
+        op.unmap.keep = merge;
+
+        return fn->sm_step_unmap(&op, priv);
+}
+
+static int
+__drm_gpuva_sm_map(struct drm_gpuva_manager *mgr,
+                   const struct drm_gpuva_fn_ops *ops, void *priv,
+                   u64 req_addr, u64 req_range,
+                   struct drm_gem_object *req_obj, u64 req_offset)
+{
+        struct drm_gpuva *va, *next, *prev = NULL;
+        u64 req_end = req_addr + req_range;
+        int ret;
+
+        if (unlikely(!drm_gpuva_range_valid(mgr, req_addr, req_range)))
+                return -EINVAL;
+
+        drm_gpuva_for_each_va_range_safe(va, next, mgr, req_addr, req_end) {
+                struct drm_gem_object...