author	Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>	2025-08-21 23:00:48 +0530
committer	Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>	2025-08-26 11:25:35 +0530
commit	29c39c56a08b238d1a069cd0e16adfbef30c4968 (patch)
tree	66272df47e90e3b61b86c443f4f45ba56d9bca84 /drivers/gpu/drm
parent	drm/xe/vma: Move pat_index to vma attributes (diff)
drm/xe/vma: Modify new_vma to accept struct xe_vma_mem_attr as parameter
This change simplifies the logic by ensuring that remapped previous or
next VMAs are created with the same memory attributes as the original
VMA. By passing struct xe_vma_mem_attr as a parameter, we maintain
consistency in memory attributes.

-v2
 *dst = *src (Matthew Brost)

-v3 (Matthew Brost)
 Drop unnecessary helper
 pass attr ptr as input to new_vma and vma_create

Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20250821173104.3030148-5-himal.prasad.ghimiray@intel.com
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
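The v2 note ("*dst = *src") refers to plain C struct assignment: assigning
the whole xe_vma_mem_attr struct copies every member, nested structs
included, whereas copying only pat_index would silently drop the remaining
attributes. Below is a minimal standalone sketch of that semantics, using
simplified stand-in types rather than the real driver definitions:

#include <stdio.h>

/* Simplified stand-ins; the real xe_vma_mem_attr lives in the xe driver
 * headers and may have more fields. Field names follow the diff below. */
struct xe_vma_mem_attr {
	struct {
		int devmem_fd;
		int migration_policy;
	} preferred_loc;
	int atomic_access;
	unsigned short pat_index;
};

int main(void)
{
	struct xe_vma_mem_attr old = {
		.preferred_loc = { .devmem_fd = -1, .migration_policy = 0 },
		.atomic_access = 0,
		.pat_index = 7,
	};
	struct xe_vma_mem_attr remapped;

	/* One assignment copies all members, like "vma->attr = *attr" in
	 * xe_vma_create(); no per-field copying is needed. */
	remapped = old;

	printf("pat_index=%u devmem_fd=%d\n",
	       remapped.pat_index, remapped.preferred_loc.devmem_fd);
	return 0;
}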
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--	drivers/gpu/drm/xe/xe_vm.c | 24 +++++++++++++++++-------
1 file changed, 17 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index bb831629d149..12b478e7ef01 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1168,7 +1168,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
 				    struct xe_bo *bo,
 				    u64 bo_offset_or_userptr,
 				    u64 start, u64 end,
-				    u16 pat_index, unsigned int flags)
+				    struct xe_vma_mem_attr *attr,
+				    unsigned int flags)
 {
 	struct xe_vma *vma;
 	struct xe_tile *tile;
@@ -1223,7 +1224,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
 	if (vm->xe->info.has_atomic_enable_pte_bit)
 		vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
 
-	vma->attr.pat_index = pat_index;
+	vma->attr = *attr;
 
 	if (bo) {
 		struct drm_gpuvm_bo *vm_bo;
@@ -2475,7 +2476,7 @@ unwind_prefetch_ops:
 ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_create, ERRNO);
 
 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
-			      u16 pat_index, unsigned int flags)
+			      struct xe_vma_mem_attr *attr, unsigned int flags)
 {
 	struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
 	struct drm_exec exec;
@@ -2504,7 +2505,7 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
 	}
 	vma = xe_vma_create(vm, bo, op->gem.offset,
 			    op->va.addr, op->va.addr +
-			    op->va.range - 1, pat_index, flags);
+			    op->va.range - 1, attr, flags);
 	if (IS_ERR(vma))
 		goto err_unlock;
 
@@ -2647,6 +2648,15 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
 		switch (op->base.op) {
 		case DRM_GPUVA_OP_MAP:
 		{
+			struct xe_vma_mem_attr default_attr = {
+				.preferred_loc = {
+					.devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
+					.migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
+				},
+				.atomic_access = DRM_XE_ATOMIC_UNDEFINED,
+				.pat_index = op->map.pat_index,
+			};
+
 			flags |= op->map.read_only ?
 				VMA_CREATE_FLAG_READ_ONLY : 0;
 			flags |= op->map.is_null ?
@@ -2656,7 +2666,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
 			flags |= op->map.is_cpu_addr_mirror ?
 				VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
 
-			vma = new_vma(vm, &op->base.map, op->map.pat_index,
+			vma = new_vma(vm, &op->base.map, &default_attr,
 				      flags);
 			if (IS_ERR(vma))
 				return PTR_ERR(vma);
@@ -2704,7 +2714,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
 
 			if (op->base.remap.prev) {
 				vma = new_vma(vm, op->base.remap.prev,
-					      old->attr.pat_index, flags);
+					      &old->attr, flags);
 				if (IS_ERR(vma))
 					return PTR_ERR(vma);
 
@@ -2734,7 +2744,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
 
 			if (op->base.remap.next) {
 				vma = new_vma(vm, op->base.remap.next,
-					      old->attr.pat_index, flags);
+					      &old->attr, flags);
 				if (IS_ERR(vma))
 					return PTR_ERR(vma);
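End to end, new_vma() now has a single attribute source: a fresh
DRM_GPUVA_OP_MAP builds default_attr around the ioctl-supplied pat_index,
while the prev/next fragments of a DRM_GPUVA_OP_REMAP inherit the original
VMA's attributes wholesale. A hedged standalone sketch of that flow follows;
the types, constant values, and new_vma_sketch() helper are illustrative
stand-ins, not the driver's real definitions:

#include <stdio.h>

/* Placeholder values; the real constants come from the drm_xe uAPI. */
#define DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE	(-1)
#define DRM_XE_MIGRATE_ALL_PAGES		0
#define DRM_XE_ATOMIC_UNDEFINED			0

struct xe_vma_mem_attr {
	struct {
		int devmem_fd;
		int migration_policy;
	} preferred_loc;
	int atomic_access;
	unsigned short pat_index;
};

/* Hypothetical stand-in for new_vma(): the point is that both call paths
 * hand over a complete attribute struct, not a lone pat_index. */
static void new_vma_sketch(const char *path, const struct xe_vma_mem_attr *attr)
{
	printf("%s: pat_index=%u devmem_fd=%d atomic=%d\n", path,
	       attr->pat_index, attr->preferred_loc.devmem_fd,
	       attr->atomic_access);
}

int main(void)
{
	/* DRM_GPUVA_OP_MAP path: defaults built around the ioctl's pat_index. */
	struct xe_vma_mem_attr default_attr = {
		.preferred_loc = {
			.devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
			.migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
		},
		.atomic_access = DRM_XE_ATOMIC_UNDEFINED,
		.pat_index = 3,	/* would be op->map.pat_index */
	};

	/* DRM_GPUVA_OP_REMAP path: prev/next fragments reuse the old VMA's
	 * attributes; old_attr stands in for old->attr. */
	struct xe_vma_mem_attr old_attr = default_attr;

	new_vma_sketch("map", &default_attr);
	new_vma_sketch("remap-prev/next", &old_attr);
	return 0;
}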