author    Rob Clark <robdclark@chromium.org>  2025-06-29 13:13:05 -0700
committer Rob Clark <robin.clark@oss.qualcomm.com>  2025-07-04 17:48:36 -0700
commit    feb8ef4636a457a1fd916a3ae575f552935e69b9
tree      e8a5ad9c03613d716cc68aba1d6bad6ef2db316a
parent    drm/msm: Lazily create context VM
drm/msm: Add opt-in for VM_BIND
Add a SET_PARAM for userspace to request to manage the VM itself,
instead of getting a kernel managed VM.

In order to transition to a userspace managed VM, this param must be
set before any mappings are created.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com>
Tested-by: Antonino Maniscalco <antomani103@gmail.com>
Reviewed-by: Antonino Maniscalco <antomani103@gmail.com>
Patchwork: https://patchwork.freedesktop.org/patch/661494/
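[Editor's note: as a userspace-side illustration, not part of this patch, the opt-in is a single SET_PARAM call that must be issued before anything instantiates the context VM, i.e. right after opening the device. This is a hedged sketch: MSM_PARAM_EN_VM_BIND is defined in the uapi header (include/uapi/drm/msm_drm.h), which falls outside the drivers/gpu path this page is limited to, and the helper name is hypothetical.

	#include <xf86drm.h>
	#include <drm/msm_drm.h>

	/* Hypothetical helper: request a userspace-managed VM (VM_BIND). */
	static int msm_enable_vm_bind(int fd)
	{
		struct drm_msm_param req = {
			.pipe  = MSM_PIPE_3D0,
			.param = MSM_PARAM_EN_VM_BIND,
			.value = 1,
		};

		/* Must run before the context VM is (lazily) created. */
		return drmIoctl(fd, DRM_IOCTL_MSM_SET_PARAM, &req);
	}

If the target lacks per-process pgtables, the call fails with EINVAL; if the context VM already exists, it fails with EBUSY (see the adreno_set_param() checks below).]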
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c    |  4
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c  | 15
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c            | 22
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c            |  8
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c            |  5
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h            | 29
6 files changed, 75 insertions, 8 deletions
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 7364b7e9c266..62b5f294a2aa 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -2276,7 +2276,7 @@ a6xx_create_vm(struct msm_gpu *gpu, struct platform_device *pdev)
 }
 
 static struct drm_gpuvm *
-a6xx_create_private_vm(struct msm_gpu *gpu)
+a6xx_create_private_vm(struct msm_gpu *gpu, bool kernel_managed)
 {
 	struct msm_mmu *mmu;
 
@@ -2286,7 +2286,7 @@ a6xx_create_private_vm(struct msm_gpu *gpu)
 		return ERR_CAST(mmu);
 
 	return msm_gem_vm_create(gpu->dev, mmu, "gpu", ADRENO_VM_START,
-				 adreno_private_vm_size(gpu), true);
+				 adreno_private_vm_size(gpu), kernel_managed);
 }
 
 static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 2baf381ea401..ff25e3dada04 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -504,6 +504,21 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_context *ctx,
 		if (!capable(CAP_SYS_ADMIN))
 			return UERR(EPERM, drm, "invalid permissions");
 		return msm_context_set_sysprof(ctx, gpu, value);
+	case MSM_PARAM_EN_VM_BIND:
+		/* We can only support VM_BIND with per-process pgtables: */
+		if (ctx->vm == gpu->vm)
+			return UERR(EINVAL, drm, "requires per-process pgtables");
+
+		/*
+		 * We can only switch to VM_BIND mode if the VM has not yet
+		 * been created:
+		 */
+		if (ctx->vm)
+			return UERR(EBUSY, drm, "VM already created");
+
+		ctx->userspace_managed_vm = value;
+
+		return 0;
 	default:
 		return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param);
 	}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index cf0805ed4c75..d75753745f12 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -232,9 +232,21 @@ static void load_gpu(struct drm_device *dev)
  */
 struct drm_gpuvm *msm_context_vm(struct drm_device *dev, struct msm_context *ctx)
 {
+	static DEFINE_MUTEX(init_lock);
 	struct msm_drm_private *priv = dev->dev_private;
 
-	if (!ctx->vm)
-		ctx->vm = msm_gpu_create_private_vm(priv->gpu, current);
+	/* Once ctx->vm is created it is valid for the lifetime of the context: */
+	if (ctx->vm)
+		return ctx->vm;
+
+	mutex_lock(&init_lock);
+	if (!ctx->vm) {
+		ctx->vm = msm_gpu_create_private_vm(
+				priv->gpu, current, !ctx->userspace_managed_vm);
+	}
+	mutex_unlock(&init_lock);
+
 	return ctx->vm;
 }
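[Editor's note: the construct above is double-checked lazy initialization. A minimal illustrative restatement, with hypothetical names, of why the NULL check is repeated under the lock:

	/* Illustrative only, not driver code; create_vm_somehow() is a placeholder. */
	static struct drm_gpuvm *lazy_vm_example(struct msm_context *ctx)
	{
		static DEFINE_MUTEX(init_lock);

		if (ctx->vm)		/* fast path once initialized */
			return ctx->vm;

		mutex_lock(&init_lock);
		if (!ctx->vm)		/* re-check: a racing thread may have
					 * created the VM while we waited */
			ctx->vm = create_vm_somehow(ctx);
		mutex_unlock(&init_lock);

		return ctx->vm;
	}

The unlocked fast-path read is safe because ctx->vm transitions from NULL to a valid pointer exactly once and, as the comment in the patch notes, stays valid for the lifetime of the context.]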
@@ -424,6 +436,9 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
 	if (!priv->gpu)
 		return -EINVAL;
 
+	if (msm_context_is_vmbind(ctx))
+		return UERR(EINVAL, dev, "VM_BIND is enabled");
+
 	if (should_fail(&fail_gem_iova, obj->size))
 		return -ENOMEM;
 
@@ -445,6 +460,9 @@ static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
 	if (!priv->gpu)
 		return -EINVAL;
 
+	if (msm_context_is_vmbind(ctx))
+		return UERR(EINVAL, dev, "VM_BIND is enabled");
+
 	/* Only supported if per-process address space is supported: */
 	if (priv->gpu->vm == vm)
 		return UERR(EOPNOTSUPP, dev, "requires per-process pgtables");
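[Editor's note: the userspace-visible consequence of the two rejections above, as a hedged sketch. struct drm_msm_gem_info, MSM_INFO_GET_IOVA and DRM_IOCTL_MSM_GEM_INFO are existing uapi; the helper name is hypothetical.

	#include <errno.h>
	#include <stdint.h>
	#include <xf86drm.h>
	#include <drm/msm_drm.h>

	/* Hypothetical helper: legacy iova query, rejected under VM_BIND. */
	static int msm_get_iova_legacy(int fd, uint32_t handle, uint64_t *iova)
	{
		struct drm_msm_gem_info req = {
			.handle = handle,
			.info   = MSM_INFO_GET_IOVA,
		};

		if (drmIoctl(fd, DRM_IOCTL_MSM_GEM_INFO, &req))
			return -errno;	/* -EINVAL once VM_BIND is enabled */

		*iova = req.value;
		return 0;
	}

A userspace driver that opts in via MSM_PARAM_EN_VM_BIND is expected to use the VM_BIND uabi for mappings instead.]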
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 186d160b74de..d16d3012434a 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -82,6 +82,14 @@ static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file)
 		return;
 
 	/*
+	 * VM_BIND does not depend on implicit teardown of VMAs on handle
+	 * close, but instead on implicit teardown of the VM when the device
+	 * is closed (see msm_gem_vm_close())
+	 */
+	if (msm_context_is_vmbind(ctx))
+		return;
+
+	/*
 	 * TODO we might need to kick this to a queue to avoid blocking
 	 * in CLOSE ioctl
 	 */
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index fc4d6c9049b0..c08c942d85a0 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -829,7 +829,8 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
 
 /* Return a new address space for a msm_drm_private instance */
 struct drm_gpuvm *
-msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task)
+msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task,
+			  bool kernel_managed)
 {
 	struct drm_gpuvm *vm = NULL;
 
@@ -841,7 +842,7 @@ msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task)
 	 * the global one
 	 */
 	if (gpu->funcs->create_private_vm) {
-		vm = gpu->funcs->create_private_vm(gpu);
+		vm = gpu->funcs->create_private_vm(gpu, kernel_managed);
 		if (!IS_ERR(vm))
 			to_msm_vm(vm)->pid = get_pid(task_pid(task));
 	}
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 29662742a7e1..b38a33a67ee9 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -79,7 +79,7 @@ struct msm_gpu_funcs {
 	void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp,
 			     bool suspended);
 	struct drm_gpuvm *(*create_vm)(struct msm_gpu *gpu, struct platform_device *pdev);
-	struct drm_gpuvm *(*create_private_vm)(struct msm_gpu *gpu);
+	struct drm_gpuvm *(*create_private_vm)(struct msm_gpu *gpu, bool kernel_managed);
 	uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
 
 	/**
@@ -365,6 +365,14 @@ struct msm_context {
 	bool closed;
 
 	/**
+	 * @userspace_managed_vm:
+	 *
+	 * Has userspace opted-in to userspace managed VM (ie. VM_BIND) via
+	 * MSM_PARAM_EN_VM_BIND?
+	 */
+	bool userspace_managed_vm;
+
+	/**
 	 * @vm:
 	 *
 	 * The per-process GPU address-space. Do not access directly, use
@@ -457,6 +465,22 @@ struct msm_context {
struct drm_gpuvm *msm_context_vm(struct drm_device *dev, struct msm_context *ctx);
 /**
+ * msm_context_is_vmbind() - has userspace opted in to VM_BIND?
+ *
+ * @ctx: the drm_file context
+ *
+ * See MSM_PARAM_EN_VM_BIND. If userspace is managing the VM, it can
+ * do sparse binding including having multiple, potentially partial,
+ * mappings in the VM. Therefore certain legacy uabi (ie. GET_IOVA,
+ * SET_IOVA) are rejected because they don't have a sensible meaning.
+ */
+static inline bool
+msm_context_is_vmbind(struct msm_context *ctx)
+{
+	return ctx->userspace_managed_vm;
+}
+
+/**
  * msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
  *
  * @gpu: the gpu instance
@@ -683,7 +707,8 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		const char *name, struct msm_gpu_config *config);
 
 struct drm_gpuvm *
-msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task);
+msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task,
+			  bool kernel_managed);
 
 void msm_gpu_cleanup(struct msm_gpu *gpu);