From ef283674a17e000bb6b2ff05dd2ac5cbf2e3ae0d Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 22 Apr 2024 11:58:57 +0300 Subject: drm/uapi: Move drm_color_ctm_3x4 out from drm_mode.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drm_color_ctm_3x4 is some undocumented amdgpu private uapi and thus has no business being in drm_mode.h. At least move it to some amdgpu-specific header, albeit with the wrong namespace as maybe something somewhere is using this already? Cc: Harry Wentland Cc: Joshua Ashton Cc: Alex Deucher Fixes: 6872a189be50 ("drm/amd/display: Add 3x4 CTM support for plane CTM") Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20240422085857.17651-1-ville.syrjala@linux.intel.com Reviewed-by: Harry Wentland --- include/uapi/drm/amdgpu_drm.h | 9 +++++++++ include/uapi/drm/drm_mode.h | 8 -------- 2 files changed, 9 insertions(+), 8 deletions(-) (limited to 'include/uapi') diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 96e32dafd4f0..d5ebafacdd70 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -1269,6 +1269,15 @@ struct drm_amdgpu_info_gpuvm_fault { #define AMDGPU_FAMILY_GC_10_3_7 151 /* GC 10.3.7 */ #define AMDGPU_FAMILY_GC_11_5_0 150 /* GC 11.5.0 */ +/* FIXME wrong namespace! */ +struct drm_color_ctm_3x4 { + /* + * Conversion matrix with 3x4 dimensions in S31.32 sign-magnitude + * (not two's complement!) format. + */ + __u64 matrix[12]; +}; + #if defined(__cplusplus) } #endif diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index 1ca5c7e418fd..d390011b89b4 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -846,14 +846,6 @@ struct drm_color_ctm { __u64 matrix[9]; }; -struct drm_color_ctm_3x4 { - /* - * Conversion matrix with 3x4 dimensions in S31.32 sign-magnitude - * (not two's complement!) format. - */ - __u64 matrix[12]; -}; - struct drm_color_lut { /* * Values are mapped linearly to 0.0 - 1.0 range, with 0x0 == 0.0 and -- cgit v1.2.3 From cdfad4db7756563db7d458216d9e3c2651dddc7d Mon Sep 17 00:00:00 2001 From: Tomasz Rusinowicz Date: Mon, 13 May 2024 14:04:27 +0200 Subject: accel/ivpu: Add NPU profiling support Implement time-based Metric Streamer profiling UAPI. This is a generic mechanism allowing user mode tools to sample NPU metrics. These metrics are defined by the FW and transparent to the driver. User space can check for this feature via the DRM_IVPU_CAP_METRIC_STREAMER driver capability.
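As a quick orientation for readers of this patch, here is a minimal user-space sketch (an editor's illustration, not part of the patch) of how a profiling tool might probe that capability and drive the metric streamer ioctls added below. It assumes the installed uapi header is reachable as <drm/ivpu_accel.h>, that the NPU node is /dev/accel/accel0, and that capabilities are queried through the DRM_IVPU_PARAM_CAPABILITIES parameter of DRM_IOCTL_IVPU_GET_PARAM (all three are assumptions, not confirmed by the patch); error handling is trimmed.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <drm/ivpu_accel.h>	/* assumed install path of the uapi header */

int main(void)
{
	struct drm_ivpu_metric_streamer_start start = {0};
	struct drm_ivpu_metric_streamer_stop stop = {0};
	struct drm_ivpu_param cap = {0};
	int fd = open("/dev/accel/accel0", O_RDWR | O_CLOEXEC);	/* assumed node name */

	if (fd < 0)
		return 1;

	/* Check the capability added by this patch before using the new ioctls. */
	cap.param = DRM_IVPU_PARAM_CAPABILITIES;	/* assumed query parameter */
	cap.index = DRM_IVPU_CAP_METRIC_STREAMER;
	if (ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &cap) || !cap.value) {
		close(fd);
		return 1;	/* metric streamer not supported */
	}

	/* Sample metric group 0 every 10 ms; the tool plans to read every ~100 samples. */
	start.metric_group_mask = 0x1;
	start.sampling_period_ns = 10000000;
	start.read_period_samples = 100;
	if (ioctl(fd, DRM_IOCTL_IVPU_METRIC_STREAMER_START, &start) == 0) {
		printf("sample_size=%u max_data_size=%u\n",
		       start.sample_size, start.max_data_size);
		/* ...periodically issue DRM_IOCTL_IVPU_METRIC_STREAMER_GET_DATA here... */
		stop.metric_group_mask = start.metric_group_mask;
		ioctl(fd, DRM_IOCTL_IVPU_METRIC_STREAMER_STOP, &stop);
	}

	close(fd);
	return 0;
}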
Signed-off-by: Tomasz Rusinowicz Signed-off-by: Jacek Lawrynowicz Link: https://patchwork.freedesktop.org/patch/msgid/20240513120431.3187212-9-jacek.lawrynowicz@linux.intel.com --- drivers/accel/ivpu/Makefile | 3 +- drivers/accel/ivpu/ivpu_drv.c | 14 +- drivers/accel/ivpu/ivpu_drv.h | 3 + drivers/accel/ivpu/ivpu_jsm_msg.c | 98 ++++++++++++ drivers/accel/ivpu/ivpu_jsm_msg.h | 8 +- drivers/accel/ivpu/ivpu_ms.c | 309 ++++++++++++++++++++++++++++++++++++++ drivers/accel/ivpu/ivpu_ms.h | 36 +++++ drivers/accel/ivpu/ivpu_pm.c | 4 + include/uapi/drm/ivpu_accel.h | 69 ++++++++- 9 files changed, 540 insertions(+), 4 deletions(-) create mode 100644 drivers/accel/ivpu/ivpu_ms.c create mode 100644 drivers/accel/ivpu/ivpu_ms.h (limited to 'include/uapi') diff --git a/drivers/accel/ivpu/Makefile b/drivers/accel/ivpu/Makefile index 95ff7ad16338..1c67a73cfefe 100644 --- a/drivers/accel/ivpu/Makefile +++ b/drivers/accel/ivpu/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only -# Copyright (C) 2023 Intel Corporation +# Copyright (C) 2023-2024 Intel Corporation intel_vpu-y := \ ivpu_drv.o \ @@ -13,6 +13,7 @@ intel_vpu-y := \ ivpu_jsm_msg.o \ ivpu_mmu.o \ ivpu_mmu_context.o \ + ivpu_ms.o \ ivpu_pm.o intel_vpu-$(CONFIG_DEBUG_FS) += ivpu_debugfs.o diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c index ca4fcef7edf5..a02a1929f5a1 100644 --- a/drivers/accel/ivpu/ivpu_drv.c +++ b/drivers/accel/ivpu/ivpu_drv.c @@ -26,6 +26,7 @@ #include "ivpu_jsm_msg.h" #include "ivpu_mmu.h" #include "ivpu_mmu_context.h" +#include "ivpu_ms.h" #include "ivpu_pm.h" #ifndef DRIVER_VERSION_STR @@ -100,6 +101,7 @@ static void file_priv_release(struct kref *ref) mutex_unlock(&vdev->context_list_lock); pm_runtime_put_autosuspend(vdev->drm.dev); + mutex_destroy(&file_priv->ms_lock); mutex_destroy(&file_priv->lock); kfree(file_priv); } @@ -122,7 +124,7 @@ static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param { switch (args->index) { case DRM_IVPU_CAP_METRIC_STREAMER: - args->value = 0; + args->value = 1; break; case DRM_IVPU_CAP_DMA_MEMORY_RANGE: args->value = 1; @@ -231,10 +233,13 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file) goto err_dev_exit; } + INIT_LIST_HEAD(&file_priv->ms_instance_list); + file_priv->vdev = vdev; file_priv->bound = true; kref_init(&file_priv->ref); mutex_init(&file_priv->lock); + mutex_init(&file_priv->ms_lock); mutex_lock(&vdev->context_list_lock); @@ -263,6 +268,7 @@ err_xa_erase: xa_erase_irq(&vdev->context_xa, ctx_id); err_unlock: mutex_unlock(&vdev->context_list_lock); + mutex_destroy(&file_priv->ms_lock); mutex_destroy(&file_priv->lock); kfree(file_priv); err_dev_exit: @@ -278,6 +284,7 @@ static void ivpu_postclose(struct drm_device *dev, struct drm_file *file) ivpu_dbg(vdev, FILE, "file_priv close: ctx %u process %s pid %d\n", file_priv->ctx.id, current->comm, task_pid_nr(current)); + ivpu_ms_cleanup(file_priv); ivpu_file_priv_put(&file_priv); } @@ -288,6 +295,10 @@ static const struct drm_ioctl_desc ivpu_drm_ioctls[] = { DRM_IOCTL_DEF_DRV(IVPU_BO_INFO, ivpu_bo_info_ioctl, 0), DRM_IOCTL_DEF_DRV(IVPU_SUBMIT, ivpu_submit_ioctl, 0), DRM_IOCTL_DEF_DRV(IVPU_BO_WAIT, ivpu_bo_wait_ioctl, 0), + DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_START, ivpu_ms_start_ioctl, 0), + DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_DATA, ivpu_ms_get_data_ioctl, 0), + DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_STOP, ivpu_ms_stop_ioctl, 0), + DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_INFO, ivpu_ms_get_info_ioctl, 0), }; static int ivpu_wait_for_ready(struct 
ivpu_device *vdev) @@ -638,6 +649,7 @@ static void ivpu_dev_fini(struct ivpu_device *vdev) ivpu_prepare_for_reset(vdev); ivpu_shutdown(vdev); + ivpu_ms_cleanup_all(vdev); ivpu_jobs_abort_all(vdev); ivpu_job_done_consumer_fini(vdev); ivpu_pm_cancel_recovery(vdev); diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h index 9e9d85ad78ea..55341762b9d9 100644 --- a/drivers/accel/ivpu/ivpu_drv.h +++ b/drivers/accel/ivpu/ivpu_drv.h @@ -155,6 +155,9 @@ struct ivpu_file_priv { struct mutex lock; /* Protects cmdq */ struct ivpu_cmdq *cmdq[IVPU_NUM_CMDQS_PER_CTX]; struct ivpu_mmu_context ctx; + struct mutex ms_lock; /* Protects ms_instance_list, ms_info_bo */ + struct list_head ms_instance_list; + struct ivpu_bo *ms_info_bo; bool has_mmu_faults; bool bound; }; diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.c b/drivers/accel/ivpu/ivpu_jsm_msg.c index 4b260065ad72..e8dd73d947e4 100644 --- a/drivers/accel/ivpu/ivpu_jsm_msg.c +++ b/drivers/accel/ivpu/ivpu_jsm_msg.c @@ -440,3 +440,101 @@ int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev) return ret; } + +int ivpu_jsm_metric_streamer_start(struct ivpu_device *vdev, u64 metric_group_mask, + u64 sampling_rate, u64 buffer_addr, u64 buffer_size) +{ + struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_START }; + struct vpu_jsm_msg resp; + int ret; + + req.payload.metric_streamer_start.metric_group_mask = metric_group_mask; + req.payload.metric_streamer_start.sampling_rate = sampling_rate; + req.payload.metric_streamer_start.buffer_addr = buffer_addr; + req.payload.metric_streamer_start.buffer_size = buffer_size; + + ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_START_DONE, &resp, + VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm); + if (ret) { + ivpu_warn_ratelimited(vdev, "Failed to start metric streamer: ret %d\n", ret); + return ret; + } + + return ret; +} + +int ivpu_jsm_metric_streamer_stop(struct ivpu_device *vdev, u64 metric_group_mask) +{ + struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_STOP }; + struct vpu_jsm_msg resp; + int ret; + + req.payload.metric_streamer_stop.metric_group_mask = metric_group_mask; + + ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE, &resp, + VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm); + if (ret) + ivpu_warn_ratelimited(vdev, "Failed to stop metric streamer: ret %d\n", ret); + + return ret; +} + +int ivpu_jsm_metric_streamer_update(struct ivpu_device *vdev, u64 metric_group_mask, + u64 buffer_addr, u64 buffer_size, u64 *bytes_written) +{ + struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_UPDATE }; + struct vpu_jsm_msg resp; + int ret; + + req.payload.metric_streamer_update.metric_group_mask = metric_group_mask; + req.payload.metric_streamer_update.buffer_addr = buffer_addr; + req.payload.metric_streamer_update.buffer_size = buffer_size; + + ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE, &resp, + VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm); + if (ret) { + ivpu_warn_ratelimited(vdev, "Failed to update metric streamer: ret %d\n", ret); + return ret; + } + + if (buffer_size && resp.payload.metric_streamer_done.bytes_written > buffer_size) { + ivpu_warn_ratelimited(vdev, "MS buffer overflow: bytes_written %#llx > buffer_size %#llx\n", + resp.payload.metric_streamer_done.bytes_written, buffer_size); + return -EOVERFLOW; + } + + *bytes_written = resp.payload.metric_streamer_done.bytes_written; + + return ret; +} + +int ivpu_jsm_metric_streamer_info(struct ivpu_device *vdev, u64 
metric_group_mask, u64 buffer_addr, + u64 buffer_size, u32 *sample_size, u64 *info_size) +{ + struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_INFO }; + struct vpu_jsm_msg resp; + int ret; + + req.payload.metric_streamer_start.metric_group_mask = metric_group_mask; + req.payload.metric_streamer_start.buffer_addr = buffer_addr; + req.payload.metric_streamer_start.buffer_size = buffer_size; + + ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE, &resp, + VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm); + if (ret) { + ivpu_warn_ratelimited(vdev, "Failed to get metric streamer info: ret %d\n", ret); + return ret; + } + + if (!resp.payload.metric_streamer_done.sample_size) { + ivpu_warn_ratelimited(vdev, "Invalid sample size\n"); + return -EBADMSG; + } + + if (sample_size) + *sample_size = resp.payload.metric_streamer_done.sample_size; + if (info_size) + *info_size = resp.payload.metric_streamer_done.bytes_written; + + return ret; +} diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.h b/drivers/accel/ivpu/ivpu_jsm_msg.h index 357728295fe9..060363409fb3 100644 --- a/drivers/accel/ivpu/ivpu_jsm_msg.h +++ b/drivers/accel/ivpu/ivpu_jsm_msg.h @@ -34,5 +34,11 @@ int ivpu_jsm_hws_set_context_sched_properties(struct ivpu_device *vdev, u32 ctx_ int ivpu_jsm_hws_set_scheduling_log(struct ivpu_device *vdev, u32 engine_idx, u32 host_ssid, u64 vpu_log_buffer_va); int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev); - +int ivpu_jsm_metric_streamer_start(struct ivpu_device *vdev, u64 metric_group_mask, + u64 sampling_rate, u64 buffer_addr, u64 buffer_size); +int ivpu_jsm_metric_streamer_stop(struct ivpu_device *vdev, u64 metric_group_mask); +int ivpu_jsm_metric_streamer_update(struct ivpu_device *vdev, u64 metric_group_mask, + u64 buffer_addr, u64 buffer_size, u64 *bytes_written); +int ivpu_jsm_metric_streamer_info(struct ivpu_device *vdev, u64 metric_group_mask, u64 buffer_addr, + u64 buffer_size, u32 *sample_size, u64 *info_size); #endif diff --git a/drivers/accel/ivpu/ivpu_ms.c b/drivers/accel/ivpu/ivpu_ms.c new file mode 100644 index 000000000000..2f9d37f5c208 --- /dev/null +++ b/drivers/accel/ivpu/ivpu_ms.c @@ -0,0 +1,309 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* + * Copyright (C) 2020-2024 Intel Corporation + */ + +#include + +#include "ivpu_drv.h" +#include "ivpu_gem.h" +#include "ivpu_jsm_msg.h" +#include "ivpu_ms.h" +#include "ivpu_pm.h" + +#define MS_INFO_BUFFER_SIZE SZ_16K +#define MS_NUM_BUFFERS 2 +#define MS_READ_PERIOD_MULTIPLIER 2 +#define MS_MIN_SAMPLE_PERIOD_NS 1000000 + +static struct ivpu_ms_instance * +get_instance_by_mask(struct ivpu_file_priv *file_priv, u64 metric_mask) +{ + struct ivpu_ms_instance *ms; + + lockdep_assert_held(&file_priv->ms_lock); + + list_for_each_entry(ms, &file_priv->ms_instance_list, ms_instance_node) + if (ms->mask == metric_mask) + return ms; + + return NULL; +} + +int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *file) +{ + struct ivpu_file_priv *file_priv = file->driver_priv; + struct drm_ivpu_metric_streamer_start *args = data; + struct ivpu_device *vdev = file_priv->vdev; + struct ivpu_ms_instance *ms; + u64 single_buff_size; + u32 sample_size; + int ret; + + if (!args->metric_group_mask || !args->read_period_samples || + args->sampling_period_ns < MS_MIN_SAMPLE_PERIOD_NS) + return -EINVAL; + + mutex_lock(&file_priv->ms_lock); + + if (get_instance_by_mask(file_priv, args->metric_group_mask)) { + ivpu_err(vdev, "Instance already exists (mask %#llx)\n", args->metric_group_mask); 
+ ret = -EALREADY; + goto unlock; + } + + ms = kzalloc(sizeof(*ms), GFP_KERNEL); + if (!ms) { + ret = -ENOMEM; + goto unlock; + } + + ms->mask = args->metric_group_mask; + + ret = ivpu_jsm_metric_streamer_info(vdev, ms->mask, 0, 0, &sample_size, NULL); + if (ret) + goto err_free_ms; + + single_buff_size = sample_size * + ((u64)args->read_period_samples * MS_READ_PERIOD_MULTIPLIER); + ms->bo = ivpu_bo_create_global(vdev, PAGE_ALIGN(single_buff_size * MS_NUM_BUFFERS), + DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE); + if (!ms->bo) { + ivpu_err(vdev, "Failed to allocate MS buffer (size %llu)\n", single_buff_size); + ret = -ENOMEM; + goto err_free_ms; + } + + ms->buff_size = ivpu_bo_size(ms->bo) / MS_NUM_BUFFERS; + ms->active_buff_vpu_addr = ms->bo->vpu_addr; + ms->inactive_buff_vpu_addr = ms->bo->vpu_addr + ms->buff_size; + ms->active_buff_ptr = ivpu_bo_vaddr(ms->bo); + ms->inactive_buff_ptr = ivpu_bo_vaddr(ms->bo) + ms->buff_size; + + ret = ivpu_jsm_metric_streamer_start(vdev, ms->mask, args->sampling_period_ns, + ms->active_buff_vpu_addr, ms->buff_size); + if (ret) + goto err_free_bo; + + args->sample_size = sample_size; + args->max_data_size = ivpu_bo_size(ms->bo); + list_add_tail(&ms->ms_instance_node, &file_priv->ms_instance_list); + goto unlock; + +err_free_bo: + ivpu_bo_free(ms->bo); +err_free_ms: + kfree(ms); +unlock: + mutex_unlock(&file_priv->ms_lock); + return ret; +} + +static int +copy_leftover_bytes(struct ivpu_ms_instance *ms, + void __user *user_ptr, u64 user_size, u64 *user_bytes_copied) +{ + u64 copy_bytes; + + if (ms->leftover_bytes) { + copy_bytes = min(user_size - *user_bytes_copied, ms->leftover_bytes); + if (copy_to_user(user_ptr + *user_bytes_copied, ms->leftover_addr, copy_bytes)) + return -EFAULT; + + ms->leftover_bytes -= copy_bytes; + ms->leftover_addr += copy_bytes; + *user_bytes_copied += copy_bytes; + } + + return 0; +} + +static int +copy_samples_to_user(struct ivpu_device *vdev, struct ivpu_ms_instance *ms, + void __user *user_ptr, u64 user_size, u64 *user_bytes_copied) +{ + u64 bytes_written; + int ret; + + *user_bytes_copied = 0; + + ret = copy_leftover_bytes(ms, user_ptr, user_size, user_bytes_copied); + if (ret) + return ret; + + if (*user_bytes_copied == user_size) + return 0; + + ret = ivpu_jsm_metric_streamer_update(vdev, ms->mask, ms->inactive_buff_vpu_addr, + ms->buff_size, &bytes_written); + if (ret) + return ret; + + swap(ms->active_buff_vpu_addr, ms->inactive_buff_vpu_addr); + swap(ms->active_buff_ptr, ms->inactive_buff_ptr); + + ms->leftover_bytes = bytes_written; + ms->leftover_addr = ms->inactive_buff_ptr; + + return copy_leftover_bytes(ms, user_ptr, user_size, user_bytes_copied); +} + +int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file *file) +{ + struct drm_ivpu_metric_streamer_get_data *args = data; + struct ivpu_file_priv *file_priv = file->driver_priv; + struct ivpu_device *vdev = file_priv->vdev; + struct ivpu_ms_instance *ms; + u64 bytes_written; + int ret; + + if (!args->metric_group_mask) + return -EINVAL; + + mutex_lock(&file_priv->ms_lock); + + ms = get_instance_by_mask(file_priv, args->metric_group_mask); + if (!ms) { + ivpu_err(vdev, "Instance doesn't exist for mask: %#llx\n", args->metric_group_mask); + ret = -EINVAL; + goto unlock; + } + + if (!args->buffer_size) { + ret = ivpu_jsm_metric_streamer_update(vdev, ms->mask, 0, 0, &bytes_written); + if (ret) + goto unlock; + args->data_size = bytes_written + ms->leftover_bytes; + goto unlock; + } + + if (!args->buffer_ptr) { + ret = -EINVAL; + goto unlock; + } 
+ + ret = copy_samples_to_user(vdev, ms, u64_to_user_ptr(args->buffer_ptr), + args->buffer_size, &args->data_size); +unlock: + mutex_unlock(&file_priv->ms_lock); + + return ret; +} + +static void free_instance(struct ivpu_file_priv *file_priv, struct ivpu_ms_instance *ms) +{ + lockdep_assert_held(&file_priv->ms_lock); + + list_del(&ms->ms_instance_node); + ivpu_jsm_metric_streamer_stop(file_priv->vdev, ms->mask); + ivpu_bo_free(ms->bo); + kfree(ms); +} + +int ivpu_ms_stop_ioctl(struct drm_device *dev, void *data, struct drm_file *file) +{ + struct ivpu_file_priv *file_priv = file->driver_priv; + struct drm_ivpu_metric_streamer_stop *args = data; + struct ivpu_ms_instance *ms; + + if (!args->metric_group_mask) + return -EINVAL; + + mutex_lock(&file_priv->ms_lock); + + ms = get_instance_by_mask(file_priv, args->metric_group_mask); + if (ms) + free_instance(file_priv, ms); + + mutex_unlock(&file_priv->ms_lock); + + return ms ? 0 : -EINVAL; +} + +static inline struct ivpu_bo *get_ms_info_bo(struct ivpu_file_priv *file_priv) +{ + lockdep_assert_held(&file_priv->ms_lock); + + if (file_priv->ms_info_bo) + return file_priv->ms_info_bo; + + file_priv->ms_info_bo = ivpu_bo_create_global(file_priv->vdev, MS_INFO_BUFFER_SIZE, + DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE); + return file_priv->ms_info_bo; +} + +int ivpu_ms_get_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file) +{ + struct drm_ivpu_metric_streamer_get_data *args = data; + struct ivpu_file_priv *file_priv = file->driver_priv; + struct ivpu_device *vdev = file_priv->vdev; + struct ivpu_bo *bo; + u64 info_size; + int ret; + + if (!args->metric_group_mask) + return -EINVAL; + + if (!args->buffer_size) + return ivpu_jsm_metric_streamer_info(vdev, args->metric_group_mask, + 0, 0, NULL, &args->data_size); + if (!args->buffer_ptr) + return -EINVAL; + + mutex_lock(&file_priv->ms_lock); + + bo = get_ms_info_bo(file_priv); + if (!bo) { + ret = -ENOMEM; + goto unlock; + } + + ret = ivpu_jsm_metric_streamer_info(vdev, args->metric_group_mask, bo->vpu_addr, + ivpu_bo_size(bo), NULL, &info_size); + if (ret) + goto unlock; + + if (args->buffer_size < info_size) { + ret = -ENOSPC; + goto unlock; + } + + if (copy_to_user(u64_to_user_ptr(args->buffer_ptr), ivpu_bo_vaddr(bo), info_size)) + ret = -EFAULT; + + args->data_size = info_size; +unlock: + mutex_unlock(&file_priv->ms_lock); + + return ret; +} + +void ivpu_ms_cleanup(struct ivpu_file_priv *file_priv) +{ + struct ivpu_ms_instance *ms, *tmp; + + mutex_lock(&file_priv->ms_lock); + + if (file_priv->ms_info_bo) { + ivpu_bo_free(file_priv->ms_info_bo); + file_priv->ms_info_bo = NULL; + } + + list_for_each_entry_safe(ms, tmp, &file_priv->ms_instance_list, ms_instance_node) + free_instance(file_priv, ms); + + mutex_unlock(&file_priv->ms_lock); +} + +void ivpu_ms_cleanup_all(struct ivpu_device *vdev) +{ + struct ivpu_file_priv *file_priv; + unsigned long ctx_id; + + mutex_lock(&vdev->context_list_lock); + + xa_for_each(&vdev->context_xa, ctx_id, file_priv) + ivpu_ms_cleanup(file_priv); + + mutex_unlock(&vdev->context_list_lock); +} diff --git a/drivers/accel/ivpu/ivpu_ms.h b/drivers/accel/ivpu/ivpu_ms.h new file mode 100644 index 000000000000..fbd5ebebc3d9 --- /dev/null +++ b/drivers/accel/ivpu/ivpu_ms.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ +/* + * Copyright (C) 2020-2024 Intel Corporation + */ +#ifndef __IVPU_MS_H__ +#define __IVPU_MS_H__ + +#include + +struct drm_device; +struct drm_file; +struct ivpu_bo; +struct ivpu_device; +struct ivpu_file_priv; + 
+struct ivpu_ms_instance { + struct ivpu_bo *bo; + struct list_head ms_instance_node; + u64 mask; + u64 buff_size; + u64 active_buff_vpu_addr; + u64 inactive_buff_vpu_addr; + void *active_buff_ptr; + void *inactive_buff_ptr; + u64 leftover_bytes; + void *leftover_addr; +}; + +int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *file); +int ivpu_ms_stop_ioctl(struct drm_device *dev, void *data, struct drm_file *file); +int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file *file); +int ivpu_ms_get_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file); +void ivpu_ms_cleanup(struct ivpu_file_priv *file_priv); +void ivpu_ms_cleanup_all(struct ivpu_device *vdev); + +#endif /* __IVPU_MS_H__ */ diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c index 4f5ea466731f..7b2aa205fdec 100644 --- a/drivers/accel/ivpu/ivpu_pm.c +++ b/drivers/accel/ivpu/ivpu_pm.c @@ -18,6 +18,7 @@ #include "ivpu_job.h" #include "ivpu_jsm_msg.h" #include "ivpu_mmu.h" +#include "ivpu_ms.h" #include "ivpu_pm.h" static bool ivpu_disable_recovery; @@ -131,6 +132,7 @@ static void ivpu_pm_recovery_work(struct work_struct *work) ivpu_suspend(vdev); ivpu_pm_prepare_cold_boot(vdev); ivpu_jobs_abort_all(vdev); + ivpu_ms_cleanup_all(vdev); ret = ivpu_resume(vdev); if (ret) @@ -333,6 +335,8 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev) ivpu_hw_reset(vdev); ivpu_pm_prepare_cold_boot(vdev); ivpu_jobs_abort_all(vdev); + ivpu_ms_cleanup_all(vdev); + ivpu_dbg(vdev, PM, "Pre-reset done.\n"); } diff --git a/include/uapi/drm/ivpu_accel.h b/include/uapi/drm/ivpu_accel.h index 19a13468eca5..084fb529e1e9 100644 --- a/include/uapi/drm/ivpu_accel.h +++ b/include/uapi/drm/ivpu_accel.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* - * Copyright (C) 2020-2023 Intel Corporation + * Copyright (C) 2020-2024 Intel Corporation */ #ifndef __UAPI_IVPU_DRM_H__ @@ -21,6 +21,10 @@ extern "C" { #define DRM_IVPU_BO_INFO 0x03 #define DRM_IVPU_SUBMIT 0x05 #define DRM_IVPU_BO_WAIT 0x06 +#define DRM_IVPU_METRIC_STREAMER_START 0x07 +#define DRM_IVPU_METRIC_STREAMER_STOP 0x08 +#define DRM_IVPU_METRIC_STREAMER_GET_DATA 0x09 +#define DRM_IVPU_METRIC_STREAMER_GET_INFO 0x0a #define DRM_IOCTL_IVPU_GET_PARAM \ DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_GET_PARAM, struct drm_ivpu_param) @@ -40,6 +44,22 @@ extern "C" { #define DRM_IOCTL_IVPU_BO_WAIT \ DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_WAIT, struct drm_ivpu_bo_wait) +#define DRM_IOCTL_IVPU_METRIC_STREAMER_START \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_METRIC_STREAMER_START, \ + struct drm_ivpu_metric_streamer_start) + +#define DRM_IOCTL_IVPU_METRIC_STREAMER_STOP \ + DRM_IOW(DRM_COMMAND_BASE + DRM_IVPU_METRIC_STREAMER_STOP, \ + struct drm_ivpu_metric_streamer_stop) + +#define DRM_IOCTL_IVPU_METRIC_STREAMER_GET_DATA \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_METRIC_STREAMER_GET_DATA, \ + struct drm_ivpu_metric_streamer_get_data) + +#define DRM_IOCTL_IVPU_METRIC_STREAMER_GET_INFO \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_METRIC_STREAMER_GET_INFO, \ + struct drm_ivpu_metric_streamer_get_data) + /** * DOC: contexts * @@ -336,6 +356,53 @@ struct drm_ivpu_bo_wait { __u32 pad; }; +/** + * struct drm_ivpu_metric_streamer_start - Start collecting metric data + */ +struct drm_ivpu_metric_streamer_start { + /** @metric_group_mask: Indicates metric streamer instance */ + __u64 metric_group_mask; + /** @sampling_period_ns: Sampling period in nanoseconds */ + __u64 sampling_period_ns; + /** + * 
@read_period_samples: + * + * Number of samples after which user space will try to read the data. + * Reading the data after significantly longer period may cause data loss. + */ + __u32 read_period_samples; + /** @sample_size: Returned size of a single sample in bytes */ + __u32 sample_size; + /** @max_data_size: Returned max @data_size from %DRM_IOCTL_IVPU_METRIC_STREAMER_GET_DATA */ + __u32 max_data_size; +}; + +/** + * struct drm_ivpu_metric_streamer_get_data - Copy collected metric data + */ +struct drm_ivpu_metric_streamer_get_data { + /** @metric_group_mask: Indicates metric streamer instance */ + __u64 metric_group_mask; + /** @buffer_ptr: A pointer to a destination for the copied data */ + __u64 buffer_ptr; + /** @buffer_size: Size of the destination buffer */ + __u64 buffer_size; + /** + * @data_size: Returned size of copied metric data + * + * If the @buffer_size is zero, returns the amount of data ready to be copied. + */ + __u64 data_size; +}; + +/** + * struct drm_ivpu_metric_streamer_stop - Stop collecting metric data + */ +struct drm_ivpu_metric_streamer_stop { + /** @metric_group_mask: Indicates metric streamer instance */ + __u64 metric_group_mask; +}; + #if defined(__cplusplus) } #endif -- cgit v1.2.3 From 36b75080e68b4a27ae1c40beffb3d6131f8eeeff Mon Sep 17 00:00:00 2001 From: Maíra Canal Date: Sun, 12 May 2024 19:23:26 -0300 Subject: drm/v3d: Create a new V3D parameter for the maximum number of perfcnt MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The maximum number of performance counters can change from version to version and it's important for userspace to know this value, as it needs to use the counters for performance queries. Therefore, expose the maximum number of performance counters to userspace as a parameter. Signed-off-by: Maíra Canal Reviewed-by: Iago Toral Quiroga Link: https://patchwork.freedesktop.org/patch/msgid/20240512222655.2792754-4-mcanal@igalia.com --- drivers/gpu/drm/v3d/v3d_drv.c | 3 +++ include/uapi/drm/v3d_drm.h | 1 + 2 files changed, 4 insertions(+) (limited to 'include/uapi') diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index 6b9dd26df9fe..d2c1d5053132 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -94,6 +94,9 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data, case DRM_V3D_PARAM_SUPPORTS_CPU_QUEUE: args->value = 1; return 0; + case DRM_V3D_PARAM_MAX_PERF_COUNTERS: + args->value = v3d->max_counters; + return 0; default: DRM_DEBUG("Unknown parameter %d\n", args->param); return -EINVAL; diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h index dce1835eced4..215b01bb69c3 100644 --- a/include/uapi/drm/v3d_drm.h +++ b/include/uapi/drm/v3d_drm.h @@ -286,6 +286,7 @@ enum drm_v3d_param { DRM_V3D_PARAM_SUPPORTS_PERFMON, DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT, DRM_V3D_PARAM_SUPPORTS_CPU_QUEUE, + DRM_V3D_PARAM_MAX_PERF_COUNTERS, }; struct drm_v3d_get_param { -- cgit v1.2.3 From f33fe58298e686e7cc2d24f747c980457812b566 Mon Sep 17 00:00:00 2001 From: Maíra Canal Date: Sun, 12 May 2024 19:23:27 -0300 Subject: drm/v3d: Create new IOCTL to expose performance counters information MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Userspace usually needs some information about the performance counters available. Although we could replicate this information in the kernel and user-space, let's use the kernel as the "single source of truth" to avoid issues in the future (e.g. 
list of performance counters is updated in user-space, but not in the kernel, generating invalid requests). Therefore, create a new IOCTL to expose the performance counters information, that is name, category, and description. Signed-off-by: Maíra Canal Reviewed-by: Iago Toral Quiroga Link: https://patchwork.freedesktop.org/patch/msgid/20240512222655.2792754-5-mcanal@igalia.com --- drivers/gpu/drm/v3d/v3d_drv.c | 1 + drivers/gpu/drm/v3d/v3d_drv.h | 2 ++ drivers/gpu/drm/v3d/v3d_perfmon.c | 33 +++++++++++++++++++++++++++++++++ include/uapi/drm/v3d_drm.h | 37 +++++++++++++++++++++++++++++++++++++ 4 files changed, 73 insertions(+) (limited to 'include/uapi') diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index d2c1d5053132..f7477488b1cc 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -211,6 +211,7 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = { DRM_IOCTL_DEF_DRV(V3D_PERFMON_DESTROY, v3d_perfmon_destroy_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(V3D_PERFMON_GET_VALUES, v3d_perfmon_get_values_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CPU, v3d_submit_cpu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH), + DRM_IOCTL_DEF_DRV(V3D_PERFMON_GET_COUNTER, v3d_perfmon_get_counter_ioctl, DRM_RENDER_ALLOW), }; static const struct drm_driver v3d_drm_driver = { diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index bd1e38f7d10a..44cfddedebde 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -582,6 +582,8 @@ int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int v3d_perfmon_get_values_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int v3d_perfmon_get_counter_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); /* v3d_sysfs.c */ int v3d_sysfs_init(struct device *dev); diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c index f268d9466c0f..73e2bb8bdb7f 100644 --- a/drivers/gpu/drm/v3d/v3d_perfmon.c +++ b/drivers/gpu/drm/v3d/v3d_perfmon.c @@ -217,3 +217,36 @@ int v3d_perfmon_get_values_ioctl(struct drm_device *dev, void *data, return ret; } + +int v3d_perfmon_get_counter_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_v3d_perfmon_get_counter *req = data; + struct v3d_dev *v3d = to_v3d_dev(dev); + const struct v3d_perf_counter_desc *counter; + + for (int i = 0; i < ARRAY_SIZE(req->reserved); i++) { + if (req->reserved[i] != 0) + return -EINVAL; + } + + /* Make sure that the counter ID is valid */ + if (req->counter >= v3d->max_counters) + return -EINVAL; + + if (v3d->ver >= 71) { + WARN_ON(v3d->max_counters != ARRAY_SIZE(v3d_v71_performance_counters)); + counter = &v3d_v71_performance_counters[req->counter]; + } else if (v3d->ver >= 42) { + WARN_ON(v3d->max_counters != ARRAY_SIZE(v3d_v42_performance_counters)); + counter = &v3d_v42_performance_counters[req->counter]; + } else { + return -EOPNOTSUPP; + } + + strscpy(req->name, counter->name, sizeof(req->name)); + strscpy(req->category, counter->category, sizeof(req->category)); + strscpy(req->description, counter->description, sizeof(req->description)); + + return 0; +} diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h index 215b01bb69c3..0860ddb3d0b6 100644 --- a/include/uapi/drm/v3d_drm.h +++ b/include/uapi/drm/v3d_drm.h @@ -42,6 +42,7 @@ extern "C" { #define DRM_V3D_PERFMON_DESTROY 0x09 #define DRM_V3D_PERFMON_GET_VALUES 0x0a #define DRM_V3D_SUBMIT_CPU 0x0b 
+#define DRM_V3D_PERFMON_GET_COUNTER 0x0c #define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl) #define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo) @@ -58,6 +59,8 @@ extern "C" { #define DRM_IOCTL_V3D_PERFMON_GET_VALUES DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_GET_VALUES, \ struct drm_v3d_perfmon_get_values) #define DRM_IOCTL_V3D_SUBMIT_CPU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CPU, struct drm_v3d_submit_cpu) +#define DRM_IOCTL_V3D_PERFMON_GET_COUNTER DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_GET_COUNTER, \ + struct drm_v3d_perfmon_get_counter) #define DRM_V3D_SUBMIT_CL_FLUSH_CACHE 0x01 #define DRM_V3D_SUBMIT_EXTENSION 0x02 @@ -718,6 +721,40 @@ struct drm_v3d_perfmon_get_values { __u64 values_ptr; }; +#define DRM_V3D_PERFCNT_MAX_NAME 64 +#define DRM_V3D_PERFCNT_MAX_CATEGORY 32 +#define DRM_V3D_PERFCNT_MAX_DESCRIPTION 256 + +/** + * struct drm_v3d_perfmon_get_counter - ioctl to get the description of a + * performance counter + * + * As userspace needs to retrieve information about the performance counters + * available, this IOCTL allows users to get information about a performance + * counter (name, category and description). + */ +struct drm_v3d_perfmon_get_counter { + /* + * Counter ID + * + * Must be smaller than the maximum number of performance counters, which + * can be retrieved through DRM_V3D_PARAM_MAX_PERF_COUNTERS. + */ + __u8 counter; + + /* Name of the counter */ + __u8 name[DRM_V3D_PERFCNT_MAX_NAME]; + + /* Category of the counter */ + __u8 category[DRM_V3D_PERFCNT_MAX_CATEGORY]; + + /* Description of the counter */ + __u8 description[DRM_V3D_PERFCNT_MAX_DESCRIPTION]; + + /* mbz */ + __u8 reserved[7]; +}; + #if defined(__cplusplus) } #endif -- cgit v1.2.3 From 673087d8b023faf34b84e8faf63bbeea3da87bab Mon Sep 17 00:00:00 2001 From: Maíra Canal Date: Sun, 12 May 2024 19:23:29 -0300 Subject: drm/v3d: Deprecate the use of the Performance Counters enum MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Performance Counters enum was used to identify the index of each performance counter and provide the total number of performance counters (V3D_PERFCNT_NUM). But, this enum is only valid for V3D 4.2, not for V3D 7.1. As we implemented a new flexible structure to retrieve performance counter information, we can deprecate this enum. Signed-off-by: Maíra Canal Reviewed-by: Iago Toral Quiroga Link: https://patchwork.freedesktop.org/patch/msgid/20240512222655.2792754-7-mcanal@igalia.com --- include/uapi/drm/v3d_drm.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include/uapi') diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h index 0860ddb3d0b6..87fc5bb0a61e 100644 --- a/include/uapi/drm/v3d_drm.h +++ b/include/uapi/drm/v3d_drm.h @@ -603,6 +603,16 @@ struct drm_v3d_submit_cpu { __u64 extensions; }; +/* The performance counter indices represented by this enum are deprecated and + * must no longer be used. These counters are only valid for V3D 4.2. + * + * In order to check for performance counter information, + * use DRM_IOCTL_V3D_PERFMON_GET_COUNTER. + * + * Don't use V3D_PERFCNT_NUM to retrieve the maximum number of performance + * counters. You should use DRM_IOCTL_V3D_GET_PARAM with the following + * parameter: DRM_V3D_PARAM_MAX_PERF_COUNTERS.
+ */ enum { V3D_PERFCNT_FEP_VALID_PRIMTS_NO_PIXELS, V3D_PERFCNT_FEP_VALID_PRIMS, -- cgit v1.2.3 From 310ec03841a36e3f45fb528f0dfdfe5b9e84b037 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Wed, 5 Jun 2024 13:26:05 +1200 Subject: dma-buf: align fd_flags and heap_flags with dma_heap_allocation_data dma_heap_allocation_data defines the UAPI as follows: struct dma_heap_allocation_data { __u64 len; __u32 fd; __u32 fd_flags; __u64 heap_flags; }; However, dma_heap_buffer_alloc() casts both fd_flags and heap_flags into unsigned int. We're inconsistent with types in the non UAPI arguments. This patch fixes it. Signed-off-by: Barry Song Acked-by: John Stultz Signed-off-by: Sumit Semwal Link: https://patchwork.freedesktop.org/patch/msgid/20240605012605.5341-1-21cnbao@gmail.com --- drivers/dma-buf/dma-heap.c | 4 ++-- include/uapi/linux/dma-heap.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include/uapi') diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c index 84ae708fafe7..2298ca5e112e 100644 --- a/drivers/dma-buf/dma-heap.c +++ b/drivers/dma-buf/dma-heap.c @@ -50,8 +50,8 @@ static struct class *dma_heap_class; static DEFINE_XARRAY_ALLOC(dma_heap_minors); static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len, - unsigned int fd_flags, - unsigned int heap_flags) + u32 fd_flags, + u64 heap_flags) { struct dma_buf *dmabuf; int fd; diff --git a/include/uapi/linux/dma-heap.h b/include/uapi/linux/dma-heap.h index 6f84fa08e074..a4cf716a49fa 100644 --- a/include/uapi/linux/dma-heap.h +++ b/include/uapi/linux/dma-heap.h @@ -19,7 +19,7 @@ #define DMA_HEAP_VALID_FD_FLAGS (O_CLOEXEC | O_ACCMODE) /* Currently no heap flags */ -#define DMA_HEAP_VALID_HEAP_FLAGS (0) +#define DMA_HEAP_VALID_HEAP_FLAGS (0ULL) /** * struct dma_heap_allocation_data - metadata passed from userspace for -- cgit v1.2.3
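Since the dma-heap fix above is purely about type widths, a short user-space sketch helps show the UAPI types the kernel internals are now aligned with: fd_flags is a __u32 and heap_flags a __u64, exactly as struct dma_heap_allocation_data declares them. This is an editor's illustration, not part of the patch; it assumes the common system heap is exposed as /dev/dma_heap/system and that the installed header is <linux/dma-heap.h>, and it trims error handling.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <linux/dma-heap.h>	/* struct dma_heap_allocation_data, DMA_HEAP_IOCTL_ALLOC */

int main(void)
{
	struct dma_heap_allocation_data alloc = {
		.len = 4096,
		.fd_flags = O_RDWR | O_CLOEXEC,	/* __u32; must stay within DMA_HEAP_VALID_FD_FLAGS */
		.heap_flags = 0,		/* __u64; DMA_HEAP_VALID_HEAP_FLAGS allows none today */
	};
	int heap_fd = open("/dev/dma_heap/system", O_RDWR | O_CLOEXEC);	/* assumed heap node */

	if (heap_fd < 0)
		return 1;

	if (ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &alloc) == 0) {
		printf("dma-buf fd %u, %llu bytes\n", alloc.fd,
		       (unsigned long long)alloc.len);
		close(alloc.fd);	/* the returned dma-buf fd */
	}

	close(heap_fd);
	return 0;
}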