summaryrefslogtreecommitdiffstats
path: root/drivers/gpu
diff options
context:
space:
mode:
authorNicholas Kazlauskas <nicholas.kazlauskas@amd.com>2026-03-03 09:48:37 -0500
committerAlex Deucher <alexander.deucher@amd.com>2026-03-17 10:35:54 -0400
commitbeb8e35e2b4b799dfedebbd48cd6ad2119cf200c (patch)
treeae7671e00086f3c830628c323ab20cb183f72c52 /drivers/gpu
parentfabd89fc17fdfd1225afd69ca883fbd30226b4c9 (diff)
downloadlinux-beb8e35e2b4b799dfedebbd48cd6ad2119cf200c.tar.gz
linux-beb8e35e2b4b799dfedebbd48cd6ad2119cf200c.zip
drm/amd/display: Plumb MRQ programming out of DML for dml2_1
[Why]
If the MRQ is present then these fields are also required to be plumbed out
to the requestor for programming.

[How]
Pipe the fields out through rq_dlg_get_rq_reg. The implementation follows
the previous generation in dml2_0 for DCN35 but adjusted for the new
helpers and coding style of dml2_1.

Reviewed-by: Dillon Varone <dillon.varone@amd.com>
Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
Signed-off-by: Chuanyu Tseng <chuanyu.tseng@amd.com>
Tested-by: Dan Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c22
1 files changed, 22 insertions, 0 deletions
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index ca5ac3c0deb5..b30d16474ceb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -12262,11 +12262,15 @@ static void rq_dlg_get_rq_reg(struct dml2_display_rq_regs *rq_regs,
unsigned int pixel_chunk_bytes = 0;
unsigned int min_pixel_chunk_bytes = 0;
+ unsigned int meta_chunk_bytes = 0;
+ unsigned int min_meta_chunk_bytes = 0;
unsigned int dpte_group_bytes = 0;
unsigned int mpte_group_bytes = 0;
unsigned int p1_pixel_chunk_bytes = 0;
unsigned int p1_min_pixel_chunk_bytes = 0;
+ unsigned int p1_meta_chunk_bytes = 0;
+ unsigned int p1_min_meta_chunk_bytes = 0;
unsigned int p1_dpte_group_bytes = 0;
unsigned int p1_mpte_group_bytes = 0;
@@ -12287,8 +12291,13 @@ static void rq_dlg_get_rq_reg(struct dml2_display_rq_regs *rq_regs,
dpte_group_bytes = (unsigned int)(dml_get_dpte_group_size_in_bytes(mode_lib, pipe_idx));
mpte_group_bytes = (unsigned int)(dml_get_vm_group_size_in_bytes(mode_lib, pipe_idx));
+ meta_chunk_bytes = (unsigned int)(mode_lib->ip.meta_chunk_size_kbytes * 1024);
+ min_meta_chunk_bytes = (unsigned int)(mode_lib->ip.min_meta_chunk_size_bytes);
+
p1_pixel_chunk_bytes = pixel_chunk_bytes;
p1_min_pixel_chunk_bytes = min_pixel_chunk_bytes;
+ p1_meta_chunk_bytes = meta_chunk_bytes;
+ p1_min_meta_chunk_bytes = min_meta_chunk_bytes;
p1_dpte_group_bytes = dpte_group_bytes;
p1_mpte_group_bytes = mpte_group_bytes;
@@ -12309,6 +12318,19 @@ static void rq_dlg_get_rq_reg(struct dml2_display_rq_regs *rq_regs,
else
rq_regs->rq_regs_c.min_chunk_size = log_and_substract_if_non_zero(p1_min_pixel_chunk_bytes, 8 - 1);
+ rq_regs->rq_regs_l.meta_chunk_size = log_and_substract_if_non_zero(meta_chunk_bytes, 10);
+ rq_regs->rq_regs_c.meta_chunk_size = log_and_substract_if_non_zero(p1_meta_chunk_bytes, 10);
+
+ if (min_meta_chunk_bytes == 0)
+ rq_regs->rq_regs_l.min_meta_chunk_size = 0;
+ else
+ rq_regs->rq_regs_l.min_meta_chunk_size = log_and_substract_if_non_zero(min_meta_chunk_bytes, 6 - 1);
+
+ if (min_meta_chunk_bytes == 0)
+ rq_regs->rq_regs_c.min_meta_chunk_size = 0;
+ else
+ rq_regs->rq_regs_c.min_meta_chunk_size = log_and_substract_if_non_zero(p1_min_meta_chunk_bytes, 6 - 1);
+
rq_regs->rq_regs_l.dpte_group_size = log_and_substract_if_non_zero(dpte_group_bytes, 6);
rq_regs->rq_regs_l.mpte_group_size = log_and_substract_if_non_zero(mpte_group_bytes, 6);
rq_regs->rq_regs_c.dpte_group_size = log_and_substract_if_non_zero(p1_dpte_group_bytes, 6);