| author | Dave Airlie <airlied@redhat.com> | 2023-10-18 16:08:07 +1000 |
|---|---|---|
| committer | Dave Airlie <airlied@redhat.com> | 2023-10-18 16:08:07 +1000 |
| commit | 27442758e9b4e083bef3f164a1739475c01f3202 (patch) | |
| tree | 58f4a82f1b9e41c11b6247d8f7b9c3c9b8b52711 /drivers/gpu/drm/amd/display/amdgpu_dm | |
| parent | Merge tag 'drm-habanalabs-next-2023-10-10' of https://git.kernel.org/pub/scm/... (diff) | |
| parent | drm/amdgpu/vkms: fix a possible null pointer dereference (diff) | |
Merge tag 'amd-drm-next-6.7-2023-10-13' of https://gitlab.freedesktop.org/agd5f/linux into drm-next
amd-drm-next-6.7-2023-10-13:
amdgpu:
- DC replay fixes
- Misc code cleanups and spelling fixes
- Documentation updates
- RAS EEPROM Updates
- FRU EEPROM Updates
- IP discovery updates
- SR-IOV fixes
- RAS updates
- DC PQ fixes
- SMU 13.0.6 updates
- GC 11.5 Support
- NBIO 7.11 Support
- GMC 11 Updates
- Reset fixes
- SMU 11.5 Updates
- SMU 13.0 OD support
- Use flexible arrays for bo list handling
- W=1 Fixes
- SubVP fixes
- DPIA fixes
- DCN 3.5 Support
- Devcoredump fixes
- VPE 6.1 support
- VCN 4.0 Updates
- S/G display fixes
- DML fixes
- DML2 Support
- MST fixes
- VRR fixes
- Enable seamless boot in more cases
- Enable content type property for HDMI (see the userspace sketch after this list)
- OLED fixes
- Rework and clean up GPUVM TLB flushing
- DC ODM fixes
- DP 2.x fixes
- AGP aperture fixes
- SDMA firmware loading cleanups
- Cyan Skillfish GPU clock counter fix
- GC 11 GART fix
- Cache GPU fault info for userspace queries
- DC cursor check fixes
- eDP fixes
- DC FP handling fixes
- Variable sized array fixes
- SMU 13.0.x fixes
- IB start and size alignment fixes for VCN
- SMU 14 Support
- Suspend and resume sequence rework
- vkms fix
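
The "Enable content type property for HDMI" item above wires the generic DRM "content type" connector property (No Data, Graphics, Photo, Cinema, Game) into amdgpu_dm, where the new get_output_content_type() helper in the diff below translates it into DC's DISPLAY_CONTENT_TYPE_* values. As a rough, hedged illustration of the userspace side only, the following libdrm sketch looks the property up by name and sets it on a connector; the device node and connector id are placeholders, and nothing in it is amdgpu-specific.

```c
/* build: cc set_content_type.c $(pkg-config --cflags --libs libdrm) */
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/*
 * Set the "content type" connector property by enum name ("No Data",
 * "Graphics", "Photo", "Cinema", "Game").  Returns 0 on success, -1 if the
 * property is missing or the set fails.  Caller is expected to be DRM master.
 */
static int set_content_type(int fd, uint32_t connector_id, const char *value_name)
{
	drmModeObjectProperties *props;
	int ret = -1;

	props = drmModeObjectGetProperties(fd, connector_id, DRM_MODE_OBJECT_CONNECTOR);
	if (!props)
		return -1;

	for (uint32_t i = 0; i < props->count_props && ret != 0; i++) {
		drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);

		if (prop && !strcmp(prop->name, "content type")) {
			/* Match the requested enum entry by name. */
			for (int j = 0; j < prop->count_enums; j++) {
				if (!strcmp(prop->enums[j].name, value_name)) {
					ret = drmModeObjectSetProperty(fd, connector_id,
								       DRM_MODE_OBJECT_CONNECTOR,
								       prop->prop_id,
								       prop->enums[j].value);
					break;
				}
			}
		}
		drmModeFreeProperty(prop);
	}

	drmModeFreeObjectProperties(props);
	return ret;
}

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* placeholder device node */

	if (fd < 0)
		return 1;
	/* 42 is a placeholder connector id; enumerate real ones with drmModeGetResources(). */
	set_content_type(fd, 42, "Game");
	close(fd);
	return 0;
}
```
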
amdkfd:
- GC 11 fixes
- GC 10 fixes
- Doorbell fixes
- CWSR fixes
- SVM fixes
- Clean up GC info enumeration
- Rework memory limit handling
- Coherent memory handling fixes
- Use partial migrations in GPU faults
- TLB flush fixes
- DMA unmap fixes
- GC 9.4.3 fixes
- SQ interrupt fix
- GTT mapping fix
- GC 11.5 Support
radeon:
- Misc code cleanups
- W=1 Fixes
- Fix possible buffer overflow
- Fix possible NULL pointer dereference
UAPI:
- Add EXT_COHERENT memory allocation flags. These allow for system scope atomics.
Proposed userspace: https://github.com/RadeonOpenCompute/ROCT-Thunk-Interface/pull/88
- Add support for new VPE engine. This is a memory-to-memory copy engine with advanced scaling, CSC (color space conversion), and color management features.
  Proposed Mesa MR: https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/25713
- Add INFO IOCTL interface to query GPU faults (see the sketch after this list)
Proposed Mesa MR: https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/23238
Proposed libdrm MR: https://gitlab.freedesktop.org/mesa/drm/-/merge_requests/298
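
To make the GPU fault query above concrete, here is a hedged userspace sketch that goes through the existing DRM_IOCTL_AMDGPU_INFO path. The AMDGPU_INFO_GPUVM_FAULT query id and the struct drm_amdgpu_info_gpuvm_fault layout are assumptions taken from the proposed UAPI/libdrm changes linked above; if your installed amdgpu_drm.h already carries them, its definitions take precedence.

```c
/* build: cc gpuvm_fault.c $(pkg-config --cflags libdrm) */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <amdgpu_drm.h>

#ifndef AMDGPU_INFO_GPUVM_FAULT
/* Assumed definitions from the proposed UAPI; the installed header wins if present. */
#define AMDGPU_INFO_GPUVM_FAULT 0x23
struct drm_amdgpu_info_gpuvm_fault {
	__u64 addr;	/* faulting GPU virtual address */
	__u64 status;	/* raw fault status bits */
	__u32 vmhub;	/* VM hub the fault was reported on */
};
#endif

int main(void)
{
	struct drm_amdgpu_info_gpuvm_fault fault;
	struct drm_amdgpu_info request;
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* placeholder render node */

	if (fd < 0)
		return 1;

	memset(&request, 0, sizeof(request));
	memset(&fault, 0, sizeof(fault));
	request.return_pointer = (__u64)(uintptr_t)&fault;
	request.return_size = sizeof(fault);
	request.query = AMDGPU_INFO_GPUVM_FAULT;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request) == 0)
		printf("last GPU VM fault: addr=0x%llx status=0x%llx vmhub=%u\n",
		       (unsigned long long)fault.addr,
		       (unsigned long long)fault.status,
		       fault.vmhub);

	close(fd);
	return 0;
}
```

The data returned here is what the "Cache GPU fault info for userspace queries" item in the amdgpu list above stores on the kernel side.
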
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231013175758.1735031-1-alexander.deucher@amd.com
Diffstat (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm')
9 files changed, 368 insertions, 218 deletions
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index c6fd34bab358..2ac940a38703 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -142,6 +142,9 @@ MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin" MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU); +#define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB); + /* Number of bytes in PSP header for firmware. */ #define PSP_HEADER_BYTES 0x100 @@ -410,6 +413,7 @@ static void dm_pflip_high_irq(void *interrupt_params) struct amdgpu_crtc *amdgpu_crtc; struct common_irq_params *irq_params = interrupt_params; struct amdgpu_device *adev = irq_params->adev; + struct drm_device *dev = adev_to_drm(adev); unsigned long flags; struct drm_pending_vblank_event *e; u32 vpos, hpos, v_blank_start, v_blank_end; @@ -420,18 +424,17 @@ static void dm_pflip_high_irq(void *interrupt_params) /* IRQ could occur when in initial stage */ /* TODO work and BO cleanup */ if (amdgpu_crtc == NULL) { - DC_LOG_PFLIP("CRTC is null, returning.\n"); + drm_dbg_state(dev, "CRTC is null, returning.\n"); return; } spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) { - DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n", - amdgpu_crtc->pflip_status, - AMDGPU_FLIP_SUBMITTED, - amdgpu_crtc->crtc_id, - amdgpu_crtc); + drm_dbg_state(dev, + "amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n", + amdgpu_crtc->pflip_status, AMDGPU_FLIP_SUBMITTED, + amdgpu_crtc->crtc_id, amdgpu_crtc); spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); return; } @@ -497,9 +500,9 @@ static void dm_pflip_high_irq(void *interrupt_params) amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); - DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n", - amdgpu_crtc->crtc_id, amdgpu_crtc, - vrr_active, (int) !e); + drm_dbg_state(dev, + "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n", + amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e); } static void dm_vupdate_high_irq(void *interrupt_params) @@ -529,9 +532,9 @@ static void dm_vupdate_high_irq(void *interrupt_params) atomic64_set(&irq_params->previous_timestamp, vblank->time); } - DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n", - acrtc->crtc_id, - vrr_active); + drm_dbg_vbl(drm_dev, + "crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id, + vrr_active); /* Core vblank handling is done here after end of front-porch in * vrr mode, as vblank timestamping will give valid results @@ -582,8 +585,9 @@ static void dm_crtc_high_irq(void *interrupt_params) vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc); - DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id, - vrr_active, acrtc->dm_irq_params.active_planes); + drm_dbg_vbl(adev_to_drm(adev), + "crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id, + vrr_active, acrtc->dm_irq_params.active_planes); /** * Core vblank handling at start of front-porch is only possible @@ -1069,6 +1073,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) const struct firmware *dmub_fw = adev->dm.dmub_fw; struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu; struct abm *abm = adev->dm.dc->res_pool->abm; + struct dc_context *ctx = adev->dm.dc->ctx; struct dmub_srv_hw_params hw_params; enum dmub_status status; const 
unsigned char *fw_inst_const, *fw_bss_data; @@ -1090,6 +1095,10 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) return -EINVAL; } + /* initialize register offsets for ASICs with runtime initialization available */ + if (dmub_srv->hw_funcs.init_reg_offsets) + dmub_srv->hw_funcs.init_reg_offsets(dmub_srv, ctx); + status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support); if (status != DMUB_STATUS_OK) { DRM_ERROR("Error checking HW support for DMUB: %d\n", status); @@ -1165,9 +1174,10 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) for (i = 0; i < fb_info->num_fb; ++i) hw_params.fb[i] = &fb_info->fb[i]; - switch (adev->ip_versions[DCE_HWIP][0]) { + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(3, 1, 3): case IP_VERSION(3, 1, 4): + case IP_VERSION(3, 5, 0): hw_params.dpia_supported = true; hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia; break; @@ -1246,7 +1256,7 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_ agp_top = adev->gmc.agp_end >> 24; /* AGP aperture is disabled */ - if (agp_bot == agp_top) { + if (agp_bot > agp_top) { logical_addr_low = adev->gmc.fb_start >> 18; if (adev->apu_flags & AMD_APU_IS_RAVEN2) /* @@ -1597,7 +1607,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_data.dce_environment = DCE_ENV_PRODUCTION_DRV; - switch (adev->ip_versions[DCE_HWIP][0]) { + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(2, 1, 0): switch (adev->dm.dmcub_fw_version) { case 0: /* development */ @@ -1616,40 +1626,23 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) break; } - switch (adev->asic_type) { - case CHIP_CARRIZO: - case CHIP_STONEY: - init_data.flags.gpu_vm_support = true; - break; - default: - switch (adev->ip_versions[DCE_HWIP][0]) { - case IP_VERSION(1, 0, 0): - case IP_VERSION(1, 0, 1): - /* enable S/G on PCO and RV2 */ - if ((adev->apu_flags & AMD_APU_IS_RAVEN2) || - (adev->apu_flags & AMD_APU_IS_PICASSO)) - init_data.flags.gpu_vm_support = true; - break; - case IP_VERSION(2, 1, 0): - case IP_VERSION(3, 0, 1): - case IP_VERSION(3, 1, 2): - case IP_VERSION(3, 1, 3): - case IP_VERSION(3, 1, 4): - case IP_VERSION(3, 1, 5): - case IP_VERSION(3, 1, 6): - init_data.flags.gpu_vm_support = true; - break; - default: - break; - } - break; - } - if (init_data.flags.gpu_vm_support && - (amdgpu_sg_display == 0)) + /* APU support S/G display by default except: + * ASICs before Carrizo, + * RAVEN1 (Users reported stability issue) + */ + + if (adev->asic_type < CHIP_CARRIZO) { init_data.flags.gpu_vm_support = false; + } else if (adev->asic_type == CHIP_RAVEN) { + if (adev->apu_flags & AMD_APU_IS_RAVEN) + init_data.flags.gpu_vm_support = false; + else + init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0); + } else { + init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU); + } - if (init_data.flags.gpu_vm_support) - adev->mode_info.gpu_vm_support = true; + init_data.flags.gpu_vm_support = adev->mode_info.gpu_vm_support; if (amdgpu_dc_feature_mask & DC_FBC_MASK) init_data.flags.fbc_support = true; @@ -1670,7 +1663,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_data.flags.seamless_boot_edp_requested = false; - if (check_seamless_boot_capability(adev)) { + if (amdgpu_device_seamless_boot_supported(adev)) { init_data.flags.seamless_boot_edp_requested = true; init_data.flags.allow_seamless_boot_optimization = true; DRM_INFO("Seamless boot condition check passed\n"); @@ -1680,6 +1673,7 @@ static int 
amdgpu_dm_init(struct amdgpu_device *adev) init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0]; init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0]; + init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0]; INIT_LIST_HEAD(&adev->dm.da_list); @@ -1692,8 +1686,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) DRM_INFO("Display Core v%s initialized on %s\n", DC_VER, dce_version_to_string(adev->dm.dc->ctx->dce_version)); } else { - DRM_INFO("Display Core v%s failed to initialize on %s\n", DC_VER, - dce_version_to_string(adev->dm.dc->ctx->dce_version)); + DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER); goto error; } @@ -2004,7 +1997,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev) return 0; break; default: - switch (adev->ip_versions[DCE_HWIP][0]) { + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(2, 0, 2): case IP_VERSION(2, 0, 3): case IP_VERSION(2, 0, 0): @@ -2020,6 +2013,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev) case IP_VERSION(3, 1, 6): case IP_VERSION(3, 2, 0): case IP_VERSION(3, 2, 1): + case IP_VERSION(3, 5, 0): return 0; default: break; @@ -2093,7 +2087,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) enum dmub_status status; int r; - switch (adev->ip_versions[DCE_HWIP][0]) { + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(2, 1, 0): dmub_asic = DMUB_ASIC_DCN21; break; @@ -2128,6 +2122,9 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) case IP_VERSION(3, 2, 1): dmub_asic = DMUB_ASIC_DCN321; break; + case IP_VERSION(3, 5, 0): + dmub_asic = DMUB_ASIC_DCN35; + break; default: /* ASIC doesn't support DMUB. */ return 0; @@ -2462,7 +2459,7 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) * therefore, this function apply to navi10/12/14 but not Renoir * * */ - switch (adev->ip_versions[DCE_HWIP][0]) { + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(2, 0, 2): case IP_VERSION(2, 0, 0): break; @@ -2648,6 +2645,8 @@ static int dm_suspend(void *handle) WARN_ON(adev->dm.cached_state); adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); + if (IS_ERR(adev->dm.cached_state)) + return PTR_ERR(adev->dm.cached_state); s3_handle_mst(adev_to_drm(adev), true); @@ -2685,6 +2684,7 @@ static void emulated_link_detect(struct dc_link *link) struct display_sink_capability sink_caps = { 0 }; enum dc_edid_status edid_status; struct dc_context *dc_ctx = link->ctx; + struct drm_device *dev = adev_to_drm(dc_ctx->driver_context); struct dc_sink *sink = NULL; struct dc_sink *prev_sink = NULL; @@ -2734,7 +2734,7 @@ static void emulated_link_detect(struct dc_link *link) } default: - DC_ERROR("Invalid connector type! signal:%d\n", + drm_err(dev, "Invalid connector type! 
signal:%d\n", link->connector_signal); return; } @@ -2744,7 +2744,7 @@ static void emulated_link_detect(struct dc_link *link) sink = dc_sink_create(&sink_init_data); if (!sink) { - DC_ERROR("Failed to create sink!\n"); + drm_err(dev, "Failed to create sink!\n"); return; } @@ -2757,7 +2757,7 @@ static void emulated_link_detect(struct dc_link *link) sink); if (edid_status != EDID_OK) - DC_ERROR("Failed to read EDID"); + drm_err(dev, "Failed to read EDID\n"); } @@ -2776,7 +2776,7 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state, bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); if (!bundle) { - dm_error("Failed to allocate update bundle\n"); + drm_err(dm->ddev, "Failed to allocate update bundle\n"); goto cleanup; } @@ -2822,6 +2822,10 @@ static int dm_resume(void *handle) int i, r, j, ret; bool need_hotplug = false; + if (dm->dc->caps.ips_support) { + dc_dmub_srv_exit_low_power_state(dm->dc); + } + if (amdgpu_in_reset(adev)) { dc_state = dm->cached_dc_state; @@ -2847,6 +2851,7 @@ static int dm_resume(void *handle) DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); + dc_resume(dm->dc); amdgpu_dm_irq_resume_early(adev); @@ -2895,7 +2900,7 @@ static int dm_resume(void *handle) } /* power on hardware */ - dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); + dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); /* program HPD filter */ dc_resume(dm->dc); @@ -3224,7 +3229,8 @@ void amdgpu_dm_update_connector_after_detect( aconnector->timing_requested = kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL); if (!aconnector->timing_requested) - dm_error("failed to create aconnector->requested_timing\n"); + drm_err(dev, + "failed to create aconnector->requested_timing\n"); } drm_connector_update_edid_property(connector, aconnector->edid); @@ -4410,7 +4416,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } /* Use Outbox interrupt */ - switch (adev->ip_versions[DCE_HWIP][0]) { + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(3, 0, 0): case IP_VERSION(3, 1, 2): case IP_VERSION(3, 1, 3): @@ -4420,6 +4426,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case IP_VERSION(3, 2, 0): case IP_VERSION(3, 2, 1): case IP_VERSION(2, 1, 0): + case IP_VERSION(3, 5, 0): if (register_outbox_irq_handlers(dm->adev)) { DRM_ERROR("DM: Failed to initialize IRQ\n"); goto fail; @@ -4427,12 +4434,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) break; default: DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n", - adev->ip_versions[DCE_HWIP][0]); + amdgpu_ip_version(adev, DCE_HWIP, 0)); } /* Determine whether to enable PSR support by default. 
*/ if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) { - switch (adev->ip_versions[DCE_HWIP][0]) { + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(3, 1, 2): case IP_VERSION(3, 1, 3): case IP_VERSION(3, 1, 4): @@ -4440,6 +4447,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case IP_VERSION(3, 1, 6): case IP_VERSION(3, 2, 0): case IP_VERSION(3, 2, 1): + case IP_VERSION(3, 5, 0): psr_feature_enabled = true; break; default: @@ -4564,7 +4572,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } break; default: - switch (adev->ip_versions[DCE_HWIP][0]) { + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(1, 0, 0): case IP_VERSION(1, 0, 1): case IP_VERSION(2, 0, 2): @@ -4582,6 +4590,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case IP_VERSION(3, 1, 6): case IP_VERSION(3, 2, 0): case IP_VERSION(3, 2, 1): + case IP_VERSION(3, 5, 0): if (dcn10_register_irq_handlers(dm->adev)) { DRM_ERROR("DM: Failed to initialize IRQ\n"); goto fail; @@ -4589,7 +4598,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) break; default: DRM_ERROR("Unsupported DCE IP versions: 0x%X\n", - adev->ip_versions[DCE_HWIP][0]); + amdgpu_ip_version(adev, DCE_HWIP, 0)); goto fail; } break; @@ -4672,14 +4681,14 @@ static int dm_init_microcode(struct amdgpu_device *adev) char *fw_name_dmub; int r; - switch (adev->ip_versions[DCE_HWIP][0]) { + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(2, 1, 0): fw_name_dmub = FIRMWARE_RENOIR_DMUB; if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id)) fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB; break; case IP_VERSION(3, 0, 0): - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0)) fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB; else fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB; @@ -4712,13 +4721,14 @@ static int dm_init_microcode(struct amdgpu_device *adev) case IP_VERSION(3, 2, 1): fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB; break; + case IP_VERSION(3, 5, 0): + fw_name_dmub = FIRMWARE_DCN_35_DMUB; + break; default: /* ASIC doesn't support DMUB. 
*/ return 0; } r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub); - if (r) - DRM_ERROR("DMUB firmware loading failed: %d\n", r); return r; } @@ -4806,7 +4816,7 @@ static int dm_early_init(void *handle) break; default: - switch (adev->ip_versions[DCE_HWIP][0]) { + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(2, 0, 2): case IP_VERSION(3, 0, 0): adev->mode_info.num_crtc = 6; @@ -4836,13 +4846,14 @@ static int dm_early_init(void *handle) case IP_VERSION(3, 1, 6): case IP_VERSION(3, 2, 0): case IP_VERSION(3, 2, 1): + case IP_VERSION(3, 5, 0): adev->mode_info.num_crtc = 4; adev->mode_info.num_hpd = 4; adev->mode_info.num_dig = 4; break; default: DRM_ERROR("Unsupported DCE IP versions: 0x%x\n", - adev->ip_versions[DCE_HWIP][0]); + amdgpu_ip_version(adev, DCE_HWIP, 0)); return -EINVAL; } break; @@ -5431,6 +5442,24 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing, return color_space; } +static enum display_content_type +get_output_content_type(const struct drm_connector_state *connector_state) +{ + switch (connector_state->content_type) { + default: + case DRM_MODE_CONTENT_TYPE_NO_DATA: + return DISPLAY_CONTENT_TYPE_NO_DATA; + case DRM_MODE_CONTENT_TYPE_GRAPHICS: + return DISPLAY_CONTENT_TYPE_GRAPHICS; + case DRM_MODE_CONTENT_TYPE_PHOTO: + return DISPLAY_CONTENT_TYPE_PHOTO; + case DRM_MODE_CONTENT_TYPE_CINEMA: + return DISPLAY_CONTENT_TYPE_CINEMA; + case DRM_MODE_CONTENT_TYPE_GAME: + return DISPLAY_CONTENT_TYPE_GAME; + } +} + static bool adjust_colour_depth_from_display_info( struct dc_crtc_timing *timing_out, const struct drm_display_info *info) @@ -5565,6 +5594,7 @@ static void fill_stream_properties_from_drm_display_mode( } stream->output_color_space = get_output_color_space(timing_out, connector_state); + stream->content_type = get_output_content_type(connector_state); } static void fill_audio_info(struct audio_info *audio_info, @@ -6115,10 +6145,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, requested_bpc); if (aconnector->timing_changed) { - DC_LOG_DEBUG("%s: overriding timing for automated test, bpc %d, changing to %d\n", - __func__, - stream->timing.display_color_depth, - aconnector->timing_requested->display_color_depth); + drm_dbg(aconnector->base.dev, + "overriding timing for automated test, bpc %d, changing to %d\n", + stream->timing.display_color_depth, + aconnector->timing_requested->display_color_depth); stream->timing = *aconnector->timing_requested; } @@ -6414,15 +6444,23 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector) static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); struct dc_link *dc_link = aconnector->dc_link; struct dc_sink *dc_em_sink = aconnector->dc_em_sink; struct edid *edid; - if (!connector->edid_override) + /* + * Note: drm_get_edid gets edid in the following order: + * 1) override EDID if set via edid_override debugfs, + * 2) firmware EDID if set via edid_firmware module parameter + * 3) regular DDC read. 
+ */ + edid = drm_get_edid(connector, &amdgpu_connector->ddc_bus->aux.ddc); + if (!edid) { + DRM_ERROR("No EDID found on connector: %s.\n", connector->name); return; + } - drm_edid_override_connector_update(&aconnector->base); - edid = aconnector->base.edid_blob_ptr->data; aconnector->edid = edid; /* Update emulated (virtual) sink's EDID */ @@ -6457,30 +6495,26 @@ static int get_modes(struct drm_connector *connector) static void create_eml_sink(struct amdgpu_dm_connector *aconnector) { + struct drm_connector *connector = &aconnector->base; + struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(&aconnector->base); struct dc_sink_init_data init_params = { .link = aconnector->dc_link, .sink_signal = SIGNAL_TYPE_VIRTUAL }; struct edid *edid; - if (!aconnector->base.edid_blob_ptr) { - /* if connector->edid_override valid, pass - * it to edid_override to edid_blob_ptr - */ - - drm_edid_override_connector_update(&aconnector->base); - - if (!aconnector->base.edid_blob_ptr) { - DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n", - aconnector->base.name); - - aconnector->base.force = DRM_FORCE_OFF; - return; - } + /* + * Note: drm_get_edid gets edid in the following order: + * 1) override EDID if set via edid_override debugfs, + * 2) firmware EDID if set via edid_firmware module parameter + * 3) regular DDC read. + */ + edid = drm_get_edid(connector, &amdgpu_connector->ddc_bus->aux.ddc); + if (!edid) { + DRM_ERROR("No EDID found on connector: %s.\n", connector->name); + return; } - edid = (struct edid *) aconnector->base.edid_blob_ptr->data; - aconnector->edid = edid; aconnector->dc_em_sink = dc_link_add_remote_sink( @@ -6661,6 +6695,8 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec goto fail; } + drm_mode_set_crtcinfo(mode, 0); + stream = create_validate_stream_for_sink(aconnector, mode, to_dm_connector_state(connector->state), NULL); @@ -6765,6 +6801,14 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn, new_crtc_state->mode_changed = true; } + if (new_con_state->content_type != old_con_state->content_type) { + new_crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(new_crtc_state)) + return PTR_ERR(new_crtc_state); + + new_crtc_state->mode_changed = true; + } + if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) { struct dc_info_packet hdr_infopacket; @@ -7401,6 +7445,11 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, } if (connector_type == DRM_MODE_CONNECTOR_HDMIA) { + /* Content Type is currently only implemented for HDMI. 
*/ + drm_connector_attach_content_type_property(&aconnector->base); + } + + if (connector_type == DRM_MODE_CONNECTOR_HDMIA) { if (!drm_mode_create_hdmi_colorspace_property(&aconnector->base, supported_colorspaces)) drm_connector_attach_colorspace_property(&aconnector->base); } else if ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root) || @@ -7823,8 +7872,9 @@ static void prepare_flip_isr(struct amdgpu_crtc *acrtc) /* Mark this event as consumed */ acrtc->base.state->event = NULL; - DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n", - acrtc->crtc_id); + drm_dbg_state(acrtc->base.dev, + "crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n", + acrtc->crtc_id); } static void update_freesync_state_on_stream( @@ -8067,7 +8117,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); if (!bundle) { - dm_error("Failed to allocate update bundle\n"); + drm_err(dev, "Failed to allocate update bundle\n"); goto cleanup; } @@ -8655,7 +8705,9 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, status = dc_stream_get_status_from_state(dc_state, dm_new_crtc_state->stream); if (!status) - DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc); + drm_err(dev, + "got no status for stream %p on acrtc%p\n", + dm_new_crtc_state->stream, acrtc); else acrtc->otg_inst = status->primary_otg_inst; } @@ -8689,6 +8741,17 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) trace_amdgpu_dm_atomic_commit_tail_begin(state); + if (dm->dc->caps.ips_support) { + for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { + if (new_con_state->crtc && + new_con_state->crtc->state->active && + drm_atomic_crtc_needs_modeset(new_con_state->crtc->state)) { + dc_dmub_srv_exit_low_power_state(dm->dc); + break; + } + } + } + drm_atomic_helper_update_legacy_modeset_state(dev, state); drm_dp_mst_atomic_wait_for_dependencies(state); @@ -9892,16 +9955,27 @@ static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state, } } +static void +dm_get_plane_scale(struct drm_plane_state *plane_state, + int *out_plane_scale_w, int *out_plane_scale_h) +{ + int plane_src_w, plane_src_h; + + dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h); + *out_plane_scale_w = plane_state->crtc_w * 1000 / plane_src_w; + *out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h; +} + static int dm_check_crtc_cursor(struct drm_atomic_state *state, struct drm_crtc *crtc, struct drm_crtc_state *new_crtc_state) { - struct drm_plane *cursor = crtc->cursor, *underlying; + struct drm_plane *cursor = crtc->cursor, *plane, *underlying; + struct drm_plane_state *old_plane_state, *new_plane_state; struct drm_plane_state *new_cursor_state, *new_underlying_state; int i; int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h; - int cursor_src_w, cursor_src_h; - int underlying_src_w, underlying_src_h; + bool any_relevant_change = false; /* On DCE and DCN there is no dedicated hardware cursor plane. We get a * cursor per pipe but it's going to inherit the scaling and @@ -9909,13 +9983,50 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state, * blending properties match the underlying planes'. 
*/ - new_cursor_state = drm_atomic_get_new_plane_state(state, cursor); - if (!new_cursor_state || !new_cursor_state->fb) + /* If no plane was enabled or changed scaling, no need to check again */ + for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { + int new_scale_w, new_scale_h, old_scale_w, old_scale_h; + + if (!new_plane_state || !new_plane_state->fb || new_plane_state->crtc != crtc) + continue; + + if (!old_plane_state || !old_plane_state->fb || old_plane_state->crtc != crtc) { + any_relevant_change = true; + break; + } + + if (new_plane_state->fb == old_plane_state->fb && + new_plane_state->crtc_w == old_plane_state->crtc_w && + new_plane_state->crtc_h == old_plane_state->crtc_h) + continue; + + dm_get_plane_scale(new_plane_state, &new_scale_w, &new_scale_h); + dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h); + + if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) { + any_relevant_change = true; + break; + } + } + + if (!any_relevant_change) + return 0; + + new_cursor_state = drm_atomic_get_plane_state(state, cursor); + if (IS_ERR(new_cursor_state)) + return PTR_ERR(new_cursor_state); + + if (!new_cursor_state->fb) return 0; - dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h); - cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w; - cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h; + dm_get_plane_scale(new_cursor_state, &cursor_scale_w, &cursor_scale_h); + + /* Need to check all enabled planes, even if this commit doesn't change + * their state + */ + i = drm_atomic_add_affected_planes(state, crtc); + if (i) + return i; for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) { /* Narrow down to non-cursor planes on the same CRTC as the cursor */ @@ -9926,10 +10037,8 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state, if (!new_underlying_state->fb) continue; - dm_get_oriented_plane_size(new_underlying_state, - &underlying_src_w, &underlying_src_h); - underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w; - underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h; + dm_get_plane_scale(new_underlying_state, + &underlying_scale_w, &underlying_scale_h); if (cursor_scale_w != underlying_scale_w || cursor_scale_h != underlying_scale_h) { @@ -10832,7 +10941,8 @@ void dm_write_reg_func(const struct dc_context *ctx, uint32_t address, { #ifdef DM_CHECK_ADDR_0 if (address == 0) { - DC_ERR("invalid register write. address = 0"); + drm_err(adev_to_drm(ctx->driver_context), + "invalid register write. address = 0"); return; } #endif @@ -10846,7 +10956,8 @@ uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address, u32 value; #ifdef DM_CHECK_ADDR_0 if (address == 0) { - DC_ERR("invalid register read; address = 0\n"); + drm_err(adev_to_drm(ctx->driver_context), + "invalid register read; address = 0\n"); return 0; } #endif @@ -10956,27 +11067,6 @@ int amdgpu_dm_process_dmub_set_config_sync( return ret; } -/* - * Check whether seamless boot is supported. - * - * So far we only support seamless boot on CHIP_VANGOGH. - * If everything goes well, we may consider expanding - * seamless boot to other ASICs. 
- */ -bool check_seamless_boot_capability(struct amdgpu_device *adev) -{ - switch (adev->ip_versions[DCE_HWIP][0]) { - case IP_VERSION(3, 0, 1): - if (!adev->mman.keep_stolen_vga_memory) - return true; - break; - default: - break; - } - - return false; -} - bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) { return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 9e4cc5eeda76..3d480be802cb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -825,8 +825,6 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned in int amdgpu_dm_process_dmub_set_config_sync(struct dc_context *ctx, unsigned int link_index, struct set_config_cmd_payload *payload, enum set_config_status *operation_result); -bool check_seamless_boot_capability(struct amdgpu_device *adev); - struct dc_stream_state * create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, const struct drm_display_mode *drm_mode, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 7c21e21bcc51..1259d6351c50 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -37,6 +37,7 @@ #include "link_hwss.h" #include "dc/dc_dmub_srv.h" #include "link/protocols/link_dp_capability.h" +#include "inc/hw/dchubbub.h" #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY #include "amdgpu_dm_psr.h" @@ -1201,6 +1202,35 @@ static int internal_display_show(struct seq_file *m, void *data) return 0; } +/* + * Returns the number of segments used if ODM Combine mode is enabled. 
+ * Example usage: cat /sys/kernel/debug/dri/0/DP-1/odm_combine_segments + */ +static int odm_combine_segments_show(struct seq_file *m, void *unused) +{ + struct drm_connector *connector = m->private; + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct dc_link *link = aconnector->dc_link; + struct pipe_ctx *pipe_ctx = NULL; + int i, segments = -EOPNOTSUPP; + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx->stream && + pipe_ctx->stream->link == link) + break; + } + + if (connector->status != connector_status_connected) + return -ENODEV; + + if (pipe_ctx != NULL && pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments) + pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments(pipe_ctx->stream_res.tg, &segments); + + seq_printf(m, "%d\n", segments); + return 0; +} + /* function description * * generic SDP message access for testing @@ -2713,6 +2743,7 @@ DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer); DEFINE_SHOW_ATTRIBUTE(dp_lttpr_status); DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability); DEFINE_SHOW_ATTRIBUTE(internal_display); +DEFINE_SHOW_ATTRIBUTE(odm_combine_segments); DEFINE_SHOW_ATTRIBUTE(psr_capability); DEFINE_SHOW_ATTRIBUTE(dp_is_mst_connector); DEFINE_SHOW_ATTRIBUTE(dp_mst_progress_status); @@ -2991,7 +3022,8 @@ static const struct { } connector_debugfs_entries[] = { {"force_yuv420_output", &force_yuv420_output_fops}, {"trigger_hotplug", &trigger_hotplug_debugfs_fops}, - {"internal_display", &internal_display_fops} + {"internal_display", &internal_display_fops}, + {"odm_combine_segments", &odm_combine_segments_fops} }; /* @@ -3606,6 +3638,28 @@ DEFINE_DEBUGFS_ATTRIBUTE(disable_hpd_ops, disable_hpd_get, disable_hpd_set, "%llu\n"); /* + * Prints hardware capabilities. These are used for IGT testing. + */ +static int capabilities_show(struct seq_file *m, void *unused) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)m->private; + struct dc *dc = adev->dm.dc; + bool mall_supported = dc->caps.mall_size_total; + unsigned int mall_in_use = false; + struct hubbub *hubbub = dc->res_pool->hubbub; + + if (hubbub->funcs->get_mall_en) + hubbub->funcs->get_mall_en(hubbub, &mall_in_use); + + seq_printf(m, "mall supported: %s, enabled: %s\n", + mall_supported ? "yes" : "no", mall_in_use ? 
"yes" : "no"); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(capabilities); + +/* * Temporary w/a to force sst sequence in M42D DP2 mst receiver * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dp_set_mst_en_for_sst */ @@ -3798,6 +3852,8 @@ void dtn_debugfs_init(struct amdgpu_device *adev) debugfs_create_file("amdgpu_mst_topology", 0444, root, adev, &mst_topo_fops); + debugfs_create_file("amdgpu_dm_capabilities", 0444, root, + adev, &capabilities_fops); debugfs_create_file("amdgpu_dm_dtn_log", 0644, root, adev, &dtn_log_fops); debugfs_create_file("amdgpu_dm_dp_set_mst_en_for_sst", 0644, root, adev, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index 20cfc5be21a4..b54d646a7c73 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -740,6 +740,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, if (dc->ctx->dce_version == DCN_VERSION_3_1 || dc->ctx->dce_version == DCN_VERSION_3_14 || dc->ctx->dce_version == DCN_VERSION_3_15 || + dc->ctx->dce_version == DCN_VERSION_3_5 || dc->ctx->dce_version == DCN_VERSION_3_16) hdcp_work[i].hdcp.config.psp.caps.dtm_v3_supported = 1; hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 2f94bcf128c0..9cd83b780102 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -537,7 +537,8 @@ bool dm_helpers_dp_read_dpcd( struct amdgpu_dm_connector *aconnector = link->priv; if (!aconnector) { - DC_LOG_DC("Failed to find connector for link!\n"); + drm_dbg_dp(aconnector->base.dev, + "Failed to find connector for link!\n"); return false; } @@ -659,7 +660,7 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, drm_dp_dpcd_read(aux, SYNAPTICS_RC_DATA, data, length); } - DC_LOG_DC("%s: success = %d\n", __func__, success); + drm_dbg_dp(aux->drm_dev, "success = %d\n", success); return success; } @@ -668,7 +669,7 @@ static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux) { unsigned char data[16] = {0}; - DC_LOG_DC("Start %s\n", __func__); + drm_dbg_dp(aux->drm_dev, "Start\n"); // Step 2 data[0] = 'P'; @@ -726,7 +727,7 @@ static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux) if (!execute_synaptics_rc_command(aux, true, 0x02, 0, 0, NULL)) return; - DC_LOG_DC("Done %s\n", __func__); + drm_dbg_dp(aux->drm_dev, "Done\n"); } /* MST Dock */ @@ -739,7 +740,8 @@ static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst( { uint8_t ret = 0; - DC_LOG_DC("Configure DSC to non-virtual dpcd synaptics\n"); + drm_dbg_dp(aux->drm_dev, + "Configure DSC to non-virtual dpcd synaptics\n"); if (enable) { /* When DSC is enabled on previous boot and reboot with the hub, @@ -777,7 +779,9 @@ bool dm_helpers_dp_write_dsc_enable( static const uint8_t DSC_DECODING = 0x01; static const uint8_t DSC_PASSTHROUGH = 0x02; - struct amdgpu_dm_connector *aconnector; + struct amdgpu_dm_connector *aconnector = + (struct amdgpu_dm_connector *)stream->dm_stream_context; + struct drm_device *dev = aconnector->base.dev; struct drm_dp_mst_port *port; uint8_t enable_dsc = enable ? DSC_DECODING : DSC_DISABLE; uint8_t enable_passthrough = enable ? 
DSC_PASSTHROUGH : DSC_DISABLE; @@ -787,8 +791,6 @@ bool dm_helpers_dp_write_dsc_enable( return false; if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; - if (!aconnector->dsc_aux) return false; @@ -805,30 +807,34 @@ bool dm_helpers_dp_write_dsc_enable( ret = drm_dp_dpcd_write(port->passthrough_aux, DP_DSC_ENABLE, &enable_passthrough, 1); - DC_LOG_DC("Sent DSC pass-through enable to virtual dpcd port, ret = %u\n", - ret); + drm_dbg_dp(dev, + "Sent DSC pass-through enable to virtual dpcd port, ret = %u\n", + ret); } ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1); - DC_LOG_DC("Sent DSC decoding enable to %s port, ret = %u\n", - (port->passthrough_aux) ? "remote RX" : - "virtual dpcd", - ret); + drm_dbg_dp(dev, + "Sent DSC decoding enable to %s port, ret = %u\n", + (port->passthrough_aux) ? "remote RX" : + "virtual dpcd", + ret); } else { ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1); - DC_LOG_DC("Sent DSC decoding disable to %s port, ret = %u\n", - (port->passthrough_aux) ? "remote RX" : - "virtual dpcd", - ret); + drm_dbg_dp(dev, + "Sent DSC decoding disable to %s port, ret = %u\n", + (port->passthrough_aux) ? "remote RX" : + "virtual dpcd", + ret); if (port->passthrough_aux) { ret = drm_dp_dpcd_write(port->passthrough_aux, DP_DSC_ENABLE, &enable_passthrough, 1); - DC_LOG_DC("Sent DSC pass-through disable to virtual dpcd port, ret = %u\n", - ret); + drm_dbg_dp(dev, + "Sent DSC pass-through disable to virtual dpcd port, ret = %u\n", + ret); } } } @@ -836,10 +842,14 @@ bool dm_helpers_dp_write_dsc_enable( if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) { if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) { ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); - DC_LOG_DC("Send DSC %s to SST RX\n", enable_dsc ? "enable" : "disable"); + drm_dbg_dp(dev, + "Send DSC %s to SST RX\n", + enable_dsc ? "enable" : "disable"); } else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); - DC_LOG_DC("Send DSC %s to DP-HDMI PCON\n", enable_dsc ? "enable" : "disable"); + drm_dbg_dp(dev, + "Send DSC %s to DP-HDMI PCON\n", + enable_dsc ? 
"enable" : "disable"); } } @@ -1108,6 +1118,7 @@ bool dm_helpers_dp_handle_test_pattern_request( struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; struct pipe_ctx *pipe_ctx = NULL; struct amdgpu_dm_connector *aconnector = link->priv; + struct drm_device *dev = aconnector->base.dev; int i; for (i = 0; i < MAX_PIPES; i++) { @@ -1185,12 +1196,12 @@ bool dm_helpers_dp_handle_test_pattern_request( && pipe_ctx->stream->timing.display_color_depth != requestColorDepth) || (requestPixelEncoding != PIXEL_ENCODING_UNDEFINED && pipe_ctx->stream->timing.pixel_encoding != requestPixelEncoding)) { - DC_LOG_DEBUG("%s: original bpc %d pix encoding %d, changing to %d %d\n", - __func__, - pipe_ctx->stream->timing.display_color_depth, - pipe_ctx->stream->timing.pixel_encoding, - requestColorDepth, - requestPixelEncoding); + drm_dbg(dev, + "original bpc %d pix encoding %d, changing to %d %d\n", + pipe_ctx->stream->timing.display_color_depth, + pipe_ctx->stream->timing.pixel_encoding, + requestColorDepth, + requestPixelEncoding); pipe_ctx->stream->timing.display_color_depth = requestColorDepth; pipe_ctx->stream->timing.pixel_encoding = requestPixelEncoding; @@ -1201,7 +1212,7 @@ bool dm_helpers_dp_handle_test_pattern_request( if (aconnector->timing_requested) *aconnector->timing_requested = pipe_ctx->stream->timing; else - DC_LOG_ERROR("%s: timing storage failed\n", __func__); + drm_err(dev, "timing storage failed\n"); } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 57230661132b..d3b13d362eda 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -173,8 +173,9 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector) if (dc_link->sink_count) dc_link_remove_remote_sink(dc_link, dc_sink); - DC_LOG_MST("DM_MST: remove remote sink 0x%p, %d remaining\n", - dc_sink, dc_link->sink_count); + drm_dbg_dp(connector->dev, + "DM_MST: remove remote sink 0x%p, %d remaining\n", + dc_sink, dc_link->sink_count); dc_sink_release(dc_sink); aconnector->dc_sink = NULL; @@ -325,8 +326,10 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) return 0; } - DC_LOG_MST("DM_MST: add remote sink 0x%p, %d remaining\n", - dc_sink, aconnector->dc_link->sink_count); + drm_dbg_dp(connector->dev, + "DM_MST: add remote sink 0x%p, %d remaining\n", + dc_sink, + aconnector->dc_link->sink_count); dc_sink->priv = aconnector; aconnector->dc_sink = dc_sink; @@ -361,8 +364,9 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) return 0; } - DC_LOG_MST("DM_MST: add remote sink 0x%p, %d remaining\n", - dc_sink, aconnector->dc_link->sink_count); + drm_dbg_dp(connector->dev, + "DM_MST: add remote sink 0x%p, %d remaining\n", + dc_sink, aconnector->dc_link->sink_count); dc_sink->priv = aconnector; /* dc_link_add_remote_sink returns a new reference */ @@ -481,8 +485,10 @@ dm_dp_mst_detect(struct drm_connector *connector, if (aconnector->dc_link->sink_count) dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink); - DC_LOG_MST("DM_MST: remove remote sink 0x%p, %d remaining\n", - aconnector->dc_link, aconnector->dc_link->sink_count); + drm_dbg_dp(connector->dev, + "DM_MST: remove remote sink 0x%p, %d remaining\n", + aconnector->dc_link, + aconnector->dc_link->sink_count); dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c index cc74dd69acf2..03df26bd8e83 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -226,7 +226,7 @@ static void fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev, tiling_info->gfx9.num_rb_per_se = adev->gfx.config.gb_addr_config_fields.num_rb_per_se; tiling_info->gfx9.shaderEnable = 1; - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0)) tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs; } @@ -669,13 +669,14 @@ static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_ty case AMDGPU_FAMILY_YC: case AMDGPU_FAMILY_GC_10_3_6: case AMDGPU_FAMILY_GC_10_3_7: - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0)) add_gfx10_3_modifiers(adev, mods, &size, &capacity); else add_gfx10_1_modifiers(adev, mods, &size, &capacity); break; case AMDGPU_FAMILY_GC_11_0_0: case AMDGPU_FAMILY_GC_11_0_1: + case AMDGPU_FAMILY_GC_11_5_0: add_gfx11_modifiers(adev, mods, &size, &capacity); break; } @@ -1068,8 +1069,8 @@ int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev, * is to gesture the YouTube Android app into full screen * on ChromeOS. */ - if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) || - (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) && + if (((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) || + (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1))) && (state->fb && state->fb->format->format == DRM_FORMAT_NV12 && (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0))) return -EINVAL; @@ -1236,11 +1237,9 @@ void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane, if (!plane->state->fb && !old_plane_state->fb) return; - DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n", - __func__, - amdgpu_crtc->crtc_id, - plane->state->crtc_w, - plane->state->crtc_h); + drm_dbg_atomic(plane->dev, "crtc_id=%d with size %d to %d\n", + amdgpu_crtc->crtc_id, plane->state->crtc_w, + plane->state->crtc_h); ret = get_cursor_position(plane, crtc, &position); if (ret) @@ -1508,7 +1507,7 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0, supported_rotations); - if (dm->adev->ip_versions[DCE_HWIP][0] > IP_VERSION(3, 0, 1) && + if (amdgpu_ip_version(dm->adev, DCE_HWIP, 0) > IP_VERSION(3, 0, 1) && plane->type != DRM_PLANE_TYPE_CURSOR) drm_plane_enable_fb_damage_clips(plane); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c index 32d3086c4cb7..5ce542b1f860 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c @@ -91,7 +91,7 @@ bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *ac pr_config.replay_supported = true; pr_config.replay_power_opt_supported = 0; pr_config.replay_enable_option |= pr_enable_option_static_screen; - pr_config.replay_timing_sync_supported = aconnector->max_vfreq >= 2 * aconnector->min_vfreq ? 
true : false; + pr_config.replay_timing_sync_supported = aconnector->max_vfreq >= 2 * aconnector->min_vfreq; if (!pr_config.replay_timing_sync_supported) pr_config.replay_enable_option &= ~pr_enable_option_general_ui; @@ -99,7 +99,7 @@ bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *ac debug_flags = (union replay_debug_flags *)&pr_config.debug_flags; debug_flags->u32All = 0; debug_flags->bitfields.visual_confirm = - link->ctx->dc->debug.visual_confirm == VISUAL_CONFIRM_REPLAY ? true : false; + link->ctx->dc->debug.visual_confirm == VISUAL_CONFIRM_REPLAY; link->replay_settings.replay_feature_enabled = true; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c index 172aa10a8800..4ae4720535a5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c @@ -60,11 +60,9 @@ static DEFINE_PER_CPU(int, fpu_recursion_depth); */ inline void dc_assert_fp_enabled(void) { - int *pcpu, depth = 0; + int depth; - pcpu = get_cpu_ptr(&fpu_recursion_depth); - depth = *pcpu; - put_cpu_ptr(&fpu_recursion_depth); + depth = __this_cpu_read(fpu_recursion_depth); ASSERT(depth >= 1); } @@ -84,33 +82,28 @@ inline void dc_assert_fp_enabled(void) */ void dc_fpu_begin(const char *function_name, const int line) { - int *pcpu; + int depth; - pcpu = get_cpu_ptr(&fpu_recursion_depth); - *pcpu += 1; + WARN_ON_ONCE(!in_task()); + preempt_disable(); + depth = __this_cpu_inc_return(fpu_recursion_depth); - if (*pcpu == 1) { + if (depth == 1) { #if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH) - migrate_disable(); kernel_fpu_begin(); #elif defined(CONFIG_PPC64) - if (cpu_has_feature(CPU_FTR_VSX_COMP)) { - preempt_disable(); + if (cpu_has_feature(CPU_FTR_VSX_COMP)) enable_kernel_vsx(); - } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) { - preempt_disable(); + else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) enable_kernel_altivec(); - } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) { - preempt_disable(); + else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) enable_kernel_fp(); - } #elif defined(CONFIG_ARM64) kernel_neon_begin(); #endif } - TRACE_DCN_FPU(true, function_name, line, *pcpu); - put_cpu_ptr(&fpu_recursion_depth); + TRACE_DCN_FPU(true, function_name, line, depth); } /** @@ -125,30 +118,26 @@ void dc_fpu_begin(const char *function_name, const int line) */ void dc_fpu_end(const char *function_name, const int line) { - int *pcpu; + int depth; - pcpu = get_cpu_ptr(&fpu_recursion_depth); - *pcpu -= 1; - if (*pcpu <= 0) { + depth = __this_cpu_dec_return(fpu_recursion_depth); + if (depth == 0) { #if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH) kernel_fpu_end(); - migrate_enable(); #elif defined(CONFIG_PPC64) - if (cpu_has_feature(CPU_FTR_VSX_COMP)) { + if (cpu_has_feature(CPU_FTR_VSX_COMP)) disable_kernel_vsx(); - preempt_enable(); - } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) { + else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) disable_kernel_altivec(); - preempt_enable(); - } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) { + else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) disable_kernel_fp(); - preempt_enable(); - } #elif defined(CONFIG_ARM64) kernel_neon_end(); #endif + } else { + WARN_ON_ONCE(depth < 0); } - TRACE_DCN_FPU(false, function_name, line, *pcpu); - put_cpu_ptr(&fpu_recursion_depth); + TRACE_DCN_FPU(false, function_name, line, depth); + preempt_enable(); } |
